author    blogic <blogic@3c298f89-4303-0410-b956-a3cf2f4a3e73>    2012-10-05 10:12:53 +0000
committer blogic <blogic@3c298f89-4303-0410-b956-a3cf2f4a3e73>    2012-10-05 10:12:53 +0000
commit    5c105d9f3fd086aff195d3849dcf847d6b0bd927 (patch)
tree      1229a11f725bfa58aa7c57a76898553bb5f6654a /target/linux/generic
download  openwrt-5c105d9f3fd086aff195d3849dcf847d6b0bd927.tar.gz
          openwrt-5c105d9f3fd086aff195d3849dcf847d6b0bd927.zip
branch Attitude Adjustment
git-svn-id: svn://svn.openwrt.org/openwrt/branches/attitude_adjustment@33625 3c298f89-4303-0410-b956-a3cf2f4a3e73
Diffstat (limited to 'target/linux/generic')
-rw-r--r--target/linux/generic/PATCHES15
-rwxr-xr-xtarget/linux/generic/base-files/init96
-rw-r--r--target/linux/generic/config-3.33473
-rw-r--r--target/linux/generic/files/Documentation/networking/adm6996.txt110
-rw-r--r--target/linux/generic/files/Documentation/pwm.txt260
-rw-r--r--target/linux/generic/files/arch/mips/fw/myloader/Makefile5
-rw-r--r--target/linux/generic/files/arch/mips/fw/myloader/myloader.c63
-rw-r--r--target/linux/generic/files/crypto/ocf/Config.in38
-rw-r--r--target/linux/generic/files/crypto/ocf/Kconfig125
-rw-r--r--target/linux/generic/files/crypto/ocf/Makefile148
-rw-r--r--target/linux/generic/files/crypto/ocf/c7108/Makefile12
-rw-r--r--target/linux/generic/files/crypto/ocf/c7108/aes-7108.c841
-rw-r--r--target/linux/generic/files/crypto/ocf/c7108/aes-7108.h134
-rw-r--r--target/linux/generic/files/crypto/ocf/criov.c215
-rw-r--r--target/linux/generic/files/crypto/ocf/crypto.c1766
-rw-r--r--target/linux/generic/files/crypto/ocf/cryptocteon/Makefile17
-rw-r--r--target/linux/generic/files/crypto/ocf/cryptocteon/README.txt11
-rw-r--r--target/linux/generic/files/crypto/ocf/cryptocteon/cavium_crypto.c2283
-rw-r--r--target/linux/generic/files/crypto/ocf/cryptocteon/cryptocteon.c576
-rw-r--r--target/linux/generic/files/crypto/ocf/cryptodev.c1069
-rw-r--r--target/linux/generic/files/crypto/ocf/cryptodev.h480
-rw-r--r--target/linux/generic/files/crypto/ocf/cryptosoft.c1322
-rw-r--r--target/linux/generic/files/crypto/ocf/ep80579/Makefile119
-rw-r--r--target/linux/generic/files/crypto/ocf/ep80579/environment.mk78
-rw-r--r--target/linux/generic/files/crypto/ocf/ep80579/icp_asym.c1334
-rw-r--r--target/linux/generic/files/crypto/ocf/ep80579/icp_common.c773
-rw-r--r--target/linux/generic/files/crypto/ocf/ep80579/icp_ocf.h376
-rw-r--r--target/linux/generic/files/crypto/ocf/ep80579/icp_sym.c1153
-rw-r--r--target/linux/generic/files/crypto/ocf/ep80579/linux_2.6_kernel_space.mk69
-rw-r--r--target/linux/generic/files/crypto/ocf/hifn/Makefile13
-rw-r--r--target/linux/generic/files/crypto/ocf/hifn/hifn7751.c2954
-rw-r--r--target/linux/generic/files/crypto/ocf/hifn/hifn7751reg.h540
-rw-r--r--target/linux/generic/files/crypto/ocf/hifn/hifn7751var.h368
-rw-r--r--target/linux/generic/files/crypto/ocf/hifn/hifnHIPP.c421
-rw-r--r--target/linux/generic/files/crypto/ocf/hifn/hifnHIPPreg.h46
-rw-r--r--target/linux/generic/files/crypto/ocf/hifn/hifnHIPPvar.h93
-rw-r--r--target/linux/generic/files/crypto/ocf/ixp4xx/Makefile104
-rw-r--r--target/linux/generic/files/crypto/ocf/ixp4xx/ixp4xx.c1339
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/Makefile19
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAes.h62
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesAlg.c317
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesAlg.h19
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesApi.c312
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesBoxes.dat123
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesa.c3126
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesa.h412
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesaDebug.c484
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesaRegs.h357
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesaTest.c3096
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCompVer.txt4
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvLru.c158
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvLru.h112
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvMD5.c349
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvMD5.h93
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvSHA1.c239
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvSHA1.h88
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/cesa_ocf_drv.c1302
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mv802_3.h213
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvCommon.c277
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvCommon.h308
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvCompVer.txt4
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvDebug.c326
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvDebug.h178
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvDeviceId.h225
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvHalVer.h73
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvStack.c100
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvStack.h140
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvTypes.h245
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/dbg-trace.c110
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/dbg-trace.h24
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.c2513
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.h376
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.c848
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.h262
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.c320
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.h99
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.c296
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.h203
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAsm.h98
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.c1825
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.h185
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvRegs.h419
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvSpec.h257
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.c1048
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.h130
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbusRegs.h143
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.c1036
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.h120
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfInit.S163
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfRegs.h304
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.c324
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.h123
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.c382
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.h100
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.c348
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.h80
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.c658
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.h113
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.c1697
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.h348
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.c430
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.h128
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.c427
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.h125
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.c462
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.h106
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.c591
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.h110
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.c497
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.h125
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.c662
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.h140
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.c75
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.h74
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDeviceRegs.h101
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/mvCompVer.txt4
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.c211
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.h423
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/linux_oss/mvOsSata.h158
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mvSysHwConfig.h375
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.c376
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.h121
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmrRegs.h121
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCompVer.txt4
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.c207
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.h213
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.c143
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.h151
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvCompVer.txt4
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.c1479
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.h191
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.c1599
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.h179
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfBasicInit.S988
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.S668
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.h192
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfRegs.h306
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvCompVer.txt4
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.c1855
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.h172
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfBasicInit.S986
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.S528
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.h157
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfRegs.h423
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfStaticInit.h179
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.c1474
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.h192
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEth.c2952
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.c748
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.h146
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthGbe.h751
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthRegs.h700
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/mvCompVer.txt4
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/mvEth.h356
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvCompVer.txt4
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.c362
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.h118
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGppRegs.h116
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvCompVer.txt4
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.c669
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.h134
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIfRegs.h245
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.c1006
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.h323
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvCompVer.txt4
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.c1047
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.h185
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPciRegs.h411
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvCompVer.txt4
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.c1143
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.h168
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPexRegs.h751
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.c313
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.h82
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvCompVer.txt4
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.c1522
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.h166
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlashSpec.h233
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvCompVer.txt4
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.c576
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.h94
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.c249
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.h82
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiSpec.h98
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvCompVer.txt4
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.c1023
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.h121
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiEeprom.S457
-rw-r--r--target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiSpec.h160
-rw-r--r--target/linux/generic/files/crypto/ocf/ocf-bench.c514
-rw-r--r--target/linux/generic/files/crypto/ocf/ocf-compat.h372
-rw-r--r--target/linux/generic/files/crypto/ocf/ocfnull/Makefile12
-rw-r--r--target/linux/generic/files/crypto/ocf/ocfnull/ocfnull.c204
-rw-r--r--target/linux/generic/files/crypto/ocf/pasemi/Makefile12
-rw-r--r--target/linux/generic/files/crypto/ocf/pasemi/pasemi.c1007
-rw-r--r--target/linux/generic/files/crypto/ocf/pasemi/pasemi_fnu.h410
-rw-r--r--target/linux/generic/files/crypto/ocf/random.c317
-rw-r--r--target/linux/generic/files/crypto/ocf/rndtest.c300
-rw-r--r--target/linux/generic/files/crypto/ocf/rndtest.h54
-rw-r--r--target/linux/generic/files/crypto/ocf/safe/Makefile12
-rw-r--r--target/linux/generic/files/crypto/ocf/safe/hmachack.h37
-rw-r--r--target/linux/generic/files/crypto/ocf/safe/md5.c308
-rw-r--r--target/linux/generic/files/crypto/ocf/safe/md5.h76
-rw-r--r--target/linux/generic/files/crypto/ocf/safe/safe.c2230
-rw-r--r--target/linux/generic/files/crypto/ocf/safe/safereg.h421
-rw-r--r--target/linux/generic/files/crypto/ocf/safe/safevar.h229
-rw-r--r--target/linux/generic/files/crypto/ocf/safe/sha1.c279
-rw-r--r--target/linux/generic/files/crypto/ocf/safe/sha1.h72
-rw-r--r--target/linux/generic/files/crypto/ocf/talitos/Makefile12
-rw-r--r--target/linux/generic/files/crypto/ocf/talitos/talitos.c1355
-rw-r--r--target/linux/generic/files/crypto/ocf/talitos/talitos_dev.h277
-rw-r--r--target/linux/generic/files/crypto/ocf/talitos/talitos_soft.h76
-rw-r--r--target/linux/generic/files/crypto/ocf/ubsec_ssb/Makefile12
-rw-r--r--target/linux/generic/files/crypto/ocf/ubsec_ssb/bsdqueue.h527
-rw-r--r--target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsec_ssb.c2220
-rw-r--r--target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecreg.h233
-rw-r--r--target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecvar.h228
-rw-r--r--target/linux/generic/files/crypto/ocf/uio.h54
-rw-r--r--target/linux/generic/files/drivers/char/gpio_dev.c181
-rw-r--r--target/linux/generic/files/drivers/input/misc/gpio_buttons.c232
-rw-r--r--target/linux/generic/files/drivers/leds/ledtrig-morse.c366
-rw-r--r--target/linux/generic/files/drivers/leds/ledtrig-netdev.c451
-rw-r--r--target/linux/generic/files/drivers/leds/ledtrig-usbdev.c348
-rw-r--r--target/linux/generic/files/drivers/mtd/myloader.c186
-rw-r--r--target/linux/generic/files/drivers/net/phy/adm6996.c737
-rw-r--r--target/linux/generic/files/drivers/net/phy/adm6996.h162
-rw-r--r--target/linux/generic/files/drivers/net/phy/ar8216.c1536
-rw-r--r--target/linux/generic/files/drivers/net/phy/ar8216.h341
-rw-r--r--target/linux/generic/files/drivers/net/phy/ip17xx.c1410
-rw-r--r--target/linux/generic/files/drivers/net/phy/micrel.c83
-rw-r--r--target/linux/generic/files/drivers/net/phy/mvswitch.c422
-rw-r--r--target/linux/generic/files/drivers/net/phy/mvswitch.h145
-rw-r--r--target/linux/generic/files/drivers/net/phy/psb6970.c438
-rw-r--r--target/linux/generic/files/drivers/net/phy/rtl8306.c1056
-rw-r--r--target/linux/generic/files/drivers/net/phy/rtl8366_smi.c1375
-rw-r--r--target/linux/generic/files/drivers/net/phy/rtl8366_smi.h149
-rw-r--r--target/linux/generic/files/drivers/net/phy/rtl8366rb.c1271
-rw-r--r--target/linux/generic/files/drivers/net/phy/rtl8366s.c1150
-rw-r--r--target/linux/generic/files/drivers/net/phy/rtl8367.c1775
-rw-r--r--target/linux/generic/files/drivers/net/phy/swconfig.c1043
-rw-r--r--target/linux/generic/files/drivers/net/phy/swconfig_leds.c354
-rw-r--r--target/linux/generic/files/drivers/pwm/Kconfig20
-rw-r--r--target/linux/generic/files/drivers/pwm/Makefile5
-rw-r--r--target/linux/generic/files/drivers/pwm/gpio-pwm.c298
-rw-r--r--target/linux/generic/files/drivers/pwm/pwm.c643
-rw-r--r--target/linux/generic/files/fs/yaffs2/Kconfig175
-rw-r--r--target/linux/generic/files/fs/yaffs2/Makefile11
-rw-r--r--target/linux/generic/files/fs/yaffs2/devextras.h264
-rw-r--r--target/linux/generic/files/fs/yaffs2/moduleconfig.h65
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_checkptrw.c404
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_checkptrw.h35
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_ecc.c331
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_ecc.h44
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_fs.c2299
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_guts.c7469
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_guts.h902
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_mtdif.c241
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_mtdif.h27
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_mtdif1-compat.c434
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_mtdif1.c363
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_mtdif1.h28
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_mtdif2.c232
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_mtdif2.h29
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_nand.c134
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_nand.h44
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_nandemul2k.h39
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_packedtags1.c52
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_packedtags1.h37
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_packedtags2.c182
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_packedtags2.h38
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_qsort.c160
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_qsort.h23
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_tagscompat.c530
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_tagscompat.h40
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_tagsvalidity.c28
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffs_tagsvalidity.h24
-rw-r--r--target/linux/generic/files/fs/yaffs2/yaffsinterface.h21
-rw-r--r--target/linux/generic/files/fs/yaffs2/yportenv.h187
-rw-r--r--target/linux/generic/files/include/linux/ar8216_platform.h81
-rw-r--r--target/linux/generic/files/include/linux/ath5k_platform.h30
-rw-r--r--target/linux/generic/files/include/linux/ath9k_platform.h41
-rw-r--r--target/linux/generic/files/include/linux/glamo-engine.h27
-rw-r--r--target/linux/generic/files/include/linux/glamofb.h35
-rw-r--r--target/linux/generic/files/include/linux/gpio_buttons.h33
-rw-r--r--target/linux/generic/files/include/linux/gpio_dev.h42
-rw-r--r--target/linux/generic/files/include/linux/myloader.h121
-rw-r--r--target/linux/generic/files/include/linux/pwm/pwm.h165
-rw-r--r--target/linux/generic/files/include/linux/routerboot.h105
-rw-r--r--target/linux/generic/files/include/linux/rt2x00_platform.h23
-rw-r--r--target/linux/generic/files/include/linux/rtl8366.h40
-rw-r--r--target/linux/generic/files/include/linux/rtl8367.h59
-rw-r--r--target/linux/generic/files/include/linux/switch.h237
-rw-r--r--target/linux/generic/image/Makefile12
-rw-r--r--target/linux/generic/image/initramfs-base-files.txt9
-rw-r--r--target/linux/generic/image/lzma-loader/Makefile46
-rw-r--r--target/linux/generic/image/lzma-loader/src/LzmaDecode.c590
-rw-r--r--target/linux/generic/image/lzma-loader/src/LzmaDecode.h131
-rw-r--r--target/linux/generic/image/lzma-loader/src/Makefile68
-rw-r--r--target/linux/generic/image/lzma-loader/src/decompress.c157
-rw-r--r--target/linux/generic/image/lzma-loader/src/lzma-copy.lds.in20
-rw-r--r--target/linux/generic/image/lzma-loader/src/lzma.lds.in24
-rw-r--r--target/linux/generic/image/lzma-loader/src/print.c324
-rw-r--r--target/linux/generic/image/lzma-loader/src/print.h36
-rw-r--r--target/linux/generic/image/lzma-loader/src/printf.c35
-rw-r--r--target/linux/generic/image/lzma-loader/src/printf.h18
-rw-r--r--target/linux/generic/image/lzma-loader/src/start.S160
-rw-r--r--target/linux/generic/image/lzma-loader/src/uart16550.c86
-rw-r--r--target/linux/generic/image/lzma-loader/src/uart16550.h47
-rw-r--r--target/linux/generic/patches-3.3/006-arm_kernel_xz_support.patch96
-rw-r--r--target/linux/generic/patches-3.3/020-ssb_update.patch837
-rw-r--r--target/linux/generic/patches-3.3/025-bcma_backport.patch3330
-rw-r--r--target/linux/generic/patches-3.3/026-bcma_pmu_regression.patch29
-rw-r--r--target/linux/generic/patches-3.3/027-bcma-add-missing-iounmap-on-error-path.patch55
-rw-r--r--target/linux/generic/patches-3.3/028-bcma-fix-regression-in-interrupt-assignment-on-mips.patch29
-rw-r--r--target/linux/generic/patches-3.3/040-Controlled-Delay-AQM.patch757
-rw-r--r--target/linux/generic/patches-3.3/041-codel-use-Newton-method-instead-of-sqrt-and-divides.patch185
-rw-r--r--target/linux/generic/patches-3.3/042-fq_codel-Fair-Queue-Codel-AQM.patch839
-rw-r--r--target/linux/generic/patches-3.3/043-net-codel-Add-missing-include-linux-prefetch.h.patch33
-rw-r--r--target/linux/generic/patches-3.3/044-net-codel-fix-build-errors.patch51
-rw-r--r--target/linux/generic/patches-3.3/045-codel-use-u16-field-instead-of-31bits-for-rec_inv_sq.patch86
-rw-r--r--target/linux/generic/patches-3.3/046-fq_codel-qdisc-backlog.patch132
-rw-r--r--target/linux/generic/patches-3.3/047-spi_message_queue.patch603
-rw-r--r--target/linux/generic/patches-3.3/048-spi-Dont-call-prepare-unprepare-transfer-if-not-popu.patch39
-rw-r--r--target/linux/generic/patches-3.3/049-codel-refine-one-condition-to-avoid-a-nul-rec_inv_sqrt.patch52
-rw-r--r--target/linux/generic/patches-3.3/050-rng_git_backport.patch783
-rw-r--r--target/linux/generic/patches-3.3/051-rng_git_backport-remove_irqf_sample_random.patch543
-rw-r--r--target/linux/generic/patches-3.3/100-overlayfs_v12.patch3232
-rw-r--r--target/linux/generic/patches-3.3/102-ehci_hcd_ignore_oc.patch41
-rw-r--r--target/linux/generic/patches-3.3/110-fix_mtd_include.patch10
-rw-r--r--target/linux/generic/patches-3.3/130-pppoatm-queue-depth.patch188
-rw-r--r--target/linux/generic/patches-3.3/140-ixp4xx_hss_module_h_include.patch39
-rw-r--r--target/linux/generic/patches-3.3/200-fix_localversion.patch11
-rw-r--r--target/linux/generic/patches-3.3/201-extra_optimization.patch24
-rw-r--r--target/linux/generic/patches-3.3/202-reduce_module_size.patch11
-rw-r--r--target/linux/generic/patches-3.3/210-darwin_scripts_include.patch78
-rw-r--r--target/linux/generic/patches-3.3/211-stddef_include.patch17
-rw-r--r--target/linux/generic/patches-3.3/212-x86_reloc_portability.patch22
-rw-r--r--target/linux/generic/patches-3.3/220-module_exports.patch89
-rw-r--r--target/linux/generic/patches-3.3/230-openwrt_lzma_options.patch54
-rw-r--r--target/linux/generic/patches-3.3/250-netfilter_depends.patch18
-rw-r--r--target/linux/generic/patches-3.3/251-sound_kconfig.patch11
-rw-r--r--target/linux/generic/patches-3.3/252-mv_cesa_depends.patch10
-rw-r--r--target/linux/generic/patches-3.3/253-ssb_b43_default_on.patch29
-rw-r--r--target/linux/generic/patches-3.3/254-textsearch_kconfig_hacks.patch23
-rw-r--r--target/linux/generic/patches-3.3/255-lib80211_kconfig_hacks.patch19
-rw-r--r--target/linux/generic/patches-3.3/256-crypto_add_kconfig_prompts.patch47
-rw-r--r--target/linux/generic/patches-3.3/257-wireless_ext_kconfig_hack.patch22
-rw-r--r--target/linux/generic/patches-3.3/258-netfilter_netlink_kconfig_hack.patch11
-rw-r--r--target/linux/generic/patches-3.3/300-mips_expose_boot_raw.patch39
-rw-r--r--target/linux/generic/patches-3.3/301-mips_image_cmdline_hack.patch28
-rw-r--r--target/linux/generic/patches-3.3/302-mips_use_generic_thread_info_allocator.patch18
-rw-r--r--target/linux/generic/patches-3.3/303-mips_fix_kexec.patch11
-rw-r--r--target/linux/generic/patches-3.3/304-mips_disable_fpu.patch160
-rw-r--r--target/linux/generic/patches-3.3/305-mips_module_reloc.patch371
-rw-r--r--target/linux/generic/patches-3.3/306-mips_mem_functions_performance.patch83
-rw-r--r--target/linux/generic/patches-3.3/307-mips_oprofile_fix.patch35
-rw-r--r--target/linux/generic/patches-3.3/308-mips-show-correct-cpu-name-for-24KEc.patch17
-rw-r--r--target/linux/generic/patches-3.3/309-mips_fuse_workaround.patch32
-rw-r--r--target/linux/generic/patches-3.3/310-arm_module_unresolved_weak_sym.patch13
-rw-r--r--target/linux/generic/patches-3.3/320-ppc4xx_optimization.patch31
-rw-r--r--target/linux/generic/patches-3.3/321-powerpc_crtsavres_prereq.patch10
-rw-r--r--target/linux/generic/patches-3.3/322-ppc4xx-crypto-compile-fix.patch10
-rw-r--r--target/linux/generic/patches-3.3/330-mips-add-crash-and-kdump-support.patch616
-rw-r--r--target/linux/generic/patches-3.3/331-mips-kexec-enhanche-the-support.patch159
-rw-r--r--target/linux/generic/patches-3.3/332-mips-kexec-init-the-arguments-for-the-new-kernel-image.patch52
-rw-r--r--target/linux/generic/patches-3.3/333-mips-kexec-get-kernel-parameters-from-kexec-tools.patch88
-rw-r--r--target/linux/generic/patches-3.3/334-mips-fix-compiling-failure-of-relocate_kernel.patch83
-rw-r--r--target/linux/generic/patches-3.3/335-mips-kexec-cleanup-kexec-tools-parameter-handling.patch186
-rw-r--r--target/linux/generic/patches-3.3/340-module_alloc_size_check.patch20
-rw-r--r--target/linux/generic/patches-3.3/400-rootfs_split.patch327
-rw-r--r--target/linux/generic/patches-3.3/401-partial_eraseblock_write.patch145
-rw-r--r--target/linux/generic/patches-3.3/410-mtd_info_move_forward_decl.patch18
-rw-r--r--target/linux/generic/patches-3.3/420-redboot_space.patch30
-rw-r--r--target/linux/generic/patches-3.3/421-redboot_boardconfig.patch60
-rw-r--r--target/linux/generic/patches-3.3/430-mtd_myloader_partition_parser.patch35
-rw-r--r--target/linux/generic/patches-3.3/440-block2mtd_init.patch116
-rw-r--r--target/linux/generic/patches-3.3/441-block2mtd_refresh.patch291
-rw-r--r--target/linux/generic/patches-3.3/442-block2mtd_probe.patch10
-rw-r--r--target/linux/generic/patches-3.3/443-block2mtd-avoid-recursive-call-of-mtd_writev.patch10
-rw-r--r--target/linux/generic/patches-3.3/450-mtd_plat_nand_chip_fixup.patch37
-rw-r--r--target/linux/generic/patches-3.3/451-mtd_fix_nand_correct_data_return_code.patch12
-rw-r--r--target/linux/generic/patches-3.3/460-cfi_cmdset_0002_no_erase_suspend.patch11
-rw-r--r--target/linux/generic/patches-3.3/470-mtd_m25p80_add_pm25lv_flash_support.patch39
-rw-r--r--target/linux/generic/patches-3.3/473-mtd_m25p80_add_w25q128.patch10
-rw-r--r--target/linux/generic/patches-3.3/475-mtd_cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch18
-rw-r--r--target/linux/generic/patches-3.3/476-mtd-m25p80-allow-to-disable-small-sector-erase.patch41
-rw-r--r--target/linux/generic/patches-3.3/477-mtd-m25p80-add-support-for-the-EON-EN25Q64-chip.patch10
-rw-r--r--target/linux/generic/patches-3.3/500-yaffs_support.patch18
-rw-r--r--target/linux/generic/patches-3.3/501-yaffs_cvs_2009_04_24.patch12344
-rw-r--r--target/linux/generic/patches-3.3/502-yaffs_git_2010_10_20.patch27068
-rw-r--r--target/linux/generic/patches-3.3/503-yaffs_symlink_bug.patch17
-rw-r--r--target/linux/generic/patches-3.3/504-yaffs_mutex_fix.patch20
-rw-r--r--target/linux/generic/patches-3.3/505-2.6.39_fix.patch147
-rw-r--r--target/linux/generic/patches-3.3/506-yaffs2-3.2_fix.patch289
-rw-r--r--target/linux/generic/patches-3.3/507-yaffs2-3.3_fix.patch71
-rw-r--r--target/linux/generic/patches-3.3/510-jffs2_make_lzma_available.patch5142
-rw-r--r--target/linux/generic/patches-3.3/511-debloat_lzma.patch485
-rw-r--r--target/linux/generic/patches-3.3/512-jffs2_eofdetect.patch132
-rw-r--r--target/linux/generic/patches-3.3/520-squashfs_update_xz_comp_opts.patch25
-rw-r--r--target/linux/generic/patches-3.3/540-crypto-xz-decompression-support.patch146
-rw-r--r--target/linux/generic/patches-3.3/541-ubifs-xz-decompression-support.patch94
-rw-r--r--target/linux/generic/patches-3.3/550-ubifs-symlink-xattr-support.patch67
-rw-r--r--target/linux/generic/patches-3.3/600-netfilter_layer7_2.22.patch2142
-rw-r--r--target/linux/generic/patches-3.3/601-netfilter_layer7_pktmatch.patch108
-rw-r--r--target/linux/generic/patches-3.3/602-netfilter_layer7_match.patch51
-rw-r--r--target/linux/generic/patches-3.3/603-netfilter_layer7_2.6.36_fix.patch61
-rw-r--r--target/linux/generic/patches-3.3/604-netfilter_cisco_794x_iphone.patch118
-rw-r--r--target/linux/generic/patches-3.3/610-netfilter_match_bypass_default_checks.patch93
-rw-r--r--target/linux/generic/patches-3.3/611-netfilter_match_bypass_default_table.patch81
-rw-r--r--target/linux/generic/patches-3.3/612-netfilter_match_reduce_memory_access.patch16
-rw-r--r--target/linux/generic/patches-3.3/613-netfilter_optional_tcp_window_check.patch36
-rw-r--r--target/linux/generic/patches-3.3/620-sched_esfq.patch791
-rw-r--r--target/linux/generic/patches-3.3/621-sched_act_connmark.patch172
-rw-r--r--target/linux/generic/patches-3.3/630-packet_socket_type.patch132
-rw-r--r--target/linux/generic/patches-3.3/640-bridge_no_eap_forward.patch15
-rw-r--r--target/linux/generic/patches-3.3/641-bridge_always_accept_eap.patch11
-rw-r--r--target/linux/generic/patches-3.3/642-bridge_port_isolate.patch103
-rw-r--r--target/linux/generic/patches-3.3/643-bridge_remove_ipv6_dependency.patch107
-rw-r--r--target/linux/generic/patches-3.3/644-bridge_optimize_netfilter_hooks.patch146
-rw-r--r--target/linux/generic/patches-3.3/650-pppoe_header_pad.patch20
-rw-r--r--target/linux/generic/patches-3.3/651-wireless_mesh_header.patch11
-rw-r--r--target/linux/generic/patches-3.3/652-atm_header_changes.patch12
-rw-r--r--target/linux/generic/patches-3.3/653-disable_netlink_trim.patch28
-rw-r--r--target/linux/generic/patches-3.3/654-avoid_skb_cow_realloc.patch21
-rw-r--r--target/linux/generic/patches-3.3/655-increase_skb_pad.patch11
-rw-r--r--target/linux/generic/patches-3.3/700-swconfig.patch29
-rw-r--r--target/linux/generic/patches-3.3/701-phy_extension.patch72
-rw-r--r--target/linux/generic/patches-3.3/702-phy_add_aneg_done_function.patch45
-rw-r--r--target/linux/generic/patches-3.3/710-phy-add-mdio_register_board_info.patch191
-rw-r--r--target/linux/generic/patches-3.3/720-phy_adm6996.patch26
-rw-r--r--target/linux/generic/patches-3.3/721-phy_packets.patch175
-rw-r--r--target/linux/generic/patches-3.3/722-phy_mvswitch.patch23
-rw-r--r--target/linux/generic/patches-3.3/723-phy_ip175c.patch23
-rw-r--r--target/linux/generic/patches-3.3/724-phy_ar8216.patch24
-rw-r--r--target/linux/generic/patches-3.3/725-phy_rtl8306.patch23
-rw-r--r--target/linux/generic/patches-3.3/726-phy_rtl8366.patch45
-rw-r--r--target/linux/generic/patches-3.3/727-phy-rtl8367.patch23
-rw-r--r--target/linux/generic/patches-3.3/728-phy-micrel.patch24
-rw-r--r--target/linux/generic/patches-3.3/729-phy-tantos.patch21
-rw-r--r--target/linux/generic/patches-3.3/750-hostap_txpower.patch154
-rw-r--r--target/linux/generic/patches-3.3/810-pci_disable_common_quirks.patch43
-rw-r--r--target/linux/generic/patches-3.3/811-pci_disable_usb_common_quirks.patch38
-rw-r--r--target/linux/generic/patches-3.3/820-usb_add_usb_find_device_by_name.patch84
-rw-r--r--target/linux/generic/patches-3.3/830-ledtrig_morse.patch28
-rw-r--r--target/linux/generic/patches-3.3/831-ledtrig_netdev.patch51
-rw-r--r--target/linux/generic/patches-3.3/832-ledtrig_usbdev.patch31
-rw-r--r--target/linux/generic/patches-3.3/835-gpiodev.patch27
-rw-r--r--target/linux/generic/patches-3.3/840-rtc7301.patch250
-rw-r--r--target/linux/generic/patches-3.3/841-rtc_pt7c4338.patch247
-rw-r--r--target/linux/generic/patches-3.3/850-glamo_headers.patch21
-rw-r--r--target/linux/generic/patches-3.3/861-04_spi_gpio_implement_spi_delay.patch58
-rw-r--r--target/linux/generic/patches-3.3/862-gpio_spi_driver.patch373
-rw-r--r--target/linux/generic/patches-3.3/863-gpiommc.patch844
-rw-r--r--target/linux/generic/patches-3.3/864-gpiommc_configfs_locking.patch58
-rw-r--r--target/linux/generic/patches-3.3/865-gpiopwm.patch21
-rw-r--r--target/linux/generic/patches-3.3/870-hifn795x_byteswap.patch17
-rw-r--r--target/linux/generic/patches-3.3/900-slab_maxsize.patch13
-rw-r--r--target/linux/generic/patches-3.3/910-kobject_uevent.patch21
-rw-r--r--target/linux/generic/patches-3.3/911-kobject_add_broadcast_uevent.patch85
-rw-r--r--target/linux/generic/patches-3.3/920-unable_to_open_console.patch11
-rw-r--r--target/linux/generic/patches-3.3/921-use_preinit_as_init.patch14
-rw-r--r--target/linux/generic/patches-3.3/930-crashlog.patch285
-rw-r--r--target/linux/generic/patches-3.3/940-ocf_kbuild_integration.patch20
-rw-r--r--target/linux/generic/patches-3.3/941-ocf_20120127.patch164
-rw-r--r--target/linux/generic/patches-3.3/950-vm_exports.patch117
-rw-r--r--target/linux/generic/patches-3.3/960-decompress_unlzo_fix.patch23
-rw-r--r--target/linux/generic/patches-3.3/980-update_arm_machtypes.patch3618
-rw-r--r--target/linux/generic/patches-3.3/992-mpcore_wdt_fix_watchdog_counter_loading.patch64
-rw-r--r--target/linux/generic/patches-3.3/993-mpcore_wdt_fix_wdioc_setoptions_handling.patch29
-rw-r--r--target/linux/generic/patches-3.3/994-mpcore_wdt_fix_timer_mode_setup.patch57
470 files changed, 205686 insertions, 0 deletions
diff --git a/target/linux/generic/PATCHES b/target/linux/generic/PATCHES
new file mode 100644
index 000000000..34608188d
--- /dev/null
+++ b/target/linux/generic/PATCHES
@@ -0,0 +1,15 @@
+The patches-* subdirectories contain the kernel patches applied for every
+OpenWrt target. All patches should be named 'NNN-lowercase_shortname.patch'
+and sorted into the following categories:
+
+0xx - upstream backports
+1xx - code awaiting upstream merge
+2xx - kernel build / config / header patches
+3xx - architecture specific patches
+4xx - mtd related patches (subsystem and drivers)
+5xx - filesystem related patches
+6xx - generic network patches
+7xx - network / phy driver patches
+8xx - other drivers
+9xx - uncategorized other patches
+
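As an illustration of the NNN- naming scheme described in the PATCHES file above, the category prefix can be used to list a kernel version's patches grouped by category. This is a minimal sketch, not part of the commit; the grouping loop is an assumption, and only the patches-3.3 directory from this tree is referenced.

#!/bin/sh
# Hypothetical helper: group the generic kernel patches by their NNN category prefix.
# Assumes it is run from the top of an OpenWrt tree that contains patches-3.3.
for prefix in 0 1 2 3 4 5 6 7 8 9; do
	echo "== ${prefix}xx =="
	ls target/linux/generic/patches-3.3/${prefix}[0-9][0-9]-*.patch 2>/dev/null
done
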
diff --git a/target/linux/generic/base-files/init b/target/linux/generic/base-files/init
new file mode 100755
index 000000000..5e4fbaec3
--- /dev/null
+++ b/target/linux/generic/base-files/init
@@ -0,0 +1,96 @@
+#!/bin/sh
+# Copyright (C) 2006 OpenWrt.org
+
+INITRAMFS=1
+
+. /etc/preinit
+
+set_state init
+echo "- init -"
+
+[ -n "$SWITCH_ROOT_TMPFS" ] && {
+ mount none /mnt -t tmpfs
+ ( cd /; find -xdev -type d ) | ( cd /mnt; xargs mkdir -p )
+ find / \! -type d -xdev | tar -cT /proc/self/fd/0 | tar -xC /mnt
+ mkdir /mnt/proc /mnt/dev /mnt/tmp /mnt/sys
+ mount -o move /proc /mnt/proc
+ mount -o move /dev /mnt/dev
+ mount -o move /tmp /mnt/tmp
+ mount -o move /sys /mnt/sys
+ rm -rf /proc /dev /tmp /sys
+ exec switch_root -c /dev/console /mnt /sbin/init
+}
+
+# if we have no root parameter, just go to running from ramfs
+[ -z $rootfs ] && {
+ export NOMOUNT="No Root"
+ exec /sbin/init
+}
+
+#if we have a failsafe boot selected, dont bother
+#trying to find or wait for a root mount point
+[ -z "$FAILSAFE" ] || {
+ exec /bin/busybox init
+}
+
+# Load the modules we have in initramfs, this should
+# make the media accessible, but, it may take some time
+. /etc/functions.sh
+load_modules /etc/modules /etc/modules.d/*
+
+#wait 10 seconds for the disc to show up
+#usb stick typically takes 4 to 6 seconds
+#till it's readable
+#it's quite possible the disc never shows up
+#if we netbooted this kernel
+COUNTER=0
+while [ $COUNTER -lt 10 ]; do
+ sleep 1
+ [ -e $rootfs ] && let COUNTER=10;
+ let COUNTER=COUNTER+1
+done
+[ -e $rootfs ] || {
+ export FAILSAFE="NoDisc"
+ exec /bin/busybox init
+}
+
+# now we'll try mount it, again with a timeout
+# This will fail if the inserted stick is formatted
+# in a manner we dont understand
+COUNTER=0
+while [ $COUNTER -lt 10 ]; do
+ sleep 1
+ mount $rootfs /mnt
+ [ $? -eq "0" ] && let COUNTER=100;
+ let COUNTER=COUNTER+1
+done
+[ $? -ne "0" ] && {
+ export FAILSAFE="MountFail"
+ exec /bin/busybox init
+}
+
+#It mounted, lets look for a postinit file, again, give it time
+#I've seen this take 6 seconds to actually complete
+COUNTER=0
+while [ $COUNTER -lt 10 ]; do
+ sleep 1
+ [ -e /mnt/etc/banner ] && let COUNTER=10;
+ let COUNTER=COUNTER+1
+done
+[ -e /mnt/etc/banner ] || {
+ export FAILSAFE="No Openwrt FS"
+ exec /bin/busybox init
+}
+
+unset rootfs
+
+mount -o move /proc /mnt/proc
+mount -o move /dev /mnt/dev
+mount -o move /dev/pts /mnt/dev/pts
+mount -o move /tmp /mnt/tmp
+mount -o move /sys /mnt/sys
+mount none /tmp -t tmpfs
+killall -q hotplug2
+exec switch_root -c /dev/console /mnt /sbin/init
+
+set_state done
diff --git a/target/linux/generic/config-3.3 b/target/linux/generic/config-3.3
new file mode 100644
index 000000000..abf499ff2
--- /dev/null
+++ b/target/linux/generic/config-3.3
@@ -0,0 +1,3473 @@
+CONFIG_32BIT=y
+# CONFIG_6PACK is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_9P_FS is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_AB8500_CORE is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_ACENIC is not set
+# CONFIG_ACERHDF is not set
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_ACPI_APEI is not set
+# CONFIG_ACPI_CUSTOM_METHOD is not set
+# CONFIG_ACPI_HED is not set
+# CONFIG_ACPI_POWER_METER is not set
+# CONFIG_ACPI_QUICKSTART is not set
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_ADFS_FS is not set
+# CONFIG_ADIS16255 is not set
+# CONFIG_ADM6996_PHY is not set
+# CONFIG_ADM8211 is not set
+CONFIG_AEABI=y
+# CONFIG_AFFS_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_AGP is not set
+CONFIG_AIO=y
+# CONFIG_AIRO is not set
+# CONFIG_AIRO_CS is not set
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
+# CONFIG_ALIM7101_WDT is not set
+# CONFIG_ALTERA_STAPL is not set
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_AMILO_RFKILL is not set
+# CONFIG_ANDROID is not set
+CONFIG_ANON_INODES=y
+# CONFIG_APDS9802ALS is not set
+# CONFIG_APM8018X is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_AR7 is not set
+# CONFIG_AR8216_PHY is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_DMA_ADDR_T_64BIT is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_EXYNOS is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+# CONFIG_ARCH_HIGHBANK is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_LPC32XX is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_MXS is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_NUC93X is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
+# CONFIG_ARCH_PICOXCELL is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PRIMA2 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P64X0 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_ARCH_TCC_926 is not set
+# CONFIG_ARCH_TEGRA is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_VT8500 is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_ZYNQ is not set
+# CONFIG_ARCNET is not set
+CONFIG_ARM_CPU_TOPOLOGY=y
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+# CONFIG_ARM_ERRATA_411920 is not set
+# CONFIG_ARM_PATCH_PHYS_VIRT is not set
+# CONFIG_ARM_UNWIND is not set
+CONFIG_ARPD=y
+# CONFIG_ARTHUR is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_ASYNC_RAID6_TEST is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_ATA is not set
+# CONFIG_ATALK is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_ATA_ACPI is not set
+CONFIG_ATA_BMDMA=y
+# CONFIG_ATA_GENERIC is not set
+# CONFIG_ATA_NONSTANDARD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_ATA_PIIX is not set
+CONFIG_ATA_SFF=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATH5K is not set
+# CONFIG_ATH6K_LEGACY is not set
+# CONFIG_ATH79 is not set
+# CONFIG_ATH9K is not set
+# CONFIG_ATL1 is not set
+# CONFIG_ATL1C is not set
+# CONFIG_ATL1E is not set
+# CONFIG_ATL2 is not set
+# CONFIG_ATM is not set
+# CONFIG_ATMEL is not set
+# CONFIG_ATMEL_PWM is not set
+# CONFIG_ATM_AMBASSADOR is not set
+# CONFIG_ATM_BR2684 is not set
+CONFIG_ATM_BR2684_IPFILTER=y
+# CONFIG_ATM_CLIP is not set
+CONFIG_ATM_CLIP_NO_ICMP=y
+# CONFIG_ATM_DRIVERS is not set
+# CONFIG_ATM_DUMMY is not set
+# CONFIG_ATM_ENI is not set
+# CONFIG_ATM_FIRESTREAM is not set
+# CONFIG_ATM_FORE200E is not set
+# CONFIG_ATM_HE is not set
+# CONFIG_ATM_HORIZON is not set
+# CONFIG_ATM_IA is not set
+# CONFIG_ATM_IDT77252 is not set
+# CONFIG_ATM_LANAI is not set
+# CONFIG_ATM_LANE is not set
+# CONFIG_ATM_MPOA is not set
+# CONFIG_ATM_NICSTAR is not set
+# CONFIG_ATM_SOLOS is not set
+# CONFIG_ATM_TCP is not set
+# CONFIG_ATM_ZATM is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_ATP is not set
+# CONFIG_AUDIT is not set
+# CONFIG_AUDIT_LOGINUID_IMMUTABLE is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_AUTO_ZRELADDR is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_AVERAGE is not set
+# CONFIG_AX25 is not set
+# CONFIG_AX25_DAMA_SLAVE is not set
+# CONFIG_AX88796 is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_B44 is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+CONFIG_BASE_FULL=y
+CONFIG_BASE_SMALL=0
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_BATTERY_DS2760 is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_BATTERY_MAX17042 is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_BAYCOM_EPP is not set
+# CONFIG_BAYCOM_PAR is not set
+# CONFIG_BAYCOM_SER_FDX is not set
+# CONFIG_BAYCOM_SER_HDX is not set
+# CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
+# CONFIG_BCM63XX_PHY is not set
+# CONFIG_BCMA is not set
+# CONFIG_BCM_WIMAX is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_BE2ISCSI is not set
+# CONFIG_BE2NET is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_BINARY_PRINTF is not set
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_BITREVERSE=y
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_CPQ_DA is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_BLK_DEV_4DRIVES is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI14XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_ATIIXP is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_BSGLIB is not set
+# CONFIG_BLK_DEV_CMD640 is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_CS5535 is not set
+# CONFIG_BLK_DEV_CS5536 is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_DELKIN is not set
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_DTC2278 is not set
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_GENERIC is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_HT6560B is not set
+# CONFIG_BLK_DEV_IDEACPI is not set
+# CONFIG_BLK_DEV_IDECD is not set
+# CONFIG_BLK_DEV_IDECS is not set
+# CONFIG_BLK_DEV_IDEPCI is not set
+# CONFIG_BLK_DEV_IDEPNP is not set
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_BLK_DEV_IT8172 is not set
+# CONFIG_BLK_DEV_IT8213 is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_JMICRON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_NVME is not set
+# CONFIG_BLK_DEV_OFFBOARD is not set
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_PLATFORM is not set
+# CONFIG_BLK_DEV_QD65XX is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_BLK_DEV_RZ1000 is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_SD is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SIS5513 is not set
+# CONFIG_BLK_DEV_SL82C105 is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_TC86C001 is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_UB is not set
+# CONFIG_BLK_DEV_UMC8672 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_BLK_DEV_XIP is not set
+CONFIG_BLOCK=y
+# CONFIG_BMP085 is not set
+# CONFIG_BNA is not set
+# CONFIG_BNX2 is not set
+# CONFIG_BNX2X is not set
+# CONFIG_BONDING is not set
+# CONFIG_BOOKE_WDT is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+CONFIG_BOOT_RAW=y
+# CONFIG_BPQETHER is not set
+CONFIG_BQL=y
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_BRCMFMAC is not set
+CONFIG_BRIDGE=y
+# CONFIG_BRIDGE_EBT_802_3 is not set
+# CONFIG_BRIDGE_EBT_AMONG is not set
+# CONFIG_BRIDGE_EBT_ARP is not set
+# CONFIG_BRIDGE_EBT_ARPREPLY is not set
+# CONFIG_BRIDGE_EBT_BROUTE is not set
+# CONFIG_BRIDGE_EBT_DNAT is not set
+# CONFIG_BRIDGE_EBT_IP is not set
+# CONFIG_BRIDGE_EBT_IP6 is not set
+# CONFIG_BRIDGE_EBT_LIMIT is not set
+# CONFIG_BRIDGE_EBT_LOG is not set
+# CONFIG_BRIDGE_EBT_MARK is not set
+# CONFIG_BRIDGE_EBT_MARK_T is not set
+# CONFIG_BRIDGE_EBT_NFLOG is not set
+# CONFIG_BRIDGE_EBT_PKTTYPE is not set
+# CONFIG_BRIDGE_EBT_REDIRECT is not set
+# CONFIG_BRIDGE_EBT_SNAT is not set
+# CONFIG_BRIDGE_EBT_STP is not set
+# CONFIG_BRIDGE_EBT_T_FILTER is not set
+# CONFIG_BRIDGE_EBT_T_NAT is not set
+# CONFIG_BRIDGE_EBT_ULOG is not set
+# CONFIG_BRIDGE_EBT_VLAN is not set
+# CONFIG_BRIDGE_IGMP_SNOOPING is not set
+# CONFIG_BRIDGE_NETFILTER is not set
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+# CONFIG_BROADCOM_PHY is not set
+CONFIG_BROKEN_ON_SMP=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_BT is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_BT_ATH3K is not set
+# CONFIG_BT_BNEP is not set
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+# CONFIG_BT_CMTP is not set
+# CONFIG_BT_HCIBCM203X is not set
+# CONFIG_BT_HCIBFUSB is not set
+# CONFIG_BT_HCIBLUECARD is not set
+# CONFIG_BT_HCIBPA10X is not set
+# CONFIG_BT_HCIBT3C is not set
+# CONFIG_BT_HCIBTSDIO is not set
+# CONFIG_BT_HCIBTUART is not set
+# CONFIG_BT_HCIBTUSB is not set
+# CONFIG_BT_HCIDTL1 is not set
+# CONFIG_BT_HCIUART is not set
+# CONFIG_BT_HCIUART_ATH3K is not set
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_H4=y
+# CONFIG_BT_HCIUART_LL is not set
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_BT_HIDP is not set
+CONFIG_BT_L2CAP=y
+# CONFIG_BT_MRVL is not set
+# CONFIG_BT_RFCOMM is not set
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_SCO=y
+CONFIG_BUG=y
+# CONFIG_C2PORT is not set
+# CONFIG_CAIF is not set
+# CONFIG_CAN is not set
+# CONFIG_CAPI_AVM is not set
+# CONFIG_CAPI_EICON is not set
+# CONFIG_CAPI_TRACE is not set
+CONFIG_CARDBUS=y
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_CARMA_FPGA is not set
+# CONFIG_CARMA_FPGA_PROGRAM is not set
+# CONFIG_CASSINI is not set
+CONFIG_CAVIUM_OCTEON_HELPER=y
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CB710_CORE is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+# CONFIG_CC_STACKPROTECTOR is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CEPH_LIB is not set
+# CONFIG_CFG80211 is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_CHARGER_GPIO is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHECKPOINT_RESTORE is not set
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+# CONFIG_CHELSIO_T4 is not set
+# CONFIG_CHELSIO_T4VF is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_CIFS is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_NFSD_EXPORT is not set
+CONFIG_CIFS_POSIX=y
+CONFIG_CIFS_STATS=y
+# CONFIG_CIFS_STATS2 is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CLEANCACHE is not set
+CONFIG_CLS_U32_MARK=y
+CONFIG_CLS_U32_PERF=y
+CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_EXTEND is not set
+# CONFIG_CMDLINE_FORCE is not set
+# CONFIG_CMDLINE_FROM_BOOTLOADER is not set
+# CONFIG_CNIC is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_CODE_PATCHING_SELFTEST is not set
+# CONFIG_COMEDI is not set
+# CONFIG_COMPACTION is not set
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_CONFIGFS_FS is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_CONSTRUCTORS=y
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_COPS is not set
+# CONFIG_CORDIC is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_CPA_DEBUG is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_CPU_IDLE is not set
+# CONFIG_CRAMFS is not set
+CONFIG_CRASHLOG=y
+# CONFIG_CRASH_DUMP is not set
+# CONFIG_CRC8 is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CROSS_COMPILE=""
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_AEAD is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_AES_586 is not set
+# CONFIG_CRYPTO_AES_NI_INTEL is not set
+# CONFIG_CRYPTO_ALGAPI is not set
+# CONFIG_CRYPTO_ALGAPI2 is not set
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_BLKCIPHER is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_CRC32C_INTEL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_DEV_FSL_CAAM is not set
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_CRYPTO_DEV_MV_CESA is not set
+# CONFIG_CRYPTO_DEV_TALITOS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_FIPS is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL is not set
+# CONFIG_CRYPTO_HASH is not set
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_LZO is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_MANAGER2 is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_PCOMP is not set
+# CONFIG_CRYPTO_PCOMP2 is not set
+# CONFIG_CRYPTO_PCRYPT is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_RNG is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SALSA20_586 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SEQIV is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TEST is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_TWOFISH_586 is not set
+# CONFIG_CRYPTO_TWOFISH_COMMON is not set
+# CONFIG_CRYPTO_USER is not set
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+# CONFIG_CRYPTO_VMAC is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_XZ is not set
+# CONFIG_CRYPTO_XTS is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYSTALHD is not set
+# CONFIG_CS5535_MFGPT is not set
+# CONFIG_CUSE is not set
+# CONFIG_CXT1E1 is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_DCB is not set
+# CONFIG_DE600 is not set
+# CONFIG_DE620 is not set
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_DEBUG_HIGHMEM is not set
+# CONFIG_DEBUG_ICEDCC is not set
+# CONFIG_DEBUG_INFO is not set
+CONFIG_DEBUG_INFO_REDUCED=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_LL is not set
+# CONFIG_DEBUG_LOCKDEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_NX_TEST is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_RODATA is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+# CONFIG_DEBUG_SET_MODULE_RONX is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_SHIRQ is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DECNET is not set
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_DEFAULT_HOSTNAME="(none)"
+CONFIG_DEFAULT_IOSCHED="deadline"
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_DEFAULT_NOOP is not set
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_DEFAULT_CUBIC=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+# CONFIG_DEPRECATED_PARAM_STRUCT is not set
+# CONFIG_DETECT_HUNG_TASK is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_DEVPORT=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_DEVTMPFS is not set
+# CONFIG_DEVTMPFS_MOUNT is not set
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_DL2K is not set
+# CONFIG_DLM is not set
+# CONFIG_DM9000 is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_DMADEVICES_DEBUG is not set
+# CONFIG_DMASCC is not set
+# CONFIG_DMATEST is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_DMA_ENGINE is not set
+# CONFIG_DMA_SHARED_BUFFER is not set
+# CONFIG_DM_DEBUG is not set
+# CONFIG_DM_DELAY is not set
+# CONFIG_DM_LOG_USERSPACE is not set
+# CONFIG_DM_FLAKEY is not set
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_RAID is not set
+# CONFIG_DM_UEVENT is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_THIN_PROVISIONING is not set
+# CONFIG_DNET is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_DQL=y
+# CONFIG_DRAGONRISE_FF is not set
+# CONFIG_DRM is not set
+# CONFIG_DS1682 is not set
+# CONFIG_DTLK is not set
+# CONFIG_DUMMY is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_DW_WATCHDOG is not set
+# CONFIG_DX_SEP is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_E100 is not set
+# CONFIG_E1000 is not set
+# CONFIG_E1000E is not set
+# CONFIG_E2100 is not set
+# CONFIG_EASYCAP is not set
+# CONFIG_ECHO is not set
+# CONFIG_ECONET is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_EDAC is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_DIGSY_MTC_CFG is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEXPRESS is not set
+# CONFIG_EEXPRESS_PRO is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_EFS_FS is not set
+# CONFIG_ELF_CORE is not set
+CONFIG_EMBEDDED=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+# CONFIG_ENC28J60 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ENCRYPTED_KEYS is not set
+# CONFIG_ENIC is not set
+# CONFIG_EPIC100 is not set
+CONFIG_EPOLL=y
+# CONFIG_EQUALIZER is not set
+# CONFIG_ET131X is not set
+# CONFIG_ETH16I is not set
+CONFIG_ETHERNET=y
+# CONFIG_ETHOC is not set
+# CONFIG_EVENT_POWER_TRACING_DEPRECATED is not set
+CONFIG_EVENTFD=y
+# CONFIG_EWRK3 is not set
+CONFIG_EXPERIMENTAL=y
+CONFIG_EXPERT=y
+# CONFIG_EXPORTFS is not set
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_DEBUG is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+# CONFIG_EXT4_FS_SECURITY is not set
+CONFIG_EXT4_FS_XATTR=y
+CONFIG_EXT4_USE_FOR_EXT23=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_EXTRA_TARGETS=""
+# CONFIG_EZX_PCAP is not set
+# CONFIG_FAIR_GROUP_SCHED is not set
+# CONFIG_FANOTIFY is not set
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_FAT_FS is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_FB is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_ARC is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_ARMCLCD is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_GEODE is not set
+# CONFIG_FB_HGA is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_LE80578 is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_N411 is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_OF is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_PS3 is not set
+# CONFIG_FB_PXA is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_SM7XX is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_TILEBLITTING is not set
+# CONFIG_FB_TMIO is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_VIA is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_XGI is not set
+# CONFIG_FCOE is not set
+# CONFIG_FCOE_FNIC is not set
+# CONFIG_FDDI is not set
+# CONFIG_FEALNX is not set
+# CONFIG_FHANDLE is not set
+CONFIG_FIB_RULES=y
+CONFIG_FILE_LOCKING=y
+# CONFIG_FIREWIRE is not set
+# CONFIG_FIREWIRE_NOSY is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FIXED_PHY is not set
+CONFIG_FLATMEM=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_FORCEDETH is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
+# CONFIG_FRAME_POINTER is not set
+CONFIG_FRAME_WARN=1024
+# CONFIG_FREEZER is not set
+# CONFIG_FRONTSWAP is not set
+# CONFIG_FSCACHE is not set
+CONFIG_FSNOTIFY=y
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_FT1000 is not set
+# CONFIG_FTGMAC100 is not set
+# CONFIG_FTL is not set
+# CONFIG_FTMAC100 is not set
+# CONFIG_FTRACE is not set
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_FTR_FIXUP_SELFTEST is not set
+# CONFIG_FUJITSU_TABLET is not set
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_FUSE_FS is not set
+# CONFIG_FUSION is not set
+# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
+# CONFIG_FUSION_SPI is not set
+CONFIG_FUTEX=y
+CONFIG_FW_LOADER=y
+CONFIG_GACT_PROB=y
+# CONFIG_GAMEPORT is not set
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+# CONFIG_GENERIC_CPU_DEVICES is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_IRQ_PROBE=y
+# CONFIG_GENERIC_PWM is not set
+CONFIG_GENERIC_TIME=y
+# CONFIG_GFS2_FS is not set
+# CONFIG_GIGASET_CAPI is not set
+# CONFIG_GIGASET_DEBUG is not set
+# CONFIG_GPIOLIB is not set
+# CONFIG_GPIO_74X164 is not set
+# CONFIG_GPIO_ADP5588 is not set
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_CS5535 is not set
+# CONFIG_GPIO_DEVICE is not set
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_LANGWELL is not set
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_ML_IOH is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_PCH is not set
+# CONFIG_GPIO_RDC321X is not set
+# CONFIG_GPIO_SCH is not set
+# CONFIG_GPIO_SX150X is not set
+# CONFIG_GPIO_SYSFS is not set
+# CONFIG_GPIO_VX855 is not set
+# CONFIG_GPIO_XILINX is not set
+# CONFIG_GREENASIA_FF is not set
+# CONFIG_HAMACHI is not set
+CONFIG_HAMRADIO=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_HARDLOCKUP_DETECTOR is not set
+# CONFIG_HAVE_AOUT is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+# CONFIG_HCALL_STATS is not set
+# CONFIG_HDLC is not set
+# CONFIG_HDLC_CISCO is not set
+# CONFIG_HDLC_FR is not set
+# CONFIG_HDLC_PPP is not set
+# CONFIG_HDLC_RAW is not set
+# CONFIG_HDLC_RAW_ETH is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_HERMES is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HIBERNATION is not set
+# CONFIG_HID is not set
+# CONFIG_HIDRAW is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_ACRUX_FF is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_ELECOM is not set
+# CONFIG_HID_EMS_FF is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_HOLTEK is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_KEYTOUCH is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_LCPOWER is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_PID is not set
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_PRODIKEYS is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_ROCCAT is not set
+# CONFIG_HID_ROCCAT_ARVO is not set
+# CONFIG_HID_ROCCAT_KONE is not set
+# CONFIG_HID_ROCCAT_KONEPLUS is not set
+# CONFIG_HID_ROCCAT_KOVAPLUS is not set
+# CONFIG_HID_ROCCAT_PYRA is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SPEEDLINK is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_WALTOP is not set
+# CONFIG_HID_WIIMOTE is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
+# CONFIG_HID_ACRUX is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_HIGH_RES_TIMERS=y
+# CONFIG_HIPPI is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_HOSTAP_CS is not set
+# CONFIG_HOSTAP_PCI is not set
+# CONFIG_HOSTAP_PLX is not set
+CONFIG_HOTPLUG=y
+# CONFIG_HOTPLUG_CPU is not set
+# CONFIG_HOTPLUG_PCI is not set
+# CONFIG_HP100 is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_HPLAN is not set
+# CONFIG_HPLAN_PLUS is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_HVC_DCC is not set
+# CONFIG_HVC_UDBG is not set
+# CONFIG_HWMON is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWSPINLOCK_OMAP is not set
+CONFIG_HW_PERF_EVENTS=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HW_RANDOM_AMD is not set
+# CONFIG_HW_RANDOM_GEODE is not set
+# CONFIG_HW_RANDOM_INTEL is not set
+# CONFIG_HW_RANDOM_PPC4XX is not set
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_HW_RANDOM_VIA is not set
+# CONFIG_HYPERV is not set
+# CONFIG_HYSDN is not set
+CONFIG_HZ=100
+CONFIG_HZ_100=y
+# CONFIG_HZ_1000 is not set
+# CONFIG_HZ_1024 is not set
+# CONFIG_HZ_128 is not set
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_256 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_48 is not set
+# CONFIG_I2C is not set
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCA is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_CHARDEV is not set
+# CONFIG_I2C_COMPAT is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_EG20T is not set
+# CONFIG_I2C_ELEKTOR is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_IBM_IIC is not set
+# CONFIG_I2C_INTEL_MID is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_MPC is not set
+# CONFIG_I2C_MUX is not set
+# CONFIG_I2C_MV64XXX is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PCA_ISA is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_SCMI is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_SMBUS is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+# CONFIG_I2C_VERSATILE is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_XILINX is not set
+# CONFIG_I2O is not set
+# CONFIG_I82092 is not set
+# CONFIG_I82365 is not set
+# CONFIG_IBM_ASM is not set
+# CONFIG_IBM_EMAC_DEBUG is not set
+# CONFIG_IBM_EMAC_EMAC4 is not set
+# CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_IBM_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_EMAC_RGMII is not set
+# CONFIG_IBM_EMAC_TAH is not set
+# CONFIG_IBM_EMAC_ZMII is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_IDE is not set
+# CONFIG_IDE_GD is not set
+# CONFIG_IDE_PHISON is not set
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+# CONFIG_IDEAPAD_LAPTOP is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_IFB is not set
+# CONFIG_IGB is not set
+# CONFIG_IGBVF is not set
+# CONFIG_IIO is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_IKCONFIG_PROC is not set
+# CONFIG_IMAGE_CMDLINE_HACK is not set
+CONFIG_INET=y
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_TCP_DIAG is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_UDP_DIAG is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INFINIBAND is not set
+# CONFIG_INFTL is not set
+# CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set
+# CONFIG_INITRAMFS_COMPRESSION_GZIP is not set
+# CONFIG_INITRAMFS_COMPRESSION_LZMA is not set
+CONFIG_INITRAMFS_COMPRESSION_NONE=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_INOTIFY_USER=y
+# CONFIG_INPUT is not set
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_APANEL is not set
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_ATLAS_BTNS is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_CMA3000 is not set
+# CONFIG_INPUT_CM109 is not set
+# CONFIG_INPUT_EVBUG is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_GP2A is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+# CONFIG_INPUT_GPIO_TILT_POLLED is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_KXTJ9 is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_MPU3050 is not set
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_PCSPKR is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_UINPUT is not set
+# CONFIG_INPUT_WISTRON_BTNS is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INTEL_IDLE is not set
+# CONFIG_INTEL_MID_PTI is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IP_FIB_TRIE_STATS is not set
+# CONFIG_IP_SET is not set
+# CONFIG_IP1000 is not set
+# CONFIG_IP17XX_PHY is not set
+# CONFIG_IP6_NF_FILTER is not set
+# CONFIG_IP6_NF_IPTABLES is not set
+# CONFIG_IP6_NF_MANGLE is not set
+# CONFIG_IP6_NF_MATCH_AH is not set
+# CONFIG_IP6_NF_MATCH_EUI64 is not set
+# CONFIG_IP6_NF_MATCH_FRAG is not set
+# CONFIG_IP6_NF_MATCH_HL is not set
+# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set
+# CONFIG_IP6_NF_MATCH_MH is not set
+# CONFIG_IP6_NF_MATCH_OPTS is not set
+# CONFIG_IP6_NF_MATCH_RT is not set
+# CONFIG_IP6_NF_MATCH_RPFILTER is not set
+# CONFIG_IP6_NF_QUEUE is not set
+# CONFIG_IP6_NF_RAW is not set
+# CONFIG_IP6_NF_TARGET_HL is not set
+# CONFIG_IP6_NF_TARGET_LOG is not set
+# CONFIG_IP6_NF_TARGET_REJECT is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_IPV6 is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_IPV6_MROUTE_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_ROUTE_INFO is not set
+# CONFIG_IPV6_SIT is not set
+# CONFIG_IPV6_SIT_6RD is not set
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2100_DEBUG is not set
+CONFIG_IPW2100_MONITOR=y
+# CONFIG_IPW2200 is not set
+# CONFIG_IPW2200_DEBUG is not set
+CONFIG_IPW2200_MONITOR=y
+# CONFIG_IPW2200_PROMISCUOUS is not set
+# CONFIG_IPW2200_QOS is not set
+# CONFIG_IPW2200_RADIOTAP is not set
+# CONFIG_IPWIRELESS is not set
+# CONFIG_IPX is not set
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_IP_DCCP is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+# CONFIG_IP_NF_ARPFILTER is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+# CONFIG_IP_NF_ARP_MANGLE is not set
+# CONFIG_IP_NF_FILTER is not set
+# CONFIG_IP_NF_IPTABLES is not set
+# CONFIG_IP_NF_MANGLE is not set
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_RPFILTER is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+# CONFIG_IP_NF_QUEUE is not set
+# CONFIG_IP_NF_RAW is not set
+# CONFIG_IP_NF_SECURITY is not set
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_LOG is not set
+# CONFIG_IP_NF_TARGET_MASQUERADE is not set
+# CONFIG_IP_NF_TARGET_NETMAP is not set
+# CONFIG_IP_NF_TARGET_REDIRECT is not set
+# CONFIG_IP_NF_TARGET_REJECT is not set
+# CONFIG_IP_NF_TARGET_TTL is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+# CONFIG_IP_PIMSM_V1 is not set
+# CONFIG_IP_PIMSM_V2 is not set
+# CONFIG_IP_PNP is not set
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+# CONFIG_IP_SCTP is not set
+# CONFIG_IP_VS is not set
+# CONFIG_IRDA is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_IR_IMON is not set
+# CONFIG_IR_JVC_DECODER is not set
+# CONFIG_IR_LIRC_CODEC is not set
+# CONFIG_IR_MCEUSB is not set
+# CONFIG_IR_NEC_DECODER is not set
+# CONFIG_IR_RC5_DECODER is not set
+# CONFIG_IR_RC5_SZ_DECODER is not set
+# CONFIG_IR_RC6_DECODER is not set
+# CONFIG_IR_SONY_DECODER is not set
+# CONFIG_IR_STREAMZAP is not set
+# CONFIG_ISCSI_BOOT_SYSFS is not set
+# CONFIG_SCSI_CXGB3_ISCSI is not set
+# CONFIG_SCSI_CXGB4_ISCSI is not set
+# CONFIG_ISCSI_TCP is not set
+CONFIG_ISDN=y
+# CONFIG_ISDN_AUDIO is not set
+# CONFIG_ISDN_CAPI is not set
+# CONFIG_ISDN_CAPI_CAPIDRV is not set
+# CONFIG_ISDN_DIVERSION is not set
+# CONFIG_ISDN_DRV_ACT2000 is not set
+# CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON is not set
+# CONFIG_ISDN_DRV_GIGASET is not set
+# CONFIG_ISDN_DRV_HISAX is not set
+# CONFIG_ISDN_DRV_ICN is not set
+# CONFIG_ISDN_DRV_LOOP is not set
+# CONFIG_ISDN_DRV_PCBIT is not set
+# CONFIG_ISDN_DRV_SC is not set
+# CONFIG_ISDN_I4L is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_ISO9660_FS is not set
+# CONFIG_ISS4xx is not set
+# CONFIG_IWL3945 is not set
+# CONFIG_IWLAGN is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_IWMC3200TOP is not set
+# CONFIG_IXGB is not set
+# CONFIG_IXGBE is not set
+# CONFIG_IXGBEVF is not set
+# CONFIG_JBD is not set
+# CONFIG_JBD2_DEBUG is not set
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+# CONFIG_JFFS2_FS_POSIX_ACL is not set
+# CONFIG_JFFS2_FS_SECURITY is not set
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_LZMA=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_JFFS2_SUMMARY=y
+# CONFIG_JFFS2_ZLIB is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_SECURITY is not set
+# CONFIG_JFS_STATISTICS is not set
+# CONFIG_JME is not set
+CONFIG_JOLIET=y
+# CONFIG_JUMP_LABEL is not set
+# CONFIG_KALLSYMS is not set
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_KERNEL_XZ=y
+# CONFIG_KEXEC is not set
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_GPIO_POLLED is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_SAMSUNG is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYS is not set
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_KGDB is not set
+# CONFIG_KMEMCHECK is not set
+# CONFIG_KPROBES is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_KSM is not set
+# CONFIG_KSZ884X_PCI is not set
+# CONFIG_KVM_GUEST is not set
+# CONFIG_L2TP is not set
+# CONFIG_L2TP_ETH is not set
+# CONFIG_L2TP_IP is not set
+# CONFIG_L2TP_V3 is not set
+# CONFIG_LANMEDIA is not set
+# CONFIG_LANTIQ is not set
+# CONFIG_LAPB is not set
+# CONFIG_LASAT is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_LBDAF=y
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_LEDS_BD2802 is not set
+CONFIG_LEDS_CLASS=y
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_GPIO is not set
+CONFIG_LEDS_GPIO_OF=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_INTEL_SS4200 is not set
+# CONFIG_LEDS_LM3530 is not set
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_LP5523 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_NET5501 is not set
+# CONFIG_LEDS_OT200 is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_RENESAS_TPU is not set
+# CONFIG_LEDS_TCA6507 is not set
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_IDE_DISK is not set
+# CONFIG_LEDS_TRIGGER_MORSE is not set
+CONFIG_LEDS_TRIGGER_NETDEV=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+# CONFIG_LEDS_TRIGGER_USBDEV is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_LIB80211 is not set
+# CONFIG_LIB80211_CRYPT_CCMP is not set
+# CONFIG_LIB80211_CRYPT_TKIP is not set
+# CONFIG_LIB80211_CRYPT_WEP is not set
+# CONFIG_LIB80211_DEBUG is not set
+# CONFIG_LIBCRC32C is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_LIBERTAS_USB is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_LIBIPW_DEBUG is not set
+# CONFIG_LINE6_USB is not set
+# CONFIG_LIRC_STAGING is not set
+# CONFIG_LKDTM is not set
+CONFIG_LLC=y
+# CONFIG_LLC2 is not set
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_LOCKD is not set
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_LOCKD_V4=y
+# CONFIG_LOCKUP_DETECTOR is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_LOGFS is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGO is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_LOONGSON_MC146818 is not set
+# CONFIG_LP486E is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_LP_CONSOLE is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_LTPC is not set
+# CONFIG_LXT_PHY is not set
+CONFIG_LZMA_COMPRESS=y
+CONFIG_LZMA_DECOMPRESS=y
+# CONFIG_M25PXX_PREFER_SMALL_SECTOR_ERASE is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_MACH_DECSTATION is not set
+# CONFIG_MACH_JAZZ is not set
+# CONFIG_MACH_JZ4740 is not set
+# CONFIG_MACH_LOONGSON is not set
+# CONFIG_MACH_NO_WESTBRIDGE is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MACH_VR41XX is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_MACVTAP is not set
+# CONFIG_MAC_EMUMOUSEBTN is not set
+# CONFIG_MAC_PARTITION is not set
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
+# CONFIG_MD is not set
+# CONFIG_MD_FAULTY is not set
+# CONFIG_MDIO_BITBANG is not set
+# CONFIG_MEDIA_ATTACH is not set
+# CONFIG_MEDIA_CONTROLLER is not set
+# CONFIG_MEDIA_SUPPORT is not set
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_MEMORY_FAILURE is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_AAT2870_CORE is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_CS5535 is not set
+# CONFIG_MFD_DA9052_SPI is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_MFD_MC13XXX is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_S5M_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_STMPE is not set
+CONFIG_MFD_SUPPORT=y
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS65910 is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MG_DISK is not set
+# CONFIG_MICREL_KS8995MA is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_MIGRATION is not set
+CONFIG_MII=y
+# CONFIG_MIKROTIK_RB532 is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_MINIX_FS_NATIVE_ENDIAN is not set
+# CONFIG_MIPS_ALCHEMY is not set
+# CONFIG_MIPS_COBALT is not set
+# CONFIG_MIPS_FPU_EMU is not set
+# CONFIG_MIPS_MALTA is not set
+# CONFIG_MIPS_SIM is not set
+CONFIG_MISC_DEVICES=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_MISDN is not set
+# CONFIG_MISDN_AVMFRITZ is not set
+# CONFIG_MISDN_HFCPCI is not set
+# CONFIG_MISDN_HFCUSB is not set
+# CONFIG_MISDN_INFINEON is not set
+# CONFIG_MISDN_NETJET is not set
+# CONFIG_MISDN_SPEEDFAX is not set
+# CONFIG_MISDN_W6692 is not set
+# CONFIG_MKISS is not set
+# CONFIG_MLX4_EN is not set
+# CONFIG_MLX4_CORE is not set
+# CONFIG_MMC is not set
+# CONFIG_MMC_ARMMMCI is not set
+# CONFIG_MMC_BLOCK is not set
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_MMC_BLOCK_MINORS=8
+# CONFIG_MMC_CB710 is not set
+# CONFIG_MMC_CLKGATE is not set
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_DW is not set
+# CONFIG_MMC_MVSDIO is not set
+# CONFIG_MMC_S3C is not set
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_SDHCI_PXAV2 is not set
+# CONFIG_MMC_SDHCI_PXAV3 is not set
+# CONFIG_MMC_SDRICOH_CS is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MMC_TEST is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+# CONFIG_MMC_USHC is not set
+# CONFIG_MMC_VIA_SDMMC is not set
+# CONFIG_MMC_VUB300 is not set
+CONFIG_MMU=y
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_MOUSE_INPORT is not set
+# CONFIG_MOUSE_LOGIBM is not set
+# CONFIG_MOUSE_PC110PAD is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+# CONFIG_MSDOS_FS is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_MSI_BITMAP_SELFTEST is not set
+CONFIG_MTD=y
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_AR7_PARTS is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_MTD_BLOCK2MTD is not set
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
+CONFIG_MTD_CFI_NOSWAP=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_CHAR=y
+# CONFIG_MTD_CMDLINE_PARTS is not set
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_DOCG3 is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_GPIO_ADDR is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_JEDECPROBE is not set
+# CONFIG_MTD_LPDDR is not set
+# CONFIG_MTD_M25P80 is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_MYLOADER_PARTS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_NAND_AMS_DELTA is not set
+# CONFIG_MTD_NAND_AR934X is not set
+# CONFIG_MTD_NAND_ATMEL is not set
+# CONFIG_MTD_NAND_AU1550 is not set
+# CONFIG_MTD_NAND_AUTCPU12 is not set
+# CONFIG_MTD_NAND_BCH is not set
+# CONFIG_MTD_NAND_BCM_UMI is not set
+# CONFIG_MTD_NAND_BF5XX is not set
+# CONFIG_MTD_NAND_CAFE is not set
+# CONFIG_MTD_NAND_CM_X270 is not set
+# CONFIG_MTD_NAND_CS553X is not set
+# CONFIG_MTD_NAND_DAVINCI is not set
+# CONFIG_MTD_NAND_DENALI is not set
+CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR=0xff108018
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_ECC is not set
+# CONFIG_MTD_NAND_ECC_BCH is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_FSL_ELBC is not set
+# CONFIG_MTD_NAND_FSL_UPM is not set
+# CONFIG_MTD_NAND_FSMC is not set
+# CONFIG_MTD_NAND_GPIO is not set
+# CONFIG_MTD_NAND_GPMI_NAND is not set
+# CONFIG_MTD_NAND_H1900 is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_JZ4740 is not set
+# CONFIG_MTD_NAND_MPC5121_NFC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_MXC is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_NDFC is not set
+# CONFIG_MTD_NAND_NOMADIK is not set
+# CONFIG_MTD_NAND_NUC900 is not set
+# CONFIG_MTD_NAND_OMAP2 is not set
+# CONFIG_MTD_NAND_ORION is not set
+# CONFIG_MTD_NAND_PASEMI is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_NAND_PPCHAMELEONEVB is not set
+# CONFIG_MTD_NAND_PXA3xx is not set
+# CONFIG_MTD_NAND_RB4XX is not set
+# CONFIG_MTD_NAND_RB750 is not set
+# CONFIG_MTD_NAND_RICOH is not set
+# CONFIG_MTD_NAND_RTC_FROM4 is not set
+# CONFIG_MTD_NAND_S3C2410 is not set
+# CONFIG_MTD_NAND_SHARPSL is not set
+# CONFIG_MTD_NAND_SH_FLCTL is not set
+# CONFIG_MTD_NAND_SOCRATES is not set
+# CONFIG_MTD_NAND_SPIA is not set
+# CONFIG_MTD_NAND_TMIO is not set
+# CONFIG_MTD_NAND_TXX9NDFMC is not set
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_ONENAND is not set
+# CONFIG_MTD_OOPS is not set
+# CONFIG_MTD_OTP is not set
+# CONFIG_MTD_PCI is not set
+# CONFIG_MTD_PCMCIA is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_PLATRAM is not set
+# CONFIG_MTD_LATCH_ADDR is not set
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_RAM is not set
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
+# CONFIG_MTD_ROM is not set
+CONFIG_MTD_ROOTFS_ROOT_DEV=y
+CONFIG_MTD_ROOTFS_SPLIT=y
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SWAP is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_UBI is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+# CONFIG_MV643XX_ETH is not set
+# CONFIG_MVSWITCH_PHY is not set
+# CONFIG_MWAVE is not set
+# CONFIG_MWL8K is not set
+# CONFIG_MYRI10GE is not set
+# CONFIG_NAMESPACES is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_NE2000 is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_NEC_MARKEINS is not set
+CONFIG_NET=y
+# CONFIG_NETCONSOLE is not set
+CONFIG_NETDEVICES=y
+CONFIG_NETDEV_1000=y
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_NETFILTER_ADVANCED is not set
+# CONFIG_NETFILTER_DEBUG is not set
+# CONFIG_NETFILTER_NETLINK is not set
+# CONFIG_NETFILTER_NETLINK_ACCT is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
+# CONFIG_NETFILTER_TPROXY is not set
+# CONFIG_NETFILTER_XTABLES is not set
+# CONFIG_NETFILTER_XT_CONNMARK is not set
+# CONFIG_NETFILTER_XT_MARK is not set
+# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
+# CONFIG_NETFILTER_XT_MATCH_CPU is not set
+# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ECN is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_HELPER is not set
+# CONFIG_NETFILTER_XT_MATCH_HL is not set
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+# CONFIG_NETFILTER_XT_MATCH_LAYER7 is not set
+# CONFIG_NETFILTER_XT_MATCH_LAYER7_DEBUG is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_MAC is not set
+# CONFIG_NETFILTER_XT_MATCH_MARK is not set
+# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set
+# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+# CONFIG_NETFILTER_XT_MATCH_SOCKET is not set
+# CONFIG_NETFILTER_XT_MATCH_STATE is not set
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set
+# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
+# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+# CONFIG_NETFILTER_XT_TARGET_HL is not set
+# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+# CONFIG_NETFILTER_XT_TARGET_MARK is not set
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+# CONFIG_NETFILTER_XT_TARGET_TEE is not set
+# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set
+# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NETROM is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETXEN_NIC is not set
+# CONFIG_NET_9P is not set
+# CONFIG_NET_ACT_CSUM is not set
+# CONFIG_NET_ACT_GACT is not set
+# CONFIG_NET_ACT_IPT is not set
+# CONFIG_NET_ACT_MIRRED is not set
+# CONFIG_NET_ACT_NAT is not set
+# CONFIG_NET_ACT_PEDIT is not set
+# CONFIG_NET_ACT_POLICE is not set
+# CONFIG_NET_ACT_SIMP is not set
+# CONFIG_NET_ACT_SKBEDIT is not set
+CONFIG_NET_CADENCE=y
+# CONFIG_NET_CALXEDA_XGMAC is not set
+CONFIG_NET_CLS=y
+# CONFIG_NET_CLS_ACT is not set
+# CONFIG_NET_CLS_BASIC is not set
+# CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_CLS_FW is not set
+CONFIG_NET_CLS_IND=y
+# CONFIG_NET_CLS_ROUTE4 is not set
+# CONFIG_NET_CLS_RSVP is not set
+# CONFIG_NET_CLS_RSVP6 is not set
+# CONFIG_NET_CLS_TCINDEX is not set
+# CONFIG_NET_CLS_U32 is not set
+CONFIG_NET_CORE=y
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_NET_DSA_MV88E6123_61_65 is not set
+# CONFIG_NET_DSA_MV88E6131 is not set
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
+# CONFIG_NET_DSA_TAG_DSA is not set
+# CONFIG_NET_DSA_TAG_EDSA is not set
+# CONFIG_NET_EMATCH is not set
+# CONFIG_NET_EMATCH_CMP is not set
+# CONFIG_NET_EMATCH_META is not set
+# CONFIG_NET_EMATCH_NBYTE is not set
+CONFIG_NET_EMATCH_STACK=32
+# CONFIG_NET_EMATCH_TEXT is not set
+# CONFIG_NET_EMATCH_U32 is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_NET_FC is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_NET_IPGRE_BROADCAST=y
+# CONFIG_NET_IPGRE_DEMUX is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_ISA is not set
+# CONFIG_NET_KEY is not set
+# CONFIG_NET_KEY_MIGRATE is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+CONFIG_NET_PCI=y
+# CONFIG_NET_PCMCIA is not set
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_POCKET is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_NET_SB1000 is not set
+CONFIG_NET_SCHED=y
+# CONFIG_NET_SCH_ATM is not set
+# CONFIG_NET_SCH_CBQ is not set
+# CONFIG_NET_SCH_CHOKE is not set
+# CONFIG_NET_SCH_CODEL is not set
+# CONFIG_NET_SCH_DRR is not set
+# CONFIG_NET_SCH_DSMARK is not set
+# CONFIG_NET_SCH_ESFQ is not set
+CONFIG_NET_SCH_ESFQ_NFCT=y
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_NET_SCH_FQ_CODEL is not set
+# CONFIG_NET_SCH_GRED is not set
+# CONFIG_NET_SCH_HFSC is not set
+# CONFIG_NET_SCH_HTB is not set
+# CONFIG_NET_SCH_INGRESS is not set
+# CONFIG_NET_SCH_MULTIQ is not set
+# CONFIG_NET_SCH_MQPRIO is not set
+# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_PRIO is not set
+# CONFIG_NET_SCH_QFQ is not set
+# CONFIG_NET_SCH_RED is not set
+# CONFIG_NET_SCH_SFB is not set
+# CONFIG_NET_SCH_SFQ is not set
+# CONFIG_NET_SCH_TBF is not set
+# CONFIG_NET_SCH_TEQL is not set
+# CONFIG_NET_TEAM is not set
+# CONFIG_NET_TULIP is not set
+CONFIG_NET_VENDOR_3COM=y
+CONFIG_NET_VENDOR_8390=y
+CONFIG_NET_VENDOR_ADAPTEC=y
+CONFIG_NET_VENDOR_ALTEON=y
+CONFIG_NET_VENDOR_AMD=y
+CONFIG_NET_VENDOR_ATHEROS=y
+CONFIG_NET_VENDOR_BROADCOM=y
+CONFIG_NET_VENDOR_BROCADE=y
+CONFIG_NET_VENDOR_CHELSIO=y
+CONFIG_NET_VENDOR_CIRRUS=y
+CONFIG_NET_VENDOR_CISCO=y
+CONFIG_NET_VENDOR_DEC=y
+CONFIG_NET_VENDOR_DLINK=y
+CONFIG_NET_VENDOR_EMULEX=y
+CONFIG_NET_VENDOR_EXAR=y
+CONFIG_NET_VENDOR_FARADAY=y
+CONFIG_NET_VENDOR_FREESCALE=y
+CONFIG_NET_VENDOR_FUJITSU=y
+CONFIG_NET_VENDOR_HP=y
+CONFIG_NET_VENDOR_IBM=y
+CONFIG_NET_VENDOR_INTEL=y
+CONFIG_NET_VENDOR_I825XX=y
+CONFIG_NET_VENDOR_MARVELL=y
+CONFIG_NET_VENDOR_MELLANOX=y
+CONFIG_NET_VENDOR_MICREL=y
+CONFIG_NET_VENDOR_MICROCHIP=y
+CONFIG_NET_VENDOR_MYRI=y
+CONFIG_NET_VENDOR_NATSEMI=y
+CONFIG_NET_VENDOR_NVIDIA=y
+CONFIG_NET_VENDOR_OKI=y
+CONFIG_NET_VENDOR_QLOGIC=y
+CONFIG_NET_VENDOR_REALTEK=y
+CONFIG_NET_VENDOR_RDC=y
+CONFIG_NET_VENDOR_SEEQ=y
+CONFIG_NET_VENDOR_SILAN=y
+CONFIG_NET_VENDOR_SIS=y
+CONFIG_NET_VENDOR_SMSC=y
+CONFIG_NET_VENDOR_STMICRO=y
+CONFIG_NET_VENDOR_SUN=y
+CONFIG_NET_VENDOR_TEHUTI=y
+CONFIG_NET_VENDOR_TOSHIBA=y
+CONFIG_NET_VENDOR_TI=y
+CONFIG_NET_VENDOR_VIA=y
+CONFIG_NET_VENDOR_XILINX=y
+CONFIG_NET_VENDOR_XIRCOM=y
+CONFIG_NEW_LEDS=y
+# CONFIG_NFC_DEVICES is not set
+# CONFIG_NFSD is not set
+# CONFIG_NFSD_DEPRECATED is not set
+# CONFIG_NFSD_V2_ACL is not set
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+# CONFIG_NFSD_V4 is not set
+# CONFIG_NFS_ACL_SUPPORT is not set
+CONFIG_NFS_COMMON=y
+# CONFIG_NFS_FS is not set
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_V4_1 is not set
+# CONFIG_NFTL is not set
+# CONFIG_NFC is not set
+# CONFIG_NF_CONNTRACK is not set
+# CONFIG_NF_CONNTRACK_AMANDA is not set
+# CONFIG_NF_CONNTRACK_EVENTS is not set
+# CONFIG_NF_CONNTRACK_FTP is not set
+# CONFIG_NF_CONNTRACK_H323 is not set
+# CONFIG_NF_CONNTRACK_IPV4 is not set
+# CONFIG_NF_CONNTRACK_IPV6 is not set
+# CONFIG_NF_CONNTRACK_IRC is not set
+# CONFIG_NF_CONNTRACK_MARK is not set
+# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
+# CONFIG_NF_CONNTRACK_PPTP is not set
+CONFIG_NF_CONNTRACK_PROCFS=y
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+# CONFIG_NF_CONNTRACK_RTSP is not set
+# CONFIG_NF_CONNTRACK_SANE is not set
+# CONFIG_NF_CONNTRACK_SIP is not set
+# CONFIG_NF_CONNTRACK_TFTP is not set
+# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
+# CONFIG_NF_CONNTRACK_SNMP is not set
+# CONFIG_NF_CONNTRACK_ZONES is not set
+# CONFIG_NF_CT_NETLINK is not set
+# CONFIG_NF_CT_PROTO_DCCP is not set
+# CONFIG_NF_CT_PROTO_GRE is not set
+# CONFIG_NF_CT_PROTO_SCTP is not set
+# CONFIG_NF_CT_PROTO_UDPLITE is not set
+# CONFIG_NF_DEFRAG_IPV4 is not set
+# CONFIG_NF_NAT is not set
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_FTP is not set
+# CONFIG_NF_NAT_H323 is not set
+# CONFIG_NF_NAT_IRC is not set
+# CONFIG_NF_NAT_NEEDED is not set
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_PROTO_GRE is not set
+# CONFIG_NF_NAT_RTSP is not set
+# CONFIG_NF_NAT_SIP is not set
+# CONFIG_NF_NAT_SNMP_BASIC is not set
+# CONFIG_NF_NAT_TFTP is not set
+# CONFIG_NI52 is not set
+# CONFIG_NI65 is not set
+# CONFIG_NILFS2_FS is not set
+# CONFIG_NIU is not set
+CONFIG_NLATTR=y
+# CONFIG_NLM_XLP_BOARD is not set
+# CONFIG_NLM_XLR_BOARD is not set
+# CONFIG_NLS is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_NORTEL_HERMES is not set
+# CONFIG_NOZOMI is not set
+# CONFIG_NO_BOOTMEM is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_NO_IOPORT is not set
+# CONFIG_NS83820 is not set
+# CONFIG_NTFS_DEBUG is not set
+# CONFIG_NTFS_FS is not set
+# CONFIG_NTFS_RW is not set
+# CONFIG_NVRAM is not set
+# CONFIG_NV_TCO is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
+# CONFIG_N_GSM is not set
+# CONFIG_OABI_COMPAT is not set
+# CONFIG_OBS600 is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_OCF_BENCH is not set
+# CONFIG_OCF_C7108 is not set
+# CONFIG_OCF_CRYPTOCTEON is not set
+# CONFIG_OCF_EP80579 is not set
+# CONFIG_OCF_HIFN is not set
+# CONFIG_OCF_HIFNHIPP is not set
+# CONFIG_OCF_IXP4XX is not set
+# CONFIG_OCF_KIRKWOOD is not set
+# CONFIG_OCF_OCF is not set
+# CONFIG_OCF_OCFNULL is not set
+# CONFIG_OCF_SAFE is not set
+# CONFIG_OCF_TALITOS is not set
+# CONFIG_OCF_UBSEC_SSB is not set
+# CONFIG_OC_ETM is not set
+# CONFIG_OF_SELFTEST is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_OPENVSWITCH is not set
+# CONFIG_ORION_WATCHDOG is not set
+# CONFIG_OSF_PARTITION is not set
+CONFIG_OVERLAYFS_FS=y
+# CONFIG_P54_COMMON is not set
+CONFIG_PACKET=y
+# CONFIG_PAGE_POISONING is not set
+# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
+CONFIG_PAGE_SIZE_4KB=y
+# CONFIG_PAGE_SIZE_64KB is not set
+# CONFIG_PAGE_SIZE_8KB is not set
+# CONFIG_PANEL is not set
+# CONFIG_PANTHERLORD_FF is not set
+# CONFIG_PARPORT is not set
+# CONFIG_PARPORT_1284 is not set
+# CONFIG_PARPORT_AX88796 is not set
+# CONFIG_PARPORT_PC is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARASAN_CF is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_ATP867X is not set
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CS5535 is not set
+# CONFIG_PATA_CS5536 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_ISAPNP is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PCMCIA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_PLATFORM is not set
+# CONFIG_PATA_QDI is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SCH is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+# CONFIG_PATA_WINBOND_VLB is not set
+# CONFIG_PC300TOO is not set
+# CONFIG_PCCARD is not set
+# CONFIG_PCH_GBE is not set
+# CONFIG_PCH_PHUB is not set
+# CONFIG_PCI200SYN is not set
+# CONFIG_PCIEAER_INJECT is not set
+# CONFIG_PCIEASPM is not set
+# CONFIG_PCIE_ECRC is not set
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_PCI_ATMEL is not set
+# CONFIG_PCI_CNB20LE_QUIRK is not set
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_DISABLE_COMMON_QUIRKS is not set
+# CONFIG_PCI_HERMES is not set
+# CONFIG_PCI_IOV is not set
+# CONFIG_PCI_MSI is not set
+# CONFIG_PCI_PASID is not set
+# CONFIG_PCI_PRI is not set
+CONFIG_PCI_QUIRKS=y
+# CONFIG_PCI_STUB is not set
+CONFIG_PCI_SYSCALL=y
+# CONFIG_PCMCIA is not set
+# CONFIG_PCMCIA_3C574 is not set
+# CONFIG_PCMCIA_3C589 is not set
+# CONFIG_PCMCIA_AHA152X is not set
+# CONFIG_PCMCIA_ATMEL is not set
+# CONFIG_PCMCIA_AXNET is not set
+# CONFIG_PCMCIA_DEBUG is not set
+# CONFIG_PCMCIA_FDOMAIN is not set
+# CONFIG_PCMCIA_FMVJ18X is not set
+# CONFIG_PCMCIA_HERMES is not set
+# CONFIG_PCMCIA_LOAD_CIS is not set
+# CONFIG_PCMCIA_NINJA_SCSI is not set
+# CONFIG_PCMCIA_NMCLAN is not set
+# CONFIG_PCMCIA_PCNET is not set
+# CONFIG_PCMCIA_QLOGIC is not set
+# CONFIG_PCMCIA_RAYCS is not set
+# CONFIG_PCMCIA_SMC91C92 is not set
+# CONFIG_PCMCIA_SPECTRUM is not set
+# CONFIG_PCMCIA_SYM53C500 is not set
+# CONFIG_PCMCIA_WL3501 is not set
+# CONFIG_PCMCIA_XIRC2PS is not set
+# CONFIG_PCMCIA_XIRCOM is not set
+# CONFIG_PCNET32 is not set
+# CONFIG_PCSPKR_PLATFORM is not set
+# CONFIG_PD6729 is not set
+# CONFIG_PDC_ADMA is not set
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PHANTOM is not set
+# CONFIG_PHONE is not set
+# CONFIG_PHONET is not set
+# CONFIG_PHYLIB is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+# CONFIG_PID_NS is not set
+# CONFIG_PINCTRL is not set
+# CONFIG_PLAT_SPEAR is not set
+# CONFIG_PLIP is not set
+# CONFIG_PLX_HERMES is not set
+# CONFIG_PM is not set
+# CONFIG_PM_DEVFREQ is not set
+# CONFIG_PM_RUNTIME is not set
+# CONFIG_PMBUS is not set
+# CONFIG_PMC_MSP is not set
+# CONFIG_PMC_YOSEMITE is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PNX8550_JBS is not set
+# CONFIG_PNX8550_STB810 is not set
+# CONFIG_POHMELFS is not set
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_POWERTV is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PPC4xx_GPIO is not set
+# CONFIG_PPC_16K_PAGES is not set
+# CONFIG_PPC_256K_PAGES is not set
+CONFIG_PPC_4K_PAGES=y
+# CONFIG_PPC_64K_PAGES is not set
+# CONFIG_PPC_DISABLE_WERROR is not set
+# CONFIG_PPC_EMULATED_STATS is not set
+# CONFIG_PPC_EPAPR_HV_BYTECHAN is not set
+# CONFIG_PPP is not set
+# CONFIG_PPPOATM is not set
+# CONFIG_PPPOE is not set
+# CONFIG_PPPOL2TP is not set
+# CONFIG_PPP_ASYNC is not set
+# CONFIG_PPP_BSDCOMP is not set
+# CONFIG_PPP_DEFLATE is not set
+CONFIG_PPP_FILTER=y
+# CONFIG_PPP_MPPE is not set
+CONFIG_PPP_MULTILINK=y
+# CONFIG_PPP_SYNC_TTY is not set
+# CONFIG_PPS is not set
+# CONFIG_PPTP is not set
+# CONFIG_PREEMPT is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_PRINT_STACK_DEPTH=64
+CONFIG_PRINTK=y
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_PRISM2_USB is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_PSB6970_PHY is not set
+# CONFIG_PROBE_INITRD_HEADER is not set
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_PROC_SYSCTL=y
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILING is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_PROVE_RCU is not set
+# CONFIG_PSTORE is not set
+# CONFIG_QLA3XXX is not set
+# CONFIG_QLCNIC is not set
+# CONFIG_QLGE is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTA_DEBUG is not set
+# CONFIG_QUOTACTL is not set
+# CONFIG_R3964 is not set
+# CONFIG_R6040 is not set
+# CONFIG_R8169 is not set
+# CONFIG_R8187SE is not set
+# CONFIG_R8712U is not set
+# CONFIG_RTLLIB is not set
+# CONFIG_RTS5139 is not set
+# CONFIG_RTS_PSTOR is not set
+# CONFIG_RADIO_ADAPTERS is not set
+# CONFIG_RADIO_AZTECH is not set
+# CONFIG_RADIO_CADET is not set
+# CONFIG_RADIO_GEMTEK is not set
+# CONFIG_RADIO_MAXIRADIO is not set
+# CONFIG_RADIO_RTRACK is not set
+# CONFIG_RADIO_RTRACK2 is not set
+# CONFIG_RADIO_SF16FMI is not set
+# CONFIG_RADIO_SF16FMR2 is not set
+# CONFIG_RADIO_TERRATEC is not set
+# CONFIG_RADIO_TRUST is not set
+# CONFIG_RADIO_TYPHOON is not set
+# CONFIG_RADIO_ZOLTRIX is not set
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_RAMOOPS is not set
+# CONFIG_RAPIDIO is not set
+# CONFIG_RAR_REGISTER is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_RC_CORE is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_RCU_FAST_NO_HZ is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_RC_MAP is not set
+# CONFIG_RDS is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_GZIP is not set
+CONFIG_RD_LZMA=y
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_REDWOOD is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_GPIO is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_RELAY is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_RFKILL is not set
+# CONFIG_RFKILL_INPUT is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_ROSE is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_RTC_CLASS is not set
+# CONFIG_RTC_DEBUG is not set
+# CONFIG_RTC_DRV_AU1XXX is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_DS3232 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_EM3027 is not set
+# CONFIG_RTC_DRV_EP93XX is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_GENERIC is not set
+# CONFIG_RTC_DRV_ISL12022 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_MPC5121 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_PL030 is not set
+# CONFIG_RTC_DRV_PL031 is not set
+# CONFIG_RTC_DRV_PS3 is not set
+# CONFIG_RTC_DRV_PT7C4338 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_RTC7301 is not set
+# CONFIG_RTC_DRV_RV3029C2 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_TEST is not set
+# CONFIG_RTC_DRV_V3020 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_LIB=y
+# CONFIG_RTL8180 is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_RTL8192E is not set
+# CONFIG_RTL8192U is not set
+# CONFIG_RTL8306_PHY is not set
+# CONFIG_RTL8366RB_PHY is not set
+# CONFIG_RTL8366S_PHY is not set
+# CONFIG_RTL8366_SMI is not set
+# CONFIG_RTL8366_SMI_DEBUG_FS is not set
+# CONFIG_RTL8367_PHY is not set
+# CONFIG_RTS_PSTOR is not set
+CONFIG_RT_MUTEXES=y
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_RUNTIME_DEBUG is not set
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_S2IO is not set
+# CONFIG_SAMPLES is not set
+# CONFIG_SATA_ACARD_AHCI is not set
+# CONFIG_SATA_AHCI is not set
+# CONFIG_SATA_AHCI_PLATFORM is not set
+# CONFIG_SATA_DWC is not set
+# CONFIG_SATA_FSL is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_SATA_PMP is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIL24 is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_SBC_FITPC2_WATCHDOG is not set
+# CONFIG_SBE_2T3E3 is not set
+# CONFIG_SC92031 is not set
+# CONFIG_SCC is not set
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_SCHED_AUTOGROUP is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_SCHED_MC is not set
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+# CONFIG_SCHED_SMT is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_BFA_FC is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
+# CONFIG_SCSI_BNX2X_FCOE is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_HPSA is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_ISCI is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_LOGGING is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
+# CONFIG_SCSI_LPFC is not set
+CONFIG_SCSI_MOD=y
+# CONFIG_SCSI_MPT2SAS is not set
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_MVSAS is not set
+# CONFIG_SCSI_MVSAS_DEBUG is not set
+# CONFIG_SCSI_MVUMI is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PM8001 is not set
+# CONFIG_SCSI_PMCRAID is not set
+CONFIG_SCSI_PROC_FS=y
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_ULTRASTOR is not set
+CONFIG_SCSI_WAIT_SCAN=m
+# CONFIG_SDIO_UART is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+# CONFIG_SEEQ8005 is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_ACPI_POWER is not set
+# CONFIG_SENSORS_AD7314 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADS1015 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_SENSORS_APPLESMC is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_ATK0110 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_CORETEMP is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FAM15H_POWER is not set
+# CONFIG_SENSORS_FSCHMD is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_GPIO_FAN is not set
+# CONFIG_SENSORS_HDAPS is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_K10TEMP is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_NTC_THERMISTOR is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SCH56XX_COMMON is not set
+# CONFIG_SENSORS_SCH5627 is not set
+# CONFIG_SENSORS_SCH5636 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VIA_CPUTEMP is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_ACCENT is not set
+# CONFIG_SERIAL_8250_BOCA is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_CS is not set
+# CONFIG_SERIAL_8250_DW is not set
+# CONFIG_SERIAL_8250_EXAR_ST16C554 is not set
+# CONFIG_SERIAL_8250_EXTENDED is not set
+# CONFIG_SERIAL_8250_FOURPORT is not set
+# CONFIG_SERIAL_8250_HUB6 is not set
+CONFIG_SERIAL_8250_NR_UARTS=2
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
+# CONFIG_SERIAL_IFX6X60 is not set
+# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX3107 is not set
+# CONFIG_SERIAL_MFD_HSU is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_SERIAL_OF_PLATFORM is not set
+# CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL is not set
+# CONFIG_SERIAL_PCH_UART is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_UARTLITE is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+# CONFIG_SERIO is not set
+# CONFIG_SERIO_AMBAKMI is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_SERIO_LIBPS2 is not set
+# CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_PCIPS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_SFC is not set
+# CONFIG_SFI is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_SGI_IP22 is not set
+# CONFIG_SGI_IP27 is not set
+# CONFIG_SGI_IP28 is not set
+# CONFIG_SGI_IP32 is not set
+# CONFIG_SGI_PARTITION is not set
+CONFIG_SHMEM=y
+# CONFIG_SIBYTE_BIGSUR is not set
+# CONFIG_SIBYTE_CARMEL is not set
+# CONFIG_SIBYTE_CRHINE is not set
+# CONFIG_SIBYTE_CRHONE is not set
+# CONFIG_SIBYTE_LITTLESUR is not set
+# CONFIG_SIBYTE_RHONE is not set
+# CONFIG_SIBYTE_SENTOSA is not set
+# CONFIG_SIBYTE_SWARM is not set
+CONFIG_SIGNALFD=y
+# CONFIG_SIMPLE_GPIO is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SIS900 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_SKY2_DEBUG is not set
+CONFIG_SLAB=y
+CONFIG_SLABINFO=y
+# CONFIG_SLHC is not set
+# CONFIG_SLICOSS is not set
+# CONFIG_SLIP is not set
+# CONFIG_SLOB is not set
+# CONFIG_SLUB is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_SMARTJOYPLUS_FF is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMC9194 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_SMP is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_SMSC9420 is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_SND is not set
+# CONFIG_SND_AC97_POWER_SAVE is not set
+# CONFIG_SND_AD1816A is not set
+# CONFIG_SND_AD1848 is not set
+# CONFIG_SND_AD1889 is not set
+# CONFIG_SND_ADLIB is not set
+# CONFIG_SND_ALI5451 is not set
+# CONFIG_SND_ALOOP is not set
+# CONFIG_SND_ALS100 is not set
+# CONFIG_SND_ALS300 is not set
+# CONFIG_SND_ALS4000 is not set
+# CONFIG_SND_ARM is not set
+# CONFIG_SND_ASIHPI is not set
+# CONFIG_SND_ATIIXP is not set
+# CONFIG_SND_ATIIXP_MODEM is not set
+# CONFIG_SND_ATMEL_AC97C is not set
+# CONFIG_SND_ATMEL_SOC is not set
+# CONFIG_SND_AU8810 is not set
+# CONFIG_SND_AU8820 is not set
+# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AW2 is not set
+# CONFIG_SND_AZT2320 is not set
+# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_BT87X is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_CMI8330 is not set
+# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_CS4231 is not set
+# CONFIG_SND_CS4236 is not set
+# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CS5530 is not set
+# CONFIG_SND_CS5535AUDIO is not set
+# CONFIG_SND_CTXFI is not set
+# CONFIG_SND_DARLA20 is not set
+# CONFIG_SND_DARLA24 is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_ECHO3G is not set
+# CONFIG_SND_EMU10K1 is not set
+# CONFIG_SND_EMU10K1X is not set
+# CONFIG_SND_ENS1370 is not set
+# CONFIG_SND_ENS1371 is not set
+# CONFIG_SND_ES1688 is not set
+# CONFIG_SND_ES18XX is not set
+# CONFIG_SND_ES1938 is not set
+# CONFIG_SND_ES1968 is not set
+# CONFIG_SND_FIREWIRE is not set
+# CONFIG_SND_FM801 is not set
+# CONFIG_SND_GINA20 is not set
+# CONFIG_SND_GINA24 is not set
+# CONFIG_SND_GUSCLASSIC is not set
+# CONFIG_SND_GUSEXTREME is not set
+# CONFIG_SND_GUSMAX is not set
+# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
+# CONFIG_SND_HRTIMER is not set
+# CONFIG_SND_HWDEP is not set
+# CONFIG_SND_ICE1712 is not set
+# CONFIG_SND_ICE1724 is not set
+# CONFIG_SND_INDIGO is not set
+# CONFIG_SND_INDIGODJ is not set
+# CONFIG_SND_INDIGODJX is not set
+# CONFIG_SND_INDIGOIO is not set
+# CONFIG_SND_INDIGOIOX is not set
+# CONFIG_SND_INTEL8X0 is not set
+# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_INTERWAVE is not set
+# CONFIG_SND_INTERWAVE_STB is not set
+# CONFIG_SND_ISA is not set
+# CONFIG_SND_KIRKWOOD_SOC is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LAYLA20 is not set
+# CONFIG_SND_LAYLA24 is not set
+# CONFIG_SND_LOLA is not set
+# CONFIG_SND_LX6464ES is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIA is not set
+# CONFIG_SND_MIPS is not set
+# CONFIG_SND_MIRO is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_MONA is not set
+# CONFIG_SND_MPC52xx_SOC_EFIKA is not set
+# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_MTS64 is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_OPL3SA2 is not set
+# CONFIG_SND_OPTI92X_AD1848 is not set
+# CONFIG_SND_OPTI92X_CS4231 is not set
+# CONFIG_SND_OPTI93X is not set
+CONFIG_SND_OSSEMUL=y
+# CONFIG_SND_OXYGEN is not set
+CONFIG_SND_PCI=y
+# CONFIG_SND_PCM is not set
+# CONFIG_SND_PCMCIA is not set
+# CONFIG_SND_PCM_OSS is not set
+CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_PDAUDIOCF is not set
+# CONFIG_SND_PORTMAN2X4 is not set
+# CONFIG_SND_PPC is not set
+# CONFIG_SND_RAWMIDI is not set
+# CONFIG_SND_RIPTIDE is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
+# CONFIG_SND_RTCTIMER is not set
+# CONFIG_SND_SB16 is not set
+# CONFIG_SND_SB8 is not set
+# CONFIG_SND_SBAWE is not set
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_SIS7019 is not set
+# CONFIG_SND_SOC is not set
+# CONFIG_SND_SOC_CACHE_LZO is not set
+# CONFIG_SND_SOC_MPC5200_I2S is not set
+# CONFIG_SND_SOC_MPC5200_AC97 is not set
+# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_SSCAPE is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_TIMER is not set
+# CONFIG_SND_TRIDENT is not set
+CONFIG_SND_USB=y
+# CONFIG_SND_USB_6FIRE is not set
+# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_USB_UA101 is not set
+# CONFIG_SND_USB_US122L is not set
+# CONFIG_SND_USB_USX2Y is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VIA82XX is not set
+# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VIRTUOSO is not set
+# CONFIG_SND_VX222 is not set
+# CONFIG_SND_VXPOCKET is not set
+# CONFIG_SND_WAVEFRONT is not set
+# CONFIG_SND_YMFPCI is not set
+# CONFIG_SNI_RM is not set
+# CONFIG_SOC_CAMERA is not set
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_SOLO6X10 is not set
+# CONFIG_SONYPI is not set
+# CONFIG_SONY_LAPTOP is not set
+# CONFIG_SOUND is not set
+# CONFIG_SOUND_PRIME is not set
+# CONFIG_SP5100_TCO is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+# CONFIG_SPARSE_IRQ is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+# CONFIG_SPEAKUP is not set
+# CONFIG_SPI is not set
+# CONFIG_SPINLOCK_TEST is not set
+# CONFIG_SPI_ALTERA is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_BUTTERFLY is not set
+# CONFIG_SPI_DEBUG is not set
+# CONFIG_SPI_DESIGNWARE is not set
+# CONFIG_SPI_FSL_SPI is not set
+# CONFIG_SPI_FSL_ESPI is not set
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_GPIO_OLD is not set
+# CONFIG_SPI_LM70_LLP is not set
+# CONFIG_SPI_MASTER is not set
+# CONFIG_SPI_MPC52xx is not set
+# CONFIG_SPI_MPC52xx_PSC is not set
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_ORION is not set
+# CONFIG_SPI_PL022 is not set
+# CONFIG_SPI_PPC4xx is not set
+# CONFIG_SPI_PXA2XX is not set
+# CONFIG_SPI_PXA2XX_PCI is not set
+# CONFIG_SPI_RAMIPS is not set
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+# CONFIG_SPI_TOPCLIFF_PCH is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_XWAY is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_SQUASHFS_LZO is not set
+# CONFIG_SQUASHFS_XATTR is not set
+CONFIG_SQUASHFS_XZ=y
+# CONFIG_SQUASHFS_ZLIB is not set
+# CONFIG_SSB is not set
+# CONFIG_SSB_DEBUG is not set
+# CONFIG_SSB_PCMCIAHOST is not set
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB_SDIOHOST is not set
+# CONFIG_SSB_SILENT is not set
+# CONFIG_SSFDC is not set
+CONFIG_STACKTRACE_SUPPORT=y
+# CONFIG_STACK_TRACER is not set
+CONFIG_STAGING=y
+# CONFIG_STAGING_MEDIA is not set
+CONFIG_STANDALONE=y
+CONFIG_STDBINUTILS=y
+# CONFIG_STE10XP is not set
+# CONFIG_STMMAC_ETH is not set
+CONFIG_STP=y
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_STUB_POULSBO is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_SUNRPC is not set
+# CONFIG_SUNRPC_GSS is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_SUSPEND is not set
+CONFIG_SWAP=y
+# CONFIG_SWCONFIG is not set
+# CONFIG_SWCONFIG_LEDS is not set
+# CONFIG_SYNCLINK_CS is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_SYSCTL=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_SYSFS=y
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_TARGET_CORE is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_TC35815 is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_TCIC is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+CONFIG_TCP_CONG_CUBIC=y
+# CONFIG_TCP_CONG_HSTCP is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_TCP_CONG_HYBLA is not set
+# CONFIG_TCP_CONG_ILLINOIS is not set
+# CONFIG_TCP_CONG_LP is not set
+# CONFIG_TCP_CONG_SCALABLE is not set
+# CONFIG_TCP_CONG_VEGAS is not set
+# CONFIG_TCP_CONG_VENO is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_YEAH is not set
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_TEHUTI is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_TEST_LIST_SORT is not set
+# CONFIG_TEST_POWER is not set
+CONFIG_TEXTSEARCH=y
+# CONFIG_TEXTSEARCH_BM is not set
+# CONFIG_TEXTSEARCH_FSM is not set
+# CONFIG_TEXTSEARCH_KMP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_THRUSTMASTER_FF is not set
+CONFIG_TICK_ONESHOT=y
+# CONFIG_TIFM_CORE is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_TIMB_DMA is not set
+CONFIG_TIMERFD=y
+# CONFIG_TIMER_STATS is not set
+CONFIG_TINY_RCU=y
+# CONFIG_TIPC is not set
+# CONFIG_TI_DAC7512 is not set
+# CONFIG_TI_ST is not set
+# CONFIG_TLAN is not set
+# CONFIG_TMD_HERMES is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set
+# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_S3C2410 is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_WM97XX is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_TR is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+# CONFIG_TRACE_SINK is not set
+CONFIG_TRACING_SUPPORT=y
+CONFIG_TRAD_SIGNALS=y
+# CONFIG_TRANZPORT is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_TUN is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TYPHOON is not set
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_UDF_FS is not set
+CONFIG_UDF_NLS=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_UFS_FS is not set
+# CONFIG_UIO is not set
+# CONFIG_ULTRA is not set
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_UNIX=y
+# CONFIG_UNIX_DIAG is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_USB is not set
+# CONFIG_USBIP_CORE is not set
+# CONFIG_USBPCWATCHDOG is not set
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_ADUTUX is not set
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARMLINUX=y
+# CONFIG_USB_ATM is not set
+CONFIG_USB_BELKIN=y
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_CXACRU is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_DWC3 is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_DUMMY_HCD is not set
+# CONFIG_USB_DWC_OTG_LPM is not set
+# CONFIG_USB_EHCI_MV is not set
+# CONFIG_USB_EHCI_HCD_PPC_OF is not set
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_ENESTORAGE is not set
+# CONFIG_USB_EPSON2888 is not set
+# CONFIG_USB_ET61X251 is not set
+CONFIG_USB_EZUSB=y
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_FUNCTIONFS is not set
+# CONFIG_USB_FUSB300 is not set
+# CONFIG_USB_R8A66597 is not set
+# CONFIG_USB_GADGET is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+# CONFIG_USB_GL860 is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_GSPCA is not set
+# CONFIG_USB_GSPCA_BENQ is not set
+# CONFIG_USB_GSPCA_CONEX is not set
+# CONFIG_USB_GSPCA_CPIA1 is not set
+# CONFIG_USB_GSPCA_ETOMS is not set
+# CONFIG_USB_GSPCA_FINEPIX is not set
+# CONFIG_USB_GSPCA_JEILINJ is not set
+# CONFIG_USB_GSPCA_JL2005BCD is not set
+# CONFIG_USB_GSPCA_KINECT is not set
+# CONFIG_USB_GSPCA_KONICA is not set
+# CONFIG_USB_GSPCA_MARS is not set
+# CONFIG_USB_GSPCA_MR97310A is not set
+# CONFIG_USB_GSPCA_NW80X is not set
+# CONFIG_USB_GSPCA_OV519 is not set
+# CONFIG_USB_GSPCA_OV534 is not set
+# CONFIG_USB_GSPCA_OV534_9 is not set
+# CONFIG_USB_GSPCA_PAC207 is not set
+# CONFIG_USB_GSPCA_PAC7302 is not set
+# CONFIG_USB_GSPCA_PAC7311 is not set
+# CONFIG_USB_GSPCA_SE401 is not set
+# CONFIG_USB_GSPCA_SN9C2028 is not set
+# CONFIG_USB_GSPCA_SN9C20X is not set
+# CONFIG_USB_GSPCA_SONIXB is not set
+# CONFIG_USB_GSPCA_SONIXJ is not set
+# CONFIG_USB_GSPCA_SPCA1528 is not set
+# CONFIG_USB_GSPCA_SPCA500 is not set
+# CONFIG_USB_GSPCA_SPCA501 is not set
+# CONFIG_USB_GSPCA_SPCA505 is not set
+# CONFIG_USB_GSPCA_SPCA506 is not set
+# CONFIG_USB_GSPCA_SPCA508 is not set
+# CONFIG_USB_GSPCA_SPCA561 is not set
+# CONFIG_USB_GSPCA_SQ905 is not set
+# CONFIG_USB_GSPCA_SQ905C is not set
+# CONFIG_USB_GSPCA_SQ930X is not set
+# CONFIG_USB_GSPCA_STK014 is not set
+# CONFIG_USB_GSPCA_STV0680 is not set
+# CONFIG_USB_GSPCA_SUNPLUS is not set
+# CONFIG_USB_GSPCA_T613 is not set
+# CONFIG_USB_GSPCA_TOPRO is not set
+# CONFIG_USB_GSPCA_TV8532 is not set
+# CONFIG_USB_GSPCA_VC032X is not set
+# CONFIG_USB_GSPCA_VICAM is not set
+# CONFIG_USB_GSPCA_XIRLINK_CIT is not set
+# CONFIG_USB_GSPCA_ZC3XX is not set
+# CONFIG_USB_G_ACM_MS is not set
+# CONFIG_USB_G_DBGP is not set
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_MULTI is not set
+# CONFIG_USB_G_NCM is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_G_WEBCAM is not set
+# CONFIG_USB_HID is not set
+# CONFIG_USB_HIDDEV is not set
+# CONFIG_USB_HSO is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_IBMCAM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_USB_IP_COMMON is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_KC2190 is not set
+# CONFIG_USB_KONICAWC is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LIBUSUAL is not set
+# CONFIG_USB_M5602 is not set
+# CONFIG_USB_M66592 is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_MOUSE is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_MV_UDC is not set
+# CONFIG_USB_NET2272 is not set
+# CONFIG_USB_NET_AX8817X is not set
+# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_EEM is not set
+# CONFIG_USB_NET_CDC_NCM is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_CX82310_ETH is not set
+# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_GL620A is not set
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_NET_KALMIA is not set
+# CONFIG_USB_NET_MCS7830 is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_PLUSB is not set
+# CONFIG_USB_NET_RNDIS_HOST is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
+# CONFIG_USB_NET_ZAURUS is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_OHCI_HCD_PCI is not set
+# CONFIG_USB_OHCI_HCD_PPC_OF is not set
+# CONFIG_USB_OHCI_HCD_PPC_OF_BE is not set
+# CONFIG_USB_OHCI_HCD_PPC_OF_LE is not set
+# CONFIG_USB_OHCI_HCD_PPC_SOC is not set
+# CONFIG_USB_OHCI_HCD_SSB is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_PWC_INPUT_EVDEV is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_RENESAS_USBHS is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_S2255 is not set
+# CONFIG_USB_SE401 is not set
+# CONFIG_USB_SERIAL is not set
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_CP210X is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_MOS7715_PARPORT is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_QCAUX is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_QUATECH2 is not set
+# CONFIG_USB_SERIAL_QUATECH_USB2 is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+CONFIG_USB_SERIAL_SAFE_PADDED=y
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
+# CONFIG_USB_SERIAL_SSU100 is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_ZIO is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_SIERRA_NET is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_SN9C102 is not set
+# CONFIG_USB_SPEEDTOUCH is not set
+# CONFIG_USB_STKWEBCAM is not set
+# CONFIG_USB_STORAGE is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_ENE_UB6250 is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_REALTEK is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STV06XX is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_SWITCH_FSA9480 is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_TMC is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_UAS is not set
+# CONFIG_USB_UEAGLEATM is not set
+# CONFIG_USB_ULPI is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_USB_USS720 is not set
+# CONFIG_USB_VIDEO_CLASS is not set
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+# CONFIG_USB_VL600 is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_WHCI_HCD is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+# CONFIG_USB_XHCI_HCD is not set
+# CONFIG_USB_XUSBATM is not set
+# CONFIG_USB_YUREX is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_ZR364XX is not set
+# CONFIG_USE_GENERIC_SMP_HELPERS is not set
+# CONFIG_USE_OF is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_UWB is not set
+# CONFIG_V4L_MEM2MEM_DRIVERS is not set
+# CONFIG_VETH is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VGA_ARB is not set
+# CONFIG_VGA_SWITCHEROO is not set
+# CONFIG_VIA_RHINE is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_VIDEO_ADV7170 is not set
+# CONFIG_VIDEO_ADV7175 is not set
+# CONFIG_VIDEO_ADV7180 is not set
+# CONFIG_VIDEO_ADV7343 is not set
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_AK881X is not set
+# CONFIG_VIDEO_BT819 is not set
+# CONFIG_VIDEO_BT848 is not set
+# CONFIG_VIDEO_BT856 is not set
+# CONFIG_VIDEO_BT866 is not set
+# CONFIG_VIDEO_BWQCAM is not set
+# CONFIG_VIDEO_CAFE_CCIC is not set
+# CONFIG_VIDEO_CAPTURE_DRIVERS is not set
+# CONFIG_VIDEO_CPIA is not set
+# CONFIG_VIDEO_CQCAM is not set
+# CONFIG_VIDEO_CS5345 is not set
+# CONFIG_VIDEO_CS53L32A is not set
+# CONFIG_VIDEO_CX231XX is not set
+# CONFIG_VIDEO_CX2341X is not set
+# CONFIG_VIDEO_CX25840 is not set
+# CONFIG_VIDEO_CX88 is not set
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_VIDEO_DT3155 is not set
+# CONFIG_VIDEO_EM28XX is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+# CONFIG_VIDEO_GO7007 is not set
+# CONFIG_VIDEO_HDPVR is not set
+# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
+# CONFIG_VIDEO_HEXIUM_GEMINI is not set
+# CONFIG_VIDEO_HEXIUM_ORION is not set
+# CONFIG_VIDEO_IR_I2C is not set
+# CONFIG_VIDEO_IVTV is not set
+# CONFIG_VIDEO_KS0127 is not set
+# CONFIG_VIDEO_M52790 is not set
+# CONFIG_VIDEO_MEDIA is not set
+# CONFIG_VIDEO_MSP3400 is not set
+# CONFIG_VIDEO_MT9V011 is not set
+# CONFIG_VIDEO_MXB is not set
+# CONFIG_VIDEO_NOON010PC30 is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_PMS is not set
+# CONFIG_VIDEO_PVRUSB2 is not set
+# CONFIG_VIDEO_SAA6588 is not set
+# CONFIG_VIDEO_SAA7110 is not set
+# CONFIG_VIDEO_SAA711X is not set
+# CONFIG_VIDEO_SAA7127 is not set
+# CONFIG_VIDEO_SAA7134 is not set
+# CONFIG_VIDEO_SAA717X is not set
+# CONFIG_VIDEO_SAA7185 is not set
+# CONFIG_VIDEO_SAA7191 is not set
+# CONFIG_VIDEO_SH_MOBILE_CEU is not set
+# CONFIG_VIDEO_SR030PC30 is not set
+# CONFIG_VIDEO_TCM825X is not set
+# CONFIG_VIDEO_TDA7432 is not set
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
+# CONFIG_VIDEO_THS7303 is not set
+# CONFIG_VIDEO_TIMBERDALE is not set
+# CONFIG_VIDEO_TLV320AIC23B is not set
+# CONFIG_VIDEO_TM6000 is not set
+# CONFIG_VIDEO_TVAUDIO is not set
+# CONFIG_VIDEO_TVP514X is not set
+# CONFIG_VIDEO_TVP5150 is not set
+# CONFIG_VIDEO_TVP7002 is not set
+# CONFIG_VIDEO_UPD64031A is not set
+# CONFIG_VIDEO_UPD64083 is not set
+# CONFIG_VIDEO_USBVISION is not set
+# CONFIG_VIDEO_V4L2 is not set
+# CONFIG_VIDEO_V4L2_COMMON is not set
+# CONFIG_VIDEO_VIVI is not set
+# CONFIG_VIDEO_VP27SMPX is not set
+# CONFIG_VIDEO_VPX3220 is not set
+# CONFIG_VIDEO_WM8739 is not set
+# CONFIG_VIDEO_WM8775 is not set
+# CONFIG_VIDEO_ZORAN is not set
+# CONFIG_VIRQ_DEBUG is not set
+# CONFIG_VIRTIO_BALLOON is not set
+# CONFIG_VIRTIO_MMIO is not set
+# CONFIG_VIRTIO_PCI is not set
+# CONFIG_VIRT_DRIVERS is not set
+# CONFIG_VIRTUALIZATION is not set
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_VITESSE_PHY is not set
+CONFIG_VLAN_8021Q=y
+# CONFIG_VLAN_8021Q_GVRP is not set
+# CONFIG_VME_BUS is not set
+# CONFIG_VMSPLIT_1G is not set
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_2G_OPT is not set
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_3G_OPT is not set
+# CONFIG_VMXNET3 is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_VMWARE_PVSCSI is not set
+# CONFIG_VORTEX is not set
+# CONFIG_VT is not set
+# CONFIG_VT6655 is not set
+# CONFIG_VT6656 is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_VXGE is not set
+# CONFIG_W1 is not set
+# CONFIG_W1_CON is not set
+# CONFIG_W1_MASTER_DS1WM is not set
+# CONFIG_W1_MASTER_DS2482 is not set
+# CONFIG_W1_MASTER_DS2490 is not set
+# CONFIG_W1_MASTER_GPIO is not set
+# CONFIG_W1_MASTER_MATROX is not set
+# CONFIG_W1_SLAVE_BQ27000 is not set
+# CONFIG_W1_SLAVE_DS2408 is not set
+# CONFIG_W1_SLAVE_DS2423 is not set
+# CONFIG_W1_SLAVE_DS2431 is not set
+# CONFIG_W1_SLAVE_DS2433 is not set
+# CONFIG_W1_SLAVE_DS2760 is not set
+# CONFIG_W1_SLAVE_DS2780 is not set
+# CONFIG_W1_SLAVE_SMEM is not set
+# CONFIG_W1_SLAVE_THERM is not set
+# CONFIG_W83627HF_WDT is not set
+# CONFIG_W83697HF_WDT is not set
+# CONFIG_W83877F_WDT is not set
+# CONFIG_W83977F_WDT is not set
+# CONFIG_WAN is not set
+# CONFIG_WANXL is not set
+# CONFIG_WAN_ROUTER is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_CORE is not set
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+# CONFIG_WD80x3 is not set
+# CONFIG_WDTPCI is not set
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PRIV=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_SPY=y
+# CONFIG_WIMAX is not set
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+# CONFIG_WIRELESS_EXT_SYSFS is not set
+# CONFIG_WLAGS49_H2 is not set
+# CONFIG_WLAGS49_H25 is not set
+CONFIG_WLAN=y
+# CONFIG_WR_PPMC is not set
+# CONFIG_X25 is not set
+CONFIG_XFRM=y
+# CONFIG_XFRM_IPCOMP is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFS_DEBUG is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XILINX_EMACLITE is not set
+# CONFIG_XILINX_LL_TEMAC is not set
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_XMON is not set
+# CONFIG_XVMALLOC is not set
+# CONFIG_XZ_DEC is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_BCJ is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_SPARC is not set
+# CONFIG_XZ_DEC_TEST is not set
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_YAFFS_FS is not set
+# CONFIG_YAM is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_YENTA is not set
+# CONFIG_YENTA_O2 is not set
+# CONFIG_YENTA_RICOH is not set
+# CONFIG_YENTA_TI is not set
+# CONFIG_YENTA_TOSHIBA is not set
+# CONFIG_ZD1211RW is not set
+# CONFIG_ZD1211RW_DEBUG is not set
+# CONFIG_ZEROPLUS_FF is not set
+CONFIG_ZISOFS=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_ZLIB_INFLATE=y
+# CONFIG_ZNET is not set
+CONFIG_ZONE_DMA=y
+CONFIG_ZONE_DMA_FLAG=1
+# CONFIG_ZRAM is not set
+# CONFIG_SIGMA is not set
diff --git a/target/linux/generic/files/Documentation/networking/adm6996.txt b/target/linux/generic/files/Documentation/networking/adm6996.txt
new file mode 100644
index 000000000..ab59f1df0
--- /dev/null
+++ b/target/linux/generic/files/Documentation/networking/adm6996.txt
@@ -0,0 +1,110 @@
+-------
+
+ADM6996FC / ADM6996M switch chip driver
+
+
+1. General information
+
+ This driver supports the FC and M models only. The ADM6996F and L are
+ completely different chips.
+
+ Support for the FC model is extremely limited at the moment. There is no VLAN
+ support as of yet. The driver will not offer an swconfig interface for the FC
+ chip.
+
+1.1 VLAN IDs
+
+ It is possible to define 16 different VLANs. Every VLAN has an identifier, its
+ VLAN ID. It is easiest if you use at most VLAN IDs 0-15. In that case, the
+ swconfig based configuration is very straightforward. To define two VLANs with
+ IDs 4 and 5, you can invoke, for example:
+
+ # swconfig dev ethX vlan 4 set ports '0 1t 2 5t'
+ # swconfig dev ethX vlan 5 set ports '0t 1t 5t'
+
+ The swconfig framework will automatically invoke 'port Y set pvid Z' for every
+ port that is an untagged member of VLAN Y, setting its Primary VLAN ID. In
+ this example, ports 0 and 2 would get "pvid 4". The Primary VLAN ID of a port
+ is the VLAN ID associated with untagged packets coming in on that port.
+
+ But if you wish to use VLAN IDs outside the range 0-15, this automatic
+ behaviour of the swconfig framework becomes a problem. The 16 VLANs that
+ swconfig can configure on the ADM6996 also have a "vid" setting. By default,
+ this is the same as the number of the VLAN entry, to make the simple behaviour
+ above possible. To still support a VLAN with a VLAN ID higher than 15
+ (presumably because you are in a network where such VLAN IDs are already in
+ use), you can change the "vid" setting of the VLAN to anything in the range
+ 0-1023. But suppose you did the following:
+
+ # swconfig dev ethX vlan 0 set vid 998
+ # swconfig dev ethX vlan 0 set ports '0 2 5t'
+
+ Now the swconfig framework will issue 'port 0 set pvid 0' and 'port 2 set pvid
+ 0'. But the "pvid" should be set to 998, so you are responsible for manually
+ fixing this!
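+
+ For example, the Primary VLAN IDs can be corrected manually with commands
+ along these lines, using the same syntax as above:
+
+ # swconfig dev ethX port 0 set pvid 998
+ # swconfig dev ethX port 2 set pvid 998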
+
+1.2 VLAN filtering
+
+ The switch is configured to apply source port filtering. This means that
+ packets are only accepted when the port the packets came in on is a member of
+ the VLAN the packet should go to.
+
+ Only membership of a VLAN is tested, it does not matter whether it is a tagged
+ or untagged membership.
+
+ For untagged packets, the destination VLAN is the Primary VLAN ID of the
+ incoming port. So if the PVID of a port is 0, but that port is not a member of
+ the VLAN with ID 0, this means that untagged packets on that port are dropped.
+ This can be used as a roundabout way of dropping untagged packets from a port,
+ a mode often referred to as "Admit only tagged packets".
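+
+ A hypothetical configuration of this kind, reusing the syntax shown earlier,
+ makes port 0 a tagged-only member of VLAN 4 while pointing its PVID at a
+ VLAN it is not a member of:
+
+ # swconfig dev ethX vlan 4 set ports '0t 5t'
+ # swconfig dev ethX port 0 set pvid 0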
+
+1.3 Reset
+
+ The two supported chip models do not have a software-initiated reset. When the
+ driver is initialised, as well as when the 'reset' swconfig option is invoked,
+ the driver will set those registers it knows about and supports to the correct
+ default value. But there are a lot of registers in the chip that the driver
+ does not support. If something changed those registers, invoking 'reset' or
+ performing a warm reboot might still leave the chip in a "broken" state. Only
+ a hardware reset will bring it back in the default state.
+
+2. Technical details on PHYs and the ADM6996
+
+ From the viewpoint of the Linux kernel, it is common that an Ethernet adapter
+ can be seen as a separate MAC entity and a separate PHY entity. The PHY entity
+ can be queried and set through registers accessible via an MDIO bus. A PHY
+ normally has a single address on that bus, in the range 0 through 31.
+
+ The ADM6996 has special-purpose registers in the range of PHYs 0 through 10.
+ Even though all these registers control a single ADM6996 chip, the Linux
+ kernel treats this as 11 separate PHYs. The driver will bind to these
+ addresses to prevent a different PHY driver from binding and corrupting these
+ registers.
+
+ What Linux sees as the PHY on address 0 is meant for the Ethernet MAC
+ connected to the CPU port of the ADM6996 switch chip (port 5). This is the
+ Ethernet MAC you will use to send and receive data through the switch.
+
+ The PHYs at addresses 16 through 20 map to the PHYs on ports 0 through 4 of
+ the switch chip. These can be accessed with the Generic PHY driver, as the
+ registers have the common layout.
+
+ If a second Ethernet MAC on your board is wired to the port 4 PHY, that MAC
+ needs to bind to PHY address 20 for the port to work correctly.
+
+ The ADM6996 switch driver will reset the ports 0 through 3 on startup and when
+ 'reset' is invoked. This could clash with a different PHY driver if the kernel
+ binds a PHY driver to address 16 through 19.
+
+ If Linux binds a PHY on addresses 1 through 10 to an Ethernet MAC, the ADM6996
+ driver will simply always report a connected 100 Mbit/s full-duplex link for
+ that PHY, and provide no other functionality. This is most likely not what you
+ want. So if you see a message like the following in your log:
+
+ ethX: PHY overlaps ADM6996, providing fixed PHY yy.
+
+ it is most likely an indication that ethX will not work properly, and your
+ kernel needs to be configured to attach a different PHY to that Ethernet MAC.
+
+ Controlling the mapping between MACs and PHYs is usually done in platform- or
+ board-specific fixup code. The ADM6996 driver has no influence over this.
diff --git a/target/linux/generic/files/Documentation/pwm.txt b/target/linux/generic/files/Documentation/pwm.txt
new file mode 100644
index 000000000..2c41ca586
--- /dev/null
+++ b/target/linux/generic/files/Documentation/pwm.txt
@@ -0,0 +1,260 @@
+ Generic PWM Device API
+
+ February 1, 2010
+ Bill Gatliff
+ <bgat@billgatliff.com>
+
+
+
+The code in drivers/pwm and include/linux/pwm/ implements an API for
+applications involving pulse-width-modulation signals. This document
+describes how the API implementation facilitates both PWM-generating
+devices, and users of those devices.
+
+
+
+Motivation
+
+The primary goals for implementing the "generic PWM API" are to
+consolidate the various PWM implementations within a consistent and
+redundancy-reducing framework, and to facilitate the use of
+hotpluggable PWM devices.
+
+Previous PWM-related implementations within the Linux kernel achieved
+their consistency via cut-and-paste, but did not need to (and didn't)
+facilitate more than one PWM-generating device within the system---
+hotplug or otherwise. The Generic PWM Device API might be most
+appropriately viewed as an update to those implementations, rather
+than a complete rewrite.
+
+
+
+Challenges
+
+One of the difficulties in implementing a generic PWM framework is the
+fact that pulse-width-modulation applications involve real-world
+signals, which often must be carefully managed to prevent destruction
+of hardware that is linked to those signals. A DC motor that
+experiences a brief interruption in the PWM signal controlling it
+might destructively overheat; it could suddenly change speed, losing
+synchronization with a sensor; it could even suddenly change direction
+or torque, breaking the mechanical device connected to it.
+
+(A generic PWM device framework is not directly responsible for
+preventing the above scenarios: that responsibility lies with the
+hardware designer, and the application and driver authors. But it
+must, to the greatest extent possible, make it easy to avoid such
+problems.)
+
+A generic PWM device framework must accommodate the substantial
+differences between available PWM-generating hardware devices, without
+becoming sub-optimal for any of them.
+
+Finally, a generic PWM device framework must be relatively
+lightweight, computationally speaking. Some PWM users demand
+high-speed outputs, plus the ability to regulate those outputs
+quickly. A device framework must be able to "keep up" with such
+hardware, while still leaving time to do real work.
+
+The Generic PWM Device API is an attempt to meet all of the above
+requirements. At its initial publication, the API was already in use
+managing small DC motors, sensors and solenoids through a
+custom-designed, optically-isolated H-bridge driver.
+
+
+
+Functional Overview
+
+The Generic PWM Device API framework is implemented in
+include/linux/pwm/pwm.h and drivers/pwm/pwm.c. The functions therein
+use information from pwm_device, pwm_channel and pwm_channel_config
+structures to invoke services in PWM peripheral device drivers.
+Consult drivers/pwm/atmel-pwm.c for an example driver.
+
+There are two classes of adopters of the PWM framework:
+
+ "Users" -- those wishing to employ the API merely to produce PWM
+ signals; once they have identified the appropriate physical output
+ on the platform in question, they don't care about the details of
+ the underlying hardware
+
+ "Driver authors" -- those wishing to bind devices that can generate
+ PWM signals to the Generic PWM Device API, so that the services of
+ those devices become available to users. Assuming the hardware can
+ support the needs of a user, driver authors don't care about the
+ details of the user's application
+
+Generally speaking, users will first invoke pwm_request() to obtain a
+handle to a PWM device. They will then pass that handle to functions
+like pwm_duty_ns() and pwm_period_ns() to set the duty cycle and
+period of the PWM signal, respectively. They will also invoke
+pwm_start() and pwm_stop() to turn the signal on and off.
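+
+A minimal sketch of that sequence is shown below. The exact prototypes live
+in include/linux/pwm/pwm.h; the argument lists used here are assumptions for
+illustration only, and error handling is kept to a minimum:
+
+    static int my_consumer_init(void)
+    {
+        struct pwm_channel *ch;
+
+        /* channel name plus a requester label (assumed arguments) */
+        ch = pwm_request("atmel_pwmc.0:0", "my-consumer");
+        if (!ch)
+            return -EBUSY;
+
+        pwm_period_ns(ch, 1000000);   /* 1 ms period (1 kHz)           */
+        pwm_duty_ns(ch, 250000);      /* active for 25% of each period */
+        pwm_start(ch);                /* output begins toggling        */
+
+        /* ... later: pwm_stop(ch); pwm_free(ch); */
+        return 0;
+    }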
+
+The Generic PWM API framework also provides a sysfs interface to PWM
+devices, which is adequate for basic application needs and testing.
+
+Driver authors fill out a pwm_device structure, which describes the
+capabilities of the PWM hardware being constructed--- including the
+number of distinct output "channels" the peripheral offers. They then
+invoke pwm_register() (usually from within their device's probe()
+handler) to make the PWM API aware of their device. The framework
+will call back to the methods described in the pwm_device structure as
+users begin to configure and utilize the hardware.
+
+Note that PWM signals can be produced by a variety of peripherals,
+beyond the true "PWM hardware" offered by many system-on-chip devices.
+Other possibilities include timer/counters with compare-match
+capabilities, carefully-programmed synchronous serial ports
+(e.g. SPI), and GPIO pins driven by kernel interval timers. With a
+proper pwm_device structure, these devices and pseudo-devices can all
+be accommodated by the Generic PWM Device API framework.
+
+
+
+Using the API to Generate PWM Signals -- Basic Functions for Users
+
+
+pwm_request() -- Returns a pwm_channel pointer, which is subsequently
+passed to the other user-related PWM functions. Once requested, a PWM
+channel is marked as in-use and subsequent requests prior to
+pwm_free() will fail.
+
+The names used to refer to PWM devices are defined by driver authors.
+Typically they are platform device bus identifiers, and this
+convention is encouraged for consistency.
+
+
+pwm_free() -- Marks a PWM channel as no longer in use. The PWM device
+is stopped before it is released by the API.
+
+
+pwm_period_ns() -- Specifies the PWM signal's period, in nanoseconds.
+
+
+pwm_duty_ns() -- Specifies the PWM signal's active duration, in nanoseconds.
+
+
+pwm_duty_percent() -- Specifies the PWM signal's active duration, as a
+percentage of the current period of the signal. NOTE: this value is
+not recalculated if the period of the signal is subsequently changed.
+
+
+pwm_start(), pwm_stop() -- Turns the PWM signal on and off. Except
+where stated otherwise by a driver author, signals are stopped at the
+end of the current period, at which time the output is set to its
+inactive state.
+
+
+pwm_polarity() -- Defines whether the PWM signal output's active
+region is "1" or "0". A 10% duty-cycle, polarity=1 signal will
+conventionally be at 5V (or 3.3V, or 1000V, or whatever the platform
+hardware does) for 10% of the period. The same configuration of a
+polarity=0 signal will be at 5V (or 3.3V, or ...) for 90% of the
+period.
+
+
+
+Using the API to Generate PWM Signals -- Advanced Functions
+
+
+pwm_config() -- Passes a pwm_channel_config structure to the
+associated device driver. This function is invoked by pwm_start(),
+pwm_duty_ns(), etc. and is one of two main entry points to the PWM
+driver for the hardware being used. The configuration change is
+guaranteed atomic if multiple configuration changes are specified.
+This function might sleep, depending on what the device driver has to
+do to satisfy the request. All PWM device drivers must support this
+entry point.
+
+
+pwm_config_nosleep() -- Passes a pwm_channel_config structure to the
+associated device driver. If the driver must sleep in order to
+implement the requested configuration change, -EWOULDBLOCK is
+returned. Users may call this function from interrupt handlers, for
+example. This is the other main entry point into the PWM hardware
+driver, but not all device drivers support this entry point.
+
+
+pwm_synchronize(), pwm_unsynchronize() -- "Synchronizes" two or more
+PWM channels, if the underlying hardware permits. (If it doesn't, the
+framework is intended to support emulating this capability, but that
+emulation is not yet implemented.) Synchronized channels will start and stop
+simultaneously when any single channel in the group is started or
+stopped. Use pwm_unsynchronize(..., NULL) to completely detach a
+channel from any other synchronized channels. By default, all PWM
+channels are unsynchronized.
+
+
+pwm_set_handler() -- Defines an end-of-period callback. The indicated
+function will be invoked in a worker thread at the end of each PWM
+period, and can subsequently invoke pwm_config(), etc. Must be used
+with extreme care for high-speed PWM outputs. Set the handler
+function to NULL to un-set the handler.
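+
+
+For example, two channels obtained with pwm_request() might be grouped
+and later detached as follows. This is a sketch only; apart from the
+NULL usage of pwm_unsynchronize() noted above, the argument lists are
+assumptions for illustration:
+
+    pwm_synchronize(ch0, ch1);        /* group the two channels */
+    pwm_start(ch0);                   /* ch1 starts at the same time */
+
+    /* ... */
+
+    pwm_stop(ch0);                    /* ch1 stops as well */
+    pwm_unsynchronize(ch0, NULL);     /* detach ch0 from all groups */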
+
+
+
+Implementing a PWM Device API Driver -- Functions for Driver Authors
+
+
+Fill out the appropriate fields in a pwm_device structure, and submit
+to pwm_register():
+
+
+bus_id -- the plain-text name of the device. Users will bind to a
+channel on the device using this name plus the channel number. For
+example, the Atmel PWMC's bus_id is "atmel_pwmc", the same as used by
+the platform device driver (recommended). The first device registered
+thereby receives bus_id "atmel_pwmc.0", which is what you put in
+pwm_device.bus_id. Channels are then named "atmel_pwmc.0:[0-3]".
+(Hint: just use pdev->dev.bus_id in your probe() method).
+
+
+nchan -- the number of distinct output channels provided by the device.
+
+
+request -- (optional) Invoked each time a user requests a channel.
+Use to turn on clocks, clean up register states, etc. The framework
+takes care of device locking/unlocking; you will see only successful
+requests.
+
+
+free -- (optional) Callback for each time a user relinquishes a
+channel. The framework will have already stopped the channel,
+unsynchronized it and removed any end-of-period handler. Use this to
+turn off clocks, etc. as necessary.
+
+
+synchronize, unsynchronize -- (optional) Callbacks to
+synchronize/unsynchronize channels. Some devices provide this
+capability in hardware; for others, it can be emulated (see
+atmel_pwmc.c's sync_mask for an example).
+
+
+set_callback -- (optional) Invoked when a user requests a handler. If
+the hardware supports an end-of-period interrupt, invoke the function
+indicated during your interrupt handler. The callback function itself
+is always internal to the API, and does not map directly to the user's
+callback function.
+
+
+config -- Invoked to change the device configuration, always from a
+sleep-capable context. All the changes indicated must be performed
+atomically, ideally synchronized to an end-of-period event (so that
+you avoid short or long output pulses). You may sleep, etc. as
+necessary within this function.
+
+
+config_nosleep -- (optional) Invoked to change device configuration
+from within a context that is not allowed to sleep. If you cannot
+perform the requested configuration changes without sleeping, return
+-EWOULDBLOCK.
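+
+
+Putting the driver-author fields together, a hedged skeleton might look
+like the following. Field and callback names are as described above,
+but the callback prototypes and the pwm_register() invocation are
+assumptions for illustration, and the usual platform-driver boilerplate
+is omitted:
+
+    static int my_pwm_config(struct pwm_channel *ch,
+                             struct pwm_channel_config *c)
+    {
+            /* Apply the requested changes atomically, ideally at an
+             * end-of-period boundary; sleeping is permitted here.
+             */
+            return 0;
+    }
+
+    static struct pwm_device my_pwm = {
+            .bus_id = "my_pwmc.0",    /* same name as the platform device */
+            .nchan  = 4,              /* four distinct output channels */
+            .config = my_pwm_config,
+            /* .request, .free, .config_nosleep, .synchronize,
+             * .unsynchronize and .set_callback are optional
+             */
+    };
+
+    /* typically from the platform driver's probe() handler: */
+    ret = pwm_register(&my_pwm);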
+
+
+
+Acknowledgements
+
+
+The author expresses his gratitude to the countless developers who
+have reviewed and submitted feedback on the various versions of the
+Generic PWM Device API code, and those who have submitted drivers and
+applications that use the framework. You know who you are. ;)
+
diff --git a/target/linux/generic/files/arch/mips/fw/myloader/Makefile b/target/linux/generic/files/arch/mips/fw/myloader/Makefile
new file mode 100644
index 000000000..34acfd01c
--- /dev/null
+++ b/target/linux/generic/files/arch/mips/fw/myloader/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for Compex's MyLoader support on the MIPS architecture
+#
+
+lib-y += myloader.o
diff --git a/target/linux/generic/files/arch/mips/fw/myloader/myloader.c b/target/linux/generic/files/arch/mips/fw/myloader/myloader.c
new file mode 100644
index 000000000..a26f9ad3f
--- /dev/null
+++ b/target/linux/generic/files/arch/mips/fw/myloader/myloader.c
@@ -0,0 +1,63 @@
+/*
+ * Compex's MyLoader specific prom routines
+ *
+ * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include <asm/addrspace.h>
+#include <asm/fw/myloader/myloader.h>
+
+#define SYS_PARAMS_ADDR KSEG1ADDR(0x80000800)
+#define BOARD_PARAMS_ADDR KSEG1ADDR(0x80000A00)
+#define PART_TABLE_ADDR KSEG1ADDR(0x80000C00)
+#define BOOT_PARAMS_ADDR KSEG1ADDR(0x80000E00)
+
+static struct myloader_info myloader_info __initdata;
+static int myloader_found __initdata;
+
+struct myloader_info * __init myloader_get_info(void)
+{
+ struct mylo_system_params *sysp;
+ struct mylo_board_params *boardp;
+ struct mylo_partition_table *parts;
+
+ if (myloader_found)
+ return &myloader_info;
+
+ sysp = (struct mylo_system_params *)(SYS_PARAMS_ADDR);
+ boardp = (struct mylo_board_params *)(BOARD_PARAMS_ADDR);
+ parts = (struct mylo_partition_table *)(PART_TABLE_ADDR);
+
+ printk(KERN_DEBUG "MyLoader: sysp=%08x, boardp=%08x, parts=%08x\n",
+ sysp->magic, boardp->magic, parts->magic);
+
+ /* Check for some magic numbers */
+ if (sysp->magic != MYLO_MAGIC_SYS_PARAMS ||
+ boardp->magic != MYLO_MAGIC_BOARD_PARAMS ||
+ le32_to_cpu(parts->magic) != MYLO_MAGIC_PARTITIONS)
+ return NULL;
+
+ printk(KERN_DEBUG "MyLoader: id=%04x:%04x, sub_id=%04x:%04x\n",
+ sysp->vid, sysp->did, sysp->svid, sysp->sdid);
+
+ myloader_info.vid = sysp->vid;
+ myloader_info.did = sysp->did;
+ myloader_info.svid = sysp->svid;
+ myloader_info.sdid = sysp->sdid;
+
+ memcpy(myloader_info.macs, boardp->addr, sizeof(myloader_info.macs));
+
+ myloader_found = 1;
+
+ return &myloader_info;
+}
diff --git a/target/linux/generic/files/crypto/ocf/Config.in b/target/linux/generic/files/crypto/ocf/Config.in
new file mode 100644
index 000000000..652f76e90
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/Config.in
@@ -0,0 +1,38 @@
+#############################################################################
+
+mainmenu_option next_comment
+comment 'OCF Configuration'
+tristate 'OCF (Open Cryptographic Framework)' CONFIG_OCF_OCF
+dep_mbool ' enable fips RNG checks (fips check on RNG data before use)' \
+ CONFIG_OCF_FIPS $CONFIG_OCF_OCF
+dep_mbool ' enable harvesting entropy for /dev/random' \
+ CONFIG_OCF_RANDOMHARVEST $CONFIG_OCF_OCF
+dep_tristate ' cryptodev (user space support)' \
+ CONFIG_OCF_CRYPTODEV $CONFIG_OCF_OCF
+dep_tristate ' cryptosoft (software crypto engine)' \
+ CONFIG_OCF_CRYPTOSOFT $CONFIG_OCF_OCF
+dep_tristate ' safenet (HW crypto engine)' \
+ CONFIG_OCF_SAFE $CONFIG_OCF_OCF
+dep_tristate ' IXP4xx (HW crypto engine)' \
+ CONFIG_OCF_IXP4XX $CONFIG_OCF_OCF
+dep_mbool ' Enable IXP4xx HW to perform SHA1 and MD5 hashing (very slow)' \
+ CONFIG_OCF_IXP4XX_SHA1_MD5 $CONFIG_OCF_IXP4XX
+dep_tristate ' hifn (HW crypto engine)' \
+ CONFIG_OCF_HIFN $CONFIG_OCF_OCF
+dep_tristate ' talitos (HW crypto engine)' \
+ CONFIG_OCF_TALITOS $CONFIG_OCF_OCF
+dep_tristate ' pasemi (HW crypto engine)' \
+ CONFIG_OCF_PASEMI $CONFIG_OCF_OCF
+dep_tristate ' ep80579 (HW crypto engine)' \
+ CONFIG_OCF_EP80579 $CONFIG_OCF_OCF
+dep_tristate ' Micronas c7108 (HW crypto engine)' \
+ CONFIG_OCF_C7108 $CONFIG_OCF_OCF
+dep_tristate ' uBsec BCM5365 (HW crypto engine)' \
+ CONFIG_OCF_UBSEC_SSB $CONFIG_OCF_OCF
+dep_tristate ' ocfnull (does no crypto)' \
+ CONFIG_OCF_OCFNULL $CONFIG_OCF_OCF
+dep_tristate ' ocf-bench (HW crypto in-kernel benchmark)' \
+ CONFIG_OCF_BENCH $CONFIG_OCF_OCF
+endmenu
+
+#############################################################################
diff --git a/target/linux/generic/files/crypto/ocf/Kconfig b/target/linux/generic/files/crypto/ocf/Kconfig
new file mode 100644
index 000000000..65a446191
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/Kconfig
@@ -0,0 +1,125 @@
+menu "OCF Configuration"
+
+config OCF_OCF
+	tristate "OCF (Open Cryptographic Framework)"
+	help
+	  A Linux port of the OpenBSD/FreeBSD crypto framework.
+
+config OCF_RANDOMHARVEST
+ bool "crypto random --- harvest entropy for /dev/random"
+ depends on OCF_OCF
+ help
+ Includes code to harvest random numbers from devices that support it.
+
+config OCF_FIPS
+ bool "enable fips RNG checks"
+ depends on OCF_OCF && OCF_RANDOMHARVEST
+ help
+	  Run all RNG-provided data through a FIPS check before
+	  adding it to /dev/random's entropy pool.
+
+config OCF_CRYPTODEV
+ tristate "cryptodev (user space support)"
+ depends on OCF_OCF
+ help
+ The user space API to access crypto hardware.
+
+config OCF_CRYPTOSOFT
+ tristate "cryptosoft (software crypto engine)"
+ depends on OCF_OCF
+ help
+ A software driver for the OCF framework that uses
+ the kernel CryptoAPI.
+
+config OCF_SAFE
+ tristate "safenet (HW crypto engine)"
+ depends on OCF_OCF
+ help
+	  A driver for a number of the SafeNet SafeXcel crypto accelerators.
+ Currently tested and working on the 1141 and 1741.
+
+config OCF_IXP4XX
+ tristate "IXP4xx (HW crypto engine)"
+ depends on OCF_OCF
+ help
+ XScale IXP4xx crypto accelerator driver. Requires the
+ Intel Access library.
+
+config OCF_IXP4XX_SHA1_MD5
+ bool "IXP4xx SHA1 and MD5 Hashing"
+ depends on OCF_IXP4XX
+ help
+ Allows the IXP4xx crypto accelerator to perform SHA1 and MD5 hashing.
+ Note: this is MUCH slower than using cryptosoft (software crypto engine).
+
+config OCF_HIFN
+ tristate "hifn (HW crypto engine)"
+ depends on OCF_OCF
+ help
+ OCF driver for various HIFN based crypto accelerators.
+ (7951, 7955, 7956, 7751, 7811)
+
+config OCF_HIFNHIPP
+ tristate "Hifn HIPP (HW packet crypto engine)"
+ depends on OCF_OCF
+ help
+ OCF driver for various HIFN (HIPP) based crypto accelerators
+ (7855)
+
+config OCF_TALITOS
+ tristate "talitos (HW crypto engine)"
+ depends on OCF_OCF
+ help
+ OCF driver for Freescale's security engine (SEC/talitos).
+
+config OCF_PASEMI
+ tristate "pasemi (HW crypto engine)"
+ depends on OCF_OCF && PPC_PASEMI
+ help
+ OCF driver for the PA Semi PWRficient DMA Engine
+
+config OCF_EP80579
+ tristate "ep80579 (HW crypto engine)"
+ depends on OCF_OCF
+ help
+ OCF driver for the Intel EP80579 Integrated Processor Product Line.
+
+config OCF_CRYPTOCTEON
+ tristate "cryptocteon (HW crypto engine)"
+ depends on OCF_OCF
+ help
+ OCF driver for the Cavium OCTEON Processors.
+
+config OCF_KIRKWOOD
+ tristate "kirkwood (HW crypto engine)"
+ depends on OCF_OCF
+ help
+ OCF driver for the Marvell Kirkwood (88F6xxx) Processors.
+
+config OCF_C7108
+ tristate "Micronas 7108 (HW crypto engine)"
+ depends on OCF_OCF
+ help
+	  OCF driver for the Micronas 7108 cipher processors.
+
+config OCF_UBSEC_SSB
+ tristate "uBsec BCM5365 (HW crypto engine)"
+ depends on OCF_OCF
+ help
+ OCF driver for uBsec BCM5365 hardware crypto accelerator.
+
+config OCF_OCFNULL
+ tristate "ocfnull (fake crypto engine)"
+ depends on OCF_OCF
+ help
+	  OCF driver for measuring IPsec overheads (does no crypto).
+
+config OCF_BENCH
+ tristate "ocf-bench (HW crypto in-kernel benchmark)"
+ depends on OCF_OCF
+ help
+ A very simple encryption test for the in-kernel interface
+ of OCF. Also includes code to benchmark the IXP Access library
+ for comparison.
+
+endmenu
diff --git a/target/linux/generic/files/crypto/ocf/Makefile b/target/linux/generic/files/crypto/ocf/Makefile
new file mode 100644
index 000000000..110ed83ea
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/Makefile
@@ -0,0 +1,148 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+OCF_OBJS = crypto.o criov.o
+
+ifdef CONFIG_OCF_RANDOMHARVEST
+ OCF_OBJS += random.o
+endif
+
+ifdef CONFIG_OCF_FIPS
+ OCF_OBJS += rndtest.o
+endif
+
+# Add in autoconf.h to get #defines for CONFIG_xxx
+AUTOCONF_H=$(ROOTDIR)/modules/autoconf.h
+ifeq ($(AUTOCONF_H), $(wildcard $(AUTOCONF_H)))
+ EXTRA_CFLAGS += -include $(AUTOCONF_H)
+ export EXTRA_CFLAGS
+endif
+
+ifndef obj
+ obj ?= .
+ _obj = subdir
+ mod-subdirs := safe hifn ixp4xx talitos ocfnull
+ export-objs += crypto.o criov.o random.o
+ list-multi += ocf.o
+ _slash :=
+else
+ _obj = obj
+ _slash := /
+endif
+
+EXTRA_CFLAGS += -I$(obj)/.
+
+obj-$(CONFIG_OCF_OCF) += ocf.o
+obj-$(CONFIG_OCF_CRYPTODEV) += cryptodev.o
+obj-$(CONFIG_OCF_CRYPTOSOFT) += cryptosoft.o
+obj-$(CONFIG_OCF_BENCH) += ocf-bench.o
+
+$(_obj)-$(CONFIG_OCF_SAFE) += safe$(_slash)
+$(_obj)-$(CONFIG_OCF_HIFN) += hifn$(_slash)
+$(_obj)-$(CONFIG_OCF_IXP4XX) += ixp4xx$(_slash)
+$(_obj)-$(CONFIG_OCF_TALITOS) += talitos$(_slash)
+$(_obj)-$(CONFIG_OCF_PASEMI) += pasemi$(_slash)
+$(_obj)-$(CONFIG_OCF_EP80579) += ep80579$(_slash)
+$(_obj)-$(CONFIG_OCF_CRYPTOCTEON) += cryptocteon$(_slash)
+$(_obj)-$(CONFIG_OCF_KIRKWOOD) += kirkwood$(_slash)
+$(_obj)-$(CONFIG_OCF_OCFNULL) += ocfnull$(_slash)
+$(_obj)-$(CONFIG_OCF_C7108) += c7108$(_slash)
+$(_obj)-$(CONFIG_OCF_UBSEC_SSB) += ubsec_ssb$(_slash)
+
+ocf-objs := $(OCF_OBJS)
+
+dummy:
+ @echo "Please consult the README for how to build OCF."
+ @echo "If you can't wait then the following should do it:"
+ @echo ""
+ @echo " make ocf_modules"
+ @echo " sudo make ocf_install"
+ @echo ""
+ @exit 1
+
+$(list-multi) dummy1: $(ocf-objs)
+ $(LD) -r -o $@ $(ocf-objs)
+
+.PHONY: clean
+clean:
+ rm -f *.o *.ko .*.o.flags .*.ko.cmd .*.o.cmd .*.mod.o.cmd *.mod.c
+ rm -f */*.o */*.ko */.*.o.cmd */.*.ko.cmd */.*.mod.o.cmd */*.mod.c */.*.o.flags
+ rm -f */modules.order */modules.builtin modules.order modules.builtin
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
+
+#
+# targets to build easily on the current machine
+#
+
+ocf_make:
+ make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m
+ make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_CRYPTOSOFT=m
+ -make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_BENCH=m
+ -make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_OCFNULL=m
+ -make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_HIFN=m
+
+ocf_modules:
+ $(MAKE) ocf_make OCF_TARGET=modules
+
+ocf_install:
+ $(MAKE) ocf_make OCF_TARGET="modules modules_install"
+ depmod
+ mkdir -p /usr/include/crypto
+ cp cryptodev.h /usr/include/crypto/.
+
+#
+# generate full kernel patches for 2.4 and 2.6 kernels to make patching
+# your kernel easier
+#
+
+.PHONY: patch
+patch:
+ patchbase=.; \
+ [ -d $$patchbase/patches ] || patchbase=..; \
+ patch=ocf-linux-base.patch; \
+ patch24=ocf-linux-24.patch; \
+ patch26=ocf-linux-26.patch; \
+ patch3=ocf-linux-3.patch; \
+ ( \
+ find . -name Makefile; \
+ find . -name Config.in; \
+ find . -name Kconfig; \
+ find . -name README; \
+ find . -name '*.[ch]' | grep -v '.mod.c'; \
+ ) | while read t; do \
+ diff -Nau /dev/null $$t | sed 's?^+++ \./?+++ linux/crypto/ocf/?'; \
+ done > $$patch; \
+ cat $$patchbase/patches/linux-2.4.35-ocf.patch $$patch > $$patch24; \
+ cat $$patchbase/patches/linux-2.6.38-ocf.patch $$patch > $$patch26; \
+ cat $$patchbase/patches/linux-3.2.1-ocf.patch $$patch > $$patch3; \
+
+
+#
+# this target probably does nothing for anyone but me - davidm
+#
+
+.PHONY: release
+release:
+ REL=`date +%Y%m%d`; RELDIR=/tmp/ocf-linux-$$REL; \
+ CURDIR=`pwd`; \
+ rm -rf /tmp/ocf-linux-$$REL*; \
+ mkdir -p $$RELDIR/ocf; \
+ mkdir -p $$RELDIR/patches; \
+ mkdir -p $$RELDIR/crypto-tools; \
+ cp README* $$RELDIR/.; \
+ cp patches/[!C]* $$RELDIR/patches/.; \
+ cp tools/[!C]* $$RELDIR/crypto-tools/.; \
+ cp -r [!C]* Config.in $$RELDIR/ocf/.; \
+ rm -rf $$RELDIR/ocf/patches $$RELDIR/ocf/tools; \
+ rm -f $$RELDIR/ocf/README*; \
+ cp $$CURDIR/../../user/crypto-tools/[!C]* $$RELDIR/crypto-tools/.; \
+ make -C $$RELDIR/crypto-tools clean; \
+ make -C $$RELDIR/ocf clean; \
+ find $$RELDIR/ocf -name CVS | xargs rm -rf; \
+ cd $$RELDIR/..; \
+ tar cvf ocf-linux-$$REL.tar ocf-linux-$$REL; \
+ gzip -9 ocf-linux-$$REL.tar
+
diff --git a/target/linux/generic/files/crypto/ocf/c7108/Makefile b/target/linux/generic/files/crypto/ocf/c7108/Makefile
new file mode 100644
index 000000000..e7e634b2b
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/c7108/Makefile
@@ -0,0 +1,12 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_C7108) += aes-7108.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
+
diff --git a/target/linux/generic/files/crypto/ocf/c7108/aes-7108.c b/target/linux/generic/files/crypto/ocf/c7108/aes-7108.c
new file mode 100644
index 000000000..f4841f555
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/c7108/aes-7108.c
@@ -0,0 +1,841 @@
+/*
+ * Copyright (C) 2006 Micronas USA
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+//#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/crypto.h>
+#include <linux/mm.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+//#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <cryptodev.h>
+#include <uio.h>
+#include <aes-7108.h>
+
+/* Runtime mode */
+static int c7108_crypto_mode = C7108_AES_CTRL_MODE_CTR;
+//static int c7108_crypto_mode = C7108_AES_CTRL_MODE_CBC;
+
+static int32_t c7108_id = -1;
+static struct cipher_7108 **c7108_sessions = NULL;
+static u_int32_t c7108_sesnum = 0;
+static unsigned long iobar;
+
+/* Crypto entry points */
+static int c7108_process(void *, struct cryptop *, int);
+static int c7108_newsession(void *, u_int32_t *, struct cryptoini *);
+static int c7108_freesession(void *, u_int64_t);
+
+/* Globals */
+static int debug = 0;
+static spinlock_t csr_mutex;
+
+/* Generic controller-based lock */
+#define AES_LOCK()\
+ spin_lock(&csr_mutex)
+#define AES_UNLOCK()\
+ spin_unlock(&csr_mutex)
+
+/* 7108 AES register access */
+#define c7108_reg_wr8(a,d) iowrite8(d, (void*)(iobar+(a)))
+#define c7108_reg_wr16(a,d) iowrite16(d, (void*)(iobar+(a)))
+#define c7108_reg_wr32(a,d) iowrite32(d, (void*)(iobar+(a)))
+#define c7108_reg_rd8(a) ioread8((void*)(iobar+(a)))
+#define c7108_reg_rd16(a) ioread16((void*)(iobar+(a)))
+#define c7108_reg_rd32(a) ioread32((void*)(iobar+(a)))
+
+static int
+c7108_xlate_key(int klen, u8* k8ptr, u32* k32ptr)
+{
+ int i, nw=0;
+ nw = ((klen >= 256) ? 8 : (klen >= 192) ? 6 : 4);
+ for ( i = 0; i < nw; i++) {
+	    k32ptr[i] = (k8ptr[4*i+3] << 24) | (k8ptr[4*i+2] << 16) |
+		    (k8ptr[4*i+1] << 8) | k8ptr[4*i];
+
+ }
+ return 0;
+}
+
+static int
+c7108_cache_key(int klen, u32* k32ptr, u8* k8ptr)
+{
+ int i, nb=0;
+ u8* ptr = (u8*)k32ptr;
+ nb = ((klen >= 256) ? 32 : (klen >= 192) ? 24 : 16);
+ for ( i = 0; i < nb; i++)
+ k8ptr[i] = ptr[i];
+ return 0;
+}
+
+static int
+c7108_aes_setup_dma(u32 src, u32 dst, u32 len)
+{
+ if (len < 16) {
+ printk("len < 16\n");
+ return -10;
+ }
+ if (len % 16) {
+ printk("len not multiple of 16\n");
+ return -11;
+ }
+ c7108_reg_wr16(C7108_AES_DMA_SRC0_LO, (u16) src);
+ c7108_reg_wr16(C7108_AES_DMA_SRC0_HI, (u16)((src & 0xffff0000) >> 16));
+ c7108_reg_wr16(C7108_AES_DMA_DST0_LO, (u16) dst);
+ c7108_reg_wr16(C7108_AES_DMA_DST0_HI, (u16)((dst & 0xffff0000) >> 16));
+ c7108_reg_wr16(C7108_AES_DMA_LEN, (u16) ((len / 16) - 1));
+
+ return 0;
+}
+
+static int
+c7108_aes_set_hw_iv(u8 iv[16])
+{
+ c7108_reg_wr16(C7108_AES_IV0_LO, (u16) ((iv[1] << 8) | iv[0]));
+ c7108_reg_wr16(C7108_AES_IV0_HI, (u16) ((iv[3] << 8) | iv[2]));
+ c7108_reg_wr16(C7108_AES_IV1_LO, (u16) ((iv[5] << 8) | iv[4]));
+ c7108_reg_wr16(C7108_AES_IV1_HI, (u16) ((iv[7] << 8) | iv[6]));
+ c7108_reg_wr16(C7108_AES_IV2_LO, (u16) ((iv[9] << 8) | iv[8]));
+ c7108_reg_wr16(C7108_AES_IV2_HI, (u16) ((iv[11] << 8) | iv[10]));
+ c7108_reg_wr16(C7108_AES_IV3_LO, (u16) ((iv[13] << 8) | iv[12]));
+ c7108_reg_wr16(C7108_AES_IV3_HI, (u16) ((iv[15] << 8) | iv[14]));
+
+ return 0;
+}
+
+static void
+c7108_aes_read_dkey(u32 * dkey)
+{
+ dkey[0] = (c7108_reg_rd16(C7108_AES_EKEY0_HI) << 16) |
+ c7108_reg_rd16(C7108_AES_EKEY0_LO);
+ dkey[1] = (c7108_reg_rd16(C7108_AES_EKEY1_HI) << 16) |
+ c7108_reg_rd16(C7108_AES_EKEY1_LO);
+ dkey[2] = (c7108_reg_rd16(C7108_AES_EKEY2_HI) << 16) |
+ c7108_reg_rd16(C7108_AES_EKEY2_LO);
+ dkey[3] = (c7108_reg_rd16(C7108_AES_EKEY3_HI) << 16) |
+ c7108_reg_rd16(C7108_AES_EKEY3_LO);
+ dkey[4] = (c7108_reg_rd16(C7108_AES_EKEY4_HI) << 16) |
+ c7108_reg_rd16(C7108_AES_EKEY4_LO);
+ dkey[5] = (c7108_reg_rd16(C7108_AES_EKEY5_HI) << 16) |
+ c7108_reg_rd16(C7108_AES_EKEY5_LO);
+ dkey[6] = (c7108_reg_rd16(C7108_AES_EKEY6_HI) << 16) |
+ c7108_reg_rd16(C7108_AES_EKEY6_LO);
+ dkey[7] = (c7108_reg_rd16(C7108_AES_EKEY7_HI) << 16) |
+ c7108_reg_rd16(C7108_AES_EKEY7_LO);
+}
+
+static int
+c7108_aes_cipher(int op,
+ u32 dst,
+ u32 src,
+ u32 len,
+ int klen,
+ u16 mode,
+ u32 key[8],
+ u8 iv[16])
+{
+ int rv = 0, cnt=0;
+ u16 ctrl = 0, stat = 0;
+
+ AES_LOCK();
+
+ /* Setup key length */
+ if (klen == 128) {
+ ctrl |= C7108_AES_KEY_LEN_128;
+ } else if (klen == 192) {
+ ctrl |= C7108_AES_KEY_LEN_192;
+ } else if (klen == 256) {
+ ctrl |= C7108_AES_KEY_LEN_256;
+ } else {
+ AES_UNLOCK();
+ return -3;
+ }
+
+ /* Check opcode */
+ if (C7108_AES_ENCRYPT == op) {
+ ctrl |= C7108_AES_ENCRYPT;
+ } else if (C7108_AES_DECRYPT == op) {
+ ctrl |= C7108_AES_DECRYPT;
+ } else {
+ AES_UNLOCK();
+ return -4;
+ }
+
+ /* check mode */
+ if ( (mode != C7108_AES_CTRL_MODE_CBC) &&
+ (mode != C7108_AES_CTRL_MODE_CFB) &&
+ (mode != C7108_AES_CTRL_MODE_OFB) &&
+ (mode != C7108_AES_CTRL_MODE_CTR) &&
+ (mode != C7108_AES_CTRL_MODE_ECB) ) {
+ AES_UNLOCK();
+ return -5;
+ }
+
+ /* Now set mode */
+ ctrl |= mode;
+
+ /* For CFB, OFB, and CTR, neither backward key
+ * expansion nor key inversion is required.
+ */
+ if ( (C7108_AES_DECRYPT == op) &&
+ (C7108_AES_CTRL_MODE_CBC == mode ||
+ C7108_AES_CTRL_MODE_ECB == mode ) ){
+
+ /* Program Key */
+ c7108_reg_wr16(C7108_AES_KEY0_LO, (u16) key[4]);
+ c7108_reg_wr16(C7108_AES_KEY0_HI, (u16) (key[4] >> 16));
+ c7108_reg_wr16(C7108_AES_KEY1_LO, (u16) key[5]);
+ c7108_reg_wr16(C7108_AES_KEY1_HI, (u16) (key[5] >> 16));
+ c7108_reg_wr16(C7108_AES_KEY2_LO, (u16) key[6]);
+ c7108_reg_wr16(C7108_AES_KEY2_HI, (u16) (key[6] >> 16));
+ c7108_reg_wr16(C7108_AES_KEY3_LO, (u16) key[7]);
+ c7108_reg_wr16(C7108_AES_KEY3_HI, (u16) (key[7] >> 16));
+ c7108_reg_wr16(C7108_AES_KEY6_LO, (u16) key[2]);
+ c7108_reg_wr16(C7108_AES_KEY6_HI, (u16) (key[2] >> 16));
+ c7108_reg_wr16(C7108_AES_KEY7_LO, (u16) key[3]);
+ c7108_reg_wr16(C7108_AES_KEY7_HI, (u16) (key[3] >> 16));
+
+
+ if (192 == klen) {
+ c7108_reg_wr16(C7108_AES_KEY4_LO, (u16) key[7]);
+ c7108_reg_wr16(C7108_AES_KEY4_HI, (u16) (key[7] >> 16));
+ c7108_reg_wr16(C7108_AES_KEY5_LO, (u16) key[7]);
+ c7108_reg_wr16(C7108_AES_KEY5_HI, (u16) (key[7] >> 16));
+
+ } else if (256 == klen) {
+ /* 256 */
+ c7108_reg_wr16(C7108_AES_KEY4_LO, (u16) key[0]);
+ c7108_reg_wr16(C7108_AES_KEY4_HI, (u16) (key[0] >> 16));
+ c7108_reg_wr16(C7108_AES_KEY5_LO, (u16) key[1]);
+ c7108_reg_wr16(C7108_AES_KEY5_HI, (u16) (key[1] >> 16));
+
+ }
+
+ } else {
+ /* Program Key */
+ c7108_reg_wr16(C7108_AES_KEY0_LO, (u16) key[0]);
+ c7108_reg_wr16(C7108_AES_KEY0_HI, (u16) (key[0] >> 16));
+ c7108_reg_wr16(C7108_AES_KEY1_LO, (u16) key[1]);
+ c7108_reg_wr16(C7108_AES_KEY1_HI, (u16) (key[1] >> 16));
+ c7108_reg_wr16(C7108_AES_KEY2_LO, (u16) key[2]);
+ c7108_reg_wr16(C7108_AES_KEY2_HI, (u16) (key[2] >> 16));
+ c7108_reg_wr16(C7108_AES_KEY3_LO, (u16) key[3]);
+ c7108_reg_wr16(C7108_AES_KEY3_HI, (u16) (key[3] >> 16));
+ c7108_reg_wr16(C7108_AES_KEY4_LO, (u16) key[4]);
+ c7108_reg_wr16(C7108_AES_KEY4_HI, (u16) (key[4] >> 16));
+ c7108_reg_wr16(C7108_AES_KEY5_LO, (u16) key[5]);
+ c7108_reg_wr16(C7108_AES_KEY5_HI, (u16) (key[5] >> 16));
+ c7108_reg_wr16(C7108_AES_KEY6_LO, (u16) key[6]);
+ c7108_reg_wr16(C7108_AES_KEY6_HI, (u16) (key[6] >> 16));
+ c7108_reg_wr16(C7108_AES_KEY7_LO, (u16) key[7]);
+ c7108_reg_wr16(C7108_AES_KEY7_HI, (u16) (key[7] >> 16));
+
+ }
+
+ /* Set IV always */
+ c7108_aes_set_hw_iv(iv);
+
+ /* Program DMA addresses */
+ if ((rv = c7108_aes_setup_dma(src, dst, len)) < 0) {
+ AES_UNLOCK();
+ return rv;
+ }
+
+
+ /* Start AES cipher */
+ c7108_reg_wr16(C7108_AES_CTRL, ctrl | C7108_AES_GO);
+
+ //printk("Ctrl: 0x%x\n", ctrl | C7108_AES_GO);
+ do {
+ /* TODO: interrupt mode */
+ // printk("aes_stat=0x%x\n", stat);
+ //udelay(100);
+ } while ((cnt++ < 1000000) &&
+ !((stat=c7108_reg_rd16(C7108_AES_CTRL))&C7108_AES_OP_DONE));
+
+
+ if ((mode == C7108_AES_CTRL_MODE_ECB)||
+ (mode == C7108_AES_CTRL_MODE_CBC)) {
+ /* Save out key when the lock is held ... */
+ c7108_aes_read_dkey(key);
+ }
+
+ AES_UNLOCK();
+ return 0;
+
+}
+
+/*
+ * Generate a new crypto device session.
+ */
+static int
+c7108_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
+{
+ struct cipher_7108 **swd;
+ u_int32_t i;
+ char *algo;
+ int mode, xfm_type;
+
+ dprintk("%s()\n", __FUNCTION__);
+ if (sid == NULL || cri == NULL) {
+ dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ if (c7108_sessions) {
+ for (i = 1; i < c7108_sesnum; i++)
+ if (c7108_sessions[i] == NULL)
+ break;
+ } else
+ i = 1; /* NB: to silence compiler warning */
+
+ if (c7108_sessions == NULL || i == c7108_sesnum) {
+ if (c7108_sessions == NULL) {
+ i = 1; /* We leave c7108_sessions[0] empty */
+ c7108_sesnum = CRYPTO_SW_SESSIONS;
+ } else
+ c7108_sesnum *= 2;
+
+ swd = kmalloc(c7108_sesnum * sizeof(struct cipher_7108 *),
+ GFP_ATOMIC);
+ if (swd == NULL) {
+ /* Reset session number */
+ if (c7108_sesnum == CRYPTO_SW_SESSIONS)
+ c7108_sesnum = 0;
+ else
+ c7108_sesnum /= 2;
+ dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+ return ENOBUFS;
+ }
+ memset(swd, 0, c7108_sesnum * sizeof(struct cipher_7108 *));
+
+ /* Copy existing sessions */
+ if (c7108_sessions) {
+ memcpy(swd, c7108_sessions,
+ (c7108_sesnum / 2) * sizeof(struct cipher_7108 *));
+ kfree(c7108_sessions);
+ }
+
+ c7108_sessions = swd;
+
+ }
+
+ swd = &c7108_sessions[i];
+ *sid = i;
+
+ while (cri) {
+ *swd = (struct cipher_7108 *)
+ kmalloc(sizeof(struct cipher_7108), GFP_ATOMIC);
+ if (*swd == NULL) {
+ c7108_freesession(NULL, i);
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return ENOBUFS;
+ }
+ memset(*swd, 0, sizeof(struct cipher_7108));
+
+ algo = NULL;
+ mode = 0;
+ xfm_type = HW_TYPE_CIPHER;
+
+ switch (cri->cri_alg) {
+
+ case CRYPTO_AES_CBC:
+ algo = "aes";
+ mode = CRYPTO_TFM_MODE_CBC;
+ c7108_crypto_mode = C7108_AES_CTRL_MODE_CBC;
+ break;
+#if 0
+ case CRYPTO_AES_CTR:
+ algo = "aes_ctr";
+ mode = CRYPTO_TFM_MODE_CBC;
+ c7108_crypto_mode = C7108_AES_CTRL_MODE_CTR;
+ break;
+ case CRYPTO_AES_ECB:
+ algo = "aes_ecb";
+ mode = CRYPTO_TFM_MODE_CBC;
+ c7108_crypto_mode = C7108_AES_CTRL_MODE_ECB;
+ break;
+ case CRYPTO_AES_OFB:
+ algo = "aes_ofb";
+ mode = CRYPTO_TFM_MODE_CBC;
+ c7108_crypto_mode = C7108_AES_CTRL_MODE_OFB;
+ break;
+ case CRYPTO_AES_CFB:
+ algo = "aes_cfb";
+ mode = CRYPTO_TFM_MODE_CBC;
+ c7108_crypto_mode = C7108_AES_CTRL_MODE_CFB;
+ break;
+#endif
+ default:
+ printk("unsupported crypto algorithm: %d\n",
+ cri->cri_alg);
+ return -EINVAL;
+ break;
+ }
+
+
+ if (!algo || !*algo) {
+ printk("cypher_7108_crypto: Unknown algo 0x%x\n",
+ cri->cri_alg);
+ c7108_freesession(NULL, i);
+ return EINVAL;
+ }
+
+ if (xfm_type == HW_TYPE_CIPHER) {
+ if (debug) {
+ dprintk("%s key:", __FUNCTION__);
+ for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
+ dprintk("%s0x%02x", (i % 8) ? " " : "\n ",
+ cri->cri_key[i]);
+ dprintk("\n");
+ }
+
+ } else if (xfm_type == SW_TYPE_HMAC ||
+ xfm_type == SW_TYPE_HASH) {
+			printk("cypher_7108_crypto: HMAC unsupported!\n");
+			c7108_freesession(NULL, i);
+			return -EINVAL;
+ } else {
+ printk("cypher_7108_crypto: "
+ "Unhandled xfm_type %d\n", xfm_type);
+ c7108_freesession(NULL, i);
+ return EINVAL;
+ }
+
+ (*swd)->cri_alg = cri->cri_alg;
+ (*swd)->xfm_type = xfm_type;
+
+ cri = cri->cri_next;
+ swd = &((*swd)->next);
+ }
+ return 0;
+}
+
+/*
+ * Free a session.
+ */
+static int
+c7108_freesession(void *arg, u_int64_t tid)
+{
+ struct cipher_7108 *swd;
+ u_int32_t sid = CRYPTO_SESID2LID(tid);
+
+ dprintk("%s()\n", __FUNCTION__);
+	if (sid >= c7108_sesnum || c7108_sessions == NULL ||
+ c7108_sessions[sid] == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return(EINVAL);
+ }
+
+ /* Silently accept and return */
+ if (sid == 0)
+ return(0);
+
+ while ((swd = c7108_sessions[sid]) != NULL) {
+ c7108_sessions[sid] = swd->next;
+ kfree(swd);
+ }
+ return 0;
+}
+
+/*
+ * Process a hardware request.
+ */
+static int
+c7108_process(void *arg, struct cryptop *crp, int hint)
+{
+ struct cryptodesc *crd;
+ struct cipher_7108 *sw;
+ u_int32_t lid;
+ int type;
+ u32 hwkey[8];
+
+#define SCATTERLIST_MAX 16
+ struct scatterlist sg[SCATTERLIST_MAX];
+ int sg_num, sg_len, skip;
+ struct sk_buff *skb = NULL;
+ struct uio *uiop = NULL;
+
+ dprintk("%s()\n", __FUNCTION__);
+ /* Sanity check */
+ if (crp == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ crp->crp_etype = 0;
+
+ if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+
+ lid = crp->crp_sid & 0xffffffff;
+ if (lid >= c7108_sesnum || lid == 0 || c7108_sessions == NULL ||
+ c7108_sessions[lid] == NULL) {
+ crp->crp_etype = ENOENT;
+ dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
+ goto done;
+ }
+
+ /*
+ * do some error checking outside of the loop for SKB and IOV
+ * processing this leaves us with valid skb or uiop pointers
+ * for later
+ */
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ skb = (struct sk_buff *) crp->crp_buf;
+ if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
+ printk("%s,%d: %d nr_frags > SCATTERLIST_MAX",
+ __FILE__, __LINE__,
+ skb_shinfo(skb)->nr_frags);
+ goto done;
+ }
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ uiop = (struct uio *) crp->crp_buf;
+ if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
+ printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX",
+ __FILE__, __LINE__,
+ uiop->uio_iovcnt);
+ goto done;
+ }
+ }
+
+ /* Go through crypto descriptors, processing as we go */
+ for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+ /*
+ * Find the crypto context.
+ *
+ * XXX Note that the logic here prevents us from having
+ * XXX the same algorithm multiple times in a session
+ * XXX (or rather, we can but it won't give us the right
+ * XXX results). To do that, we'd need some way of differentiating
+ * XXX between the various instances of an algorithm (so we can
+ * XXX locate the correct crypto context).
+ */
+ for (sw = c7108_sessions[lid];
+ sw && sw->cri_alg != crd->crd_alg;
+ sw = sw->next)
+ ;
+
+ /* No such context ? */
+ if (sw == NULL) {
+ crp->crp_etype = EINVAL;
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ goto done;
+ }
+
+ skip = crd->crd_skip;
+
+ /*
+ * setup the SG list skip from the start of the buffer
+ */
+ memset(sg, 0, sizeof(sg));
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ int i, len;
+ type = CRYPTO_BUF_SKBUF;
+
+ sg_num = 0;
+ sg_len = 0;
+
+ if (skip < skb_headlen(skb)) {
+ //sg[sg_num].page = virt_to_page(skb->data + skip);
+ //sg[sg_num].offset = offset_in_page(skb->data + skip);
+ len = skb_headlen(skb) - skip;
+ if (len + sg_len > crd->crd_len)
+ len = crd->crd_len - sg_len;
+ //sg[sg_num].length = len;
+ sg_set_page(&sg[sg_num], virt_to_page(skb->data + skip), len, offset_in_page(skb->data + skip));
+ sg_len += sg[sg_num].length;
+ sg_num++;
+ skip = 0;
+ } else
+ skip -= skb_headlen(skb);
+
+ for (i = 0; sg_len < crd->crd_len &&
+ i < skb_shinfo(skb)->nr_frags &&
+ sg_num < SCATTERLIST_MAX; i++) {
+ if (skip < skb_shinfo(skb)->frags[i].size) {
+ //sg[sg_num].page = skb_frag_page(&skb_shinfo(skb)->frags[i]);
+ //sg[sg_num].offset = skb_shinfo(skb)->frags[i].page_offset + skip;
+ len = skb_shinfo(skb)->frags[i].size - skip;
+ if (len + sg_len > crd->crd_len)
+ len = crd->crd_len - sg_len;
+ //sg[sg_num].length = len;
+ sg_set_page(&sg[sg_num], skb_frag_page(&skb_shinfo(skb)->frags[i]), len, skb_shinfo(skb)->frags[i].page_offset + skip);
+ sg_len += sg[sg_num].length;
+ sg_num++;
+ skip = 0;
+ } else
+ skip -= skb_shinfo(skb)->frags[i].size;
+ }
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ int len;
+ type = CRYPTO_BUF_IOV;
+ sg_len = 0;
+ for (sg_num = 0; sg_len < crd->crd_len &&
+ sg_num < uiop->uio_iovcnt &&
+ sg_num < SCATTERLIST_MAX; sg_num++) {
+ if (skip < uiop->uio_iov[sg_num].iov_len) {
+ //sg[sg_num].page = virt_to_page(uiop->uio_iov[sg_num].iov_base+skip);
+ //sg[sg_num].offset = offset_in_page(uiop->uio_iov[sg_num].iov_base+skip);
+ len = uiop->uio_iov[sg_num].iov_len - skip;
+ if (len + sg_len > crd->crd_len)
+ len = crd->crd_len - sg_len;
+ //sg[sg_num].length = len;
+ sg_set_page(&sg[sg_num], virt_to_page(uiop->uio_iov[sg_num].iov_base+skip), len, offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
+ sg_len += sg[sg_num].length;
+ skip = 0;
+ } else
+ skip -= uiop->uio_iov[sg_num].iov_len;
+ }
+ } else {
+ type = CRYPTO_BUF_CONTIG;
+ //sg[0].page = virt_to_page(crp->crp_buf + skip);
+ //sg[0].offset = offset_in_page(crp->crp_buf + skip);
+ sg_len = (crp->crp_ilen - skip);
+ if (sg_len > crd->crd_len)
+ sg_len = crd->crd_len;
+ //sg[0].length = sg_len;
+ sg_set_page(&sg[0], virt_to_page(crp->crp_buf + skip), sg_len, offset_in_page(crp->crp_buf + skip));
+ sg_num = 1;
+ }
+ if (sg_num > 0)
+ sg_mark_end(&sg[sg_num-1]);
+
+
+ switch (sw->xfm_type) {
+
+ case HW_TYPE_CIPHER: {
+
+ unsigned char iv[64];
+ unsigned char *ivp = iv;
+ int i;
+ int ivsize = 16; /* fixed for AES */
+ int blocksize = 16; /* fixed for AES */
+
+ if (sg_len < blocksize) {
+ crp->crp_etype = EINVAL;
+ dprintk("%s,%d: EINVAL len %d < %d\n",
+ __FILE__, __LINE__,
+ sg_len,
+ blocksize);
+ goto done;
+ }
+
+ if (ivsize > sizeof(iv)) {
+ crp->crp_etype = EINVAL;
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ goto done;
+ }
+
+ if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
+
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
+ ivp = crd->crd_iv;
+ } else {
+ get_random_bytes(ivp, ivsize);
+ }
+ /*
+ * do we have to copy the IV back to the buffer ?
+ */
+ if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+				crypto_copyback(crp->crp_flags, crp->crp_buf,
+						crd->crd_inject,
+						ivsize,
+						(caddr_t)ivp);
+ }
+
+ c7108_xlate_key(crd->crd_klen,
+ (u8*)crd->crd_key, (u32*)hwkey);
+
+ /* Encrypt SG list */
+ for (i = 0; i < sg_num; i++) {
+ sg[i].dma_address =
+ dma_map_single(NULL,
+ kmap(sg_page(&sg[i])) + sg[i].offset, sg_len, DMA_BIDIRECTIONAL);
+#if 0
+ printk("sg[%d]:0x%08x, off 0x%08x "
+ "kmap 0x%08x phys 0x%08x\n",
+ i, sg[i].page, sg[i].offset,
+ kmap(sg[i].page) + sg[i].offset,
+ sg[i].dma_address);
+#endif
+ c7108_aes_cipher(C7108_AES_ENCRYPT,
+ sg[i].dma_address,
+ sg[i].dma_address,
+ sg_len,
+ crd->crd_klen,
+ c7108_crypto_mode,
+ hwkey,
+ ivp);
+
+ if ((c7108_crypto_mode == C7108_AES_CTRL_MODE_CBC)||
+ (c7108_crypto_mode == C7108_AES_CTRL_MODE_ECB)) {
+ /* Read back expanded key and cache it in key
+ * context.
+ * NOTE: for ECB/CBC modes only (not CTR, CFB, OFB)
+ * where you set the key once.
+ */
+ c7108_cache_key(crd->crd_klen,
+ (u32*)hwkey, (u8*)crd->crd_key);
+#if 0
+ printk("%s expanded key:", __FUNCTION__);
+ for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
+ printk("%s0x%02x", (i % 8) ? " " : "\n ",
+ crd->crd_key[i]);
+ printk("\n");
+#endif
+ }
+ }
+ }
+ else { /*decrypt */
+
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
+ ivp = crd->crd_iv;
+ } else {
+				crypto_copydata(crp->crp_flags, crp->crp_buf,
+						crd->crd_inject, ivsize, (caddr_t)ivp);
+ }
+
+ c7108_xlate_key(crd->crd_klen,
+ (u8*)crd->crd_key, (u32*)hwkey);
+
+ /* Decrypt SG list */
+ for (i = 0; i < sg_num; i++) {
+ sg[i].dma_address =
+ dma_map_single(NULL,
+ kmap(sg_page(&sg[i])) + sg[i].offset,
+ sg_len, DMA_BIDIRECTIONAL);
+
+#if 0
+ printk("sg[%d]:0x%08x, off 0x%08x "
+ "kmap 0x%08x phys 0x%08x\n",
+ i, sg[i].page, sg[i].offset,
+ kmap(sg[i].page) + sg[i].offset,
+ sg[i].dma_address);
+#endif
+ c7108_aes_cipher(C7108_AES_DECRYPT,
+ sg[i].dma_address,
+ sg[i].dma_address,
+ sg_len,
+ crd->crd_klen,
+ c7108_crypto_mode,
+ hwkey,
+ ivp);
+ }
+ }
+ } break;
+ case SW_TYPE_HMAC:
+ case SW_TYPE_HASH:
+ crp->crp_etype = EINVAL;
+ goto done;
+ break;
+
+ case SW_TYPE_COMP:
+ crp->crp_etype = EINVAL;
+ goto done;
+ break;
+
+ default:
+ /* Unknown/unsupported algorithm */
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+ }
+
+done:
+ crypto_done(crp);
+ return 0;
+}
+
+static struct {
+ softc_device_decl sc_dev;
+} a7108dev;
+
+static device_method_t a7108_methods = {
+/* crypto device methods */
+ DEVMETHOD(cryptodev_newsession, c7108_newsession),
+ DEVMETHOD(cryptodev_freesession, c7108_freesession),
+ DEVMETHOD(cryptodev_process, c7108_process),
+ DEVMETHOD(cryptodev_kprocess, NULL)
+};
+
+static int
+cypher_7108_crypto_init(void)
+{
+ dprintk("%s(%p)\n", __FUNCTION__, cypher_7108_crypto_init);
+
+ iobar = (unsigned long)ioremap(CCU_AES_REG_BASE, 0x4000);
+ printk("7108: AES @ 0x%08x (0x%08x phys) %s mode\n",
+ iobar, CCU_AES_REG_BASE,
+ c7108_crypto_mode & C7108_AES_CTRL_MODE_CBC ? "CBC" :
+ c7108_crypto_mode & C7108_AES_CTRL_MODE_ECB ? "ECB" :
+ c7108_crypto_mode & C7108_AES_CTRL_MODE_CTR ? "CTR" :
+ c7108_crypto_mode & C7108_AES_CTRL_MODE_CFB ? "CFB" :
+ c7108_crypto_mode & C7108_AES_CTRL_MODE_OFB ? "OFB" : "???");
+ csr_mutex = SPIN_LOCK_UNLOCKED;
+
+ memset(&a7108dev, 0, sizeof(a7108dev));
+ softc_device_init(&a7108dev, "aes7108", 0, a7108_methods);
+
+ c7108_id = crypto_get_driverid(softc_get_device(&a7108dev), CRYPTOCAP_F_HARDWARE);
+ if (c7108_id < 0)
+ panic("7108: crypto device cannot initialize!");
+
+// crypto_register(c7108_id, CRYPTO_AES_CBC, 0, 0, c7108_newsession, c7108_freesession, c7108_process, NULL);
+ crypto_register(c7108_id, CRYPTO_AES_CBC, 0, 0);
+
+ return(0);
+}
+
+static void
+cypher_7108_crypto_exit(void)
+{
+ dprintk("%s()\n", __FUNCTION__);
+ crypto_unregister_all(c7108_id);
+ c7108_id = -1;
+}
+
+module_init(cypher_7108_crypto_init);
+module_exit(cypher_7108_crypto_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Cypher 7108 Crypto (OCF module for kernel crypto)");
diff --git a/target/linux/generic/files/crypto/ocf/c7108/aes-7108.h b/target/linux/generic/files/crypto/ocf/c7108/aes-7108.h
new file mode 100644
index 000000000..0c7bfcbb8
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/c7108/aes-7108.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2006 Micronas USA
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+#ifndef __AES_7108_H__
+#define __AES_7108_H__
+
+/* Cypher 7108 AES Controller Hardware */
+#define CCU_REG_BASE 0x1b500000
+#define CCU_AES_REG_BASE (CCU_REG_BASE + 0x100)
+#define C7108_AES_KEY0_LO (0x0000)
+#define C7108_AES_KEY0_HI (0x0004)
+#define C7108_AES_KEY1_LO (0x0008)
+#define C7108_AES_KEY1_HI (0x000c)
+#define C7108_AES_KEY2_LO (0x0010)
+#define C7108_AES_KEY2_HI (0x0014)
+#define C7108_AES_KEY3_LO (0x0018)
+#define C7108_AES_KEY3_HI (0x001c)
+#define C7108_AES_KEY4_LO (0x0020)
+#define C7108_AES_KEY4_HI (0x0024)
+#define C7108_AES_KEY5_LO (0x0028)
+#define C7108_AES_KEY5_HI (0x002c)
+#define C7108_AES_KEY6_LO (0x0030)
+#define C7108_AES_KEY6_HI (0x0034)
+#define C7108_AES_KEY7_LO (0x0038)
+#define C7108_AES_KEY7_HI (0x003c)
+#define C7108_AES_IV0_LO (0x0040)
+#define C7108_AES_IV0_HI (0x0044)
+#define C7108_AES_IV1_LO (0x0048)
+#define C7108_AES_IV1_HI (0x004c)
+#define C7108_AES_IV2_LO (0x0050)
+#define C7108_AES_IV2_HI (0x0054)
+#define C7108_AES_IV3_LO (0x0058)
+#define C7108_AES_IV3_HI (0x005c)
+
+#define C7108_AES_DMA_SRC0_LO (0x0068) /* Bits 0:15 */
+#define C7108_AES_DMA_SRC0_HI (0x006c) /* Bits 27:16 */
+#define C7108_AES_DMA_DST0_LO (0x0070) /* Bits 0:15 */
+#define C7108_AES_DMA_DST0_HI (0x0074) /* Bits 27:16 */
+#define C7108_AES_DMA_LEN (0x0078) /*Bytes:(Count+1)x16 */
+
+/* AES/Copy engine control register */
+#define C7108_AES_CTRL (0x007c) /* AES control */
+#define C7108_AES_CTRL_RS (1<<0) /* Which set of src/dst to use */
+
+/* AES Cipher mode, controlled by setting Bits 2:0 */
+#define C7108_AES_CTRL_MODE_CBC 0
+#define C7108_AES_CTRL_MODE_CFB (1<<0)
+#define C7108_AES_CTRL_MODE_OFB (1<<1)
+#define C7108_AES_CTRL_MODE_CTR ((1<<0)|(1<<1))
+#define C7108_AES_CTRL_MODE_ECB (1<<2)
+
+/* AES Key length , Bits 5:4 */
+#define C7108_AES_KEY_LEN_128 0 /* 00 */
+#define C7108_AES_KEY_LEN_192 (1<<4) /* 01 */
+#define C7108_AES_KEY_LEN_256 (1<<5) /* 10 */
+
+/* AES Operation (crypt/decrypt), Bit 3 */
+#define C7108_AES_DECRYPT (1<<3) /* Clear for encrypt */
+#define C7108_AES_ENCRYPT 0
+#define C7108_AES_INTR (1<<13) /* Set on done trans from 0->1*/
+#define C7108_AES_GO (1<<14) /* Run */
+#define C7108_AES_OP_DONE (1<<15) /* Set when complete */
+
+
+/* Expanded key registers */
+#define C7108_AES_EKEY0_LO (0x0080)
+#define C7108_AES_EKEY0_HI (0x0084)
+#define C7108_AES_EKEY1_LO (0x0088)
+#define C7108_AES_EKEY1_HI (0x008c)
+#define C7108_AES_EKEY2_LO (0x0090)
+#define C7108_AES_EKEY2_HI (0x0094)
+#define C7108_AES_EKEY3_LO (0x0098)
+#define C7108_AES_EKEY3_HI (0x009c)
+#define C7108_AES_EKEY4_LO (0x00a0)
+#define C7108_AES_EKEY4_HI (0x00a4)
+#define C7108_AES_EKEY5_LO (0x00a8)
+#define C7108_AES_EKEY5_HI (0x00ac)
+#define C7108_AES_EKEY6_LO (0x00b0)
+#define C7108_AES_EKEY6_HI (0x00b4)
+#define C7108_AES_EKEY7_LO (0x00b8)
+#define C7108_AES_EKEY7_HI (0x00bc)
+#define C7108_AES_OK (0x00fc) /* Reset: "OK" */
+
+#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
+
+/* Software session entry */
+
+#define HW_TYPE_CIPHER 0
+#define SW_TYPE_HMAC 1
+#define SW_TYPE_AUTH2 2
+#define SW_TYPE_HASH 3
+#define SW_TYPE_COMP 4
+
+struct cipher_7108 {
+ int xfm_type;
+ int cri_alg;
+ union {
+ struct {
+ char sw_key[HMAC_BLOCK_LEN];
+ int sw_klen;
+ int sw_authlen;
+ } hmac;
+ } u;
+ struct cipher_7108 *next;
+};
+
+
+
+#endif /* __AES_7108_H__ */
diff --git a/target/linux/generic/files/crypto/ocf/criov.c b/target/linux/generic/files/crypto/ocf/criov.c
new file mode 100644
index 000000000..a8c1a8c4d
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/criov.c
@@ -0,0 +1,215 @@
+/* $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $ */
+
+/*
+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ * The license and original author are listed below.
+ *
+ * Copyright (c) 1999 Theo de Raadt
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+__FBSDID("$FreeBSD: src/sys/opencrypto/criov.c,v 1.5 2006/06/04 22:15:13 pjd Exp $");
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/skbuff.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/io.h>
+
+#include <uio.h>
+#include <cryptodev.h>
+
+/*
+ * This macro is only for avoiding code duplication, as we need to skip
+ * given number of bytes in the same way in three functions below.
+ */
+#define CUIO_SKIP() do { \
+ KASSERT(off >= 0, ("%s: off %d < 0", __func__, off)); \
+ KASSERT(len >= 0, ("%s: len %d < 0", __func__, len)); \
+ while (off > 0) { \
+ KASSERT(iol >= 0, ("%s: empty in skip", __func__)); \
+ if (off < iov->iov_len) \
+ break; \
+ off -= iov->iov_len; \
+ iol--; \
+ iov++; \
+ } \
+} while (0)
+
+void
+cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
+{
+ struct iovec *iov = uio->uio_iov;
+ int iol = uio->uio_iovcnt;
+ unsigned count;
+
+ CUIO_SKIP();
+ while (len > 0) {
+ KASSERT(iol >= 0, ("%s: empty", __func__));
+ count = min((int)(iov->iov_len - off), len);
+ memcpy(cp, ((caddr_t)iov->iov_base) + off, count);
+ len -= count;
+ cp += count;
+ off = 0;
+ iol--;
+ iov++;
+ }
+}
+
+void
+cuio_copyback(struct uio* uio, int off, int len, caddr_t cp)
+{
+ struct iovec *iov = uio->uio_iov;
+ int iol = uio->uio_iovcnt;
+ unsigned count;
+
+ CUIO_SKIP();
+ while (len > 0) {
+ KASSERT(iol >= 0, ("%s: empty", __func__));
+ count = min((int)(iov->iov_len - off), len);
+ memcpy(((caddr_t)iov->iov_base) + off, cp, count);
+ len -= count;
+ cp += count;
+ off = 0;
+ iol--;
+ iov++;
+ }
+}
+
+/*
+ * Return a pointer to iov/offset of location in iovec list.
+ */
+struct iovec *
+cuio_getptr(struct uio *uio, int loc, int *off)
+{
+ struct iovec *iov = uio->uio_iov;
+ int iol = uio->uio_iovcnt;
+
+ while (loc >= 0) {
+ /* Normal end of search */
+ if (loc < iov->iov_len) {
+ *off = loc;
+ return (iov);
+ }
+
+ loc -= iov->iov_len;
+ if (iol == 0) {
+ if (loc == 0) {
+ /* Point at the end of valid data */
+ *off = iov->iov_len;
+ return (iov);
+ } else
+ return (NULL);
+ } else {
+ iov++, iol--;
+ }
+ }
+
+ return (NULL);
+}
+
+EXPORT_SYMBOL(cuio_copyback);
+EXPORT_SYMBOL(cuio_copydata);
+EXPORT_SYMBOL(cuio_getptr);
+
+static void
+skb_copy_bits_back(struct sk_buff *skb, int offset, caddr_t cp, int len)
+{
+ int i;
+ if (offset < skb_headlen(skb)) {
+ memcpy(skb->data + offset, cp, min_t(int, skb_headlen(skb), len));
+ len -= skb_headlen(skb);
+ cp += skb_headlen(skb);
+ }
+ offset -= skb_headlen(skb);
+ for (i = 0; len > 0 && i < skb_shinfo(skb)->nr_frags; i++) {
+ if (offset < skb_shinfo(skb)->frags[i].size) {
+ memcpy(page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) +
+ skb_shinfo(skb)->frags[i].page_offset,
+ cp, min_t(int, skb_shinfo(skb)->frags[i].size, len));
+ len -= skb_shinfo(skb)->frags[i].size;
+ cp += skb_shinfo(skb)->frags[i].size;
+ }
+ offset -= skb_shinfo(skb)->frags[i].size;
+ }
+}
+
+void
+crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in)
+{
+
+ if ((flags & CRYPTO_F_SKBUF) != 0)
+ skb_copy_bits_back((struct sk_buff *)buf, off, in, size);
+ else if ((flags & CRYPTO_F_IOV) != 0)
+ cuio_copyback((struct uio *)buf, off, size, in);
+ else
+ bcopy(in, buf + off, size);
+}
+
+void
+crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
+{
+
+ if ((flags & CRYPTO_F_SKBUF) != 0)
+ skb_copy_bits((struct sk_buff *)buf, off, out, size);
+ else if ((flags & CRYPTO_F_IOV) != 0)
+ cuio_copydata((struct uio *)buf, off, size, out);
+ else
+ bcopy(buf + off, out, size);
+}
+
+int
+crypto_apply(int flags, caddr_t buf, int off, int len,
+ int (*f)(void *, void *, u_int), void *arg)
+{
+#if 0
+ int error;
+
+ if ((flags & CRYPTO_F_SKBUF) != 0)
+ error = XXXXXX((struct mbuf *)buf, off, len, f, arg);
+ else if ((flags & CRYPTO_F_IOV) != 0)
+ error = cuio_apply((struct uio *)buf, off, len, f, arg);
+ else
+ error = (*f)(arg, buf + off, len);
+ return (error);
+#else
+ KASSERT(0, ("crypto_apply not implemented!\n"));
+#endif
+ return 0;
+}
+
+EXPORT_SYMBOL(crypto_copyback);
+EXPORT_SYMBOL(crypto_copydata);
+EXPORT_SYMBOL(crypto_apply);
+
diff --git a/target/linux/generic/files/crypto/ocf/crypto.c b/target/linux/generic/files/crypto/ocf/crypto.c
new file mode 100644
index 000000000..f48210d06
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/crypto.c
@@ -0,0 +1,1766 @@
+/*-
+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ * The license and original author are listed below.
+ *
+ * Copyright (c) 2002-2006 Sam Leffler. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if 0
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.27 2007/03/21 03:42:51 sam Exp $");
+#endif
+
+/*
+ * Cryptographic Subsystem.
+ *
+ * This code is derived from the Openbsd Cryptographic Framework (OCF)
+ * that has the copyright shown below. Very little of the original
+ * code remains.
+ */
+/*-
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000, 2001 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ *
+__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.16 2005/01/07 02:29:16 imp Exp $");
+ */
+
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,4)
+#include <linux/kthread.h>
+#endif
+#include <cryptodev.h>
+
+/*
+ * keep track of whether or not we have been initialised, a big
+ * issue if we are linked into the kernel and a driver gets started before
+ * us
+ */
+static int crypto_initted = 0;
+
+/*
+ * Crypto drivers register themselves by allocating a slot in the
+ * crypto_drivers table with crypto_get_driverid() and then registering
+ * each algorithm they support with crypto_register() and crypto_kregister().
+ */
+
+/*
+ * lock on driver table
+ * we track its state as spin_is_locked does not do anything on non-SMP boxes
+ */
+static spinlock_t crypto_drivers_lock;
+static int crypto_drivers_locked; /* for non-SMP boxes */
+
+#define CRYPTO_DRIVER_LOCK() \
+ ({ \
+ spin_lock_irqsave(&crypto_drivers_lock, d_flags); \
+ crypto_drivers_locked = 1; \
+ dprintk("%s,%d: DRIVER_LOCK()\n", __FILE__, __LINE__); \
+ })
+#define CRYPTO_DRIVER_UNLOCK() \
+ ({ \
+ dprintk("%s,%d: DRIVER_UNLOCK()\n", __FILE__, __LINE__); \
+ crypto_drivers_locked = 0; \
+ spin_unlock_irqrestore(&crypto_drivers_lock, d_flags); \
+ })
+#define CRYPTO_DRIVER_ASSERT() \
+ ({ \
+ if (!crypto_drivers_locked) { \
+ dprintk("%s,%d: DRIVER_ASSERT!\n", __FILE__, __LINE__); \
+ } \
+ })
+
+/*
+ * Crypto device/driver capabilities structure.
+ *
+ * Synchronization:
+ * (d) - protected by CRYPTO_DRIVER_LOCK()
+ * (q) - protected by CRYPTO_Q_LOCK()
+ * Not tagged fields are read-only.
+ */
+struct cryptocap {
+ device_t cc_dev; /* (d) device/driver */
+ u_int32_t cc_sessions; /* (d) # of sessions */
+	u_int32_t	cc_koperations;	/* (d) # of asym operations */
+ /*
+ * Largest possible operator length (in bits) for each type of
+ * encryption algorithm. XXX not used
+ */
+ u_int16_t cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
+ u_int8_t cc_alg[CRYPTO_ALGORITHM_MAX + 1];
+ u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1];
+
+ int cc_flags; /* (d) flags */
+#define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */
+ int cc_qblocked; /* (q) symmetric q blocked */
+ int cc_kqblocked; /* (q) asymmetric q blocked */
+
+ int cc_unqblocked; /* (q) symmetric q blocked */
+ int cc_unkqblocked; /* (q) asymmetric q blocked */
+};
+static struct cryptocap *crypto_drivers = NULL;
+static int crypto_drivers_num = 0;
+
+/*
+ * There are two queues for crypto requests; one for symmetric (e.g.
+ * cipher) operations and one for asymmetric (e.g. MOD) operations.
+ * A single mutex is used to lock access to both queues. We could
+ * have one per-queue but having one simplifies handling of block/unblock
+ * operations.
+ */
+static LIST_HEAD(crp_q); /* crypto request queue */
+static LIST_HEAD(crp_kq); /* asym request queue */
+
+static spinlock_t crypto_q_lock;
+
+int crypto_all_qblocked = 0; /* protect with Q_LOCK */
+module_param(crypto_all_qblocked, int, 0444);
+MODULE_PARM_DESC(crypto_all_qblocked, "Are all crypto queues blocked");
+
+int crypto_all_kqblocked = 0; /* protect with Q_LOCK */
+module_param(crypto_all_kqblocked, int, 0444);
+MODULE_PARM_DESC(crypto_all_kqblocked, "Are all asym crypto queues blocked");
+
+#define CRYPTO_Q_LOCK() \
+ ({ \
+ spin_lock_irqsave(&crypto_q_lock, q_flags); \
+ dprintk("%s,%d: Q_LOCK()\n", __FILE__, __LINE__); \
+ })
+#define CRYPTO_Q_UNLOCK() \
+ ({ \
+ dprintk("%s,%d: Q_UNLOCK()\n", __FILE__, __LINE__); \
+ spin_unlock_irqrestore(&crypto_q_lock, q_flags); \
+ })
+
+/*
+ * There are two queues for processing completed crypto requests; one
+ * for the symmetric and one for the asymmetric ops. We only need one
+ * but have two to avoid type futzing (cryptop vs. cryptkop). A single
+ * mutex is used to lock access to both queues. Note that this lock
+ * must be separate from the lock on request queues to ensure driver
+ * callbacks don't generate lock order reversals.
+ */
+static LIST_HEAD(crp_ret_q); /* callback queues */
+static LIST_HEAD(crp_ret_kq);
+
+static spinlock_t crypto_ret_q_lock;
+#define CRYPTO_RETQ_LOCK() \
+ ({ \
+ spin_lock_irqsave(&crypto_ret_q_lock, r_flags); \
+ dprintk("%s,%d: RETQ_LOCK\n", __FILE__, __LINE__); \
+ })
+#define CRYPTO_RETQ_UNLOCK() \
+ ({ \
+ dprintk("%s,%d: RETQ_UNLOCK\n", __FILE__, __LINE__); \
+ spin_unlock_irqrestore(&crypto_ret_q_lock, r_flags); \
+ })
+#define CRYPTO_RETQ_EMPTY() (list_empty(&crp_ret_q) && list_empty(&crp_ret_kq))
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+static kmem_cache_t *cryptop_zone;
+static kmem_cache_t *cryptodesc_zone;
+#else
+static struct kmem_cache *cryptop_zone;
+static struct kmem_cache *cryptodesc_zone;
+#endif
+
+#define debug crypto_debug
+int crypto_debug = 0;
+module_param(crypto_debug, int, 0644);
+MODULE_PARM_DESC(crypto_debug, "Enable debug");
+EXPORT_SYMBOL(crypto_debug);
+
+/*
+ * Maximum number of outstanding crypto requests before we start
+ * failing requests. We need this to prevent DOS when too many
+ * requests are arriving for us to keep up. Otherwise we will
+ * run the system out of memory. Since crypto is slow, we are
+ * usually the bottleneck that needs to say "enough is enough".
+ *
+ * We cannot print errors when this condition occurs: we are already too
+ * slow, and printing anything would only make matters worse.
+ */
+
+static int crypto_q_cnt = 0;
+module_param(crypto_q_cnt, int, 0444);
+MODULE_PARM_DESC(crypto_q_cnt,
+ "Current number of outstanding crypto requests");
+
+static int crypto_q_max = 1000;
+module_param(crypto_q_max, int, 0644);
+MODULE_PARM_DESC(crypto_q_max,
+ "Maximum number of outstanding crypto requests");
+
+#define bootverbose crypto_verbose
+static int crypto_verbose = 0;
+module_param(crypto_verbose, int, 0644);
+MODULE_PARM_DESC(crypto_verbose,
+ "Enable verbose crypto startup");
+
+int crypto_usercrypto = 1; /* userland may do crypto reqs */
+module_param(crypto_usercrypto, int, 0644);
+MODULE_PARM_DESC(crypto_usercrypto,
+ "Enable/disable user-mode access to crypto support");
+
+int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
+module_param(crypto_userasymcrypto, int, 0644);
+MODULE_PARM_DESC(crypto_userasymcrypto,
+ "Enable/disable user-mode access to asymmetric crypto support");
+
+int crypto_devallowsoft = 0; /* only use hardware crypto */
+module_param(crypto_devallowsoft, int, 0644);
+MODULE_PARM_DESC(crypto_devallowsoft,
+ "Enable/disable use of software crypto support");
+
+/*
+ * This parameter controls the maximum number of crypto operations to
+ * do consecutively in the crypto kernel thread before scheduling to allow
+ * other processes to run. Without it, it is possible to get into a
+ * situation where the crypto thread never allows any other processes to run.
+ * Default to 1000 which should be less than one second.
+ */
+static int crypto_max_loopcount = 1000;
+module_param(crypto_max_loopcount, int, 0644);
+MODULE_PARM_DESC(crypto_max_loopcount,
+ "Maximum number of crypto ops to do before yielding to other processes");
+
+#ifndef CONFIG_NR_CPUS
+#define CONFIG_NR_CPUS 1
+#endif
+
+static struct task_struct *cryptoproc[CONFIG_NR_CPUS];
+static struct task_struct *cryptoretproc[CONFIG_NR_CPUS];
+static DECLARE_WAIT_QUEUE_HEAD(cryptoproc_wait);
+static DECLARE_WAIT_QUEUE_HEAD(cryptoretproc_wait);
+
+static int crypto_proc(void *arg);
+static int crypto_ret_proc(void *arg);
+static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
+static int crypto_kinvoke(struct cryptkop *krp, int flags);
+static void crypto_exit(void);
+static int crypto_init(void);
+
+static struct cryptostats cryptostats;
+
+static struct cryptocap *
+crypto_checkdriver(u_int32_t hid)
+{
+ if (crypto_drivers == NULL)
+ return NULL;
+ return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
+}
+
+/*
+ * Compare a driver's list of supported algorithms against another
+ * list; return non-zero if all algorithms are supported.
+ */
+static int
+driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
+{
+ const struct cryptoini *cr;
+
+ /* See if all the algorithms are supported. */
+ for (cr = cri; cr; cr = cr->cri_next)
+ if (cap->cc_alg[cr->cri_alg] == 0)
+ return 0;
+ return 1;
+}
+
+
+/*
+ * Select a driver for a new session that supports the specified
+ * algorithms and, optionally, is constrained according to the flags.
+ * The algorithm we use here is pretty stupid; just use the
+ * first driver that supports all the algorithms we need. If there
+ * are multiple drivers we choose the driver with the fewest active
+ * sessions. We prefer hardware-backed drivers to software ones.
+ *
+ * XXX We need more smarts here (in real life too, but that's
+ * XXX another story altogether).
+ */
+static struct cryptocap *
+crypto_select_driver(const struct cryptoini *cri, int flags)
+{
+ struct cryptocap *cap, *best;
+ int match, hid;
+
+ CRYPTO_DRIVER_ASSERT();
+
+ /*
+ * Look first for hardware crypto devices if permitted.
+ */
+ if (flags & CRYPTOCAP_F_HARDWARE)
+ match = CRYPTOCAP_F_HARDWARE;
+ else
+ match = CRYPTOCAP_F_SOFTWARE;
+ best = NULL;
+again:
+ for (hid = 0; hid < crypto_drivers_num; hid++) {
+ cap = &crypto_drivers[hid];
+ /*
+ * If it's not initialized, is in the process of
+ * going away, or is not appropriate (hardware
+ * or software based on match), then skip.
+ */
+ if (cap->cc_dev == NULL ||
+ (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
+ (cap->cc_flags & match) == 0)
+ continue;
+
+ /* verify all the algorithms are supported. */
+ if (driver_suitable(cap, cri)) {
+ if (best == NULL ||
+ cap->cc_sessions < best->cc_sessions)
+ best = cap;
+ }
+ }
+ if (best != NULL)
+ return best;
+ if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
+ /* sort of an Algol 68-style for loop */
+ match = CRYPTOCAP_F_SOFTWARE;
+ goto again;
+ }
+ return best;
+}
+
+/*
+ * Create a new session. The crid argument specifies a crypto
+ * driver to use or constraints on a driver to select (hardware
+ * only, software only, either). Whatever driver is selected
+ * must be capable of the requested crypto algorithms.
+ */
+int
+crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
+{
+ struct cryptocap *cap;
+ u_int32_t hid, lid;
+ int err;
+ unsigned long d_flags;
+
+ CRYPTO_DRIVER_LOCK();
+ if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
+ /*
+ * Use specified driver; verify it is capable.
+ */
+ cap = crypto_checkdriver(crid);
+ if (cap != NULL && !driver_suitable(cap, cri))
+ cap = NULL;
+ } else {
+ /*
+ * No requested driver; select based on crid flags.
+ */
+ cap = crypto_select_driver(cri, crid);
+ /*
+ * if NULL then can't do everything in one session.
+ * XXX Fix this. We need to inject a "virtual" session
+ * XXX layer right about here.
+ */
+ }
+ if (cap != NULL) {
+ /* Call the driver initialization routine. */
+ hid = cap - crypto_drivers;
+ lid = hid; /* Pass the driver ID. */
+ cap->cc_sessions++;
+ CRYPTO_DRIVER_UNLOCK();
+ err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
+ CRYPTO_DRIVER_LOCK();
+ if (err == 0) {
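+ /*
+ * Session id layout: the upper 32 bits carry the driver capability
+ * byte and the driver table index (hid); the lower 32 bits carry the
+ * driver-local id (lid). The CRYPTO_SESID2* macros recover these.
+ */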
+ (*sid) = (cap->cc_flags & 0xff000000)
+ | (hid & 0x00ffffff);
+ (*sid) <<= 32;
+ (*sid) |= (lid & 0xffffffff);
+ } else
+ cap->cc_sessions--;
+ } else
+ err = EINVAL;
+ CRYPTO_DRIVER_UNLOCK();
+ return err;
+}
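+
+/*
+ * Usage sketch (not part of the original source): a consumer building an
+ * encrypt-plus-authenticate session. The cri_klen (bits) and cri_key
+ * fields and the algorithm constants are assumptions based on the usual
+ * cryptodev API; error handling is omitted.
+ *
+ *     struct cryptoini crie, cria;
+ *     u_int64_t sid;
+ *     int error;
+ *
+ *     memset(&crie, 0, sizeof(crie));
+ *     crie.cri_alg = CRYPTO_3DES_CBC;
+ *     crie.cri_klen = 24 * 8;
+ *     crie.cri_key = enc_key;
+ *
+ *     memset(&cria, 0, sizeof(cria));
+ *     cria.cri_alg = CRYPTO_SHA1_HMAC;
+ *     cria.cri_klen = 20 * 8;
+ *     cria.cri_key = auth_key;
+ *     crie.cri_next = &cria;
+ *
+ *     error = crypto_newsession(&sid, &crie,
+ *         CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
+ */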
+
+static void
+crypto_remove(struct cryptocap *cap)
+{
+ CRYPTO_DRIVER_ASSERT();
+ if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
+ bzero(cap, sizeof(*cap));
+}
+
+/*
+ * Delete an existing session (or a reserved session on an unregistered
+ * driver).
+ */
+int
+crypto_freesession(u_int64_t sid)
+{
+ struct cryptocap *cap;
+ u_int32_t hid;
+ int err = 0;
+ unsigned long d_flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+ CRYPTO_DRIVER_LOCK();
+
+ if (crypto_drivers == NULL) {
+ err = EINVAL;
+ goto done;
+ }
+
+ /* Determine the driver ID. */
+ hid = CRYPTO_SESID2HID(sid);
+
+ if (hid >= crypto_drivers_num) {
+ dprintk("%s - INVALID DRIVER NUM %d\n", __FUNCTION__, hid);
+ err = ENOENT;
+ goto done;
+ }
+ cap = &crypto_drivers[hid];
+
+ if (cap->cc_dev) {
+ CRYPTO_DRIVER_UNLOCK();
+ /* Call the driver cleanup routine, if available, unlocked. */
+ err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
+ CRYPTO_DRIVER_LOCK();
+ }
+
+ if (cap->cc_sessions)
+ cap->cc_sessions--;
+
+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
+ crypto_remove(cap);
+
+done:
+ CRYPTO_DRIVER_UNLOCK();
+ return err;
+}
+
+/*
+ * Return an unused driver id. Used by drivers prior to registering
+ * support for the algorithms they handle.
+ */
+int32_t
+crypto_get_driverid(device_t dev, int flags)
+{
+ struct cryptocap *newdrv;
+ int i;
+ unsigned long d_flags;
+
+ if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
+ printf("%s: no flags specified when registering driver\n",
+ device_get_nameunit(dev));
+ return -1;
+ }
+
+ CRYPTO_DRIVER_LOCK();
+
+ for (i = 0; i < crypto_drivers_num; i++) {
+ if (crypto_drivers[i].cc_dev == NULL &&
+ (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
+ break;
+ }
+ }
+
+ /* Out of entries, allocate some more. */
+ if (i == crypto_drivers_num) {
+ /* Be careful about wrap-around. */
+ if (2 * crypto_drivers_num <= crypto_drivers_num) {
+ CRYPTO_DRIVER_UNLOCK();
+ printk("crypto: driver count wraparound!\n");
+ return -1;
+ }
+
+ newdrv = kmalloc(2 * crypto_drivers_num * sizeof(struct cryptocap),
+ GFP_KERNEL);
+ if (newdrv == NULL) {
+ CRYPTO_DRIVER_UNLOCK();
+ printk("crypto: no space to expand driver table!\n");
+ return -1;
+ }
+
+ memcpy(newdrv, crypto_drivers,
+ crypto_drivers_num * sizeof(struct cryptocap));
+ memset(&newdrv[crypto_drivers_num], 0,
+ crypto_drivers_num * sizeof(struct cryptocap));
+
+ crypto_drivers_num *= 2;
+
+ kfree(crypto_drivers);
+ crypto_drivers = newdrv;
+ }
+
+ /* NB: state is zero'd on free */
+ crypto_drivers[i].cc_sessions = 1; /* Mark */
+ crypto_drivers[i].cc_dev = dev;
+ crypto_drivers[i].cc_flags = flags;
+ if (bootverbose)
+ printf("crypto: assign %s driver id %u, flags %u\n",
+ device_get_nameunit(dev), i, flags);
+
+ CRYPTO_DRIVER_UNLOCK();
+
+ return i;
+}
+
+/*
+ * Lookup a driver by name. We match against the full device
+ * name and unit, and against just the name. The latter gives
+ * us simple wildcarding by device name. On success return the
+ * driver/hardware identifier; otherwise return -1.
+ */
+int
+crypto_find_driver(const char *match)
+{
+ int i, len = strlen(match);
+ unsigned long d_flags;
+
+ CRYPTO_DRIVER_LOCK();
+ for (i = 0; i < crypto_drivers_num; i++) {
+ device_t dev = crypto_drivers[i].cc_dev;
+ if (dev == NULL ||
+ (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
+ continue;
+ if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
+ strncmp(match, device_get_name(dev), len) == 0)
+ break;
+ }
+ CRYPTO_DRIVER_UNLOCK();
+ return i < crypto_drivers_num ? i : -1;
+}
+
+/*
+ * Return the device_t for the specified driver or NULL
+ * if the driver identifier is invalid.
+ */
+device_t
+crypto_find_device_byhid(int hid)
+{
+ struct cryptocap *cap = crypto_checkdriver(hid);
+ return cap != NULL ? cap->cc_dev : NULL;
+}
+
+/*
+ * Return the device/driver capabilities.
+ */
+int
+crypto_getcaps(int hid)
+{
+ struct cryptocap *cap = crypto_checkdriver(hid);
+ return cap != NULL ? cap->cc_flags : 0;
+}
+
+/*
+ * Register support for a key-related algorithm. This routine
+ * is called once for each algorithm supported by a driver.
+ */
+int
+crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
+{
+ struct cryptocap *cap;
+ int err;
+ unsigned long d_flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+ CRYPTO_DRIVER_LOCK();
+
+ cap = crypto_checkdriver(driverid);
+ if (cap != NULL &&
+ (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
+ /*
+ * XXX Do some performance testing to determine placing.
+ * XXX We probably need an auxiliary data structure that
+ * XXX describes relative performances.
+ */
+
+ cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
+ if (bootverbose)
+ printf("crypto: %s registers key alg %u flags %u\n"
+ , device_get_nameunit(cap->cc_dev)
+ , kalg
+ , flags
+ );
+ err = 0;
+ } else
+ err = EINVAL;
+
+ CRYPTO_DRIVER_UNLOCK();
+ return err;
+}
+
+/*
+ * Register support for a non-key-related algorithm. This routine
+ * is called once for each such algorithm supported by a driver.
+ */
+int
+crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
+ u_int32_t flags)
+{
+ struct cryptocap *cap;
+ int err;
+ unsigned long d_flags;
+
+ dprintk("%s(id=0x%x, alg=%d, maxoplen=%d, flags=0x%x)\n", __FUNCTION__,
+ driverid, alg, maxoplen, flags);
+
+ CRYPTO_DRIVER_LOCK();
+
+ cap = crypto_checkdriver(driverid);
+ /* NB: algorithms are in the range [1..max] */
+ if (cap != NULL &&
+ (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
+ /*
+ * XXX Do some performance testing to determine placing.
+ * XXX We probably need an auxiliary data structure that
+ * XXX describes relative performances.
+ */
+
+ cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
+ cap->cc_max_op_len[alg] = maxoplen;
+ if (bootverbose)
+ printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
+ , device_get_nameunit(cap->cc_dev)
+ , alg
+ , flags
+ , maxoplen
+ );
+ cap->cc_sessions = 0; /* Unmark */
+ err = 0;
+ } else
+ err = EINVAL;
+
+ CRYPTO_DRIVER_UNLOCK();
+ return err;
+}
+
+static void
+driver_finis(struct cryptocap *cap)
+{
+ u_int32_t ses, kops;
+
+ CRYPTO_DRIVER_ASSERT();
+
+ ses = cap->cc_sessions;
+ kops = cap->cc_koperations;
+ bzero(cap, sizeof(*cap));
+ if (ses != 0 || kops != 0) {
+ /*
+ * If there are pending sessions,
+ * just mark as invalid.
+ */
+ cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
+ cap->cc_sessions = ses;
+ cap->cc_koperations = kops;
+ }
+}
+
+/*
+ * Unregister a crypto driver. If there are pending sessions using it,
+ * leave enough information around so that subsequent calls using those
+ * sessions will correctly detect the driver has been unregistered and
+ * reroute requests.
+ */
+int
+crypto_unregister(u_int32_t driverid, int alg)
+{
+ struct cryptocap *cap;
+ int i, err;
+ unsigned long d_flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+ CRYPTO_DRIVER_LOCK();
+
+ cap = crypto_checkdriver(driverid);
+ if (cap != NULL &&
+ (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
+ cap->cc_alg[alg] != 0) {
+ cap->cc_alg[alg] = 0;
+ cap->cc_max_op_len[alg] = 0;
+
+ /* Was this the last algorithm ? */
+ for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
+ if (cap->cc_alg[i] != 0)
+ break;
+
+ if (i == CRYPTO_ALGORITHM_MAX + 1)
+ driver_finis(cap);
+ err = 0;
+ } else
+ err = EINVAL;
+ CRYPTO_DRIVER_UNLOCK();
+ return err;
+}
+
+/*
+ * Unregister all algorithms associated with a crypto driver.
+ * If there are pending sessions using it, leave enough information
+ * around so that subsequent calls using those sessions will
+ * correctly detect the driver has been unregistered and reroute
+ * requests.
+ */
+int
+crypto_unregister_all(u_int32_t driverid)
+{
+ struct cryptocap *cap;
+ int err;
+ unsigned long d_flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+ CRYPTO_DRIVER_LOCK();
+ cap = crypto_checkdriver(driverid);
+ if (cap != NULL) {
+ driver_finis(cap);
+ err = 0;
+ } else
+ err = EINVAL;
+ CRYPTO_DRIVER_UNLOCK();
+
+ return err;
+}
+
+/*
+ * Clear blockage on a driver. The what parameter indicates whether
+ * the driver is now ready for cryptop's and/or cryptokop's.
+ */
+int
+crypto_unblock(u_int32_t driverid, int what)
+{
+ struct cryptocap *cap;
+ int err;
+ unsigned long q_flags;
+
+ CRYPTO_Q_LOCK();
+ cap = crypto_checkdriver(driverid);
+ if (cap != NULL) {
+ if (what & CRYPTO_SYMQ) {
+ cap->cc_qblocked = 0;
+ cap->cc_unqblocked = 0;
+ crypto_all_qblocked = 0;
+ }
+ if (what & CRYPTO_ASYMQ) {
+ cap->cc_kqblocked = 0;
+ cap->cc_unkqblocked = 0;
+ crypto_all_kqblocked = 0;
+ }
+ wake_up_interruptible(&cryptoproc_wait);
+ err = 0;
+ } else
+ err = EINVAL;
+ CRYPTO_Q_UNLOCK(); //DAVIDM should this be a driver lock
+
+ return err;
+}
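+
+/*
+ * A driver typically calls crypto_unblock() from its completion or
+ * interrupt path once resources are available again, e.g. (sketch,
+ * with "sc->sc_cid" being the id returned by crypto_get_driverid()):
+ *
+ *     crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
+ */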
+
+/*
+ * Add a crypto request to a queue, to be processed by the kernel thread.
+ */
+int
+crypto_dispatch(struct cryptop *crp)
+{
+ struct cryptocap *cap;
+ int result = -1;
+ unsigned long q_flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ cryptostats.cs_ops++;
+
+ CRYPTO_Q_LOCK();
+ if (crypto_q_cnt >= crypto_q_max) {
+ cryptostats.cs_drops++;
+ CRYPTO_Q_UNLOCK();
+ return ENOMEM;
+ }
+ crypto_q_cnt++;
+
+ /* make sure we are starting a fresh run on this crp. */
+ crp->crp_flags &= ~CRYPTO_F_DONE;
+ crp->crp_etype = 0;
+
+ /*
+ * Caller marked the request to be processed immediately; dispatch
+ * it directly to the driver unless the driver is currently blocked.
+ */
+ if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
+ int hid = CRYPTO_SESID2HID(crp->crp_sid);
+ cap = crypto_checkdriver(hid);
+ /* Driver cannot disappear when there is an active session. */
+ KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
+ if (!cap->cc_qblocked) {
+ crypto_all_qblocked = 0;
+ crypto_drivers[hid].cc_unqblocked = 1;
+ CRYPTO_Q_UNLOCK();
+ result = crypto_invoke(cap, crp, 0);
+ CRYPTO_Q_LOCK();
+ if (result == ERESTART)
+ if (crypto_drivers[hid].cc_unqblocked)
+ crypto_drivers[hid].cc_qblocked = 1;
+ crypto_drivers[hid].cc_unqblocked = 0;
+ }
+ }
+ if (result == ERESTART) {
+ /*
+ * The driver ran out of resources, mark the
+ * driver ``blocked'' for cryptop's and put
+ * the request back in the queue. It would
+ * best to put the request back where we got
+ * it but that's hard so for now we put it
+ * at the front. This should be ok; putting
+ * it at the end does not work.
+ */
+ list_add(&crp->crp_next, &crp_q);
+ cryptostats.cs_blocks++;
+ result = 0;
+ } else if (result == -1) {
+ TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
+ result = 0;
+ }
+ wake_up_interruptible(&cryptoproc_wait);
+ CRYPTO_Q_UNLOCK();
+ return result;
+}
+
+/*
+ * Add an asymmetric crypto request to a queue,
+ * to be processed by the kernel thread.
+ */
+int
+crypto_kdispatch(struct cryptkop *krp)
+{
+ int error;
+ unsigned long q_flags;
+
+ cryptostats.cs_kops++;
+
+ error = crypto_kinvoke(krp, krp->krp_crid);
+ if (error == ERESTART) {
+ CRYPTO_Q_LOCK();
+ TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
+ wake_up_interruptible(&cryptoproc_wait);
+ CRYPTO_Q_UNLOCK();
+ error = 0;
+ }
+ return error;
+}
+
+/*
+ * Verify a driver is suitable for the specified operation.
+ */
+static __inline int
+kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
+{
+ return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
+}
+
+/*
+ * Select a driver for an asym operation. The driver must
+ * support the necessary algorithm. The caller can constrain
+ * which device is selected with the flags parameter. The
+ * algorithm we use here is pretty stupid; just use the first
+ * driver that supports the algorithms we need. If there are
+ * multiple suitable drivers we choose the driver with the
+ * fewest active operations. We prefer hardware-backed
+ * drivers to software ones when either may be used.
+ */
+static struct cryptocap *
+crypto_select_kdriver(const struct cryptkop *krp, int flags)
+{
+ struct cryptocap *cap, *best, *blocked;
+ int match, hid;
+
+ CRYPTO_DRIVER_ASSERT();
+
+ /*
+ * Look first for hardware crypto devices if permitted.
+ */
+ if (flags & CRYPTOCAP_F_HARDWARE)
+ match = CRYPTOCAP_F_HARDWARE;
+ else
+ match = CRYPTOCAP_F_SOFTWARE;
+ best = NULL;
+ blocked = NULL;
+again:
+ for (hid = 0; hid < crypto_drivers_num; hid++) {
+ cap = &crypto_drivers[hid];
+ /*
+ * If it's not initialized, is in the process of
+ * going away, or is not appropriate (hardware
+ * or software based on match), then skip.
+ */
+ if (cap->cc_dev == NULL ||
+ (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
+ (cap->cc_flags & match) == 0)
+ continue;
+
+ /* verify all the algorithms are supported. */
+ if (kdriver_suitable(cap, krp)) {
+ if (best == NULL ||
+ cap->cc_koperations < best->cc_koperations)
+ best = cap;
+ }
+ }
+ if (best != NULL)
+ return best;
+ if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
+ /* sort of an Algol 68-style for loop */
+ match = CRYPTOCAP_F_SOFTWARE;
+ goto again;
+ }
+ return best;
+}
+
+/*
+ * Dispatch an asymmetric crypto request.
+ */
+static int
+crypto_kinvoke(struct cryptkop *krp, int crid)
+{
+ struct cryptocap *cap = NULL;
+ int error;
+ unsigned long d_flags;
+
+ KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
+ KASSERT(krp->krp_callback != NULL,
+ ("%s: krp->crp_callback == NULL", __func__));
+
+ CRYPTO_DRIVER_LOCK();
+ if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
+ cap = crypto_checkdriver(crid);
+ if (cap != NULL) {
+ /*
+ * Driver present, it must support the necessary
+ * algorithm and, if s/w drivers are excluded,
+ * it must be registered as hardware-backed.
+ */
+ if (!kdriver_suitable(cap, krp) ||
+ (!crypto_devallowsoft &&
+ (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
+ cap = NULL;
+ }
+ } else {
+ /*
+ * No requested driver; select based on crid flags.
+ */
+ if (!crypto_devallowsoft) /* NB: disallow s/w drivers */
+ crid &= ~CRYPTOCAP_F_SOFTWARE;
+ cap = crypto_select_kdriver(krp, crid);
+ }
+ if (cap != NULL && !cap->cc_kqblocked) {
+ krp->krp_hid = cap - crypto_drivers;
+ cap->cc_koperations++;
+ CRYPTO_DRIVER_UNLOCK();
+ error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
+ CRYPTO_DRIVER_LOCK();
+ if (error == ERESTART) {
+ cap->cc_koperations--;
+ CRYPTO_DRIVER_UNLOCK();
+ return (error);
+ }
+ /* return the actual device used */
+ krp->krp_crid = krp->krp_hid;
+ } else {
+ /*
+ * NB: cap is !NULL if device is blocked; in
+ * that case return ERESTART so the operation
+ * is resubmitted if possible.
+ */
+ error = (cap == NULL) ? ENODEV : ERESTART;
+ }
+ CRYPTO_DRIVER_UNLOCK();
+
+ if (error) {
+ krp->krp_status = error;
+ crypto_kdone(krp);
+ }
+ return 0;
+}
+
+
+/*
+ * Dispatch a crypto request to the appropriate crypto devices.
+ */
+static int
+crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
+{
+ KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
+ KASSERT(crp->crp_callback != NULL,
+ ("%s: crp->crp_callback == NULL", __func__));
+ KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
+
+ dprintk("%s()\n", __FUNCTION__);
+
+#ifdef CRYPTO_TIMING
+ if (crypto_timing)
+ crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
+#endif
+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
+ struct cryptodesc *crd;
+ u_int64_t nid;
+
+ /*
+ * Driver has unregistered; migrate the session and return
+ * an error to the caller so they'll resubmit the op.
+ *
+ * XXX: What if there are more already queued requests for this
+ * session?
+ */
+ crypto_freesession(crp->crp_sid);
+
+ for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
+ crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
+
+ /* XXX propagate flags from initial session? */
+ if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
+ CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
+ crp->crp_sid = nid;
+
+ crp->crp_etype = EAGAIN;
+ crypto_done(crp);
+ return 0;
+ } else {
+ /*
+ * Invoke the driver to process the request.
+ */
+ return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
+ }
+}
+
+/*
+ * Release a set of crypto descriptors.
+ */
+void
+crypto_freereq(struct cryptop *crp)
+{
+ struct cryptodesc *crd;
+
+ if (crp == NULL)
+ return;
+
+#ifdef DIAGNOSTIC
+ {
+ struct cryptop *crp2;
+ unsigned long q_flags;
+
+ CRYPTO_Q_LOCK();
+ TAILQ_FOREACH(crp2, &crp_q, crp_next) {
+ KASSERT(crp2 != crp,
+ ("Freeing cryptop from the crypto queue (%p).",
+ crp));
+ }
+ CRYPTO_Q_UNLOCK();
+ CRYPTO_RETQ_LOCK();
+ TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
+ KASSERT(crp2 != crp,
+ ("Freeing cryptop from the return queue (%p).",
+ crp));
+ }
+ CRYPTO_RETQ_UNLOCK();
+ }
+#endif
+
+ while ((crd = crp->crp_desc) != NULL) {
+ crp->crp_desc = crd->crd_next;
+ kmem_cache_free(cryptodesc_zone, crd);
+ }
+ kmem_cache_free(cryptop_zone, crp);
+}
+
+/*
+ * Acquire a set of crypto descriptors.
+ */
+struct cryptop *
+crypto_getreq(int num)
+{
+ struct cryptodesc *crd;
+ struct cryptop *crp;
+
+ crp = kmem_cache_alloc(cryptop_zone, SLAB_ATOMIC);
+ if (crp != NULL) {
+ memset(crp, 0, sizeof(*crp));
+ INIT_LIST_HEAD(&crp->crp_next);
+ init_waitqueue_head(&crp->crp_waitq);
+ while (num--) {
+ crd = kmem_cache_alloc(cryptodesc_zone, SLAB_ATOMIC);
+ if (crd == NULL) {
+ crypto_freereq(crp);
+ return NULL;
+ }
+ memset(crd, 0, sizeof(*crd));
+ crd->crd_next = crp->crp_desc;
+ crp->crp_desc = crd;
+ }
+ }
+ return crp;
+}
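+
+/*
+ * Typical request lifecycle (sketch, not part of the original source):
+ * allocate a request, describe the operation, dispatch it, and free it
+ * from the completion callback. "my_done" and the buffer/descriptor
+ * setup are illustrative only; see cryptodev.h for the full layout.
+ *
+ *     struct cryptop *crp = crypto_getreq(1);
+ *     if (crp == NULL)
+ *         return ENOMEM;
+ *     crp->crp_sid = sid;              (from crypto_newsession())
+ *     crp->crp_callback = my_done;     (invoked via crypto_done())
+ *     ... fill in crp->crp_desc and the data buffer ...
+ *     error = crypto_dispatch(crp);
+ *
+ * my_done() should check crp->crp_etype and eventually call
+ * crypto_freereq(crp).
+ */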
+
+/*
+ * Invoke the callback on behalf of the driver.
+ */
+void
+crypto_done(struct cryptop *crp)
+{
+ unsigned long q_flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+ if ((crp->crp_flags & CRYPTO_F_DONE) == 0) {
+ crp->crp_flags |= CRYPTO_F_DONE;
+ CRYPTO_Q_LOCK();
+ crypto_q_cnt--;
+ CRYPTO_Q_UNLOCK();
+ } else
+ printk("crypto: crypto_done op already done, flags 0x%x",
+ crp->crp_flags);
+ if (crp->crp_etype != 0)
+ cryptostats.cs_errs++;
+ /*
+ * CBIMM means unconditionally do the callback immediately;
+ * CBIFSYNC means do the callback immediately only if the
+ * operation was done synchronously. Both are used to avoid
+ * doing extraneous context switches; the latter is mostly
+ * used with the software crypto driver.
+ */
+ if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
+ ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
+ (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
+ /*
+ * Do the callback directly. This is ok when the
+ * callback routine does very little (e.g. the
+ * /dev/crypto callback method just does a wakeup).
+ */
+ crp->crp_callback(crp);
+ } else {
+ unsigned long r_flags;
+ /*
+ * Normal case; queue the callback for the thread.
+ */
+ CRYPTO_RETQ_LOCK();
+ wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
+ TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
+ CRYPTO_RETQ_UNLOCK();
+ }
+}
+
+/*
+ * Invoke the callback on behalf of the driver.
+ */
+void
+crypto_kdone(struct cryptkop *krp)
+{
+ struct cryptocap *cap;
+ unsigned long d_flags;
+
+ if ((krp->krp_flags & CRYPTO_KF_DONE) != 0)
+ printk("crypto: crypto_kdone op already done, flags 0x%x",
+ krp->krp_flags);
+ krp->krp_flags |= CRYPTO_KF_DONE;
+ if (krp->krp_status != 0)
+ cryptostats.cs_kerrs++;
+
+ CRYPTO_DRIVER_LOCK();
+ /* XXX: What if driver is loaded in the meantime? */
+ if (krp->krp_hid < crypto_drivers_num) {
+ cap = &crypto_drivers[krp->krp_hid];
+ cap->cc_koperations--;
+ KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
+ crypto_remove(cap);
+ }
+ CRYPTO_DRIVER_UNLOCK();
+
+ /*
+ * CBIMM means unconditionally do the callback immediately;
+ * This is used to avoid doing extraneous context switches
+ */
+ if ((krp->krp_flags & CRYPTO_KF_CBIMM)) {
+ /*
+ * Do the callback directly. This is ok when the
+ * callback routine does very little (e.g. the
+ * /dev/crypto callback method just does a wakeup).
+ */
+ krp->krp_callback(krp);
+ } else {
+ unsigned long r_flags;
+ /*
+ * Normal case; queue the callback for the thread.
+ */
+ CRYPTO_RETQ_LOCK();
+ wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
+ TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
+ CRYPTO_RETQ_UNLOCK();
+ }
+}
+
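+/*
+ * Build a bitmask of the available asymmetric (key) algorithms: bit k is
+ * set when some usable driver supports key algorithm k. Software-only
+ * drivers are skipped unless crypto_devallowsoft is set.
+ */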
+int
+crypto_getfeat(int *featp)
+{
+ int hid, kalg, feat = 0;
+ unsigned long d_flags;
+
+ CRYPTO_DRIVER_LOCK();
+ for (hid = 0; hid < crypto_drivers_num; hid++) {
+ const struct cryptocap *cap = &crypto_drivers[hid];
+
+ if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
+ !crypto_devallowsoft) {
+ continue;
+ }
+ for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
+ if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
+ feat |= 1 << kalg;
+ }
+ CRYPTO_DRIVER_UNLOCK();
+ *featp = feat;
+ return (0);
+}
+
+/*
+ * Crypto thread, dispatches crypto requests.
+ */
+static int
+crypto_proc(void *arg)
+{
+ struct cryptop *crp, *submit;
+ struct cryptkop *krp, *krpp;
+ struct cryptocap *cap;
+ u_int32_t hid;
+ int result, hint;
+ unsigned long q_flags;
+ int loopcount = 0;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ CRYPTO_Q_LOCK();
+ for (;;) {
+ /*
+ * We need to make sure we don't get into a busy loop with nothing
+ * to do. The two crypto_all_*blocked vars tell us when every driver
+ * and queue is full and nothing can be done; in that case we wait
+ * for an unblock.
+ */
+ crypto_all_qblocked = !list_empty(&crp_q);
+
+ /*
+ * Find the first element in the queue that can be
+ * processed and look-ahead to see if multiple ops
+ * are ready for the same driver.
+ */
+ submit = NULL;
+ hint = 0;
+ list_for_each_entry(crp, &crp_q, crp_next) {
+ hid = CRYPTO_SESID2HID(crp->crp_sid);
+ cap = crypto_checkdriver(hid);
+ /*
+ * Driver cannot disappear when there is an active
+ * session.
+ */
+ KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
+ __func__, __LINE__));
+ if (cap == NULL || cap->cc_dev == NULL) {
+ /* Op needs to be migrated, process it. */
+ if (submit == NULL)
+ submit = crp;
+ break;
+ }
+ if (!cap->cc_qblocked) {
+ if (submit != NULL) {
+ /*
+ * We stop on finding another op,
+ * regardless of whether it's for the same
+ * driver or not. We could keep
+ * searching the queue but it might be
+ * better to just use a per-driver
+ * queue instead.
+ */
+ if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
+ hint = CRYPTO_HINT_MORE;
+ break;
+ } else {
+ submit = crp;
+ if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
+ break;
+ /* keep scanning for more that are queued */
+ }
+ }
+ }
+ if (submit != NULL) {
+ hid = CRYPTO_SESID2HID(submit->crp_sid);
+ crypto_all_qblocked = 0;
+ list_del(&submit->crp_next);
+ crypto_drivers[hid].cc_unqblocked = 1;
+ cap = crypto_checkdriver(hid);
+ CRYPTO_Q_UNLOCK();
+ KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
+ __func__, __LINE__));
+ result = crypto_invoke(cap, submit, hint);
+ CRYPTO_Q_LOCK();
+ if (result == ERESTART) {
+ /*
+ * The driver ran out of resources, mark the
+ * driver ``blocked'' for cryptop's and put
+ * the request back in the queue. It would be
+ * best to put the request back where we got
+ * it, but that's hard, so for now we put it
+ * at the front. This should be ok; putting
+ * it at the end does not work.
+ */
+ /* XXX validate sid again? */
+ list_add(&submit->crp_next, &crp_q);
+ cryptostats.cs_blocks++;
+ if (crypto_drivers[hid].cc_unqblocked)
+ crypto_drivers[hid].cc_qblocked = 1; /* mirror crypto_dispatch() */
+ crypto_drivers[hid].cc_unqblocked = 0;
+ }
+ crypto_drivers[hid].cc_unqblocked = 0;
+ }
+
+ crypto_all_kqblocked = !list_empty(&crp_kq);
+
+ /* As above, but for key ops */
+ krp = NULL;
+ list_for_each_entry(krpp, &crp_kq, krp_next) {
+ cap = crypto_checkdriver(krpp->krp_hid);
+ if (cap == NULL || cap->cc_dev == NULL) {
+ /*
+ * Operation needs to be migrated, invalidate
+ * the assigned device so it will reselect a
+ * new one below. Propagate the original
+ * crid selection flags if supplied.
+ */
+ krpp->krp_hid = krpp->krp_crid &
+ (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
+ if (krpp->krp_hid == 0)
+ krpp->krp_hid =
+ CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
+ break;
+ }
+ if (!cap->cc_kqblocked) {
+ krp = krpp;
+ break;
+ }
+ }
+ if (krp != NULL) {
+ crypto_all_kqblocked = 0;
+ list_del(&krp->krp_next);
+ crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
+ CRYPTO_Q_UNLOCK();
+ result = crypto_kinvoke(krp, krp->krp_hid);
+ CRYPTO_Q_LOCK();
+ if (result == ERESTART) {
+ /*
+ * The driver ran out of resources, mark the
+ * driver ``blocked'' for cryptkop's and put
+ * the request back in the queue. It would be
+ * best to put the request back where we got
+ * it, but that's hard, so for now we put it
+ * at the front. This should be ok; putting
+ * it at the end does not work.
+ */
+ /* XXX validate sid again? */
+ list_add(&krp->krp_next, &crp_kq);
+ cryptostats.cs_kblocks++;
+ } else
+ crypto_drivers[krp->krp_hid].cc_kqblocked = 0;
+ }
+
+ if (submit == NULL && krp == NULL) {
+ /*
+ * Nothing more to be processed. Sleep until we're
+ * woken because there are more ops to process.
+ * This happens either by submission or by a driver
+ * becoming unblocked and notifying us through
+ * crypto_unblock. Note that when we wakeup we
+ * start processing each queue again from the
+ * front. It's not clear that it's important to
+ * preserve this ordering since ops may finish
+ * out of order if dispatched to different devices
+ * and some become blocked while others do not.
+ */
+ dprintk("%s - sleeping (qe=%d qb=%d kqe=%d kqb=%d)\n",
+ __FUNCTION__,
+ list_empty(&crp_q), crypto_all_qblocked,
+ list_empty(&crp_kq), crypto_all_kqblocked);
+ loopcount = 0;
+ CRYPTO_Q_UNLOCK();
+ wait_event_interruptible(cryptoproc_wait,
+ !(list_empty(&crp_q) || crypto_all_qblocked) ||
+ !(list_empty(&crp_kq) || crypto_all_kqblocked) ||
+ kthread_should_stop());
+ if (signal_pending (current)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ spin_lock_irq(&current->sigmask_lock);
+#endif
+ flush_signals(current);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ spin_unlock_irq(&current->sigmask_lock);
+#endif
+ }
+ CRYPTO_Q_LOCK();
+ dprintk("%s - awake\n", __FUNCTION__);
+ if (kthread_should_stop())
+ break;
+ cryptostats.cs_intrs++;
+ } else if (loopcount > crypto_max_loopcount) {
+ /*
+ * Give other processes a chance to run if we've
+ * been using the CPU exclusively for a while.
+ */
+ loopcount = 0;
+ CRYPTO_Q_UNLOCK();
+ schedule();
+ CRYPTO_Q_LOCK();
+ }
+ loopcount++;
+ }
+ CRYPTO_Q_UNLOCK();
+ return 0;
+}
+
+/*
+ * Crypto return thread; it runs the callbacks for processed crypto requests.
+ * Callbacks are done here, rather than in the crypto drivers, because
+ * callbacks typically are expensive and would slow interrupt handling.
+ */
+static int
+crypto_ret_proc(void *arg)
+{
+ struct cryptop *crpt;
+ struct cryptkop *krpt;
+ unsigned long r_flags;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ CRYPTO_RETQ_LOCK();
+ for (;;) {
+ /* Harvest return q's for completed ops */
+ crpt = NULL;
+ if (!list_empty(&crp_ret_q))
+ crpt = list_entry(crp_ret_q.next, typeof(*crpt), crp_next);
+ if (crpt != NULL)
+ list_del(&crpt->crp_next);
+
+ krpt = NULL;
+ if (!list_empty(&crp_ret_kq))
+ krpt = list_entry(crp_ret_kq.next, typeof(*krpt), krp_next);
+ if (krpt != NULL)
+ list_del(&krpt->krp_next);
+
+ if (crpt != NULL || krpt != NULL) {
+ CRYPTO_RETQ_UNLOCK();
+ /*
+ * Run callbacks unlocked.
+ */
+ if (crpt != NULL)
+ crpt->crp_callback(crpt);
+ if (krpt != NULL)
+ krpt->krp_callback(krpt);
+ CRYPTO_RETQ_LOCK();
+ } else {
+ /*
+ * Nothing more to be processed. Sleep until we're
+ * woken because there are more returns to process.
+ */
+ dprintk("%s - sleeping\n", __FUNCTION__);
+ CRYPTO_RETQ_UNLOCK();
+ wait_event_interruptible(cryptoretproc_wait,
+ !list_empty(&crp_ret_q) ||
+ !list_empty(&crp_ret_kq) ||
+ kthread_should_stop());
+ if (signal_pending (current)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ spin_lock_irq(&current->sigmask_lock);
+#endif
+ flush_signals(current);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ spin_unlock_irq(&current->sigmask_lock);
+#endif
+ }
+ CRYPTO_RETQ_LOCK();
+ dprintk("%s - awake\n", __FUNCTION__);
+ if (kthread_should_stop()) {
+ dprintk("%s - EXITING!\n", __FUNCTION__);
+ break;
+ }
+ cryptostats.cs_rets++;
+ }
+ }
+ CRYPTO_RETQ_UNLOCK();
+ return 0;
+}
+
+
+#if 0 /* should put this into /proc or something */
+static void
+db_show_drivers(void)
+{
+ int hid;
+
+ db_printf("%12s %4s %4s %8s %2s %2s\n"
+ , "Device"
+ , "Ses"
+ , "Kops"
+ , "Flags"
+ , "QB"
+ , "KB"
+ );
+ for (hid = 0; hid < crypto_drivers_num; hid++) {
+ const struct cryptocap *cap = &crypto_drivers[hid];
+ if (cap->cc_dev == NULL)
+ continue;
+ db_printf("%-12s %4u %4u %08x %2u %2u\n"
+ , device_get_nameunit(cap->cc_dev)
+ , cap->cc_sessions
+ , cap->cc_koperations
+ , cap->cc_flags
+ , cap->cc_qblocked
+ , cap->cc_kqblocked
+ );
+ }
+}
+
+DB_SHOW_COMMAND(crypto, db_show_crypto)
+{
+ struct cryptop *crp;
+
+ db_show_drivers();
+ db_printf("\n");
+
+ db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
+ "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
+ "Desc", "Callback");
+ TAILQ_FOREACH(crp, &crp_q, crp_next) {
+ db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
+ , (int) CRYPTO_SESID2HID(crp->crp_sid)
+ , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
+ , crp->crp_ilen, crp->crp_olen
+ , crp->crp_etype
+ , crp->crp_flags
+ , crp->crp_desc
+ , crp->crp_callback
+ );
+ }
+ if (!TAILQ_EMPTY(&crp_ret_q)) {
+ db_printf("\n%4s %4s %4s %8s\n",
+ "HID", "Etype", "Flags", "Callback");
+ TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
+ db_printf("%4u %4u %04x %8p\n"
+ , (int) CRYPTO_SESID2HID(crp->crp_sid)
+ , crp->crp_etype
+ , crp->crp_flags
+ , crp->crp_callback
+ );
+ }
+ }
+}
+
+DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
+{
+ struct cryptkop *krp;
+
+ db_show_drivers();
+ db_printf("\n");
+
+ db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
+ "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
+ TAILQ_FOREACH(krp, &crp_kq, krp_next) {
+ db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
+ , krp->krp_op
+ , krp->krp_status
+ , krp->krp_iparams, krp->krp_oparams
+ , krp->krp_crid, krp->krp_hid
+ , krp->krp_callback
+ );
+ }
+ if (!TAILQ_EMPTY(&crp_ret_q)) {
+ db_printf("%4s %5s %8s %4s %8s\n",
+ "Op", "Status", "CRID", "HID", "Callback");
+ TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
+ db_printf("%4u %5u %08x %4u %8p\n"
+ , krp->krp_op
+ , krp->krp_status
+ , krp->krp_crid, krp->krp_hid
+ , krp->krp_callback
+ );
+ }
+ }
+}
+#endif
+
+
+static int
+crypto_init(void)
+{
+ int error;
+ unsigned long cpu;
+
+ dprintk("%s(%p)\n", __FUNCTION__, (void *) crypto_init);
+
+ if (crypto_initted)
+ return 0;
+ crypto_initted = 1;
+
+ spin_lock_init(&crypto_drivers_lock);
+ spin_lock_init(&crypto_q_lock);
+ spin_lock_init(&crypto_ret_q_lock);
+
+ cryptop_zone = kmem_cache_create("cryptop", sizeof(struct cryptop),
+ 0, SLAB_HWCACHE_ALIGN, NULL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+ , NULL
+#endif
+ );
+
+ cryptodesc_zone = kmem_cache_create("cryptodesc", sizeof(struct cryptodesc),
+ 0, SLAB_HWCACHE_ALIGN, NULL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+ , NULL
+#endif
+ );
+
+ if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
+ printk("crypto: crypto_init cannot setup crypto zones\n");
+ error = ENOMEM;
+ goto bad;
+ }
+
+ crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
+ crypto_drivers = kmalloc(crypto_drivers_num * sizeof(struct cryptocap),
+ GFP_KERNEL);
+ if (crypto_drivers == NULL) {
+ printk("crypto: crypto_init cannot setup crypto drivers\n");
+ error = ENOMEM;
+ goto bad;
+ }
+
+ memset(crypto_drivers, 0, crypto_drivers_num * sizeof(struct cryptocap));
+
+ ocf_for_each_cpu(cpu) {
+ cryptoproc[cpu] = kthread_create(crypto_proc, (void *) cpu,
+ "ocf_%d", (int) cpu);
+ if (IS_ERR(cryptoproc[cpu])) {
+ error = PTR_ERR(cryptoproc[cpu]);
+ printk("crypto: crypto_init cannot start crypto thread; error %d",
+ error);
+ goto bad;
+ }
+ kthread_bind(cryptoproc[cpu], cpu);
+ wake_up_process(cryptoproc[cpu]);
+
+ cryptoretproc[cpu] = kthread_create(crypto_ret_proc, (void *) cpu,
+ "ocf_ret_%d", (int) cpu);
+ if (IS_ERR(cryptoretproc[cpu])) {
+ error = PTR_ERR(cryptoretproc[cpu]);
+ printk("crypto: crypto_init cannot start cryptoret thread; error %d",
+ error);
+ goto bad;
+ }
+ kthread_bind(cryptoretproc[cpu], cpu);
+ wake_up_process(cryptoretproc[cpu]);
+ }
+
+ return 0;
+bad:
+ crypto_exit();
+ return error;
+}
+
+
+static void
+crypto_exit(void)
+{
+ int cpu;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ /*
+ * Terminate any crypto threads.
+ */
+ ocf_for_each_cpu(cpu) {
+ kthread_stop(cryptoproc[cpu]);
+ kthread_stop(cryptoretproc[cpu]);
+ }
+
+ /*
+ * Reclaim dynamically allocated resources.
+ */
+ if (crypto_drivers != NULL)
+ kfree(crypto_drivers);
+
+ if (cryptodesc_zone != NULL)
+ kmem_cache_destroy(cryptodesc_zone);
+ if (cryptop_zone != NULL)
+ kmem_cache_destroy(cryptop_zone);
+}
+
+
+EXPORT_SYMBOL(crypto_newsession);
+EXPORT_SYMBOL(crypto_freesession);
+EXPORT_SYMBOL(crypto_get_driverid);
+EXPORT_SYMBOL(crypto_kregister);
+EXPORT_SYMBOL(crypto_register);
+EXPORT_SYMBOL(crypto_unregister);
+EXPORT_SYMBOL(crypto_unregister_all);
+EXPORT_SYMBOL(crypto_unblock);
+EXPORT_SYMBOL(crypto_dispatch);
+EXPORT_SYMBOL(crypto_kdispatch);
+EXPORT_SYMBOL(crypto_freereq);
+EXPORT_SYMBOL(crypto_getreq);
+EXPORT_SYMBOL(crypto_done);
+EXPORT_SYMBOL(crypto_kdone);
+EXPORT_SYMBOL(crypto_getfeat);
+EXPORT_SYMBOL(crypto_userasymcrypto);
+EXPORT_SYMBOL(crypto_getcaps);
+EXPORT_SYMBOL(crypto_find_driver);
+EXPORT_SYMBOL(crypto_find_device_byhid);
+
+module_init(crypto_init);
+module_exit(crypto_exit);
+
+MODULE_LICENSE("BSD");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("OCF (OpenBSD Cryptographic Framework)");
diff --git a/target/linux/generic/files/crypto/ocf/cryptocteon/Makefile b/target/linux/generic/files/crypto/ocf/cryptocteon/Makefile
new file mode 100644
index 000000000..eeed0d641
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/cryptocteon/Makefile
@@ -0,0 +1,17 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_CRYPTOCTEON) += cryptocteon.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef CONFIG_OCF_CRYPTOCTEON
+# you need the cavium crypto component installed
+EXTRA_CFLAGS += -I$(ROOTDIR)/prop/include
+endif
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
+
diff --git a/target/linux/generic/files/crypto/ocf/cryptocteon/README.txt b/target/linux/generic/files/crypto/ocf/cryptocteon/README.txt
new file mode 100644
index 000000000..807b2e518
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/cryptocteon/README.txt
@@ -0,0 +1,11 @@
+
+You will need the CRYPTO package installed to build this driver, and
+potentially the ADK.
+
+cavium_crypto sourced from:
+
+ adk/components/source/cavium_ipsec_kame/cavium_ipsec.c
+
+and significantly modified to suit use with OCF. All original
+copyright/ownership headers retained.
+
diff --git a/target/linux/generic/files/crypto/ocf/cryptocteon/cavium_crypto.c b/target/linux/generic/files/crypto/ocf/cryptocteon/cavium_crypto.c
new file mode 100644
index 000000000..ceaf77c5c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/cryptocteon/cavium_crypto.c
@@ -0,0 +1,2283 @@
+/*
+ * Copyright (c) 2009 David McCullough <david.mccullough@securecomputing.com>
+ *
+ * Copyright (c) 2003-2007 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Cavium Networks
+ * 4. Cavium Networks' name may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import regulations
+ * in other countries. You warrant that You will comply strictly in all
+ * respects with all such regulations and acknowledge that you have the
+ * responsibility to obtain licenses to export, re-export or import the
+ * Software.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" AND
+ * WITH ALL FAULTS AND CAVIUM MAKES NO PROMISES, REPRESENTATIONS OR WARRANTIES,
+ * EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO THE
+ * SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+*/
+/****************************************************************************/
+
+#include <linux/scatterlist.h>
+#include <asm/octeon/octeon.h>
+#include "octeon-asm.h"
+
+/****************************************************************************/
+
+extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *);
+extern void octeon_crypto_disable(struct octeon_cop2_state *, unsigned long);
+
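+/*
+ * Scatterlist walking helpers used by the crypto loops below: SG_INIT
+ * points "p" at the first 64-bit word of the scatterlist data and
+ * SG_CONSUME advances one word at a time, re-initialising the pointer
+ * and remaining length from the scatterlist once the current run of
+ * data has been consumed.
+ */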
+#define SG_INIT(s, p, i, l) \
+ { \
+ (i) = 0; \
+ (l) = (s)[0].length; \
+ (p) = (typeof(p)) sg_virt((s)); \
+ CVMX_PREFETCH0((p)); \
+ }
+
+#define SG_CONSUME(s, p, i, l) \
+ { \
+ (p)++; \
+ (l) -= sizeof(*(p)); \
+ if ((l) < 0) { \
+ dprintk("%s, %d: l = %d\n", __FILE__, __LINE__, l); \
+ } else if ((l) == 0) { \
+ (i)++; \
+ (l) = (s)[0].length; \
+ (p) = (typeof(p)) sg_virt(s); \
+ CVMX_PREFETCH0((p)); \
+ } \
+ }
+
+#define ESP_HEADER_LENGTH 8
+#define DES_CBC_IV_LENGTH 8
+#define AES_CBC_IV_LENGTH 16
+#define ESP_HMAC_LEN 12
+
+#define ESP_HEADER_LENGTH 8
+#define DES_CBC_IV_LENGTH 8
+
+/****************************************************************************/
+
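+/*
+ * The hash unit consumes a 512-bit block as eight 64-bit words. The
+ * CVM_LOAD*_UNIT helpers below track the next word slot in "next" and
+ * issue the STARTSHA/STARTMD5 operation when the eighth word of a block
+ * is written, which triggers the actual hash round.
+ */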
+#define CVM_LOAD_SHA_UNIT(dat, next) { \
+ if (next == 0) { \
+ next = 1; \
+ CVMX_MT_HSH_DAT (dat, 0); \
+ } else if (next == 1) { \
+ next = 2; \
+ CVMX_MT_HSH_DAT (dat, 1); \
+ } else if (next == 2) { \
+ next = 3; \
+ CVMX_MT_HSH_DAT (dat, 2); \
+ } else if (next == 3) { \
+ next = 4; \
+ CVMX_MT_HSH_DAT (dat, 3); \
+ } else if (next == 4) { \
+ next = 5; \
+ CVMX_MT_HSH_DAT (dat, 4); \
+ } else if (next == 5) { \
+ next = 6; \
+ CVMX_MT_HSH_DAT (dat, 5); \
+ } else if (next == 6) { \
+ next = 7; \
+ CVMX_MT_HSH_DAT (dat, 6); \
+ } else { \
+ CVMX_MT_HSH_STARTSHA (dat); \
+ next = 0; \
+ } \
+}
+
+#define CVM_LOAD2_SHA_UNIT(dat1, dat2, next) { \
+ if (next == 0) { \
+ CVMX_MT_HSH_DAT (dat1, 0); \
+ CVMX_MT_HSH_DAT (dat2, 1); \
+ next = 2; \
+ } else if (next == 1) { \
+ CVMX_MT_HSH_DAT (dat1, 1); \
+ CVMX_MT_HSH_DAT (dat2, 2); \
+ next = 3; \
+ } else if (next == 2) { \
+ CVMX_MT_HSH_DAT (dat1, 2); \
+ CVMX_MT_HSH_DAT (dat2, 3); \
+ next = 4; \
+ } else if (next == 3) { \
+ CVMX_MT_HSH_DAT (dat1, 3); \
+ CVMX_MT_HSH_DAT (dat2, 4); \
+ next = 5; \
+ } else if (next == 4) { \
+ CVMX_MT_HSH_DAT (dat1, 4); \
+ CVMX_MT_HSH_DAT (dat2, 5); \
+ next = 6; \
+ } else if (next == 5) { \
+ CVMX_MT_HSH_DAT (dat1, 5); \
+ CVMX_MT_HSH_DAT (dat2, 6); \
+ next = 7; \
+ } else if (next == 6) { \
+ CVMX_MT_HSH_DAT (dat1, 6); \
+ CVMX_MT_HSH_STARTSHA (dat2); \
+ next = 0; \
+ } else { \
+ CVMX_MT_HSH_STARTSHA (dat1); \
+ CVMX_MT_HSH_DAT (dat2, 0); \
+ next = 1; \
+ } \
+}
+
+/****************************************************************************/
+
+#define CVM_LOAD_MD5_UNIT(dat, next) { \
+ if (next == 0) { \
+ next = 1; \
+ CVMX_MT_HSH_DAT (dat, 0); \
+ } else if (next == 1) { \
+ next = 2; \
+ CVMX_MT_HSH_DAT (dat, 1); \
+ } else if (next == 2) { \
+ next = 3; \
+ CVMX_MT_HSH_DAT (dat, 2); \
+ } else if (next == 3) { \
+ next = 4; \
+ CVMX_MT_HSH_DAT (dat, 3); \
+ } else if (next == 4) { \
+ next = 5; \
+ CVMX_MT_HSH_DAT (dat, 4); \
+ } else if (next == 5) { \
+ next = 6; \
+ CVMX_MT_HSH_DAT (dat, 5); \
+ } else if (next == 6) { \
+ next = 7; \
+ CVMX_MT_HSH_DAT (dat, 6); \
+ } else { \
+ CVMX_MT_HSH_STARTMD5 (dat); \
+ next = 0; \
+ } \
+}
+
+#define CVM_LOAD2_MD5_UNIT(dat1, dat2, next) { \
+ if (next == 0) { \
+ CVMX_MT_HSH_DAT (dat1, 0); \
+ CVMX_MT_HSH_DAT (dat2, 1); \
+ next = 2; \
+ } else if (next == 1) { \
+ CVMX_MT_HSH_DAT (dat1, 1); \
+ CVMX_MT_HSH_DAT (dat2, 2); \
+ next = 3; \
+ } else if (next == 2) { \
+ CVMX_MT_HSH_DAT (dat1, 2); \
+ CVMX_MT_HSH_DAT (dat2, 3); \
+ next = 4; \
+ } else if (next == 3) { \
+ CVMX_MT_HSH_DAT (dat1, 3); \
+ CVMX_MT_HSH_DAT (dat2, 4); \
+ next = 5; \
+ } else if (next == 4) { \
+ CVMX_MT_HSH_DAT (dat1, 4); \
+ CVMX_MT_HSH_DAT (dat2, 5); \
+ next = 6; \
+ } else if (next == 5) { \
+ CVMX_MT_HSH_DAT (dat1, 5); \
+ CVMX_MT_HSH_DAT (dat2, 6); \
+ next = 7; \
+ } else if (next == 6) { \
+ CVMX_MT_HSH_DAT (dat1, 6); \
+ CVMX_MT_HSH_STARTMD5 (dat2); \
+ next = 0; \
+ } else { \
+ CVMX_MT_HSH_STARTMD5 (dat1); \
+ CVMX_MT_HSH_DAT (dat2, 0); \
+ next = 1; \
+ } \
+}
+
+/****************************************************************************/
+
+static inline uint64_t
+swap64(uint64_t a)
+{
+ return ((a >> 56) |
+ (((a >> 48) & 0xfful) << 8) |
+ (((a >> 40) & 0xfful) << 16) |
+ (((a >> 32) & 0xfful) << 24) |
+ (((a >> 24) & 0xfful) << 32) |
+ (((a >> 16) & 0xfful) << 40) |
+ (((a >> 8) & 0xfful) << 48) | (((a >> 0) & 0xfful) << 56));
+}
+
+/****************************************************************************/
+
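+/*
+ * Precompute the HMAC inner and outer hash state for a session key: the
+ * key is zero-padded to a 64-byte block, XORed with the standard HMAC
+ * ipad (0x36) and opad (0x5c) patterns, run through one SHA1 (auth != 0)
+ * or MD5 round on the Octeon hash unit, and the resulting intermediate
+ * IVs are stored in "inner" and "outer".
+ */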
+void
+octo_calc_hash(__u8 auth, unsigned char *key, uint64_t *inner, uint64_t *outer)
+{
+ uint8_t hash_key[64];
+ uint64_t *key1;
+ register uint64_t xor1 = 0x3636363636363636ULL;
+ register uint64_t xor2 = 0x5c5c5c5c5c5c5c5cULL;
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ memset(hash_key, 0, sizeof(hash_key));
+ memcpy(hash_key, (uint8_t *) key, (auth ? 20 : 16));
+ key1 = (uint64_t *) hash_key;
+ flags = octeon_crypto_enable(&state);
+ if (auth) {
+ CVMX_MT_HSH_IV(0x67452301EFCDAB89ULL, 0);
+ CVMX_MT_HSH_IV(0x98BADCFE10325476ULL, 1);
+ CVMX_MT_HSH_IV(0xC3D2E1F000000000ULL, 2);
+ } else {
+ CVMX_MT_HSH_IV(0x0123456789ABCDEFULL, 0);
+ CVMX_MT_HSH_IV(0xFEDCBA9876543210ULL, 1);
+ }
+
+ CVMX_MT_HSH_DAT((*key1 ^ xor1), 0);
+ key1++;
+ CVMX_MT_HSH_DAT((*key1 ^ xor1), 1);
+ key1++;
+ CVMX_MT_HSH_DAT((*key1 ^ xor1), 2);
+ key1++;
+ CVMX_MT_HSH_DAT((*key1 ^ xor1), 3);
+ key1++;
+ CVMX_MT_HSH_DAT((*key1 ^ xor1), 4);
+ key1++;
+ CVMX_MT_HSH_DAT((*key1 ^ xor1), 5);
+ key1++;
+ CVMX_MT_HSH_DAT((*key1 ^ xor1), 6);
+ key1++;
+ if (auth)
+ CVMX_MT_HSH_STARTSHA((*key1 ^ xor1));
+ else
+ CVMX_MT_HSH_STARTMD5((*key1 ^ xor1));
+
+ CVMX_MF_HSH_IV(inner[0], 0);
+ CVMX_MF_HSH_IV(inner[1], 1);
+ if (auth) {
+ inner[2] = 0;
+ CVMX_MF_HSH_IV(((uint64_t *) inner)[2], 2);
+ }
+
+ memset(hash_key, 0, sizeof(hash_key));
+ memcpy(hash_key, (uint8_t *) key, (auth ? 20 : 16));
+ key1 = (uint64_t *) hash_key;
+ if (auth) {
+ CVMX_MT_HSH_IV(0x67452301EFCDAB89ULL, 0);
+ CVMX_MT_HSH_IV(0x98BADCFE10325476ULL, 1);
+ CVMX_MT_HSH_IV(0xC3D2E1F000000000ULL, 2);
+ } else {
+ CVMX_MT_HSH_IV(0x0123456789ABCDEFULL, 0);
+ CVMX_MT_HSH_IV(0xFEDCBA9876543210ULL, 1);
+ }
+
+ CVMX_MT_HSH_DAT((*key1 ^ xor2), 0);
+ key1++;
+ CVMX_MT_HSH_DAT((*key1 ^ xor2), 1);
+ key1++;
+ CVMX_MT_HSH_DAT((*key1 ^ xor2), 2);
+ key1++;
+ CVMX_MT_HSH_DAT((*key1 ^ xor2), 3);
+ key1++;
+ CVMX_MT_HSH_DAT((*key1 ^ xor2), 4);
+ key1++;
+ CVMX_MT_HSH_DAT((*key1 ^ xor2), 5);
+ key1++;
+ CVMX_MT_HSH_DAT((*key1 ^ xor2), 6);
+ key1++;
+ if (auth)
+ CVMX_MT_HSH_STARTSHA((*key1 ^ xor2));
+ else
+ CVMX_MT_HSH_STARTMD5((*key1 ^ xor2));
+
+ CVMX_MF_HSH_IV(outer[0], 0);
+ CVMX_MF_HSH_IV(outer[1], 1);
+ if (auth) {
+ outer[2] = 0;
+ CVMX_MF_HSH_IV(outer[2], 2);
+ }
+ octeon_crypto_disable(&state, flags);
+ return;
+}
+
+/****************************************************************************/
+/* DES functions */
+
+int
+octo_des_cbc_encrypt(
+ struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp)
+{
+ uint64_t *data;
+ int data_i, data_l;
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+ (crypt_off & 0x7) || (crypt_off + crypt_len > sg_len))) {
+ dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+ "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+ "icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ return -EINVAL;
+ }
+
+ SG_INIT(sg, data, data_i, data_l);
+
+ CVMX_PREFETCH0(ivp);
+ CVMX_PREFETCH0(od->octo_enckey);
+
+ flags = octeon_crypto_enable(&state);
+
+ /* load 3DES Key */
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+ if (od->octo_encklen == 24) {
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ } else if (od->octo_encklen == 8) {
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1);
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2);
+ } else {
+ octeon_crypto_disable(&state, flags);
+ dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+ return -EINVAL;
+ }
+
+ CVMX_MT_3DES_IV(* (uint64_t *) ivp);
+
+ while (crypt_off > 0) {
+ SG_CONSUME(sg, data, data_i, data_l);
+ crypt_off -= 8;
+ }
+
+ while (crypt_len > 0) {
+ CVMX_MT_3DES_ENC_CBC(*data);
+ CVMX_MF_3DES_RESULT(*data);
+ SG_CONSUME(sg, data, data_i, data_l);
+ crypt_len -= 8;
+ }
+
+ octeon_crypto_disable(&state, flags);
+ return 0;
+}
+
+
+int
+octo_des_cbc_decrypt(
+ struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp)
+{
+ uint64_t *data;
+ int data_i, data_l;
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+ (crypt_off & 0x7) || (crypt_off + crypt_len > sg_len))) {
+ dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+ "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+ "icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ return -EINVAL;
+ }
+
+ SG_INIT(sg, data, data_i, data_l);
+
+ CVMX_PREFETCH0(ivp);
+ CVMX_PREFETCH0(od->octo_enckey);
+
+ flags = octeon_crypto_enable(&state);
+
+ /* load 3DES Key */
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+ if (od->octo_encklen == 24) {
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ } else if (od->octo_encklen == 8) {
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1);
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2);
+ } else {
+ octeon_crypto_disable(&state, flags);
+ dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+ return -EINVAL;
+ }
+
+ CVMX_MT_3DES_IV(* (uint64_t *) ivp);
+
+ while (crypt_off > 0) {
+ SG_CONSUME(sg, data, data_i, data_l);
+ crypt_off -= 8;
+ }
+
+ while (crypt_len > 0) {
+ CVMX_MT_3DES_DEC_CBC(*data);
+ CVMX_MF_3DES_RESULT(*data);
+ SG_CONSUME(sg, data, data_i, data_l);
+ crypt_len -= 8;
+ }
+
+ octeon_crypto_disable(&state, flags);
+ return 0;
+}
+
+/****************************************************************************/
+/* AES functions */
+
+int
+octo_aes_cbc_encrypt(
+ struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp)
+{
+ uint64_t *data, *pdata;
+ int data_i, data_l;
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+ (crypt_off & 0x7) || (crypt_off + crypt_len > sg_len))) {
+ dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+ "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+ "icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ return -EINVAL;
+ }
+
+ SG_INIT(sg, data, data_i, data_l);
+
+ CVMX_PREFETCH0(ivp);
+ CVMX_PREFETCH0(od->octo_enckey);
+
+ flags = octeon_crypto_enable(&state);
+
+ /* load AES Key */
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+
+ if (od->octo_encklen == 16) {
+ CVMX_MT_AES_KEY(0x0, 2);
+ CVMX_MT_AES_KEY(0x0, 3);
+ } else if (od->octo_encklen == 24) {
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ CVMX_MT_AES_KEY(0x0, 3);
+ } else if (od->octo_encklen == 32) {
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
+ } else {
+ octeon_crypto_disable(&state, flags);
+ dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+ return -EINVAL;
+ }
+ CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);
+
+ CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
+ CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);
+
+ while (crypt_off > 0) {
+ SG_CONSUME(sg, data, data_i, data_l);
+ crypt_off -= 8;
+ }
+
+ while (crypt_len > 0) {
+ pdata = data;
+ CVMX_MT_AES_ENC_CBC0(*data);
+ SG_CONSUME(sg, data, data_i, data_l);
+ CVMX_MT_AES_ENC_CBC1(*data);
+ CVMX_MF_AES_RESULT(*pdata, 0);
+ CVMX_MF_AES_RESULT(*data, 1);
+ SG_CONSUME(sg, data, data_i, data_l);
+ crypt_len -= 16;
+ }
+
+ octeon_crypto_disable(&state, flags);
+ return 0;
+}
+
+
+int
+octo_aes_cbc_decrypt(
+ struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp)
+{
+ uint64_t *data, *pdata;
+ int data_i, data_l;
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+ (crypt_off & 0x7) || (crypt_off + crypt_len > sg_len))) {
+ dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+ "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+ "icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ return -EINVAL;
+ }
+
+ SG_INIT(sg, data, data_i, data_l);
+
+ CVMX_PREFETCH0(ivp);
+ CVMX_PREFETCH0(od->octo_enckey);
+
+ flags = octeon_crypto_enable(&state);
+
+ /* load AES Key */
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+
+ if (od->octo_encklen == 16) {
+ CVMX_MT_AES_KEY(0x0, 2);
+ CVMX_MT_AES_KEY(0x0, 3);
+ } else if (od->octo_encklen == 24) {
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ CVMX_MT_AES_KEY(0x0, 3);
+ } else if (od->octo_encklen == 32) {
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
+ } else {
+ octeon_crypto_disable(&state, flags);
+ dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+ return -EINVAL;
+ }
+ CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);
+
+ CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
+ CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);
+
+ while (crypt_off > 0) {
+ SG_CONSUME(sg, data, data_i, data_l);
+ crypt_off -= 8;
+ }
+
+ while (crypt_len > 0) {
+ pdata = data;
+ CVMX_MT_AES_DEC_CBC0(*data);
+ SG_CONSUME(sg, data, data_i, data_l);
+ CVMX_MT_AES_DEC_CBC1(*data);
+ CVMX_MF_AES_RESULT(*pdata, 0);
+ CVMX_MF_AES_RESULT(*data, 1);
+ SG_CONSUME(sg, data, data_i, data_l);
+ crypt_len -= 16;
+ }
+
+ octeon_crypto_disable(&state, flags);
+ return 0;
+}
+
+/****************************************************************************/
+/* MD5 */
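+/*
+ * The auth-only routines below compute an HMAC with the hash engine.
+ * octo_hminner/octo_hmouter hold the hash states left after absorbing the
+ * inner and outer HMAC key pads (set up by octo_calc_hash()), which is why
+ * the bit length fed at finalisation is (auth_len + 64) for the inner hash
+ * and (64 + digest size) for the outer hash.  MD5 uses a little-endian
+ * length encoding, hence the CVMX_ES64 byte swap before loading it.
+ */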
+
+int
+octo_null_md5_encrypt(
+ struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp)
+{
+ register int next = 0;
+ uint64_t *data;
+ uint64_t tmp1, tmp2;
+ int data_i, data_l, alen = auth_len;
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ if (unlikely(od == NULL || sg==NULL || sg_len==0 ||
+ (auth_off & 0x7) || (auth_off + auth_len > sg_len))) {
+ dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+ "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+ "icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ return -EINVAL;
+ }
+
+ SG_INIT(sg, data, data_i, data_l);
+
+ flags = octeon_crypto_enable(&state);
+
+ /* Load MD5 IV */
+ CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+
+ while (auth_off > 0) {
+ SG_CONSUME(sg, data, data_i, data_l);
+ auth_off -= 8;
+ }
+
+ while (auth_len > 0) {
+ CVM_LOAD_MD5_UNIT(*data, next);
+ auth_len -= 8;
+ SG_CONSUME(sg, data, data_i, data_l);
+ }
+
+ /* finish the hash */
+ CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+ if (unlikely(inplen)) {
+ uint64_t tmp = 0;
+ uint8_t *p = (uint8_t *) & tmp;
+ p[inplen] = 0x80;
+ do {
+ inplen--;
+ p[inplen] = ((uint8_t *) data)[inplen];
+ } while (inplen);
+ CVM_LOAD_MD5_UNIT(tmp, next);
+ } else {
+ CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+ }
+#else
+ CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+#endif
+
+ /* Finish Inner hash */
+ while (next != 7) {
+ CVM_LOAD_MD5_UNIT(((uint64_t) 0x0ULL), next);
+ }
+ CVMX_ES64(tmp1, ((alen + 64) << 3));
+ CVM_LOAD_MD5_UNIT(tmp1, next);
+
+ /* Get the inner hash of HMAC */
+ CVMX_MF_HSH_IV(tmp1, 0);
+ CVMX_MF_HSH_IV(tmp2, 1);
+
+ /* Initialize hash unit */
+ CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+
+ CVMX_MT_HSH_DAT(tmp1, 0);
+ CVMX_MT_HSH_DAT(tmp2, 1);
+ CVMX_MT_HSH_DAT(0x8000000000000000ULL, 2);
+ CVMX_MT_HSH_DATZ(3);
+ CVMX_MT_HSH_DATZ(4);
+ CVMX_MT_HSH_DATZ(5);
+ CVMX_MT_HSH_DATZ(6);
+ CVMX_ES64(tmp1, ((64 + 16) << 3));
+ CVMX_MT_HSH_STARTMD5(tmp1);
+
+ /* save the HMAC */
+ SG_INIT(sg, data, data_i, data_l);
+ while (icv_off > 0) {
+ SG_CONSUME(sg, data, data_i, data_l);
+ icv_off -= 8;
+ }
+ CVMX_MF_HSH_IV(*data, 0);
+ SG_CONSUME(sg, data, data_i, data_l);
+ CVMX_MF_HSH_IV(tmp1, 1);
+ *(uint32_t *)data = (uint32_t) (tmp1 >> 32);
+
+ octeon_crypto_disable(&state, flags);
+ return 0;
+}
+
+/****************************************************************************/
+/* SHA1 */
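+/*
+ * Same structure as the MD5 routine above, but with three 64-bit IV words,
+ * an outer length of (64 + 20) bytes for the 20-byte SHA-1 digest, and the
+ * length word loaded directly (no CVMX_ES64 swap) since SHA-1 encodes its
+ * length big-endian.
+ */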
+
+int
+octo_null_sha1_encrypt(
+ struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp)
+{
+ register int next = 0;
+ uint64_t *data;
+ uint64_t tmp1, tmp2, tmp3;
+ int data_i, data_l, alen = auth_len;
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ if (unlikely(od == NULL || sg==NULL || sg_len==0 ||
+ (auth_off & 0x7) || (auth_off + auth_len > sg_len))) {
+ dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+ "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+ "icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ return -EINVAL;
+ }
+
+ SG_INIT(sg, data, data_i, data_l);
+
+ flags = octeon_crypto_enable(&state);
+
+ /* Load SHA1 IV */
+ CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+ CVMX_MT_HSH_IV(od->octo_hminner[2], 2);
+
+ while (auth_off > 0) {
+ SG_CONSUME(sg, data, data_i, data_l);
+ auth_off -= 8;
+ }
+
+ while (auth_len > 0) {
+ CVM_LOAD_SHA_UNIT(*data, next);
+ auth_len -= 8;
+ SG_CONSUME(sg, data, data_i, data_l);
+ }
+
+ /* finish the hash */
+ CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+ if (unlikely(inplen)) {
+ uint64_t tmp = 0;
+ uint8_t *p = (uint8_t *) & tmp;
+ p[inplen] = 0x80;
+ do {
+ inplen--;
+ p[inplen] = ((uint8_t *) data)[inplen];
+ } while (inplen);
+ CVM_LOAD_MD5_UNIT(tmp, next);
+ } else {
+ CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+ }
+#else
+ CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+#endif
+
+ /* Finish Inner hash */
+ while (next != 7) {
+ CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next);
+ }
+ CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next);
+
+ /* Get the inner hash of HMAC */
+ CVMX_MF_HSH_IV(tmp1, 0);
+ CVMX_MF_HSH_IV(tmp2, 1);
+ tmp3 = 0;
+ CVMX_MF_HSH_IV(tmp3, 2);
+
+ /* Initialize hash unit */
+ CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+ CVMX_MT_HSH_IV(od->octo_hmouter[2], 2);
+
+ CVMX_MT_HSH_DAT(tmp1, 0);
+ CVMX_MT_HSH_DAT(tmp2, 1);
+ tmp3 |= 0x0000000080000000;
+ CVMX_MT_HSH_DAT(tmp3, 2);
+ CVMX_MT_HSH_DATZ(3);
+ CVMX_MT_HSH_DATZ(4);
+ CVMX_MT_HSH_DATZ(5);
+ CVMX_MT_HSH_DATZ(6);
+ CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));
+
+ /* save the HMAC */
+ SG_INIT(sg, data, data_i, data_l);
+ while (icv_off > 0) {
+ SG_CONSUME(sg, data, data_i, data_l);
+ icv_off -= 8;
+ }
+ CVMX_MF_HSH_IV(*data, 0);
+ SG_CONSUME(sg, data, data_i, data_l);
+ CVMX_MF_HSH_IV(tmp1, 1);
+ *(uint32_t *)data = (uint32_t) (tmp1 >> 32);
+
+ octeon_crypto_disable(&state, flags);
+ return 0;
+}
+
+/****************************************************************************/
+/* DES MD5 */
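+/*
+ * Combined cipher+auth routines.  Data is staged through a small union so
+ * that buffers with only 4-byte alignment can be fetched as 32-bit words
+ * yet fed to the crypto units as 64-bit words.  In both directions the
+ * HMAC covers the ciphertext: encrypt hashes after the cipher, decrypt
+ * hashes before it.
+ */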
+
+int
+octo_des_cbc_md5_encrypt(
+ struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp)
+{
+ register int next = 0;
+ union {
+ uint32_t data32[2];
+ uint64_t data64[1];
+ } mydata;
+ uint64_t *data = &mydata.data64[0];
+ uint32_t *data32;
+ uint64_t tmp1, tmp2;
+ int data_i, data_l, alen = auth_len;
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+ (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+ (crypt_len & 0x7) ||
+ (auth_len & 0x7) ||
+ (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+ dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+ "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+ "icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ return -EINVAL;
+ }
+
+ SG_INIT(sg, data32, data_i, data_l);
+
+ CVMX_PREFETCH0(ivp);
+ CVMX_PREFETCH0(od->octo_enckey);
+
+ flags = octeon_crypto_enable(&state);
+
+ /* load 3DES Key */
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+ if (od->octo_encklen == 24) {
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ } else if (od->octo_encklen == 8) {
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1);
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2);
+ } else {
+ octeon_crypto_disable(&state, flags);
+ dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+ return -EINVAL;
+ }
+
+ CVMX_MT_3DES_IV(* (uint64_t *) ivp);
+
+ /* Load MD5 IV */
+ CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+
+ while (crypt_off > 0 && auth_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ crypt_off -= 4;
+ auth_off -= 4;
+ }
+
+ while (crypt_len > 0 || auth_len > 0) {
+ uint32_t *first = data32;
+ mydata.data32[0] = *first;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ mydata.data32[1] = *data32;
+ if (crypt_off <= 0) {
+ if (crypt_len > 0) {
+ CVMX_MT_3DES_ENC_CBC(*data);
+ CVMX_MF_3DES_RESULT(*data);
+ crypt_len -= 8;
+ }
+ } else
+ crypt_off -= 8;
+ if (auth_off <= 0) {
+ if (auth_len > 0) {
+ CVM_LOAD_MD5_UNIT(*data, next);
+ auth_len -= 8;
+ }
+ } else
+ auth_off -= 8;
+ *first = mydata.data32[0];
+ *data32 = mydata.data32[1];
+ SG_CONSUME(sg, data32, data_i, data_l);
+ }
+
+ /* finish the hash */
+ CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+ if (unlikely(inplen)) {
+ uint64_t tmp = 0;
+ uint8_t *p = (uint8_t *) & tmp;
+ p[inplen] = 0x80;
+ do {
+ inplen--;
+ p[inplen] = ((uint8_t *) data)[inplen];
+ } while (inplen);
+ CVM_LOAD_MD5_UNIT(tmp, next);
+ } else {
+ CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+ }
+#else
+ CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+#endif
+
+ /* Finish Inner hash */
+ while (next != 7) {
+ CVM_LOAD_MD5_UNIT(((uint64_t) 0x0ULL), next);
+ }
+ CVMX_ES64(tmp1, ((alen + 64) << 3));
+ CVM_LOAD_MD5_UNIT(tmp1, next);
+
+ /* Get the inner hash of HMAC */
+ CVMX_MF_HSH_IV(tmp1, 0);
+ CVMX_MF_HSH_IV(tmp2, 1);
+
+ /* Initialize hash unit */
+ CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+
+ CVMX_MT_HSH_DAT(tmp1, 0);
+ CVMX_MT_HSH_DAT(tmp2, 1);
+ CVMX_MT_HSH_DAT(0x8000000000000000ULL, 2);
+ CVMX_MT_HSH_DATZ(3);
+ CVMX_MT_HSH_DATZ(4);
+ CVMX_MT_HSH_DATZ(5);
+ CVMX_MT_HSH_DATZ(6);
+ CVMX_ES64(tmp1, ((64 + 16) << 3));
+ CVMX_MT_HSH_STARTMD5(tmp1);
+
+ /* save the HMAC */
+ SG_INIT(sg, data32, data_i, data_l);
+ while (icv_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ icv_off -= 4;
+ }
+ CVMX_MF_HSH_IV(tmp1, 0);
+ *data32 = (uint32_t) (tmp1 >> 32);
+ SG_CONSUME(sg, data32, data_i, data_l);
+ *data32 = (uint32_t) tmp1;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVMX_MF_HSH_IV(tmp1, 1);
+ *data32 = (uint32_t) (tmp1 >> 32);
+
+ octeon_crypto_disable(&state, flags);
+ return 0;
+}
+
+int
+octo_des_cbc_md5_decrypt(
+ struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp)
+{
+ register int next = 0;
+ union {
+ uint32_t data32[2];
+ uint64_t data64[1];
+ } mydata;
+ uint64_t *data = &mydata.data64[0];
+ uint32_t *data32;
+ uint64_t tmp1, tmp2;
+ int data_i, data_l, alen = auth_len;
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+ (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+ (crypt_len & 0x7) ||
+ (auth_len & 0x7) ||
+ (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+ dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+ "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+ "icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ return -EINVAL;
+ }
+
+ SG_INIT(sg, data32, data_i, data_l);
+
+ CVMX_PREFETCH0(ivp);
+ CVMX_PREFETCH0(od->octo_enckey);
+
+ flags = octeon_crypto_enable(&state);
+
+ /* load 3DES Key */
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+ if (od->octo_encklen == 24) {
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ } else if (od->octo_encklen == 8) {
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1);
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2);
+ } else {
+ octeon_crypto_disable(&state, flags);
+ dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+ return -EINVAL;
+ }
+
+ CVMX_MT_3DES_IV(* (uint64_t *) ivp);
+
+ /* Load MD5 IV */
+ CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+
+ while (crypt_off > 0 && auth_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ crypt_off -= 4;
+ auth_off -= 4;
+ }
+
+ while (crypt_len > 0 || auth_len > 0) {
+ uint32_t *first = data32;
+ mydata.data32[0] = *first;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ mydata.data32[1] = *data32;
+ if (auth_off <= 0) {
+ if (auth_len > 0) {
+ CVM_LOAD_MD5_UNIT(*data, next);
+ auth_len -= 8;
+ }
+ } else
+ auth_off -= 8;
+ if (crypt_off <= 0) {
+ if (crypt_len > 0) {
+ CVMX_MT_3DES_DEC_CBC(*data);
+ CVMX_MF_3DES_RESULT(*data);
+ crypt_len -= 8;
+ }
+ } else
+ crypt_off -= 8;
+ *first = mydata.data32[0];
+ *data32 = mydata.data32[1];
+ SG_CONSUME(sg, data32, data_i, data_l);
+ }
+
+ /* finish the hash */
+ CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+ if (unlikely(inplen)) {
+ uint64_t tmp = 0;
+ uint8_t *p = (uint8_t *) & tmp;
+ p[inplen] = 0x80;
+ do {
+ inplen--;
+ p[inplen] = ((uint8_t *) data)[inplen];
+ } while (inplen);
+ CVM_LOAD_MD5_UNIT(tmp, next);
+ } else {
+ CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+ }
+#else
+ CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+#endif
+
+ /* Finish Inner hash */
+ while (next != 7) {
+ CVM_LOAD_MD5_UNIT(((uint64_t) 0x0ULL), next);
+ }
+ CVMX_ES64(tmp1, ((alen + 64) << 3));
+ CVM_LOAD_MD5_UNIT(tmp1, next);
+
+ /* Get the inner hash of HMAC */
+ CVMX_MF_HSH_IV(tmp1, 0);
+ CVMX_MF_HSH_IV(tmp2, 1);
+
+ /* Initialize hash unit */
+ CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+
+ CVMX_MT_HSH_DAT(tmp1, 0);
+ CVMX_MT_HSH_DAT(tmp2, 1);
+ CVMX_MT_HSH_DAT(0x8000000000000000ULL, 2);
+ CVMX_MT_HSH_DATZ(3);
+ CVMX_MT_HSH_DATZ(4);
+ CVMX_MT_HSH_DATZ(5);
+ CVMX_MT_HSH_DATZ(6);
+ CVMX_ES64(tmp1, ((64 + 16) << 3));
+ CVMX_MT_HSH_STARTMD5(tmp1);
+
+ /* save the HMAC */
+ SG_INIT(sg, data32, data_i, data_l);
+ while (icv_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ icv_off -= 4;
+ }
+ CVMX_MF_HSH_IV(tmp1, 0);
+ *data32 = (uint32_t) (tmp1 >> 32);
+ SG_CONSUME(sg, data32, data_i, data_l);
+ *data32 = (uint32_t) tmp1;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVMX_MF_HSH_IV(tmp1, 1);
+ *data32 = (uint32_t) (tmp1 >> 32);
+
+ octeon_crypto_disable(&state, flags);
+ return 0;
+}
+
+/****************************************************************************/
+/* DES SHA */
+
+int
+octo_des_cbc_sha1_encrypt(
+ struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp)
+{
+ register int next = 0;
+ union {
+ uint32_t data32[2];
+ uint64_t data64[1];
+ } mydata;
+ uint64_t *data = &mydata.data64[0];
+ uint32_t *data32;
+ uint64_t tmp1, tmp2, tmp3;
+ int data_i, data_l, alen = auth_len;
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+ (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+ (crypt_len & 0x7) ||
+ (auth_len & 0x7) ||
+ (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+ dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+ "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+ "icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ return -EINVAL;
+ }
+
+ SG_INIT(sg, data32, data_i, data_l);
+
+ CVMX_PREFETCH0(ivp);
+ CVMX_PREFETCH0(od->octo_enckey);
+
+ flags = octeon_crypto_enable(&state);
+
+ /* load 3DES Key */
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+ if (od->octo_encklen == 24) {
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ } else if (od->octo_encklen == 8) {
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1);
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2);
+ } else {
+ octeon_crypto_disable(&state, flags);
+ dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+ return -EINVAL;
+ }
+
+ CVMX_MT_3DES_IV(* (uint64_t *) ivp);
+
+ /* Load SHA1 IV */
+ CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+ CVMX_MT_HSH_IV(od->octo_hminner[2], 2);
+
+ while (crypt_off > 0 && auth_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ crypt_off -= 4;
+ auth_off -= 4;
+ }
+
+ while (crypt_len > 0 || auth_len > 0) {
+ uint32_t *first = data32;
+ mydata.data32[0] = *first;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ mydata.data32[1] = *data32;
+ if (crypt_off <= 0) {
+ if (crypt_len > 0) {
+ CVMX_MT_3DES_ENC_CBC(*data);
+ CVMX_MF_3DES_RESULT(*data);
+ crypt_len -= 8;
+ }
+ } else
+ crypt_off -= 8;
+ if (auth_off <= 0) {
+ if (auth_len > 0) {
+ CVM_LOAD_SHA_UNIT(*data, next);
+ auth_len -= 8;
+ }
+ } else
+ auth_off -= 8;
+ *first = mydata.data32[0];
+ *data32 = mydata.data32[1];
+ SG_CONSUME(sg, data32, data_i, data_l);
+ }
+
+ /* finish the hash */
+ CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+ if (unlikely(inplen)) {
+ uint64_t tmp = 0;
+ uint8_t *p = (uint8_t *) & tmp;
+ p[inplen] = 0x80;
+ do {
+ inplen--;
+ p[inplen] = ((uint8_t *) data)[inplen];
+ } while (inplen);
+ CVM_LOAD_SHA_UNIT(tmp, next);
+ } else {
+ CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+ }
+#else
+ CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+#endif
+
+ /* Finish Inner hash */
+ while (next != 7) {
+ CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next);
+ }
+ CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next);
+
+ /* Get the inner hash of HMAC */
+ CVMX_MF_HSH_IV(tmp1, 0);
+ CVMX_MF_HSH_IV(tmp2, 1);
+ tmp3 = 0;
+ CVMX_MF_HSH_IV(tmp3, 2);
+
+ /* Initialize hash unit */
+ CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+ CVMX_MT_HSH_IV(od->octo_hmouter[2], 2);
+
+ CVMX_MT_HSH_DAT(tmp1, 0);
+ CVMX_MT_HSH_DAT(tmp2, 1);
+ tmp3 |= 0x0000000080000000;
+ CVMX_MT_HSH_DAT(tmp3, 2);
+ CVMX_MT_HSH_DATZ(3);
+ CVMX_MT_HSH_DATZ(4);
+ CVMX_MT_HSH_DATZ(5);
+ CVMX_MT_HSH_DATZ(6);
+ CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));
+
+ /* save the HMAC */
+ SG_INIT(sg, data32, data_i, data_l);
+ while (icv_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ icv_off -= 4;
+ }
+ CVMX_MF_HSH_IV(tmp1, 0);
+ *data32 = (uint32_t) (tmp1 >> 32);
+ SG_CONSUME(sg, data32, data_i, data_l);
+ *data32 = (uint32_t) tmp1;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVMX_MF_HSH_IV(tmp1, 1);
+ *data32 = (uint32_t) (tmp1 >> 32);
+
+ octeon_crypto_disable(&state, flags);
+ return 0;
+}
+
+int
+octo_des_cbc_sha1_decrypt(
+ struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp)
+{
+ register int next = 0;
+ union {
+ uint32_t data32[2];
+ uint64_t data64[1];
+ } mydata;
+ uint64_t *data = &mydata.data64[0];
+ uint32_t *data32;
+ uint64_t tmp1, tmp2, tmp3;
+ int data_i, data_l, alen = auth_len;
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+ (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+ (crypt_len & 0x7) ||
+ (auth_len & 0x7) ||
+ (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+ dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+ "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+ "icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ return -EINVAL;
+ }
+
+ SG_INIT(sg, data32, data_i, data_l);
+
+ CVMX_PREFETCH0(ivp);
+ CVMX_PREFETCH0(od->octo_enckey);
+
+ flags = octeon_crypto_enable(&state);
+
+ /* load 3DES Key */
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+ if (od->octo_encklen == 24) {
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ } else if (od->octo_encklen == 8) {
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1);
+ CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2);
+ } else {
+ octeon_crypto_disable(&state, flags);
+ dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+ return -EINVAL;
+ }
+
+ CVMX_MT_3DES_IV(* (uint64_t *) ivp);
+
+ /* Load SHA1 IV */
+ CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+ CVMX_MT_HSH_IV(od->octo_hminner[2], 2);
+
+ while (crypt_off > 0 && auth_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ crypt_off -= 4;
+ auth_off -= 4;
+ }
+
+ while (crypt_len > 0 || auth_len > 0) {
+ uint32_t *first = data32;
+ mydata.data32[0] = *first;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ mydata.data32[1] = *data32;
+ if (auth_off <= 0) {
+ if (auth_len > 0) {
+ CVM_LOAD_SHA_UNIT(*data, next);
+ auth_len -= 8;
+ }
+ } else
+ auth_off -= 8;
+ if (crypt_off <= 0) {
+ if (crypt_len > 0) {
+ CVMX_MT_3DES_DEC_CBC(*data);
+ CVMX_MF_3DES_RESULT(*data);
+ crypt_len -= 8;
+ }
+ } else
+ crypt_off -= 8;
+ *first = mydata.data32[0];
+ *data32 = mydata.data32[1];
+ SG_CONSUME(sg, data32, data_i, data_l);
+ }
+
+ /* finish the hash */
+ CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+ if (unlikely(inplen)) {
+ uint64_t tmp = 0;
+ uint8_t *p = (uint8_t *) & tmp;
+ p[inplen] = 0x80;
+ do {
+ inplen--;
+ p[inplen] = ((uint8_t *) data)[inplen];
+ } while (inplen);
+ CVM_LOAD_SHA_UNIT(tmp, next);
+ } else {
+ CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+ }
+#else
+ CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+#endif
+
+ /* Finish Inner hash */
+ while (next != 7) {
+ CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next);
+ }
+ CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next);
+
+ /* Get the inner hash of HMAC */
+ CVMX_MF_HSH_IV(tmp1, 0);
+ CVMX_MF_HSH_IV(tmp2, 1);
+ tmp3 = 0;
+ CVMX_MF_HSH_IV(tmp3, 2);
+
+ /* Initialize hash unit */
+ CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+ CVMX_MT_HSH_IV(od->octo_hmouter[2], 2);
+
+ CVMX_MT_HSH_DAT(tmp1, 0);
+ CVMX_MT_HSH_DAT(tmp2, 1);
+ tmp3 |= 0x0000000080000000;
+ CVMX_MT_HSH_DAT(tmp3, 2);
+ CVMX_MT_HSH_DATZ(3);
+ CVMX_MT_HSH_DATZ(4);
+ CVMX_MT_HSH_DATZ(5);
+ CVMX_MT_HSH_DATZ(6);
+ CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));
+ /* save the HMAC */
+ SG_INIT(sg, data32, data_i, data_l);
+ while (icv_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ icv_off -= 4;
+ }
+ CVMX_MF_HSH_IV(tmp1, 0);
+ *data32 = (uint32_t) (tmp1 >> 32);
+ SG_CONSUME(sg, data32, data_i, data_l);
+ *data32 = (uint32_t) tmp1;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVMX_MF_HSH_IV(tmp1, 1);
+ *data32 = (uint32_t) (tmp1 >> 32);
+
+ octeon_crypto_disable(&state, flags);
+ return 0;
+}
+
+/****************************************************************************/
+/* AES MD5 */
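+/*
+ * AES + MD5 combined routines.  Each pass stages one 16-byte AES block as
+ * four 32-bit words (keeping the destination pointers in pdata32[]), runs
+ * the AES and MD5 units over it, then writes the block back to the
+ * scatterlist.  The leading and trailing loops cover regions that are
+ * hashed but not encrypted.
+ */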
+
+int
+octo_aes_cbc_md5_encrypt(
+ struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp)
+{
+ register int next = 0;
+ union {
+ uint32_t data32[2];
+ uint64_t data64[1];
+ } mydata[2];
+ uint64_t *pdata = &mydata[0].data64[0];
+ uint64_t *data = &mydata[1].data64[0];
+ uint32_t *data32;
+ uint64_t tmp1, tmp2;
+ int data_i, data_l, alen = auth_len;
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+ (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+ (crypt_len & 0x7) ||
+ (auth_len & 0x7) ||
+ (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+ dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+ "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+ "icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ return -EINVAL;
+ }
+
+ SG_INIT(sg, data32, data_i, data_l);
+
+ CVMX_PREFETCH0(ivp);
+ CVMX_PREFETCH0(od->octo_enckey);
+
+ flags = octeon_crypto_enable(&state);
+
+ /* load AES Key */
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+
+ if (od->octo_encklen == 16) {
+ CVMX_MT_AES_KEY(0x0, 2);
+ CVMX_MT_AES_KEY(0x0, 3);
+ } else if (od->octo_encklen == 24) {
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ CVMX_MT_AES_KEY(0x0, 3);
+ } else if (od->octo_encklen == 32) {
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
+ } else {
+ octeon_crypto_disable(&state, flags);
+ dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+ return -EINVAL;
+ }
+ CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);
+
+ CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
+ CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);
+
+ /* Load MD5 IV */
+ CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+
+ while (crypt_off > 0 && auth_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ crypt_off -= 4;
+ auth_off -= 4;
+ }
+
+ /* align auth and crypt */
+ while (crypt_off > 0 && auth_len > 0) {
+ mydata[0].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ mydata[0].data32[1] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVM_LOAD_MD5_UNIT(*pdata, next);
+ crypt_off -= 8;
+ auth_len -= 8;
+ }
+
+ while (crypt_len > 0) {
+ uint32_t *pdata32[3];
+
+ pdata32[0] = data32;
+ mydata[0].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+
+ pdata32[1] = data32;
+ mydata[0].data32[1] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+
+ pdata32[2] = data32;
+ mydata[1].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+
+ mydata[1].data32[1] = *data32;
+
+ CVMX_MT_AES_ENC_CBC0(*pdata);
+ CVMX_MT_AES_ENC_CBC1(*data);
+ CVMX_MF_AES_RESULT(*pdata, 0);
+ CVMX_MF_AES_RESULT(*data, 1);
+ crypt_len -= 16;
+
+ if (auth_len > 0) {
+ CVM_LOAD_MD5_UNIT(*pdata, next);
+ auth_len -= 8;
+ }
+ if (auth_len > 0) {
+ CVM_LOAD_MD5_UNIT(*data, next);
+ auth_len -= 8;
+ }
+
+ *pdata32[0] = mydata[0].data32[0];
+ *pdata32[1] = mydata[0].data32[1];
+ *pdata32[2] = mydata[1].data32[0];
+ *data32 = mydata[1].data32[1];
+
+ SG_CONSUME(sg, data32, data_i, data_l);
+ }
+
+ /* finish any left over hashing */
+ while (auth_len > 0) {
+ mydata[0].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ mydata[0].data32[1] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVM_LOAD_MD5_UNIT(*pdata, next);
+ auth_len -= 8;
+ }
+
+ /* finish the hash */
+ CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+ if (unlikely(inplen)) {
+ uint64_t tmp = 0;
+ uint8_t *p = (uint8_t *) & tmp;
+ p[inplen] = 0x80;
+ do {
+ inplen--;
+ p[inplen] = ((uint8_t *) data)[inplen];
+ } while (inplen);
+ CVM_LOAD_MD5_UNIT(tmp, next);
+ } else {
+ CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+ }
+#else
+ CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+#endif
+
+ /* Finish Inner hash */
+ while (next != 7) {
+ CVM_LOAD_MD5_UNIT(((uint64_t) 0x0ULL), next);
+ }
+ CVMX_ES64(tmp1, ((alen + 64) << 3));
+ CVM_LOAD_MD5_UNIT(tmp1, next);
+
+ /* Get the inner hash of HMAC */
+ CVMX_MF_HSH_IV(tmp1, 0);
+ CVMX_MF_HSH_IV(tmp2, 1);
+
+ /* Initialize hash unit */
+ CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+
+ CVMX_MT_HSH_DAT(tmp1, 0);
+ CVMX_MT_HSH_DAT(tmp2, 1);
+ CVMX_MT_HSH_DAT(0x8000000000000000ULL, 2);
+ CVMX_MT_HSH_DATZ(3);
+ CVMX_MT_HSH_DATZ(4);
+ CVMX_MT_HSH_DATZ(5);
+ CVMX_MT_HSH_DATZ(6);
+ CVMX_ES64(tmp1, ((64 + 16) << 3));
+ CVMX_MT_HSH_STARTMD5(tmp1);
+
+ /* save the HMAC */
+ SG_INIT(sg, data32, data_i, data_l);
+ while (icv_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ icv_off -= 4;
+ }
+ CVMX_MF_HSH_IV(tmp1, 0);
+ *data32 = (uint32_t) (tmp1 >> 32);
+ SG_CONSUME(sg, data32, data_i, data_l);
+ *data32 = (uint32_t) tmp1;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVMX_MF_HSH_IV(tmp1, 1);
+ *data32 = (uint32_t) (tmp1 >> 32);
+
+ octeon_crypto_disable(&state, flags);
+ return 0;
+}
+
+int
+octo_aes_cbc_md5_decrypt(
+ struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp)
+{
+ register int next = 0;
+ union {
+ uint32_t data32[2];
+ uint64_t data64[1];
+ } mydata[2];
+ uint64_t *pdata = &mydata[0].data64[0];
+ uint64_t *data = &mydata[1].data64[0];
+ uint32_t *data32;
+ uint64_t tmp1, tmp2;
+ int data_i, data_l, alen = auth_len;
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+ (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+ (crypt_len & 0x7) ||
+ (auth_len & 0x7) ||
+ (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+ dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+ "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+ "icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ return -EINVAL;
+ }
+
+ SG_INIT(sg, data32, data_i, data_l);
+
+ CVMX_PREFETCH0(ivp);
+ CVMX_PREFETCH0(od->octo_enckey);
+
+ flags = octeon_crypto_enable(&state);
+
+ /* load AES Key */
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+
+ if (od->octo_encklen == 16) {
+ CVMX_MT_AES_KEY(0x0, 2);
+ CVMX_MT_AES_KEY(0x0, 3);
+ } else if (od->octo_encklen == 24) {
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ CVMX_MT_AES_KEY(0x0, 3);
+ } else if (od->octo_encklen == 32) {
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
+ } else {
+ octeon_crypto_disable(&state, flags);
+ dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+ return -EINVAL;
+ }
+ CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);
+
+ CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
+ CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);
+
+ /* Load MD5 IV */
+ CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+
+ while (crypt_off > 0 && auth_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ crypt_off -= 4;
+ auth_off -= 4;
+ }
+
+ /* align auth and crypt */
+ while (crypt_off > 0 && auth_len > 0) {
+ mydata[0].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ mydata[0].data32[1] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVM_LOAD_MD5_UNIT(*pdata, next);
+ crypt_off -= 8;
+ auth_len -= 8;
+ }
+
+ while (crypt_len > 0) {
+ uint32_t *pdata32[3];
+
+ pdata32[0] = data32;
+ mydata[0].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ pdata32[1] = data32;
+ mydata[0].data32[1] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ pdata32[2] = data32;
+ mydata[1].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ mydata[1].data32[1] = *data32;
+
+ if (auth_len > 0) {
+ CVM_LOAD_MD5_UNIT(*pdata, next);
+ auth_len -= 8;
+ }
+
+ if (auth_len > 0) {
+ CVM_LOAD_MD5_UNIT(*data, next);
+ auth_len -= 8;
+ }
+
+ CVMX_MT_AES_DEC_CBC0(*pdata);
+ CVMX_MT_AES_DEC_CBC1(*data);
+ CVMX_MF_AES_RESULT(*pdata, 0);
+ CVMX_MF_AES_RESULT(*data, 1);
+ crypt_len -= 16;
+
+ *pdata32[0] = mydata[0].data32[0];
+ *pdata32[1] = mydata[0].data32[1];
+ *pdata32[2] = mydata[1].data32[0];
+ *data32 = mydata[1].data32[1];
+
+ SG_CONSUME(sg, data32, data_i, data_l);
+ }
+
+ /* finish left over hash if any */
+ while (auth_len > 0) {
+ mydata[0].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ mydata[0].data32[1] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVM_LOAD_MD5_UNIT(*pdata, next);
+ auth_len -= 8;
+ }
+
+
+ /* finish the hash */
+ CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+ if (unlikely(inplen)) {
+ uint64_t tmp = 0;
+ uint8_t *p = (uint8_t *) & tmp;
+ p[inplen] = 0x80;
+ do {
+ inplen--;
+ p[inplen] = ((uint8_t *) data)[inplen];
+ } while (inplen);
+ CVM_LOAD_MD5_UNIT(tmp, next);
+ } else {
+ CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+ }
+#else
+ CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+#endif
+
+ /* Finish Inner hash */
+ while (next != 7) {
+ CVM_LOAD_MD5_UNIT(((uint64_t) 0x0ULL), next);
+ }
+ CVMX_ES64(tmp1, ((alen + 64) << 3));
+ CVM_LOAD_MD5_UNIT(tmp1, next);
+
+ /* Get the inner hash of HMAC */
+ CVMX_MF_HSH_IV(tmp1, 0);
+ CVMX_MF_HSH_IV(tmp2, 1);
+
+ /* Initialize hash unit */
+ CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+
+ CVMX_MT_HSH_DAT(tmp1, 0);
+ CVMX_MT_HSH_DAT(tmp2, 1);
+ CVMX_MT_HSH_DAT(0x8000000000000000ULL, 2);
+ CVMX_MT_HSH_DATZ(3);
+ CVMX_MT_HSH_DATZ(4);
+ CVMX_MT_HSH_DATZ(5);
+ CVMX_MT_HSH_DATZ(6);
+ CVMX_ES64(tmp1, ((64 + 16) << 3));
+ CVMX_MT_HSH_STARTMD5(tmp1);
+
+ /* save the HMAC */
+ SG_INIT(sg, data32, data_i, data_l);
+ while (icv_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ icv_off -= 4;
+ }
+ CVMX_MF_HSH_IV(tmp1, 0);
+ *data32 = (uint32_t) (tmp1 >> 32);
+ SG_CONSUME(sg, data32, data_i, data_l);
+ *data32 = (uint32_t) tmp1;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVMX_MF_HSH_IV(tmp1, 1);
+ *data32 = (uint32_t) (tmp1 >> 32);
+
+ octeon_crypto_disable(&state, flags);
+ return 0;
+}
+
+/****************************************************************************/
+/* AES SHA1 */
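+/*
+ * Same block staging scheme as the AES/MD5 routines above, with the SHA-1
+ * unit and its finishing sequence on the hash side.
+ */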
+
+int
+octo_aes_cbc_sha1_encrypt(
+ struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp)
+{
+ register int next = 0;
+ union {
+ uint32_t data32[2];
+ uint64_t data64[1];
+ } mydata[2];
+ uint64_t *pdata = &mydata[0].data64[0];
+ uint64_t *data = &mydata[1].data64[0];
+ uint32_t *data32;
+ uint64_t tmp1, tmp2, tmp3;
+ int data_i, data_l, alen = auth_len;
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ dprintk("%s(a_off=%d a_len=%d c_off=%d c_len=%d icv_off=%d)\n",
+ __FUNCTION__, auth_off, auth_len, crypt_off, crypt_len, icv_off);
+
+ if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+ (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+ (crypt_len & 0x7) ||
+ (auth_len & 0x7) ||
+ (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+ dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+ "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+ "icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ return -EINVAL;
+ }
+
+ SG_INIT(sg, data32, data_i, data_l);
+
+ CVMX_PREFETCH0(ivp);
+ CVMX_PREFETCH0(od->octo_enckey);
+
+ flags = octeon_crypto_enable(&state);
+
+ /* load AES Key */
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+
+ if (od->octo_encklen == 16) {
+ CVMX_MT_AES_KEY(0x0, 2);
+ CVMX_MT_AES_KEY(0x0, 3);
+ } else if (od->octo_encklen == 24) {
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ CVMX_MT_AES_KEY(0x0, 3);
+ } else if (od->octo_encklen == 32) {
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
+ } else {
+ octeon_crypto_disable(&state, flags);
+ dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+ return -EINVAL;
+ }
+ CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);
+
+ CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
+ CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);
+
+ /* Load SHA IV */
+ CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+ CVMX_MT_HSH_IV(od->octo_hminner[2], 2);
+
+ while (crypt_off > 0 && auth_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ crypt_off -= 4;
+ auth_off -= 4;
+ }
+
+ /* align auth and crypt */
+ while (crypt_off > 0 && auth_len > 0) {
+ mydata[0].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ mydata[0].data32[1] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVM_LOAD_SHA_UNIT(*pdata, next);
+ crypt_off -= 8;
+ auth_len -= 8;
+ }
+
+ while (crypt_len > 0) {
+ uint32_t *pdata32[3];
+
+ pdata32[0] = data32;
+ mydata[0].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ pdata32[1] = data32;
+ mydata[0].data32[1] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ pdata32[2] = data32;
+ mydata[1].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ mydata[1].data32[1] = *data32;
+
+ CVMX_MT_AES_ENC_CBC0(*pdata);
+ CVMX_MT_AES_ENC_CBC1(*data);
+ CVMX_MF_AES_RESULT(*pdata, 0);
+ CVMX_MF_AES_RESULT(*data, 1);
+ crypt_len -= 16;
+
+ if (auth_len > 0) {
+ CVM_LOAD_SHA_UNIT(*pdata, next);
+ auth_len -= 8;
+ }
+ if (auth_len > 0) {
+ CVM_LOAD_SHA_UNIT(*data, next);
+ auth_len -= 8;
+ }
+
+ *pdata32[0] = mydata[0].data32[0];
+ *pdata32[1] = mydata[0].data32[1];
+ *pdata32[2] = mydata[1].data32[0];
+ *data32 = mydata[1].data32[1];
+
+ SG_CONSUME(sg, data32, data_i, data_l);
+ }
+
+	/* finish any leftover hashing */
+ while (auth_len > 0) {
+ mydata[0].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ mydata[0].data32[1] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVM_LOAD_SHA_UNIT(*pdata, next);
+ auth_len -= 8;
+ }
+
+ /* finish the hash */
+ CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+ if (unlikely(inplen)) {
+ uint64_t tmp = 0;
+ uint8_t *p = (uint8_t *) & tmp;
+ p[inplen] = 0x80;
+ do {
+ inplen--;
+ p[inplen] = ((uint8_t *) data)[inplen];
+ } while (inplen);
+ CVM_LOAD_SHA_UNIT(tmp, next);
+ } else {
+ CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+ }
+#else
+ CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+#endif
+
+ /* Finish Inner hash */
+ while (next != 7) {
+ CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next);
+ }
+ CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next);
+
+ /* Get the inner hash of HMAC */
+ CVMX_MF_HSH_IV(tmp1, 0);
+ CVMX_MF_HSH_IV(tmp2, 1);
+ tmp3 = 0;
+ CVMX_MF_HSH_IV(tmp3, 2);
+
+ /* Initialize hash unit */
+ CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+ CVMX_MT_HSH_IV(od->octo_hmouter[2], 2);
+
+ CVMX_MT_HSH_DAT(tmp1, 0);
+ CVMX_MT_HSH_DAT(tmp2, 1);
+ tmp3 |= 0x0000000080000000;
+ CVMX_MT_HSH_DAT(tmp3, 2);
+ CVMX_MT_HSH_DATZ(3);
+ CVMX_MT_HSH_DATZ(4);
+ CVMX_MT_HSH_DATZ(5);
+ CVMX_MT_HSH_DATZ(6);
+ CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));
+
+ /* finish the hash */
+ CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+ if (unlikely(inplen)) {
+ uint64_t tmp = 0;
+ uint8_t *p = (uint8_t *) & tmp;
+ p[inplen] = 0x80;
+ do {
+ inplen--;
+ p[inplen] = ((uint8_t *) data)[inplen];
+ } while (inplen);
+ CVM_LOAD_MD5_UNIT(tmp, next);
+ } else {
+ CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+ }
+#else
+ CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+#endif
+
+ /* save the HMAC */
+ SG_INIT(sg, data32, data_i, data_l);
+ while (icv_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ icv_off -= 4;
+ }
+ CVMX_MF_HSH_IV(tmp1, 0);
+ *data32 = (uint32_t) (tmp1 >> 32);
+ SG_CONSUME(sg, data32, data_i, data_l);
+ *data32 = (uint32_t) tmp1;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVMX_MF_HSH_IV(tmp1, 1);
+ *data32 = (uint32_t) (tmp1 >> 32);
+
+ octeon_crypto_disable(&state, flags);
+ return 0;
+}
+
+int
+octo_aes_cbc_sha1_decrypt(
+ struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp)
+{
+ register int next = 0;
+ union {
+ uint32_t data32[2];
+ uint64_t data64[1];
+ } mydata[2];
+ uint64_t *pdata = &mydata[0].data64[0];
+ uint64_t *data = &mydata[1].data64[0];
+ uint32_t *data32;
+ uint64_t tmp1, tmp2, tmp3;
+ int data_i, data_l, alen = auth_len;
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ dprintk("%s(a_off=%d a_len=%d c_off=%d c_len=%d icv_off=%d)\n",
+ __FUNCTION__, auth_off, auth_len, crypt_off, crypt_len, icv_off);
+
+ if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+ (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+ (crypt_len & 0x7) ||
+ (auth_len & 0x7) ||
+ (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+ dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+ "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+ "icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ return -EINVAL;
+ }
+
+ SG_INIT(sg, data32, data_i, data_l);
+
+ CVMX_PREFETCH0(ivp);
+ CVMX_PREFETCH0(od->octo_enckey);
+
+ flags = octeon_crypto_enable(&state);
+
+ /* load AES Key */
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+
+ if (od->octo_encklen == 16) {
+ CVMX_MT_AES_KEY(0x0, 2);
+ CVMX_MT_AES_KEY(0x0, 3);
+ } else if (od->octo_encklen == 24) {
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ CVMX_MT_AES_KEY(0x0, 3);
+ } else if (od->octo_encklen == 32) {
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+ CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
+ } else {
+ octeon_crypto_disable(&state, flags);
+ dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+ return -EINVAL;
+ }
+ CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);
+
+ CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
+ CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);
+
+ /* Load SHA1 IV */
+ CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+ CVMX_MT_HSH_IV(od->octo_hminner[2], 2);
+
+ while (crypt_off > 0 && auth_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ crypt_off -= 4;
+ auth_off -= 4;
+ }
+
+ /* align auth and crypt */
+ while (crypt_off > 0 && auth_len > 0) {
+ mydata[0].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ mydata[0].data32[1] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVM_LOAD_SHA_UNIT(*pdata, next);
+ crypt_off -= 8;
+ auth_len -= 8;
+ }
+
+ while (crypt_len > 0) {
+ uint32_t *pdata32[3];
+
+ pdata32[0] = data32;
+ mydata[0].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ pdata32[1] = data32;
+ mydata[0].data32[1] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ pdata32[2] = data32;
+ mydata[1].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ mydata[1].data32[1] = *data32;
+
+ if (auth_len > 0) {
+ CVM_LOAD_SHA_UNIT(*pdata, next);
+ auth_len -= 8;
+ }
+ if (auth_len > 0) {
+ CVM_LOAD_SHA_UNIT(*data, next);
+ auth_len -= 8;
+ }
+
+ CVMX_MT_AES_DEC_CBC0(*pdata);
+ CVMX_MT_AES_DEC_CBC1(*data);
+ CVMX_MF_AES_RESULT(*pdata, 0);
+ CVMX_MF_AES_RESULT(*data, 1);
+ crypt_len -= 16;
+
+ *pdata32[0] = mydata[0].data32[0];
+ *pdata32[1] = mydata[0].data32[1];
+ *pdata32[2] = mydata[1].data32[0];
+ *data32 = mydata[1].data32[1];
+
+ SG_CONSUME(sg, data32, data_i, data_l);
+ }
+
+	/* finish any leftover hashing */
+ while (auth_len > 0) {
+ mydata[0].data32[0] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ mydata[0].data32[1] = *data32;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVM_LOAD_SHA_UNIT(*pdata, next);
+ auth_len -= 8;
+ }
+
+ /* finish the hash */
+ CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+ if (unlikely(inplen)) {
+ uint64_t tmp = 0;
+ uint8_t *p = (uint8_t *) & tmp;
+ p[inplen] = 0x80;
+ do {
+ inplen--;
+ p[inplen] = ((uint8_t *) data)[inplen];
+ } while (inplen);
+ CVM_LOAD_SHA_UNIT(tmp, next);
+ } else {
+ CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+ }
+#else
+ CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+#endif
+
+ /* Finish Inner hash */
+ while (next != 7) {
+ CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next);
+ }
+ CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next);
+
+ /* Get the inner hash of HMAC */
+ CVMX_MF_HSH_IV(tmp1, 0);
+ CVMX_MF_HSH_IV(tmp2, 1);
+ tmp3 = 0;
+ CVMX_MF_HSH_IV(tmp3, 2);
+
+ /* Initialize hash unit */
+ CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+ CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+ CVMX_MT_HSH_IV(od->octo_hmouter[2], 2);
+
+ CVMX_MT_HSH_DAT(tmp1, 0);
+ CVMX_MT_HSH_DAT(tmp2, 1);
+ tmp3 |= 0x0000000080000000;
+ CVMX_MT_HSH_DAT(tmp3, 2);
+ CVMX_MT_HSH_DATZ(3);
+ CVMX_MT_HSH_DATZ(4);
+ CVMX_MT_HSH_DATZ(5);
+ CVMX_MT_HSH_DATZ(6);
+ CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));
+
+ /* finish the hash */
+ CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+ if (unlikely(inplen)) {
+ uint64_t tmp = 0;
+ uint8_t *p = (uint8_t *) & tmp;
+ p[inplen] = 0x80;
+ do {
+ inplen--;
+ p[inplen] = ((uint8_t *) data)[inplen];
+ } while (inplen);
+ CVM_LOAD_MD5_UNIT(tmp, next);
+ } else {
+ CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+ }
+#else
+ CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+#endif
+
+ /* save the HMAC */
+ SG_INIT(sg, data32, data_i, data_l);
+ while (icv_off > 0) {
+ SG_CONSUME(sg, data32, data_i, data_l);
+ icv_off -= 4;
+ }
+ CVMX_MF_HSH_IV(tmp1, 0);
+ *data32 = (uint32_t) (tmp1 >> 32);
+ SG_CONSUME(sg, data32, data_i, data_l);
+ *data32 = (uint32_t) tmp1;
+ SG_CONSUME(sg, data32, data_i, data_l);
+ CVMX_MF_HSH_IV(tmp1, 1);
+ *data32 = (uint32_t) (tmp1 >> 32);
+
+ octeon_crypto_disable(&state, flags);
+ return 0;
+}
+
+/****************************************************************************/
diff --git a/target/linux/generic/files/crypto/ocf/cryptocteon/cryptocteon.c b/target/linux/generic/files/crypto/ocf/cryptocteon/cryptocteon.c
new file mode 100644
index 000000000..0168ad321
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/cryptocteon/cryptocteon.c
@@ -0,0 +1,576 @@
+/*
+ * Octeon Crypto for OCF
+ *
+ * Written by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2009-2010 David McCullough
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ * 1. distributions of this source code include the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ *
+ * 2. distributions in binary form include the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other associated materials;
+ *
+ * 3. the copyright holder's name is not used to endorse products
+ * built using this software without specific written permission.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ * ---------------------------------------------------------------------------
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/crypto.h>
+#include <linux/mm.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+
+#include <cryptodev.h>
+#include <uio.h>
+
+struct {
+ softc_device_decl sc_dev;
+} octo_softc;
+
+#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
+
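+/*
+ * Per-session state: the negotiated cipher/MAC algorithms and keys, the
+ * ICV length (octo_mlen), the IV size, the encrypt/decrypt handlers chosen
+ * in octo_newsession(), and the HMAC inner/outer hash states precomputed
+ * by octo_calc_hash().
+ */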
+struct octo_sess {
+ int octo_encalg;
+ #define MAX_CIPHER_KEYLEN 64
+ char octo_enckey[MAX_CIPHER_KEYLEN];
+ int octo_encklen;
+
+ int octo_macalg;
+ #define MAX_HASH_KEYLEN 64
+ char octo_mackey[MAX_HASH_KEYLEN];
+ int octo_macklen;
+ int octo_mackey_set;
+
+ int octo_mlen;
+ int octo_ivsize;
+
+ int (*octo_encrypt)(struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp);
+ int (*octo_decrypt)(struct octo_sess *od,
+ struct scatterlist *sg, int sg_len,
+ int auth_off, int auth_len,
+ int crypt_off, int crypt_len,
+ int icv_off, uint8_t *ivp);
+
+ uint64_t octo_hminner[3];
+ uint64_t octo_hmouter[3];
+};
+
+int32_t octo_id = -1;
+module_param(octo_id, int, 0444);
+MODULE_PARM_DESC(octo_id, "Read-Only OCF ID for cryptocteon driver");
+
+static struct octo_sess **octo_sessions = NULL;
+static u_int32_t octo_sesnum = 0;
+
+static int octo_process(device_t, struct cryptop *, int);
+static int octo_newsession(device_t, u_int32_t *, struct cryptoini *);
+static int octo_freesession(device_t, u_int64_t);
+
+static device_method_t octo_methods = {
+ /* crypto device methods */
+ DEVMETHOD(cryptodev_newsession, octo_newsession),
+ DEVMETHOD(cryptodev_freesession,octo_freesession),
+ DEVMETHOD(cryptodev_process, octo_process),
+};
+
+#define debug octo_debug
+int octo_debug = 0;
+module_param(octo_debug, int, 0644);
+MODULE_PARM_DESC(octo_debug, "Enable debug");
+
+
+#include "cavium_crypto.c"
+
+
+/*
+ * Generate a new octo session. We artificially limit it to a single
+ * hash/cipher or hash-cipher combo just to make it easier, most callers
+ * do not expect more than this anyway.
+ */
+static int
+octo_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
+{
+ struct cryptoini *c, *encini = NULL, *macini = NULL;
+ struct octo_sess **ocd;
+ int i;
+
+ dprintk("%s()\n", __FUNCTION__);
+ if (sid == NULL || cri == NULL) {
+ dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ /*
+ * To keep it simple, we only handle hash, cipher or hash/cipher in a
+ * session, you cannot currently do multiple ciphers/hashes in one
+	 * session even though it would be possible to code this driver to
+ * handle it.
+ */
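+	/*
+	 * e.g. a typical IPsec ESP request arrives as a two entry chain such
+	 * as CRYPTO_AES_CBC followed by CRYPTO_SHA1_HMAC; a third descriptor
+	 * (or a duplicate cipher/hash) leaves c non-NULL and is rejected below.
+	 */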
+ for (i = 0, c = cri; c && i < 2; i++) {
+ if (c->cri_alg == CRYPTO_MD5_HMAC ||
+ c->cri_alg == CRYPTO_SHA1_HMAC ||
+ c->cri_alg == CRYPTO_NULL_HMAC) {
+ if (macini) {
+ break;
+ }
+ macini = c;
+ }
+ if (c->cri_alg == CRYPTO_DES_CBC ||
+ c->cri_alg == CRYPTO_3DES_CBC ||
+ c->cri_alg == CRYPTO_AES_CBC ||
+ c->cri_alg == CRYPTO_NULL_CBC) {
+ if (encini) {
+ break;
+ }
+ encini = c;
+ }
+ c = c->cri_next;
+ }
+ if (!macini && !encini) {
+ dprintk("%s,%d - EINVAL bad cipher/hash or combination\n",
+ __FILE__, __LINE__);
+ return EINVAL;
+ }
+ if (c) {
+ dprintk("%s,%d - EINVAL cannot handle chained cipher/hash combos\n",
+ __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ /*
+	 * So we have something we can do, let's set up the session
+ */
+
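+	/*
+	 * Session IDs index directly into octo_sessions[]; slot 0 is never
+	 * used, the table starts at CRYPTO_SW_SESSIONS entries and doubles
+	 * whenever it fills, copying the old pointers across.
+	 */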
+ if (octo_sessions) {
+ for (i = 1; i < octo_sesnum; i++)
+ if (octo_sessions[i] == NULL)
+ break;
+ } else
+ i = 1; /* NB: to silence compiler warning */
+
+ if (octo_sessions == NULL || i == octo_sesnum) {
+ if (octo_sessions == NULL) {
+ i = 1; /* We leave octo_sessions[0] empty */
+ octo_sesnum = CRYPTO_SW_SESSIONS;
+ } else
+ octo_sesnum *= 2;
+
+ ocd = kmalloc(octo_sesnum * sizeof(struct octo_sess *), SLAB_ATOMIC);
+ if (ocd == NULL) {
+ /* Reset session number */
+ if (octo_sesnum == CRYPTO_SW_SESSIONS)
+ octo_sesnum = 0;
+ else
+ octo_sesnum /= 2;
+ dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+ return ENOBUFS;
+ }
+ memset(ocd, 0, octo_sesnum * sizeof(struct octo_sess *));
+
+ /* Copy existing sessions */
+ if (octo_sessions) {
+ memcpy(ocd, octo_sessions,
+ (octo_sesnum / 2) * sizeof(struct octo_sess *));
+ kfree(octo_sessions);
+ }
+
+ octo_sessions = ocd;
+ }
+
+ ocd = &octo_sessions[i];
+ *sid = i;
+
+
+ *ocd = (struct octo_sess *) kmalloc(sizeof(struct octo_sess), SLAB_ATOMIC);
+ if (*ocd == NULL) {
+ octo_freesession(NULL, i);
+ dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+ return ENOBUFS;
+ }
+ memset(*ocd, 0, sizeof(struct octo_sess));
+
+ if (encini && encini->cri_key) {
+ (*ocd)->octo_encklen = (encini->cri_klen + 7) / 8;
+ memcpy((*ocd)->octo_enckey, encini->cri_key, (*ocd)->octo_encklen);
+ }
+
+ if (macini && macini->cri_key) {
+ (*ocd)->octo_macklen = (macini->cri_klen + 7) / 8;
+ memcpy((*ocd)->octo_mackey, macini->cri_key, (*ocd)->octo_macklen);
+ }
+
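+	/*
+	 * ICV length: honour the caller supplied cri_mlen if set, otherwise
+	 * default to 12 bytes, the truncated ICV size used by HMAC-MD5-96
+	 * and HMAC-SHA1-96.
+	 */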
+ (*ocd)->octo_mlen = 0;
+ if (encini && encini->cri_mlen)
+ (*ocd)->octo_mlen = encini->cri_mlen;
+ else if (macini && macini->cri_mlen)
+ (*ocd)->octo_mlen = macini->cri_mlen;
+ else
+ (*ocd)->octo_mlen = 12;
+
+ /*
+ * point c at the enc if it exists, otherwise the mac
+ */
+ c = encini ? encini : macini;
+
+ switch (c->cri_alg) {
+ case CRYPTO_DES_CBC:
+ case CRYPTO_3DES_CBC:
+ (*ocd)->octo_ivsize = 8;
+ switch (macini ? macini->cri_alg : -1) {
+ case CRYPTO_MD5_HMAC:
+ (*ocd)->octo_encrypt = octo_des_cbc_md5_encrypt;
+ (*ocd)->octo_decrypt = octo_des_cbc_md5_decrypt;
+ octo_calc_hash(0, macini->cri_key, (*ocd)->octo_hminner,
+ (*ocd)->octo_hmouter);
+ break;
+ case CRYPTO_SHA1_HMAC:
+ (*ocd)->octo_encrypt = octo_des_cbc_sha1_encrypt;
+ (*ocd)->octo_decrypt = octo_des_cbc_sha1_decrypt;
+ octo_calc_hash(1, macini->cri_key, (*ocd)->octo_hminner,
+ (*ocd)->octo_hmouter);
+ break;
+ case -1:
+ (*ocd)->octo_encrypt = octo_des_cbc_encrypt;
+ (*ocd)->octo_decrypt = octo_des_cbc_decrypt;
+ break;
+ default:
+ octo_freesession(NULL, i);
+			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+ break;
+ case CRYPTO_AES_CBC:
+ (*ocd)->octo_ivsize = 16;
+ switch (macini ? macini->cri_alg : -1) {
+ case CRYPTO_MD5_HMAC:
+ (*ocd)->octo_encrypt = octo_aes_cbc_md5_encrypt;
+ (*ocd)->octo_decrypt = octo_aes_cbc_md5_decrypt;
+ octo_calc_hash(0, macini->cri_key, (*ocd)->octo_hminner,
+ (*ocd)->octo_hmouter);
+ break;
+ case CRYPTO_SHA1_HMAC:
+ (*ocd)->octo_encrypt = octo_aes_cbc_sha1_encrypt;
+ (*ocd)->octo_decrypt = octo_aes_cbc_sha1_decrypt;
+ octo_calc_hash(1, macini->cri_key, (*ocd)->octo_hminner,
+ (*ocd)->octo_hmouter);
+ break;
+ case -1:
+ (*ocd)->octo_encrypt = octo_aes_cbc_encrypt;
+ (*ocd)->octo_decrypt = octo_aes_cbc_decrypt;
+ break;
+ default:
+ octo_freesession(NULL, i);
+			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+ break;
+ case CRYPTO_MD5_HMAC:
+ (*ocd)->octo_encrypt = octo_null_md5_encrypt;
+ (*ocd)->octo_decrypt = octo_null_md5_encrypt; /* encrypt == decrypt */
+ octo_calc_hash(0, macini->cri_key, (*ocd)->octo_hminner,
+ (*ocd)->octo_hmouter);
+ break;
+ case CRYPTO_SHA1_HMAC:
+ (*ocd)->octo_encrypt = octo_null_sha1_encrypt;
+ (*ocd)->octo_decrypt = octo_null_sha1_encrypt; /* encrypt == decrypt */
+ octo_calc_hash(1, macini->cri_key, (*ocd)->octo_hminner,
+ (*ocd)->octo_hmouter);
+ break;
+ default:
+ octo_freesession(NULL, i);
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ (*ocd)->octo_encalg = encini ? encini->cri_alg : -1;
+ (*ocd)->octo_macalg = macini ? macini->cri_alg : -1;
+
+ return 0;
+}
+
+/*
+ * Free a session.
+ */
+static int
+octo_freesession(device_t dev, u_int64_t tid)
+{
+ u_int32_t sid = CRYPTO_SESID2LID(tid);
+
+ dprintk("%s()\n", __FUNCTION__);
+	if (sid >= octo_sesnum || octo_sessions == NULL ||
+ octo_sessions[sid] == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return(EINVAL);
+ }
+
+ /* Silently accept and return */
+ if (sid == 0)
+ return(0);
+
+ if (octo_sessions[sid])
+ kfree(octo_sessions[sid]);
+ octo_sessions[sid] = NULL;
+ return 0;
+}
+
+/*
+ * Process a request.
+ */
+static int
+octo_process(device_t dev, struct cryptop *crp, int hint)
+{
+ struct cryptodesc *crd;
+ struct octo_sess *od;
+ u_int32_t lid;
+#define SCATTERLIST_MAX 16
+ struct scatterlist sg[SCATTERLIST_MAX];
+ int sg_num, sg_len;
+ struct sk_buff *skb = NULL;
+ struct uio *uiop = NULL;
+ struct cryptodesc *enccrd = NULL, *maccrd = NULL;
+ unsigned char *ivp = NULL;
+ unsigned char iv_data[HASH_MAX_LEN];
+ int auth_off = 0, auth_len = 0, crypt_off = 0, crypt_len = 0, icv_off = 0;
+
+ dprintk("%s()\n", __FUNCTION__);
+ /* Sanity check */
+ if (crp == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ crp->crp_etype = 0;
+
+ if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+
+ lid = crp->crp_sid & 0xffffffff;
+ if (lid >= octo_sesnum || lid == 0 || octo_sessions == NULL ||
+ octo_sessions[lid] == NULL) {
+ crp->crp_etype = ENOENT;
+ dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
+ goto done;
+ }
+ od = octo_sessions[lid];
+
+ /*
+ * do some error checking outside of the loop for SKB and IOV processing;
+ * this leaves us with valid skb or uiop pointers for later
+ */
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ skb = (struct sk_buff *) crp->crp_buf;
+ if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
+ printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
+ skb_shinfo(skb)->nr_frags);
+ goto done;
+ }
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ uiop = (struct uio *) crp->crp_buf;
+ if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
+ printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
+ uiop->uio_iovcnt);
+ goto done;
+ }
+ }
+
+ /* point our enccrd and maccrd appropriately */
+ crd = crp->crp_desc;
+ if (crd->crd_alg == od->octo_encalg) enccrd = crd;
+ if (crd->crd_alg == od->octo_macalg) maccrd = crd;
+ crd = crd->crd_next;
+ if (crd) {
+ if (crd->crd_alg == od->octo_encalg) enccrd = crd;
+ if (crd->crd_alg == od->octo_macalg) maccrd = crd;
+ crd = crd->crd_next;
+ }
+ if (crd) {
+ crp->crp_etype = EINVAL;
+ dprintk("%s,%d: ENOENT - descriptors do not match session\n",
+ __FILE__, __LINE__);
+ goto done;
+ }
+
+ if (enccrd) {
+ if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+ if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+ ivp = enccrd->crd_iv;
+ else
+ read_random((ivp = iv_data), od->octo_ivsize);
+ if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ enccrd->crd_inject, od->octo_ivsize, ivp);
+ } else {
+ if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
+ ivp = enccrd->crd_iv;
+ } else {
+ ivp = iv_data;
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ enccrd->crd_inject, od->octo_ivsize, (caddr_t) ivp);
+ }
+ }
+
+ if (maccrd) {
+ auth_off = maccrd->crd_skip;
+ auth_len = maccrd->crd_len;
+ icv_off = maccrd->crd_inject;
+ }
+
+ crypt_off = enccrd->crd_skip;
+ crypt_len = enccrd->crd_len;
+ } else { /* if (maccrd) */
+ auth_off = maccrd->crd_skip;
+ auth_len = maccrd->crd_len;
+ icv_off = maccrd->crd_inject;
+ }
+
+
+ /*
+ * setup the SG list to cover the buffer
+ */
+ memset(sg, 0, sizeof(sg));
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ int i, len;
+
+ sg_num = 0;
+ sg_len = 0;
+
+ len = skb_headlen(skb);
+ sg_set_page(&sg[sg_num], virt_to_page(skb->data), len,
+ offset_in_page(skb->data));
+ sg_len += len;
+ sg_num++;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags && sg_num < SCATTERLIST_MAX;
+ i++) {
+ len = skb_shinfo(skb)->frags[i].size;
+ sg_set_page(&sg[sg_num], skb_frag_page(&skb_shinfo(skb)->frags[i]),
+ len, skb_shinfo(skb)->frags[i].page_offset);
+ sg_len += len;
+ sg_num++;
+ }
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ int len;
+
+ sg_len = 0;
+ for (sg_num = 0; sg_len < crp->crp_ilen &&
+ sg_num < uiop->uio_iovcnt &&
+ sg_num < SCATTERLIST_MAX; sg_num++) {
+ len = uiop->uio_iov[sg_num].iov_len;
+ sg_set_page(&sg[sg_num],
+ virt_to_page(uiop->uio_iov[sg_num].iov_base), len,
+ offset_in_page(uiop->uio_iov[sg_num].iov_base));
+ sg_len += len;
+ }
+ } else {
+ sg_len = crp->crp_ilen;
+ sg_set_page(&sg[0], virt_to_page(crp->crp_buf), sg_len,
+ offset_in_page(crp->crp_buf));
+ sg_num = 1;
+ }
+ if (sg_num > 0)
+ sg_mark_end(&sg[sg_num-1]);
+
+ /*
+ * setup a new explicit key
+ */
+ if (enccrd) {
+ if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
+ od->octo_encklen = (enccrd->crd_klen + 7) / 8;
+ memcpy(od->octo_enckey, enccrd->crd_key, od->octo_encklen);
+ }
+ }
+ if (maccrd) {
+ if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
+ od->octo_macklen = (maccrd->crd_klen + 7) / 8;
+ memcpy(od->octo_mackey, maccrd->crd_key, od->octo_macklen);
+ od->octo_mackey_set = 0;
+ }
+ if (!od->octo_mackey_set) {
+ octo_calc_hash(maccrd->crd_alg == CRYPTO_MD5_HMAC ? 0 : 1,
+ maccrd->crd_key, od->octo_hminner, od->octo_hmouter);
+ od->octo_mackey_set = 1;
+ }
+ }
+
+
+ if (!enccrd || (enccrd->crd_flags & CRD_F_ENCRYPT))
+ (*od->octo_encrypt)(od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ else
+ (*od->octo_decrypt)(od, sg, sg_len,
+ auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+
+done:
+ crypto_done(crp);
+ return 0;
+}
+
+static int
+cryptocteon_init(void)
+{
+ dprintk("%s(%p)\n", __FUNCTION__, cryptocteon_init);
+
+ softc_device_init(&octo_softc, "cryptocteon", 0, octo_methods);
+
+ octo_id = crypto_get_driverid(softc_get_device(&octo_softc),
+ CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC);
+ if (octo_id < 0) {
+ printk("Cryptocteon device cannot initialize!");
+ return -ENODEV;
+ }
+
+ crypto_register(octo_id, CRYPTO_MD5_HMAC, 0,0);
+ crypto_register(octo_id, CRYPTO_SHA1_HMAC, 0,0);
+ //crypto_register(octo_id, CRYPTO_MD5, 0,0);
+ //crypto_register(octo_id, CRYPTO_SHA1, 0,0);
+ crypto_register(octo_id, CRYPTO_DES_CBC, 0,0);
+ crypto_register(octo_id, CRYPTO_3DES_CBC, 0,0);
+ crypto_register(octo_id, CRYPTO_AES_CBC, 0,0);
+
+ return(0);
+}
+
+static void
+cryptocteon_exit(void)
+{
+ dprintk("%s()\n", __FUNCTION__);
+ crypto_unregister_all(octo_id);
+ octo_id = -1;
+}
+
+module_init(cryptocteon_init);
+module_exit(cryptocteon_exit);
+
+MODULE_LICENSE("BSD");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("Cryptocteon (OCF module for Cavium OCTEON crypto)");
diff --git a/target/linux/generic/files/crypto/ocf/cryptodev.c b/target/linux/generic/files/crypto/ocf/cryptodev.c
new file mode 100644
index 000000000..2ee3618cd
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/cryptodev.c
@@ -0,0 +1,1069 @@
+/* $OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $ */
+
+/*-
+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ * The license and original author are listed below.
+ *
+ * Copyright (c) 2001 Theo de Raadt
+ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+__FBSDID("$FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.34 2007/05/09 19:37:02 gnn Exp $");
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/unistd.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/miscdevice.h>
+#include <asm/uaccess.h>
+
+#include <cryptodev.h>
+#include <uio.h>
+
+extern asmlinkage long sys_dup(unsigned int fildes);
+
+#define debug cryptodev_debug
+int cryptodev_debug = 0;
+module_param(cryptodev_debug, int, 0644);
+MODULE_PARM_DESC(cryptodev_debug, "Enable cryptodev debug");
+
+struct csession_info {
+ u_int16_t blocksize;
+ u_int16_t minkey, maxkey;
+
+ u_int16_t keysize;
+ /* u_int16_t hashsize; */
+ u_int16_t authsize;
+ u_int16_t authkey;
+ /* u_int16_t ctxsize; */
+};
+
+struct csession {
+ struct list_head list;
+ u_int64_t sid;
+ u_int32_t ses;
+
+ wait_queue_head_t waitq;
+
+ u_int32_t cipher;
+
+ u_int32_t mac;
+
+ caddr_t key;
+ int keylen;
+ u_char tmp_iv[EALG_MAX_BLOCK_LEN];
+
+ caddr_t mackey;
+ int mackeylen;
+
+ struct csession_info info;
+
+ struct iovec iovec;
+ struct uio uio;
+ int error;
+};
+
+struct fcrypt {
+ struct list_head csessions;
+ int sesn;
+};
+
+static struct csession *csefind(struct fcrypt *, u_int);
+static int csedelete(struct fcrypt *, struct csession *);
+static struct csession *cseadd(struct fcrypt *, struct csession *);
+static struct csession *csecreate(struct fcrypt *, u_int64_t,
+ struct cryptoini *crie, struct cryptoini *cria, struct csession_info *);
+static int csefree(struct csession *);
+
+static int cryptodev_op(struct csession *, struct crypt_op *);
+static int cryptodev_key(struct crypt_kop *);
+static int cryptodev_find(struct crypt_find_op *);
+
+static int cryptodev_cb(void *);
+static int cryptodev_open(struct inode *inode, struct file *filp);
+
+/*
+ * Check a crypto identifier to see if it requested
+ * a valid crid and its capabilities match.
+ */
+static int
+checkcrid(int crid)
+{
+ int hid = crid & ~(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
+ int typ = crid & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
+ int caps = 0;
+
+ /* if the user hasn't selected a driver, then just call newsession */
+ if (hid == 0 && typ != 0)
+ return 0;
+
+ caps = crypto_getcaps(hid);
+
+ /* didn't find anything with capabilities */
+ if (caps == 0) {
+ dprintk("%s: hid=%x typ=%x not matched\n", __FUNCTION__, hid, typ);
+ return EINVAL;
+ }
+
+ /* the user didn't specify SW or HW, so the driver is ok */
+ if (typ == 0)
+ return 0;
+
+ /* if the type specified didn't match */
+ if (typ != (caps & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE))) {
+ dprintk("%s: hid=%x typ=%x caps=%x not matched\n", __FUNCTION__,
+ hid, typ, caps);
+ return EINVAL;
+ }
+
+ return 0;
+}
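+
+/*
+ * For illustration only (the driver id below is made up): a crid of
+ * CRYPTOCAP_F_HARDWARE means "any hardware driver", a crid of
+ * CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE means "no preference",
+ * and a crid of (5 | CRYPTOCAP_F_HARDWARE) means "driver id 5, which
+ * must report the hardware capability"; checkcrid() rejects that last
+ * form with EINVAL if driver 5 turns out to be software-only.
+ */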
+
+static int
+cryptodev_op(struct csession *cse, struct crypt_op *cop)
+{
+ struct cryptop *crp = NULL;
+ struct cryptodesc *crde = NULL, *crda = NULL;
+ int error = 0;
+
+ dprintk("%s()\n", __FUNCTION__);
+ if (cop->len > CRYPTO_MAX_DATA_LEN) {
+ dprintk("%s: %d > %d\n", __FUNCTION__, cop->len, CRYPTO_MAX_DATA_LEN);
+ return (E2BIG);
+ }
+
+ if (cse->info.blocksize && (cop->len % cse->info.blocksize) != 0) {
+ dprintk("%s: blocksize=%d len=%d\n", __FUNCTION__, cse->info.blocksize,
+ cop->len);
+ return (EINVAL);
+ }
+
+ cse->uio.uio_iov = &cse->iovec;
+ cse->uio.uio_iovcnt = 1;
+ cse->uio.uio_offset = 0;
+#if 0
+ cse->uio.uio_resid = cop->len;
+ cse->uio.uio_segflg = UIO_SYSSPACE;
+ cse->uio.uio_rw = UIO_WRITE;
+ cse->uio.uio_td = td;
+#endif
+ cse->uio.uio_iov[0].iov_len = cop->len;
+ if (cse->info.authsize)
+ cse->uio.uio_iov[0].iov_len += cse->info.authsize;
+ cse->uio.uio_iov[0].iov_base = kmalloc(cse->uio.uio_iov[0].iov_len,
+ GFP_KERNEL);
+
+ if (cse->uio.uio_iov[0].iov_base == NULL) {
+ dprintk("%s: iov_base kmalloc(%d) failed\n", __FUNCTION__,
+ (int)cse->uio.uio_iov[0].iov_len);
+ return (ENOMEM);
+ }
+
+ crp = crypto_getreq((cse->info.blocksize != 0) + (cse->info.authsize != 0));
+ if (crp == NULL) {
+ dprintk("%s: ENOMEM\n", __FUNCTION__);
+ error = ENOMEM;
+ goto bail;
+ }
+
+ if (cse->info.authsize && cse->info.blocksize) {
+ if (cop->op == COP_ENCRYPT) {
+ crde = crp->crp_desc;
+ crda = crde->crd_next;
+ } else {
+ crda = crp->crp_desc;
+ crde = crda->crd_next;
+ }
+ } else if (cse->info.authsize) {
+ crda = crp->crp_desc;
+ } else if (cse->info.blocksize) {
+ crde = crp->crp_desc;
+ } else {
+ dprintk("%s: bad request\n", __FUNCTION__);
+ error = EINVAL;
+ goto bail;
+ }
+
+ if ((error = copy_from_user(cse->uio.uio_iov[0].iov_base, cop->src,
+ cop->len))) {
+ dprintk("%s: bad copy\n", __FUNCTION__);
+ goto bail;
+ }
+
+ if (crda) {
+ crda->crd_skip = 0;
+ crda->crd_len = cop->len;
+ crda->crd_inject = cop->len;
+
+ crda->crd_alg = cse->mac;
+ crda->crd_key = cse->mackey;
+ crda->crd_klen = cse->mackeylen * 8;
+ }
+
+ if (crde) {
+ if (cop->op == COP_ENCRYPT)
+ crde->crd_flags |= CRD_F_ENCRYPT;
+ else
+ crde->crd_flags &= ~CRD_F_ENCRYPT;
+ crde->crd_len = cop->len;
+ crde->crd_inject = 0;
+
+ crde->crd_alg = cse->cipher;
+ crde->crd_key = cse->key;
+ crde->crd_klen = cse->keylen * 8;
+ }
+
+ crp->crp_ilen = cse->uio.uio_iov[0].iov_len;
+ crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
+ | (cop->flags & COP_F_BATCH);
+ crp->crp_buf = (caddr_t)&cse->uio;
+ crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
+ crp->crp_sid = cse->sid;
+ crp->crp_opaque = (void *)cse;
+
+ if (cop->iv) {
+ if (crde == NULL) {
+ error = EINVAL;
+ dprintk("%s no crde\n", __FUNCTION__);
+ goto bail;
+ }
+ if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
+ error = EINVAL;
+ dprintk("%s arc4 with IV\n", __FUNCTION__);
+ goto bail;
+ }
+ if ((error = copy_from_user(cse->tmp_iv, cop->iv,
+ cse->info.blocksize))) {
+ dprintk("%s bad iv copy\n", __FUNCTION__);
+ goto bail;
+ }
+ memcpy(crde->crd_iv, cse->tmp_iv, cse->info.blocksize);
+ crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
+ crde->crd_skip = 0;
+ } else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
+ crde->crd_skip = 0;
+ } else if (crde) {
+ crde->crd_flags |= CRD_F_IV_PRESENT;
+ crde->crd_skip = cse->info.blocksize;
+ crde->crd_len -= cse->info.blocksize;
+ }
+
+ if (cop->mac && crda == NULL) {
+ error = EINVAL;
+ dprintk("%s no crda\n", __FUNCTION__);
+ goto bail;
+ }
+
+ /*
+ * Let the dispatch run unlocked, then, interlock against the
+ * callback before checking if the operation completed and going
+ * to sleep. This ensures drivers don't inherit our lock which
+ * results in a lock order reversal between crypto_dispatch forced
+ * entry and the crypto_done callback into us.
+ */
+ error = crypto_dispatch(crp);
+ if (error) {
+ dprintk("%s error in crypto_dispatch\n", __FUNCTION__);
+ goto bail;
+ }
+
+ dprintk("%s about to WAIT\n", __FUNCTION__);
+ /*
+ * we really need to wait for the driver to complete in order to
+ * maintain state; luckily, interrupts will be remembered
+ */
+ do {
+ error = wait_event_interruptible(crp->crp_waitq,
+ ((crp->crp_flags & CRYPTO_F_DONE) != 0));
+ /*
+ * we can't break out of this loop or we will leave behind
+ * a huge mess; however, staying here means that if your driver
+ * is broken, user applications can hang and not be killed.
+ * The solution: fix your driver :-)
+ */
+ if (error) {
+ schedule();
+ error = 0;
+ }
+ } while ((crp->crp_flags & CRYPTO_F_DONE) == 0);
+ dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
+
+ if (crp->crp_etype != 0) {
+ error = crp->crp_etype;
+ dprintk("%s error in crp processing\n", __FUNCTION__);
+ goto bail;
+ }
+
+ if (cse->error) {
+ error = cse->error;
+ dprintk("%s error in cse processing\n", __FUNCTION__);
+ goto bail;
+ }
+
+ if (cop->dst && (error = copy_to_user(cop->dst,
+ cse->uio.uio_iov[0].iov_base, cop->len))) {
+ dprintk("%s bad dst copy\n", __FUNCTION__);
+ goto bail;
+ }
+
+ if (cop->mac &&
+ (error=copy_to_user(cop->mac,
+ (caddr_t)cse->uio.uio_iov[0].iov_base + cop->len,
+ cse->info.authsize))) {
+ dprintk("%s bad mac copy\n", __FUNCTION__);
+ goto bail;
+ }
+
+bail:
+ if (crp)
+ crypto_freereq(crp);
+ if (cse->uio.uio_iov[0].iov_base)
+ kfree(cse->uio.uio_iov[0].iov_base);
+
+ return (error);
+}
+
+static int
+cryptodev_cb(void *op)
+{
+ struct cryptop *crp = (struct cryptop *) op;
+ struct csession *cse = (struct csession *)crp->crp_opaque;
+ int error;
+
+ dprintk("%s()\n", __FUNCTION__);
+ error = crp->crp_etype;
+ if (error == EAGAIN) {
+ crp->crp_flags &= ~CRYPTO_F_DONE;
+#ifdef NOTYET
+ /*
+ * DAVIDM I am fairly sure that we should turn this into a batch
+ * request to stop bad karma/lockup, revisit
+ */
+ crp->crp_flags |= CRYPTO_F_BATCH;
+#endif
+ return crypto_dispatch(crp);
+ }
+ if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
+ cse->error = error;
+ wake_up_interruptible(&crp->crp_waitq);
+ }
+ return (0);
+}
+
+static int
+cryptodevkey_cb(void *op)
+{
+ struct cryptkop *krp = (struct cryptkop *) op;
+ dprintk("%s()\n", __FUNCTION__);
+ wake_up_interruptible(&krp->krp_waitq);
+ return (0);
+}
+
+static int
+cryptodev_key(struct crypt_kop *kop)
+{
+ struct cryptkop *krp = NULL;
+ int error = EINVAL;
+ int in, out, size, i;
+
+ dprintk("%s()\n", __FUNCTION__);
+ if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) {
+ dprintk("%s params too big\n", __FUNCTION__);
+ return (EFBIG);
+ }
+
+ in = kop->crk_iparams;
+ out = kop->crk_oparams;
+ switch (kop->crk_op) {
+ case CRK_MOD_EXP:
+ if (in == 3 && out == 1)
+ break;
+ return (EINVAL);
+ case CRK_MOD_EXP_CRT:
+ if (in == 6 && out == 1)
+ break;
+ return (EINVAL);
+ case CRK_DSA_SIGN:
+ if (in == 5 && out == 2)
+ break;
+ return (EINVAL);
+ case CRK_DSA_VERIFY:
+ if (in == 7 && out == 0)
+ break;
+ return (EINVAL);
+ case CRK_DH_COMPUTE_KEY:
+ if (in == 3 && out == 1)
+ break;
+ return (EINVAL);
+ default:
+ return (EINVAL);
+ }
+
+ krp = (struct cryptkop *)kmalloc(sizeof *krp, GFP_KERNEL);
+ if (!krp)
+ return (ENOMEM);
+ bzero(krp, sizeof *krp);
+ krp->krp_op = kop->crk_op;
+ krp->krp_status = kop->crk_status;
+ krp->krp_iparams = kop->crk_iparams;
+ krp->krp_oparams = kop->crk_oparams;
+ krp->krp_crid = kop->crk_crid;
+ krp->krp_status = 0;
+ krp->krp_flags = CRYPTO_KF_CBIMM;
+ krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
+ init_waitqueue_head(&krp->krp_waitq);
+
+ for (i = 0; i < CRK_MAXPARAM; i++)
+ krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
+ for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
+ size = (krp->krp_param[i].crp_nbits + 7) / 8;
+ if (size == 0)
+ continue;
+ krp->krp_param[i].crp_p = (caddr_t) kmalloc(size, GFP_KERNEL);
+ if (i >= krp->krp_iparams)
+ continue;
+ error = copy_from_user(krp->krp_param[i].crp_p,
+ kop->crk_param[i].crp_p, size);
+ if (error)
+ goto fail;
+ }
+
+ error = crypto_kdispatch(krp);
+ if (error)
+ goto fail;
+
+ do {
+ error = wait_event_interruptible(krp->krp_waitq,
+ ((krp->krp_flags & CRYPTO_KF_DONE) != 0));
+ /*
+ * we can't break out of this loop or we will leave behind
+ * a huge mess; however, staying here means that if your driver
+ * is broken, user applications can hang and not be killed.
+ * The solution: fix your driver :-)
+ */
+ if (error) {
+ schedule();
+ error = 0;
+ }
+ } while ((krp->krp_flags & CRYPTO_KF_DONE) == 0);
+
+ dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
+
+ kop->crk_crid = krp->krp_crid; /* device that did the work */
+ if (krp->krp_status != 0) {
+ error = krp->krp_status;
+ goto fail;
+ }
+
+ for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) {
+ size = (krp->krp_param[i].crp_nbits + 7) / 8;
+ if (size == 0)
+ continue;
+ error = copy_to_user(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p,
+ size);
+ if (error)
+ goto fail;
+ }
+
+fail:
+ if (krp) {
+ kop->crk_status = krp->krp_status;
+ for (i = 0; i < CRK_MAXPARAM; i++) {
+ if (krp->krp_param[i].crp_p)
+ kfree(krp->krp_param[i].crp_p);
+ }
+ kfree(krp);
+ }
+ return (error);
+}
+
+static int
+cryptodev_find(struct crypt_find_op *find)
+{
+ device_t dev;
+
+ if (find->crid != -1) {
+ dev = crypto_find_device_byhid(find->crid);
+ if (dev == NULL)
+ return (ENOENT);
+ strlcpy(find->name, device_get_nameunit(dev),
+ sizeof(find->name));
+ } else {
+ find->crid = crypto_find_driver(find->name);
+ if (find->crid == -1)
+ return (ENOENT);
+ }
+ return (0);
+}
+
+static struct csession *
+csefind(struct fcrypt *fcr, u_int ses)
+{
+ struct csession *cse;
+
+ dprintk("%s()\n", __FUNCTION__);
+ list_for_each_entry(cse, &fcr->csessions, list)
+ if (cse->ses == ses)
+ return (cse);
+ return (NULL);
+}
+
+static int
+csedelete(struct fcrypt *fcr, struct csession *cse_del)
+{
+ struct csession *cse;
+
+ dprintk("%s()\n", __FUNCTION__);
+ list_for_each_entry(cse, &fcr->csessions, list) {
+ if (cse == cse_del) {
+ list_del(&cse->list);
+ return (1);
+ }
+ }
+ return (0);
+}
+
+static struct csession *
+cseadd(struct fcrypt *fcr, struct csession *cse)
+{
+ dprintk("%s()\n", __FUNCTION__);
+ list_add_tail(&cse->list, &fcr->csessions);
+ cse->ses = fcr->sesn++;
+ return (cse);
+}
+
+static struct csession *
+csecreate(struct fcrypt *fcr, u_int64_t sid, struct cryptoini *crie,
+ struct cryptoini *cria, struct csession_info *info)
+{
+ struct csession *cse;
+
+ dprintk("%s()\n", __FUNCTION__);
+ cse = (struct csession *) kmalloc(sizeof(struct csession), GFP_KERNEL);
+ if (cse == NULL)
+ return NULL;
+ memset(cse, 0, sizeof(struct csession));
+
+ INIT_LIST_HEAD(&cse->list);
+ init_waitqueue_head(&cse->waitq);
+
+ cse->key = crie->cri_key;
+ cse->keylen = crie->cri_klen/8;
+ cse->mackey = cria->cri_key;
+ cse->mackeylen = cria->cri_klen/8;
+ cse->sid = sid;
+ cse->cipher = crie->cri_alg;
+ cse->mac = cria->cri_alg;
+ cse->info = *info;
+ cseadd(fcr, cse);
+ return (cse);
+}
+
+static int
+csefree(struct csession *cse)
+{
+ int error;
+
+ dprintk("%s()\n", __FUNCTION__);
+ error = crypto_freesession(cse->sid);
+ if (cse->key)
+ kfree(cse->key);
+ if (cse->mackey)
+ kfree(cse->mackey);
+ kfree(cse);
+ return(error);
+}
+
+static int
+cryptodev_ioctl(
+ struct inode *inode,
+ struct file *filp,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct cryptoini cria, crie;
+ struct fcrypt *fcr = filp->private_data;
+ struct csession *cse;
+ struct csession_info info;
+ struct session2_op sop;
+ struct crypt_op cop;
+ struct crypt_kop kop;
+ struct crypt_find_op fop;
+ u_int64_t sid;
+ u_int32_t ses = 0;
+ int feat, fd, error = 0, crid;
+ mm_segment_t fs;
+
+ dprintk("%s(cmd=%x arg=%lx)\n", __FUNCTION__, cmd, arg);
+
+ switch (cmd) {
+
+ case CRIOGET: {
+ dprintk("%s(CRIOGET)\n", __FUNCTION__);
+ fs = get_fs();
+ set_fs(get_ds());
+ for (fd = 0; fd < files_fdtable(current->files)->max_fds; fd++)
+ if (files_fdtable(current->files)->fd[fd] == filp)
+ break;
+ fd = sys_dup(fd);
+ set_fs(fs);
+ put_user(fd, (int *) arg);
+ return IS_ERR_VALUE(fd) ? fd : 0;
+ }
+
+#define CIOCGSESSSTR (cmd == CIOCGSESSION ? "CIOCGSESSION" : "CIOCGSESSION2")
+ case CIOCGSESSION:
+ case CIOCGSESSION2:
+ dprintk("%s(%s)\n", __FUNCTION__, CIOCGSESSSTR);
+ memset(&crie, 0, sizeof(crie));
+ memset(&cria, 0, sizeof(cria));
+ memset(&info, 0, sizeof(info));
+ memset(&sop, 0, sizeof(sop));
+
+ if (copy_from_user(&sop, (void*)arg, (cmd == CIOCGSESSION) ?
+ sizeof(struct session_op) : sizeof(sop))) {
+ dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
+ error = EFAULT;
+ goto bail;
+ }
+
+ switch (sop.cipher) {
+ case 0:
+ dprintk("%s(%s) - no cipher\n", __FUNCTION__, CIOCGSESSSTR);
+ break;
+ case CRYPTO_NULL_CBC:
+ info.blocksize = NULL_BLOCK_LEN;
+ info.minkey = NULL_MIN_KEY_LEN;
+ info.maxkey = NULL_MAX_KEY_LEN;
+ break;
+ case CRYPTO_DES_CBC:
+ info.blocksize = DES_BLOCK_LEN;
+ info.minkey = DES_MIN_KEY_LEN;
+ info.maxkey = DES_MAX_KEY_LEN;
+ break;
+ case CRYPTO_3DES_CBC:
+ info.blocksize = DES3_BLOCK_LEN;
+ info.minkey = DES3_MIN_KEY_LEN;
+ info.maxkey = DES3_MAX_KEY_LEN;
+ break;
+ case CRYPTO_BLF_CBC:
+ info.blocksize = BLOWFISH_BLOCK_LEN;
+ info.minkey = BLOWFISH_MIN_KEY_LEN;
+ info.maxkey = BLOWFISH_MAX_KEY_LEN;
+ break;
+ case CRYPTO_CAST_CBC:
+ info.blocksize = CAST128_BLOCK_LEN;
+ info.minkey = CAST128_MIN_KEY_LEN;
+ info.maxkey = CAST128_MAX_KEY_LEN;
+ break;
+ case CRYPTO_SKIPJACK_CBC:
+ info.blocksize = SKIPJACK_BLOCK_LEN;
+ info.minkey = SKIPJACK_MIN_KEY_LEN;
+ info.maxkey = SKIPJACK_MAX_KEY_LEN;
+ break;
+ case CRYPTO_AES_CBC:
+ info.blocksize = AES_BLOCK_LEN;
+ info.minkey = AES_MIN_KEY_LEN;
+ info.maxkey = AES_MAX_KEY_LEN;
+ break;
+ case CRYPTO_ARC4:
+ info.blocksize = ARC4_BLOCK_LEN;
+ info.minkey = ARC4_MIN_KEY_LEN;
+ info.maxkey = ARC4_MAX_KEY_LEN;
+ break;
+ case CRYPTO_CAMELLIA_CBC:
+ info.blocksize = CAMELLIA_BLOCK_LEN;
+ info.minkey = CAMELLIA_MIN_KEY_LEN;
+ info.maxkey = CAMELLIA_MAX_KEY_LEN;
+ break;
+ default:
+ dprintk("%s(%s) - bad cipher\n", __FUNCTION__, CIOCGSESSSTR);
+ error = EINVAL;
+ goto bail;
+ }
+
+ switch (sop.mac) {
+ case 0:
+ dprintk("%s(%s) - no mac\n", __FUNCTION__, CIOCGSESSSTR);
+ break;
+ case CRYPTO_NULL_HMAC:
+ info.authsize = NULL_HASH_LEN;
+ break;
+ case CRYPTO_MD5:
+ info.authsize = MD5_HASH_LEN;
+ break;
+ case CRYPTO_SHA1:
+ info.authsize = SHA1_HASH_LEN;
+ break;
+ case CRYPTO_SHA2_256:
+ info.authsize = SHA2_256_HASH_LEN;
+ break;
+ case CRYPTO_SHA2_384:
+ info.authsize = SHA2_384_HASH_LEN;
+ break;
+ case CRYPTO_SHA2_512:
+ info.authsize = SHA2_512_HASH_LEN;
+ break;
+ case CRYPTO_RIPEMD160:
+ info.authsize = RIPEMD160_HASH_LEN;
+ break;
+ case CRYPTO_MD5_HMAC:
+ info.authsize = MD5_HASH_LEN;
+ info.authkey = 16;
+ break;
+ case CRYPTO_SHA1_HMAC:
+ info.authsize = SHA1_HASH_LEN;
+ info.authkey = 20;
+ break;
+ case CRYPTO_SHA2_256_HMAC:
+ info.authsize = SHA2_256_HASH_LEN;
+ info.authkey = 32;
+ break;
+ case CRYPTO_SHA2_384_HMAC:
+ info.authsize = SHA2_384_HASH_LEN;
+ info.authkey = 48;
+ break;
+ case CRYPTO_SHA2_512_HMAC:
+ info.authsize = SHA2_512_HASH_LEN;
+ info.authkey = 64;
+ break;
+ case CRYPTO_RIPEMD160_HMAC:
+ info.authsize = RIPEMD160_HASH_LEN;
+ info.authkey = 20;
+ break;
+ default:
+ dprintk("%s(%s) - bad mac\n", __FUNCTION__, CIOCGSESSSTR);
+ error = EINVAL;
+ goto bail;
+ }
+
+ if (info.blocksize) {
+ crie.cri_alg = sop.cipher;
+ crie.cri_klen = sop.keylen * 8;
+ if ((info.maxkey && sop.keylen > info.maxkey) ||
+ sop.keylen < info.minkey) {
+ dprintk("%s(%s) - bad key\n", __FUNCTION__, CIOCGSESSSTR);
+ error = EINVAL;
+ goto bail;
+ }
+
+ crie.cri_key = (u_int8_t *) kmalloc(crie.cri_klen/8+1, GFP_KERNEL);
+ if (copy_from_user(crie.cri_key, sop.key,
+ crie.cri_klen/8)) {
+ dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
+ error = EFAULT;
+ goto bail;
+ }
+ if (info.authsize)
+ crie.cri_next = &cria;
+ }
+
+ if (info.authsize) {
+ cria.cri_alg = sop.mac;
+ cria.cri_klen = sop.mackeylen * 8;
+ if (info.authkey && sop.mackeylen != info.authkey) {
+ dprintk("%s(%s) - mackeylen %d != %d\n", __FUNCTION__,
+ CIOCGSESSSTR, sop.mackeylen, info.authkey);
+ error = EINVAL;
+ goto bail;
+ }
+
+ if (cria.cri_klen) {
+ cria.cri_key = (u_int8_t *) kmalloc(cria.cri_klen/8,GFP_KERNEL);
+ if (copy_from_user(cria.cri_key, sop.mackey,
+ cria.cri_klen / 8)) {
+ dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
+ error = EFAULT;
+ goto bail;
+ }
+ }
+ }
+
+ /* NB: CIOCGSESSION2 has the crid */
+ if (cmd == CIOCGSESSION2) {
+ crid = sop.crid;
+ error = checkcrid(crid);
+ if (error) {
+ dprintk("%s(%s) - checkcrid %x\n", __FUNCTION__,
+ CIOCGSESSSTR, error);
+ goto bail;
+ }
+ } else {
+ /* allow either HW or SW to be used */
+ crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
+ }
+ error = crypto_newsession(&sid, (info.blocksize ? &crie : &cria), crid);
+ if (error) {
+ dprintk("%s(%s) - newsession %d\n",__FUNCTION__,CIOCGSESSSTR,error);
+ goto bail;
+ }
+
+ cse = csecreate(fcr, sid, &crie, &cria, &info);
+ if (cse == NULL) {
+ crypto_freesession(sid);
+ error = EINVAL;
+ dprintk("%s(%s) - csecreate failed\n", __FUNCTION__, CIOCGSESSSTR);
+ goto bail;
+ }
+ sop.ses = cse->ses;
+
+ if (cmd == CIOCGSESSION2) {
+ /* return hardware/driver id */
+ sop.crid = CRYPTO_SESID2HID(cse->sid);
+ }
+
+ if (copy_to_user((void*)arg, &sop, (cmd == CIOCGSESSION) ?
+ sizeof(struct session_op) : sizeof(sop))) {
+ dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
+ error = EFAULT;
+ }
+bail:
+ if (error) {
+ dprintk("%s(%s) - bail %d\n", __FUNCTION__, CIOCGSESSSTR, error);
+ if (crie.cri_key)
+ kfree(crie.cri_key);
+ if (cria.cri_key)
+ kfree(cria.cri_key);
+ }
+ break;
+ case CIOCFSESSION:
+ dprintk("%s(CIOCFSESSION)\n", __FUNCTION__);
+ get_user(ses, (uint32_t*)arg);
+ cse = csefind(fcr, ses);
+ if (cse == NULL) {
+ error = EINVAL;
+ dprintk("%s(CIOCFSESSION) - Fail %d\n", __FUNCTION__, error);
+ break;
+ }
+ csedelete(fcr, cse);
+ error = csefree(cse);
+ break;
+ case CIOCCRYPT:
+ dprintk("%s(CIOCCRYPT)\n", __FUNCTION__);
+ if(copy_from_user(&cop, (void*)arg, sizeof(cop))) {
+ dprintk("%s(CIOCCRYPT) - bad copy\n", __FUNCTION__);
+ error = EFAULT;
+ goto bail;
+ }
+ cse = csefind(fcr, cop.ses);
+ if (cse == NULL) {
+ error = EINVAL;
+ dprintk("%s(CIOCCRYPT) - Fail %d\n", __FUNCTION__, error);
+ break;
+ }
+ error = cryptodev_op(cse, &cop);
+ if(copy_to_user((void*)arg, &cop, sizeof(cop))) {
+ dprintk("%s(CIOCCRYPT) - bad return copy\n", __FUNCTION__);
+ error = EFAULT;
+ goto bail;
+ }
+ break;
+ case CIOCKEY:
+ case CIOCKEY2:
+ dprintk("%s(CIOCKEY)\n", __FUNCTION__);
+ if (!crypto_userasymcrypto)
+ return (EPERM); /* XXX compat? */
+ if(copy_from_user(&kop, (void*)arg, sizeof(kop))) {
+ dprintk("%s(CIOCKEY) - bad copy\n", __FUNCTION__);
+ error = EFAULT;
+ goto bail;
+ }
+ if (cmd == CIOCKEY) {
+ /* NB: crypto core enforces s/w driver use */
+ kop.crk_crid =
+ CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
+ }
+ error = cryptodev_key(&kop);
+ if(copy_to_user((void*)arg, &kop, sizeof(kop))) {
+ dprintk("%s(CIOCGKEY) - bad return copy\n", __FUNCTION__);
+ error = EFAULT;
+ goto bail;
+ }
+ break;
+ case CIOCASYMFEAT:
+ dprintk("%s(CIOCASYMFEAT)\n", __FUNCTION__);
+ if (!crypto_userasymcrypto) {
+ /*
+ * NB: if user asym crypto operations are
+ * not permitted return "no algorithms"
+ * so well-behaved applications will just
+ * fall back to doing them in software.
+ */
+ feat = 0;
+ } else
+ error = crypto_getfeat(&feat);
+ if (!error) {
+ error = copy_to_user((void*)arg, &feat, sizeof(feat));
+ }
+ break;
+ case CIOCFINDDEV:
+ if (copy_from_user(&fop, (void*)arg, sizeof(fop))) {
+ dprintk("%s(CIOCFINDDEV) - bad copy\n", __FUNCTION__);
+ error = EFAULT;
+ goto bail;
+ }
+ error = cryptodev_find(&fop);
+ if (copy_to_user((void*)arg, &fop, sizeof(fop))) {
+ dprintk("%s(CIOCFINDDEV) - bad return copy\n", __FUNCTION__);
+ error = EFAULT;
+ goto bail;
+ }
+ break;
+ default:
+ dprintk("%s(unknown ioctl 0x%x)\n", __FUNCTION__, cmd);
+ error = EINVAL;
+ break;
+ }
+ return(-error);
+}
+
+#ifdef HAVE_UNLOCKED_IOCTL
+static long
+cryptodev_unlocked_ioctl(
+ struct file *filp,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ return cryptodev_ioctl(NULL, filp, cmd, arg);
+}
+#endif
+
+static int
+cryptodev_open(struct inode *inode, struct file *filp)
+{
+ struct fcrypt *fcr;
+
+ dprintk("%s()\n", __FUNCTION__);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+ /*
+ * On 2.6.35 and later, private_data points to a miscdevice structure;
+ * we override it, which is currently safe to do.
+ */
+ if (filp->private_data) {
+ printk("cryptodev: Private data already exists - %p!\n", filp->private_data);
+ return(-ENODEV);
+ }
+#endif
+
+ fcr = kmalloc(sizeof(*fcr), GFP_KERNEL);
+ if (!fcr) {
+ dprintk("%s() - malloc failed\n", __FUNCTION__);
+ return(-ENOMEM);
+ }
+ memset(fcr, 0, sizeof(*fcr));
+
+ INIT_LIST_HEAD(&fcr->csessions);
+ filp->private_data = fcr;
+ return(0);
+}
+
+static int
+cryptodev_release(struct inode *inode, struct file *filp)
+{
+ struct fcrypt *fcr = filp->private_data;
+ struct csession *cse, *tmp;
+
+ dprintk("%s()\n", __FUNCTION__);
+ if (!fcr) {
+ printk("cryptodev: No private data on release\n");
+ return(0);
+ }
+
+ list_for_each_entry_safe(cse, tmp, &fcr->csessions, list) {
+ list_del(&cse->list);
+ (void)csefree(cse);
+ }
+ filp->private_data = NULL;
+ kfree(fcr);
+ return(0);
+}
+
+static struct file_operations cryptodev_fops = {
+ .owner = THIS_MODULE,
+ .open = cryptodev_open,
+ .release = cryptodev_release,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+ .ioctl = cryptodev_ioctl,
+#endif
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = cryptodev_unlocked_ioctl,
+#endif
+};
+
+static struct miscdevice cryptodev = {
+ .minor = CRYPTODEV_MINOR,
+ .name = "crypto",
+ .fops = &cryptodev_fops,
+};
+
+static int __init
+cryptodev_init(void)
+{
+ int rc;
+
+ dprintk("%s(%p)\n", __FUNCTION__, cryptodev_init);
+ rc = misc_register(&cryptodev);
+ if (rc) {
+ printk(KERN_ERR "cryptodev: registration of /dev/crypto failed\n");
+ return(rc);
+ }
+
+ return(0);
+}
+
+static void __exit
+cryptodev_exit(void)
+{
+ dprintk("%s()\n", __FUNCTION__);
+ misc_deregister(&cryptodev);
+}
+
+module_init(cryptodev_init);
+module_exit(cryptodev_exit);
+
+MODULE_LICENSE("BSD");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("Cryptodev (user interface to OCF)");
diff --git a/target/linux/generic/files/crypto/ocf/cryptodev.h b/target/linux/generic/files/crypto/ocf/cryptodev.h
new file mode 100644
index 000000000..cca0ec822
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/cryptodev.h
@@ -0,0 +1,480 @@
+/* $FreeBSD: src/sys/opencrypto/cryptodev.h,v 1.25 2007/05/09 19:37:02 gnn Exp $ */
+/* $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $ */
+
+/*-
+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ * The license and original author are listed below.
+ *
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ *
+ * Copyright (c) 2001 Theo de Raadt
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+#ifndef _CRYPTO_CRYPTO_H_
+#define _CRYPTO_CRYPTO_H_
+
+/* Some initial values */
+#define CRYPTO_DRIVERS_INITIAL 4
+#define CRYPTO_SW_SESSIONS 32
+
+/* Hash values */
+#define NULL_HASH_LEN 0
+#define MD5_HASH_LEN 16
+#define SHA1_HASH_LEN 20
+#define RIPEMD160_HASH_LEN 20
+#define SHA2_256_HASH_LEN 32
+#define SHA2_384_HASH_LEN 48
+#define SHA2_512_HASH_LEN 64
+#define MD5_KPDK_HASH_LEN 16
+#define SHA1_KPDK_HASH_LEN 20
+/* Maximum hash algorithm result length */
+#define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */
+
+/* HMAC values */
+#define NULL_HMAC_BLOCK_LEN 1
+#define MD5_HMAC_BLOCK_LEN 64
+#define SHA1_HMAC_BLOCK_LEN 64
+#define RIPEMD160_HMAC_BLOCK_LEN 64
+#define SHA2_256_HMAC_BLOCK_LEN 64
+#define SHA2_384_HMAC_BLOCK_LEN 128
+#define SHA2_512_HMAC_BLOCK_LEN 128
+/* Maximum HMAC block length */
+#define HMAC_MAX_BLOCK_LEN SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */
+#define HMAC_IPAD_VAL 0x36
+#define HMAC_OPAD_VAL 0x5C
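+
+/*
+ * The two pad values above implement the standard HMAC construction,
+ * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), with the key
+ * zero-extended to the hash block length before the XOR; OCF drivers
+ * typically precompute the two padded-key hash states once per session
+ * rather than redoing this for every request.
+ */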
+
+/* Encryption algorithm block sizes */
+#define NULL_BLOCK_LEN 1
+#define DES_BLOCK_LEN 8
+#define DES3_BLOCK_LEN 8
+#define BLOWFISH_BLOCK_LEN 8
+#define SKIPJACK_BLOCK_LEN 8
+#define CAST128_BLOCK_LEN 8
+#define RIJNDAEL128_BLOCK_LEN 16
+#define AES_BLOCK_LEN RIJNDAEL128_BLOCK_LEN
+#define CAMELLIA_BLOCK_LEN 16
+#define ARC4_BLOCK_LEN 1
+#define EALG_MAX_BLOCK_LEN AES_BLOCK_LEN /* Keep this updated */
+
+/* Encryption algorithm min and max key sizes */
+#define NULL_MIN_KEY_LEN 0
+#define NULL_MAX_KEY_LEN 0
+#define DES_MIN_KEY_LEN 8
+#define DES_MAX_KEY_LEN 8
+#define DES3_MIN_KEY_LEN 24
+#define DES3_MAX_KEY_LEN 24
+#define BLOWFISH_MIN_KEY_LEN 4
+#define BLOWFISH_MAX_KEY_LEN 56
+#define SKIPJACK_MIN_KEY_LEN 10
+#define SKIPJACK_MAX_KEY_LEN 10
+#define CAST128_MIN_KEY_LEN 5
+#define CAST128_MAX_KEY_LEN 16
+#define RIJNDAEL128_MIN_KEY_LEN 16
+#define RIJNDAEL128_MAX_KEY_LEN 32
+#define AES_MIN_KEY_LEN RIJNDAEL128_MIN_KEY_LEN
+#define AES_MAX_KEY_LEN RIJNDAEL128_MAX_KEY_LEN
+#define CAMELLIA_MIN_KEY_LEN 16
+#define CAMELLIA_MAX_KEY_LEN 32
+#define ARC4_MIN_KEY_LEN 1
+#define ARC4_MAX_KEY_LEN 256
+
+/* Max size of data that can be processed */
+#define CRYPTO_MAX_DATA_LEN (64*1024 - 1)
+
+#define CRYPTO_ALGORITHM_MIN 1
+#define CRYPTO_DES_CBC 1
+#define CRYPTO_3DES_CBC 2
+#define CRYPTO_BLF_CBC 3
+#define CRYPTO_CAST_CBC 4
+#define CRYPTO_SKIPJACK_CBC 5
+#define CRYPTO_MD5_HMAC 6
+#define CRYPTO_SHA1_HMAC 7
+#define CRYPTO_RIPEMD160_HMAC 8
+#define CRYPTO_MD5_KPDK 9
+#define CRYPTO_SHA1_KPDK 10
+#define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */
+#define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */
+#define CRYPTO_ARC4 12
+#define CRYPTO_MD5 13
+#define CRYPTO_SHA1 14
+#define CRYPTO_NULL_HMAC 15
+#define CRYPTO_NULL_CBC 16
+#define CRYPTO_DEFLATE_COMP 17 /* Deflate compression algorithm */
+#define CRYPTO_SHA2_256_HMAC 18
+#define CRYPTO_SHA2_384_HMAC 19
+#define CRYPTO_SHA2_512_HMAC 20
+#define CRYPTO_CAMELLIA_CBC 21
+#define CRYPTO_SHA2_256 22
+#define CRYPTO_SHA2_384 23
+#define CRYPTO_SHA2_512 24
+#define CRYPTO_RIPEMD160 25
+#define CRYPTO_LZS_COMP 26
+#define CRYPTO_ALGORITHM_MAX 26 /* Keep updated - see above */
+
+/* Algorithm flags */
+#define CRYPTO_ALG_FLAG_SUPPORTED 0x01 /* Algorithm is supported */
+#define CRYPTO_ALG_FLAG_RNG_ENABLE 0x02 /* Has HW RNG for DH/DSA */
+#define CRYPTO_ALG_FLAG_DSA_SHA 0x04 /* Can do SHA on msg */
+
+/*
+ * Crypto driver/device flags. They can be set in the crid
+ * parameter when creating a session or submitting a key
+ * op to affect the device/driver assigned. If neither
+ * of these is specified then the crid is assumed to hold
+ * the driver id of an existing (and suitable) device that
+ * must be used to satisfy the request.
+ */
+#define CRYPTO_FLAG_HARDWARE 0x01000000 /* hardware accelerated */
+#define CRYPTO_FLAG_SOFTWARE 0x02000000 /* software implementation */
+
+/* NB: deprecated */
+struct session_op {
+ u_int32_t cipher; /* ie. CRYPTO_DES_CBC */
+ u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */
+
+ u_int32_t keylen; /* cipher key */
+ caddr_t key;
+ int mackeylen; /* mac key */
+ caddr_t mackey;
+
+ u_int32_t ses; /* returns: session # */
+};
+
+struct session2_op {
+ u_int32_t cipher; /* ie. CRYPTO_DES_CBC */
+ u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */
+
+ u_int32_t keylen; /* cipher key */
+ caddr_t key;
+ int mackeylen; /* mac key */
+ caddr_t mackey;
+
+ u_int32_t ses; /* returns: session # */
+ int crid; /* driver id + flags (rw) */
+ int pad[4]; /* for future expansion */
+};
+
+struct crypt_op {
+ u_int32_t ses;
+ u_int16_t op; /* i.e. COP_ENCRYPT */
+#define COP_NONE 0
+#define COP_ENCRYPT 1
+#define COP_DECRYPT 2
+ u_int16_t flags;
+#define COP_F_BATCH 0x0008 /* Batch op if possible */
+ u_int len;
+ caddr_t src, dst; /* become iov[] inside kernel */
+ caddr_t mac; /* must be big enough for chosen MAC */
+ caddr_t iv;
+};
+
+/*
+ * Parameters for looking up a crypto driver/device by
+ * device name or by id. The latter are returned for
+ * created sessions (crid) and completed key operations.
+ */
+struct crypt_find_op {
+ int crid; /* driver id + flags */
+ char name[32]; /* device/driver name */
+};
+
+/* bignum parameter, in packed bytes, ... */
+struct crparam {
+ caddr_t crp_p;
+ u_int crp_nbits;
+};
+
+#define CRK_MAXPARAM 8
+
+struct crypt_kop {
+ u_int crk_op; /* ie. CRK_MOD_EXP or other */
+ u_int crk_status; /* return status */
+ u_short crk_iparams; /* # of input parameters */
+ u_short crk_oparams; /* # of output parameters */
+ u_int crk_crid; /* NB: only used by CIOCKEY2 (rw) */
+ struct crparam crk_param[CRK_MAXPARAM];
+};
+#define CRK_ALGORITM_MIN 0
+#define CRK_MOD_EXP 0
+#define CRK_MOD_EXP_CRT 1
+#define CRK_DSA_SIGN 2
+#define CRK_DSA_VERIFY 3
+#define CRK_DH_COMPUTE_KEY 4
+#define CRK_ALGORITHM_MAX 4 /* Keep updated - see below */
+
+#define CRF_MOD_EXP (1 << CRK_MOD_EXP)
+#define CRF_MOD_EXP_CRT (1 << CRK_MOD_EXP_CRT)
+#define CRF_DSA_SIGN (1 << CRK_DSA_SIGN)
+#define CRF_DSA_VERIFY (1 << CRK_DSA_VERIFY)
+#define CRF_DH_COMPUTE_KEY (1 << CRK_DH_COMPUTE_KEY)
+
+/*
+ * done against open of /dev/crypto, to get a cloned descriptor.
+ * Please use F_SETFD against the cloned descriptor.
+ */
+#define CRIOGET _IOWR('c', 100, u_int32_t)
+#define CRIOASYMFEAT CIOCASYMFEAT
+#define CRIOFINDDEV CIOCFINDDEV
+
+/* the following are done against the cloned descriptor */
+#define CIOCGSESSION _IOWR('c', 101, struct session_op)
+#define CIOCFSESSION _IOW('c', 102, u_int32_t)
+#define CIOCCRYPT _IOWR('c', 103, struct crypt_op)
+#define CIOCKEY _IOWR('c', 104, struct crypt_kop)
+#define CIOCASYMFEAT _IOR('c', 105, u_int32_t)
+#define CIOCGSESSION2 _IOWR('c', 106, struct session2_op)
+#define CIOCKEY2 _IOWR('c', 107, struct crypt_kop)
+#define CIOCFINDDEV _IOWR('c', 108, struct crypt_find_op)
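+
+/*
+ * Typical userland usage, for illustration only: a hypothetical AES-CBC
+ * session where key, iv, plaintext and ciphertext are caller-provided
+ * buffers and all error handling is omitted.
+ *
+ *	int fd = open("/dev/crypto", O_RDWR), cfd;
+ *	ioctl(fd, CRIOGET, &cfd);		(get the cloned descriptor)
+ *	fcntl(cfd, F_SETFD, FD_CLOEXEC);	(as recommended above)
+ *
+ *	struct session_op sop = { 0 };
+ *	sop.cipher = CRYPTO_AES_CBC;
+ *	sop.keylen = 16;			(AES-128)
+ *	sop.key    = (caddr_t) key;
+ *	ioctl(cfd, CIOCGSESSION, &sop);		(sop.ses holds the session #)
+ *
+ *	struct crypt_op cop = { 0 };
+ *	cop.ses = sop.ses;
+ *	cop.op  = COP_ENCRYPT;
+ *	cop.len = 64;				(multiple of AES_BLOCK_LEN)
+ *	cop.src = (caddr_t) plaintext;
+ *	cop.dst = (caddr_t) ciphertext;
+ *	cop.iv  = (caddr_t) iv;			(AES_BLOCK_LEN bytes)
+ *	ioctl(cfd, CIOCCRYPT, &cop);
+ *
+ *	ioctl(cfd, CIOCFSESSION, &sop.ses);
+ */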
+
+struct cryptotstat {
+ struct timespec acc; /* total accumulated time */
+ struct timespec min; /* min time */
+ struct timespec max; /* max time */
+ u_int32_t count; /* number of observations */
+};
+
+struct cryptostats {
+ u_int32_t cs_ops; /* symmetric crypto ops submitted */
+ u_int32_t cs_errs; /* symmetric crypto ops that failed */
+ u_int32_t cs_kops; /* asymmetric/key ops submitted */
+ u_int32_t cs_kerrs; /* asymmetric/key ops that failed */
+ u_int32_t cs_intrs; /* crypto swi thread activations */
+ u_int32_t cs_rets; /* crypto return thread activations */
+ u_int32_t cs_blocks; /* symmetric op driver block */
+ u_int32_t cs_kblocks; /* asymmetric op driver block */
+ /*
+ * When CRYPTO_TIMING is defined at compile time and the
+ * sysctl debug.crypto is set to 1, the crypto system will
+ * accumulate statistics about how long it takes to process
+ * crypto requests at various points during processing.
+ */
+ struct cryptotstat cs_invoke; /* crypto_dispatch -> crypto_invoke */
+ struct cryptotstat cs_done; /* crypto_invoke -> crypto_done */
+ struct cryptotstat cs_cb; /* crypto_done -> callback */
+ struct cryptotstat cs_finis; /* callback -> callback return */
+
+ u_int32_t cs_drops; /* crypto ops dropped due to congestion */
+};
+
+#ifdef __KERNEL__
+
+/* Standard initialization structure beginning */
+struct cryptoini {
+ int cri_alg; /* Algorithm to use */
+ int cri_klen; /* Key length, in bits */
+ int cri_mlen; /* Number of bytes we want from the
+ entire hash. 0 means all. */
+ caddr_t cri_key; /* key to use */
+ u_int8_t cri_iv[EALG_MAX_BLOCK_LEN]; /* IV to use */
+ struct cryptoini *cri_next;
+};
+
+/* Describe boundaries of a single crypto operation */
+struct cryptodesc {
+ int crd_skip; /* How many bytes to ignore from start */
+ int crd_len; /* How many bytes to process */
+ int crd_inject; /* Where to inject results, if applicable */
+ int crd_flags;
+
+#define CRD_F_ENCRYPT 0x01 /* Set when doing encryption */
+#define CRD_F_IV_PRESENT 0x02 /* When encrypting, IV is already in
+ place, so don't copy. */
+#define CRD_F_IV_EXPLICIT 0x04 /* IV explicitly provided */
+#define CRD_F_DSA_SHA_NEEDED 0x08 /* Compute SHA-1 of buffer for DSA */
+#define CRD_F_KEY_EXPLICIT 0x10 /* Key explicitly provided */
+#define CRD_F_COMP 0x0f /* Set when doing compression */
+
+ struct cryptoini CRD_INI; /* Initialization/context data */
+#define crd_iv CRD_INI.cri_iv
+#define crd_key CRD_INI.cri_key
+#define crd_alg CRD_INI.cri_alg
+#define crd_klen CRD_INI.cri_klen
+#define crd_mlen CRD_INI.cri_mlen
+
+ struct cryptodesc *crd_next;
+};
+
+/* Structure describing complete operation */
+struct cryptop {
+ struct list_head crp_next;
+ wait_queue_head_t crp_waitq;
+
+ u_int64_t crp_sid; /* Session ID */
+ int crp_ilen; /* Input data total length */
+ int crp_olen; /* Result total length */
+
+ int crp_etype; /*
+ * Error type (zero means no error).
+ * All error codes except EAGAIN
+ * indicate possible data corruption (as in,
+ * the data have been touched). On all
+ * errors, the crp_sid may have changed
+ * (reset to a new one), so the caller
+ * should always check and use the new
+ * value on future requests.
+ */
+ int crp_flags;
+
+#define CRYPTO_F_SKBUF 0x0001 /* Input/output are skbuf chains */
+#define CRYPTO_F_IOV 0x0002 /* Input/output are uio */
+#define CRYPTO_F_REL 0x0004 /* Must return data in same place */
+#define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */
+#define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */
+#define CRYPTO_F_DONE 0x0020 /* Operation completed */
+#define CRYPTO_F_CBIFSYNC 0x0040 /* Do CBIMM if op is synchronous */
+
+ caddr_t crp_buf; /* Data to be processed */
+ caddr_t crp_opaque; /* Opaque pointer, passed along */
+ struct cryptodesc *crp_desc; /* Linked list of processing descriptors */
+
+ int (*crp_callback)(struct cryptop *); /* Callback function */
+};
+
+#define CRYPTO_BUF_CONTIG 0x0
+#define CRYPTO_BUF_IOV 0x1
+#define CRYPTO_BUF_SKBUF 0x2
+
+#define CRYPTO_OP_DECRYPT 0x0
+#define CRYPTO_OP_ENCRYPT 0x1
+
+/*
+ * Hints passed to process methods.
+ */
+#define CRYPTO_HINT_MORE 0x1 /* more ops coming shortly */
+
+struct cryptkop {
+ struct list_head krp_next;
+ wait_queue_head_t krp_waitq;
+
+ int krp_flags;
+#define CRYPTO_KF_DONE 0x0001 /* Operation completed */
+#define CRYPTO_KF_CBIMM 0x0002 /* Do callback immediately */
+
+ u_int krp_op; /* ie. CRK_MOD_EXP or other */
+ u_int krp_status; /* return status */
+ u_short krp_iparams; /* # of input parameters */
+ u_short krp_oparams; /* # of output parameters */
+ u_int krp_crid; /* desired device, etc. */
+ u_int32_t krp_hid;
+ struct crparam krp_param[CRK_MAXPARAM]; /* kvm */
+ int (*krp_callback)(struct cryptkop *);
+};
+
+#include <ocf-compat.h>
+
+/*
+ * Session ids are 64 bits. The lower 32 bits contain a "local id" which
+ * is a driver-private session identifier. The upper 32 bits contain a
+ * "hardware id" used by the core crypto code to identify the driver and
+ * a copy of the driver's capabilities that can be used by client code to
+ * optimize operation.
+ */
+#define CRYPTO_SESID2HID(_sid) (((_sid) >> 32) & 0x00ffffff)
+#define CRYPTO_SESID2CAPS(_sid) (((_sid) >> 32) & 0xff000000)
+#define CRYPTO_SESID2LID(_sid) (((u_int32_t) (_sid)) & 0xffffffff)
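+
+/*
+ * Worked example (values invented for illustration): a sid of
+ * 0x0500000500000002 gives CRYPTO_SESID2LID(sid) == 0x00000002,
+ * CRYPTO_SESID2HID(sid) == 0x00000005 and CRYPTO_SESID2CAPS(sid) ==
+ * 0x05000000, i.e. CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC below.
+ */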
+
+extern int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard);
+extern int crypto_freesession(u_int64_t sid);
+#define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE
+#define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE
+#define CRYPTOCAP_F_SYNC 0x04000000 /* operates synchronously */
+extern int32_t crypto_get_driverid(device_t dev, int flags);
+extern int crypto_find_driver(const char *);
+extern device_t crypto_find_device_byhid(int hid);
+extern int crypto_getcaps(int hid);
+extern int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
+ u_int32_t flags);
+extern int crypto_kregister(u_int32_t, int, u_int32_t);
+extern int crypto_unregister(u_int32_t driverid, int alg);
+extern int crypto_unregister_all(u_int32_t driverid);
+extern int crypto_dispatch(struct cryptop *crp);
+extern int crypto_kdispatch(struct cryptkop *);
+#define CRYPTO_SYMQ 0x1
+#define CRYPTO_ASYMQ 0x2
+extern int crypto_unblock(u_int32_t, int);
+extern void crypto_done(struct cryptop *crp);
+extern void crypto_kdone(struct cryptkop *);
+extern int crypto_getfeat(int *);
+
+extern void crypto_freereq(struct cryptop *crp);
+extern struct cryptop *crypto_getreq(int num);
+
+extern int crypto_usercrypto; /* userland may do crypto requests */
+extern int crypto_userasymcrypto; /* userland may do asym crypto reqs */
+extern int crypto_devallowsoft; /* only use hardware crypto */
+
+/*
+ * random number support, crypto_unregister_all will unregister
+ */
+extern int crypto_rregister(u_int32_t driverid,
+ int (*read_random)(void *arg, u_int32_t *buf, int len), void *arg);
+extern int crypto_runregister_all(u_int32_t driverid);
+
+/*
+ * Crypto-related utility routines used mainly by drivers.
+ *
+ * XXX these don't really belong here; but for now they're
+ * kept apart from the rest of the system.
+ */
+struct uio;
+extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
+extern void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp);
+extern struct iovec *cuio_getptr(struct uio *uio, int loc, int *off);
+
+extern void crypto_copyback(int flags, caddr_t buf, int off, int size,
+ caddr_t in);
+extern void crypto_copydata(int flags, caddr_t buf, int off, int size,
+ caddr_t out);
+extern int crypto_apply(int flags, caddr_t buf, int off, int len,
+ int (*f)(void *, void *, u_int), void *arg);
+
+#endif /* __KERNEL__ */
+#endif /* _CRYPTO_CRYPTO_H_ */
diff --git a/target/linux/generic/files/crypto/ocf/cryptosoft.c b/target/linux/generic/files/crypto/ocf/cryptosoft.c
new file mode 100644
index 000000000..aa2383d1f
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/cryptosoft.c
@@ -0,0 +1,1322 @@
+/*
+ * An OCF module that uses the linux kernel cryptoapi, based on the
+ * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
+ * but is mostly unrecognisable.
+ *
+ * Written by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2004-2011 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ * 1. distributions of this source code include the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ *
+ * 2. distributions in binary form include the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other associated materials;
+ *
+ * 3. the copyright holder's name is not used to endorse products
+ * built using this software without specific written permission.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this product
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ * ---------------------------------------------------------------------------
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/crypto.h>
+#include <linux/mm.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
+#include <linux/scatterlist.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
+#include <crypto/hash.h>
+#endif
+
+#include <cryptodev.h>
+#include <uio.h>
+
+struct {
+ softc_device_decl sc_dev;
+} swcr_softc;
+
+#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
+
+#define SW_TYPE_CIPHER 0x01
+#define SW_TYPE_HMAC 0x02
+#define SW_TYPE_HASH 0x04
+#define SW_TYPE_COMP 0x08
+#define SW_TYPE_BLKCIPHER 0x10
+#define SW_TYPE_ALG_MASK 0x1f
+
+#define SW_TYPE_ASYNC 0x8000
+
+#define SW_TYPE_INUSE 0x10000000
+
+/* We change some of the above if we have an async interface */
+
+#define SW_TYPE_ALG_AMASK (SW_TYPE_ALG_MASK | SW_TYPE_ASYNC)
+
+#define SW_TYPE_ABLKCIPHER (SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC)
+#define SW_TYPE_AHASH (SW_TYPE_HASH | SW_TYPE_ASYNC)
+#define SW_TYPE_AHMAC (SW_TYPE_HMAC | SW_TYPE_ASYNC)
+
+#define SCATTERLIST_MAX 16
+
+struct swcr_data {
+ struct work_struct workq;
+ int sw_type;
+ int sw_alg;
+ struct crypto_tfm *sw_tfm;
+ spinlock_t sw_tfm_lock;
+ union {
+ struct {
+ char *sw_key;
+ int sw_klen;
+ int sw_mlen;
+ } hmac;
+ void *sw_comp_buf;
+ } u;
+ struct swcr_data *sw_next;
+};
+
+struct swcr_req {
+ struct swcr_data *sw_head;
+ struct swcr_data *sw;
+ struct cryptop *crp;
+ struct cryptodesc *crd;
+ struct scatterlist sg[SCATTERLIST_MAX];
+ unsigned char iv[EALG_MAX_BLOCK_LEN];
+ char result[HASH_MAX_LEN];
+ void *crypto_req;
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+static kmem_cache_t *swcr_req_cache;
+#else
+static struct kmem_cache *swcr_req_cache;
+#endif
+
+#ifndef CRYPTO_TFM_MODE_CBC
+/*
+ * As of linux-2.6.21 this is no longer defined, and presumably no longer
+ * needs to be passed into the crypto core code.
+ */
+#define CRYPTO_TFM_MODE_CBC 0
+#define CRYPTO_TFM_MODE_ECB 0
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ /*
+ * Linux 2.6.19 introduced a new Crypto API; set up macros to map the
+ * new API onto the old one.
+ */
+
+ /* Symmetric/Block Cipher */
+ struct blkcipher_desc
+ {
+ struct crypto_tfm *tfm;
+ void *info;
+ };
+ #define ecb(X) #X , CRYPTO_TFM_MODE_ECB
+ #define cbc(X) #X , CRYPTO_TFM_MODE_CBC
+ #define crypto_has_blkcipher(X, Y, Z) crypto_alg_available(X, 0)
+ #define crypto_blkcipher_cast(X) X
+ #define crypto_blkcipher_tfm(X) X
+ #define crypto_alloc_blkcipher(X, Y, Z) crypto_alloc_tfm(X, mode)
+ #define crypto_blkcipher_ivsize(X) crypto_tfm_alg_ivsize(X)
+ #define crypto_blkcipher_blocksize(X) crypto_tfm_alg_blocksize(X)
+ #define crypto_blkcipher_setkey(X, Y, Z) crypto_cipher_setkey(X, Y, Z)
+ #define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
+ crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
+ #define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
+ crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
+ #define crypto_blkcipher_set_flags(x, y) /* nop */
+ #define crypto_free_blkcipher(x) crypto_free_tfm(x)
+ #define crypto_free_comp crypto_free_tfm
+ #define crypto_free_hash crypto_free_tfm
+
+ /* Hash/HMAC/Digest */
+ struct hash_desc
+ {
+ struct crypto_tfm *tfm;
+ };
+ #define hmac(X) #X , 0
+ #define crypto_has_hash(X, Y, Z) crypto_alg_available(X, 0)
+ #define crypto_hash_cast(X) X
+ #define crypto_hash_tfm(X) X
+ #define crypto_alloc_hash(X, Y, Z) crypto_alloc_tfm(X, mode)
+ #define crypto_hash_digestsize(X) crypto_tfm_alg_digestsize(X)
+ #define crypto_hash_digest(W, X, Y, Z) \
+ crypto_digest_digest((W)->tfm, X, sg_num, Z)
+
+ /* Asymmetric Cipher */
+ #define crypto_has_cipher(X, Y, Z) crypto_alg_available(X, 0)
+
+ /* Compression */
+ #define crypto_has_comp(X, Y, Z) crypto_alg_available(X, 0)
+ #define crypto_comp_tfm(X) X
+ #define crypto_comp_cast(X) X
+ #define crypto_alloc_comp(X, Y, Z) crypto_alloc_tfm(X, mode)
+ #define plain(X) #X , 0
+#else
+ #define ecb(X) "ecb(" #X ")" , 0
+ #define cbc(X) "cbc(" #X ")" , 0
+ #define hmac(X) "hmac(" #X ")" , 0
+ #define plain(X) #X , 0
+#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
+/* no ablkcipher in older kernels */
+#define crypto_alloc_ablkcipher(a,b,c) (NULL)
+#define crypto_ablkcipher_tfm(x) ((struct crypto_tfm *)(x))
+#define crypto_ablkcipher_set_flags(a, b) /* nop */
+#define crypto_ablkcipher_setkey(x, y, z) (-EINVAL)
+#define crypto_has_ablkcipher(a,b,c) (0)
+#else
+#define HAVE_ABLKCIPHER
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
+/* no ahash in older kernels */
+#define crypto_ahash_tfm(x) ((struct crypto_tfm *)(x))
+#define crypto_alloc_ahash(a,b,c) (NULL)
+#define crypto_ahash_digestsize(x) 0
+#else
+#define HAVE_AHASH
+#endif
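+
+/* HAVE_ABLKCIPHER and HAVE_AHASH gate the async cipher/hash code paths below */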
+
+struct crypto_details {
+ char *alg_name;
+ int mode;
+ int sw_type;
+};
+
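+/*
+ * Table mapping OCF algorithm numbers to Linux crypto API algorithm names.
+ * It is indexed by the OCF CRYPTO_* id; entries with no name are reported
+ * as unsupported when the driver registers its algorithms.
+ */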
+static struct crypto_details crypto_details[] = {
+ [CRYPTO_DES_CBC] = { cbc(des), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_3DES_CBC] = { cbc(des3_ede), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_BLF_CBC] = { cbc(blowfish), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_CAST_CBC] = { cbc(cast5), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_SKIPJACK_CBC] = { cbc(skipjack), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_MD5_HMAC] = { hmac(md5), SW_TYPE_HMAC, },
+ [CRYPTO_SHA1_HMAC] = { hmac(sha1), SW_TYPE_HMAC, },
+ [CRYPTO_RIPEMD160_HMAC] = { hmac(ripemd160), SW_TYPE_HMAC, },
+ [CRYPTO_MD5_KPDK] = { plain(md5-kpdk), SW_TYPE_HASH, },
+ [CRYPTO_SHA1_KPDK] = { plain(sha1-kpdk), SW_TYPE_HASH, },
+ [CRYPTO_AES_CBC] = { cbc(aes), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_ARC4] = { ecb(arc4), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_MD5] = { plain(md5), SW_TYPE_HASH, },
+ [CRYPTO_SHA1] = { plain(sha1), SW_TYPE_HASH, },
+ [CRYPTO_NULL_HMAC] = { hmac(digest_null), SW_TYPE_HMAC, },
+ [CRYPTO_NULL_CBC] = { cbc(cipher_null), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_DEFLATE_COMP] = { plain(deflate), SW_TYPE_COMP, },
+ [CRYPTO_SHA2_256_HMAC] = { hmac(sha256), SW_TYPE_HMAC, },
+ [CRYPTO_SHA2_384_HMAC] = { hmac(sha384), SW_TYPE_HMAC, },
+ [CRYPTO_SHA2_512_HMAC] = { hmac(sha512), SW_TYPE_HMAC, },
+ [CRYPTO_CAMELLIA_CBC] = { cbc(camellia), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_SHA2_256] = { plain(sha256), SW_TYPE_HASH, },
+ [CRYPTO_SHA2_384] = { plain(sha384), SW_TYPE_HASH, },
+ [CRYPTO_SHA2_512] = { plain(sha512), SW_TYPE_HASH, },
+ [CRYPTO_RIPEMD160] = { plain(ripemd160), SW_TYPE_HASH, },
+};
+
+int32_t swcr_id = -1;
+module_param(swcr_id, int, 0444);
+MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");
+
+int swcr_fail_if_compression_grows = 1;
+module_param(swcr_fail_if_compression_grows, int, 0644);
+MODULE_PARM_DESC(swcr_fail_if_compression_grows,
+ "Treat compression that results in more data as a failure");
+
+int swcr_no_ahash = 0;
+module_param(swcr_no_ahash, int, 0644);
+MODULE_PARM_DESC(swcr_no_ahash,
+ "Do not use async hash/hmac even if available");
+
+int swcr_no_ablk = 0;
+module_param(swcr_no_ablk, int, 0644);
+MODULE_PARM_DESC(swcr_no_ablk,
+ "Do not use async blk ciphers even if available");
+
+static struct swcr_data **swcr_sessions = NULL;
+static u_int32_t swcr_sesnum = 0;
+
+static int swcr_process(device_t, struct cryptop *, int);
+static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
+static int swcr_freesession(device_t, u_int64_t);
+
+static device_method_t swcr_methods = {
+ /* crypto device methods */
+ DEVMETHOD(cryptodev_newsession, swcr_newsession),
+ DEVMETHOD(cryptodev_freesession,swcr_freesession),
+ DEVMETHOD(cryptodev_process, swcr_process),
+};
+
+#define debug swcr_debug
+int swcr_debug = 0;
+module_param(swcr_debug, int, 0644);
+MODULE_PARM_DESC(swcr_debug, "Enable debug");
+
+static void swcr_process_req(struct swcr_req *req);
+
+/*
+ * Some things just need to be run with user context no matter what - the
+ * kernel compression libs use vmalloc/vfree, for example. execute_later()
+ * defers such a call to a workqueue; below it is used to free compression
+ * tfms from interrupt context and to retry requests whose hash tfm is busy.
+ */
+
+typedef struct {
+ struct work_struct wq;
+ void (*func)(void *arg);
+ void *arg;
+} execute_later_t;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void
+doing_it_now(struct work_struct *wq)
+{
+ execute_later_t *w = container_of(wq, execute_later_t, wq);
+ (w->func)(w->arg);
+ kfree(w);
+}
+#else
+static void
+doing_it_now(void *arg)
+{
+ execute_later_t *w = (execute_later_t *) arg;
+ (w->func)(w->arg);
+ kfree(w);
+}
+#endif
+
+static void
+execute_later(void (fn)(void *), void *arg)
+{
+ execute_later_t *w;
+
+ w = (execute_later_t *) kmalloc(sizeof(execute_later_t), SLAB_ATOMIC);
+ if (w) {
+ memset(w, '\0', sizeof(*w)); /* zero the whole struct, not just the pointer */
+ w->func = fn;
+ w->arg = arg;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ INIT_WORK(&w->wq, doing_it_now);
+#else
+ INIT_WORK(&w->wq, doing_it_now, w);
+#endif
+ schedule_work(&w->wq);
+ }
+}
+
+/*
+ * Generate a new software session.
+ */
+static int
+swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
+{
+ struct swcr_data **swd;
+ u_int32_t i;
+ int error;
+ char *algo;
+ int mode;
+
+ dprintk("%s()\n", __FUNCTION__);
+ if (sid == NULL || cri == NULL) {
+ dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ if (swcr_sessions) {
+ for (i = 1; i < swcr_sesnum; i++)
+ if (swcr_sessions[i] == NULL)
+ break;
+ } else
+ i = 1; /* NB: to silence compiler warning */
+
+ if (swcr_sessions == NULL || i == swcr_sesnum) {
+ if (swcr_sessions == NULL) {
+ i = 1; /* We leave swcr_sessions[0] empty */
+ swcr_sesnum = CRYPTO_SW_SESSIONS;
+ } else
+ swcr_sesnum *= 2;
+
+ swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
+ if (swd == NULL) {
+ /* Reset session number */
+ if (swcr_sesnum == CRYPTO_SW_SESSIONS)
+ swcr_sesnum = 0;
+ else
+ swcr_sesnum /= 2;
+ dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+ return ENOBUFS;
+ }
+ memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));
+
+ /* Copy existing sessions */
+ if (swcr_sessions) {
+ memcpy(swd, swcr_sessions,
+ (swcr_sesnum / 2) * sizeof(struct swcr_data *));
+ kfree(swcr_sessions);
+ }
+
+ swcr_sessions = swd;
+ }
+
+ swd = &swcr_sessions[i];
+ *sid = i;
+
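+ /* allocate one swcr_data per cryptoini in the chain, linked via sw_next */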
+ while (cri) {
+ *swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
+ SLAB_ATOMIC);
+ if (*swd == NULL) {
+ swcr_freesession(NULL, i);
+ dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+ return ENOBUFS;
+ }
+ memset(*swd, 0, sizeof(struct swcr_data));
+
+ if (cri->cri_alg < 0 ||
+ cri->cri_alg >= sizeof(crypto_details)/sizeof(crypto_details[0])) {
+ printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
+ swcr_freesession(NULL, i);
+ return EINVAL;
+ }
+
+ algo = crypto_details[cri->cri_alg].alg_name;
+ if (!algo || !*algo) {
+ printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
+ swcr_freesession(NULL, i);
+ return EINVAL;
+ }
+
+ mode = crypto_details[cri->cri_alg].mode;
+ (*swd)->sw_type = crypto_details[cri->cri_alg].sw_type;
+ (*swd)->sw_alg = cri->cri_alg;
+
+ spin_lock_init(&(*swd)->sw_tfm_lock);
+
+ /* Algorithm specific configuration */
+ switch (cri->cri_alg) {
+ case CRYPTO_NULL_CBC:
+ cri->cri_klen = 0; /* make it work with crypto API */
+ break;
+ default:
+ break;
+ }
+
+ if ((*swd)->sw_type & SW_TYPE_BLKCIPHER) {
+ dprintk("%s crypto_alloc_*blkcipher(%s, 0x%x)\n", __FUNCTION__,
+ algo, mode);
+
+ /* try async first */
+ (*swd)->sw_tfm = swcr_no_ablk ? NULL :
+ crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0));
+ if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm)) {
+ dprintk("%s %s cipher is async\n", __FUNCTION__, algo);
+ (*swd)->sw_type |= SW_TYPE_ASYNC;
+ } else {
+ (*swd)->sw_tfm = crypto_blkcipher_tfm(
+ crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC));
+ if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm))
+ dprintk("%s %s cipher is sync\n", __FUNCTION__, algo);
+ }
+ if (!(*swd)->sw_tfm || IS_ERR((*swd)->sw_tfm)) {
+ int err;
+ dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s, 0x%x)\n",
+ algo, mode);
+ err = IS_ERR((*swd)->sw_tfm) ? -(PTR_ERR((*swd)->sw_tfm)) : EINVAL;
+ (*swd)->sw_tfm = NULL; /* ensure NULL */
+ swcr_freesession(NULL, i);
+ return err;
+ }
+
+ if (debug) {
+ dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
+ __FUNCTION__, cri->cri_klen, (cri->cri_klen + 7) / 8);
+ for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
+ dprintk("%s0x%x", (i % 8) ? " " : "\n ",
+ cri->cri_key[i] & 0xff);
+ dprintk("\n");
+ }
+ if ((*swd)->sw_type & SW_TYPE_ASYNC) {
+ /* OCF doesn't enforce keys */
+ crypto_ablkcipher_set_flags(
+ __crypto_ablkcipher_cast((*swd)->sw_tfm),
+ CRYPTO_TFM_REQ_WEAK_KEY);
+ error = crypto_ablkcipher_setkey(
+ __crypto_ablkcipher_cast((*swd)->sw_tfm),
+ cri->cri_key, (cri->cri_klen + 7) / 8);
+ } else {
+ /* OCF doesn't enforce keys */
+ crypto_blkcipher_set_flags(
+ crypto_blkcipher_cast((*swd)->sw_tfm),
+ CRYPTO_TFM_REQ_WEAK_KEY);
+ error = crypto_blkcipher_setkey(
+ crypto_blkcipher_cast((*swd)->sw_tfm),
+ cri->cri_key, (cri->cri_klen + 7) / 8);
+ }
+ if (error) {
+ printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
+ (*swd)->sw_tfm->crt_flags);
+ swcr_freesession(NULL, i);
+ return error;
+ }
+ } else if ((*swd)->sw_type & (SW_TYPE_HMAC | SW_TYPE_HASH)) {
+ dprintk("%s crypto_alloc_*hash(%s, 0x%x)\n", __FUNCTION__,
+ algo, mode);
+
+ /* try async first */
+ (*swd)->sw_tfm = swcr_no_ahash ? NULL :
+ crypto_ahash_tfm(crypto_alloc_ahash(algo, 0, 0));
+ if ((*swd)->sw_tfm) {
+ dprintk("%s %s hash is async\n", __FUNCTION__, algo);
+ (*swd)->sw_type |= SW_TYPE_ASYNC;
+ } else {
+ dprintk("%s %s hash is sync\n", __FUNCTION__, algo);
+ (*swd)->sw_tfm = crypto_hash_tfm(
+ crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
+ }
+
+ if (!(*swd)->sw_tfm) {
+ dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
+ algo, mode);
+ swcr_freesession(NULL, i);
+ return EINVAL;
+ }
+
+ (*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
+ (*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
+ SLAB_ATOMIC);
+ if ((*swd)->u.hmac.sw_key == NULL) {
+ swcr_freesession(NULL, i);
+ dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+ return ENOBUFS;
+ }
+ memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
+ if (cri->cri_mlen) {
+ (*swd)->u.hmac.sw_mlen = cri->cri_mlen;
+ } else if ((*swd)->sw_type & SW_TYPE_ASYNC) {
+ (*swd)->u.hmac.sw_mlen = crypto_ahash_digestsize(
+ __crypto_ahash_cast((*swd)->sw_tfm));
+ } else {
+ (*swd)->u.hmac.sw_mlen = crypto_hash_digestsize(
+ crypto_hash_cast((*swd)->sw_tfm));
+ }
+ } else if ((*swd)->sw_type & SW_TYPE_COMP) {
+ (*swd)->sw_tfm = crypto_comp_tfm(
+ crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
+ if (!(*swd)->sw_tfm) {
+ dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
+ algo, mode);
+ swcr_freesession(NULL, i);
+ return EINVAL;
+ }
+ (*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
+ if ((*swd)->u.sw_comp_buf == NULL) {
+ swcr_freesession(NULL, i);
+ dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+ return ENOBUFS;
+ }
+ } else {
+ printk("cryptosoft: Unhandled sw_type %d\n", (*swd)->sw_type);
+ swcr_freesession(NULL, i);
+ return EINVAL;
+ }
+
+ cri = cri->cri_next;
+ swd = &((*swd)->sw_next);
+ }
+ return 0;
+}
+
+/*
+ * Free a session.
+ */
+static int
+swcr_freesession(device_t dev, u_int64_t tid)
+{
+ struct swcr_data *swd;
+ u_int32_t sid = CRYPTO_SESID2LID(tid);
+
+ dprintk("%s()\n", __FUNCTION__);
+ if (sid > swcr_sesnum || swcr_sessions == NULL ||
+ swcr_sessions[sid] == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return(EINVAL);
+ }
+
+ /* Silently accept and return */
+ if (sid == 0)
+ return(0);
+
+ while ((swd = swcr_sessions[sid]) != NULL) {
+ swcr_sessions[sid] = swd->sw_next;
+ if (swd->sw_tfm) {
+ switch (swd->sw_type & SW_TYPE_ALG_AMASK) {
+#ifdef HAVE_AHASH
+ case SW_TYPE_AHMAC:
+ case SW_TYPE_AHASH:
+ crypto_free_ahash(__crypto_ahash_cast(swd->sw_tfm));
+ break;
+#endif
+#ifdef HAVE_ABLKCIPHER
+ case SW_TYPE_ABLKCIPHER:
+ crypto_free_ablkcipher(__crypto_ablkcipher_cast(swd->sw_tfm));
+ break;
+#endif
+ case SW_TYPE_BLKCIPHER:
+ crypto_free_blkcipher(crypto_blkcipher_cast(swd->sw_tfm));
+ break;
+ case SW_TYPE_HMAC:
+ case SW_TYPE_HASH:
+ crypto_free_hash(crypto_hash_cast(swd->sw_tfm));
+ break;
+ case SW_TYPE_COMP:
+ if (in_interrupt())
+ execute_later((void (*)(void *))crypto_free_comp, (void *)crypto_comp_cast(swd->sw_tfm));
+ else
+ crypto_free_comp(crypto_comp_cast(swd->sw_tfm));
+ break;
+ default:
+ crypto_free_tfm(swd->sw_tfm);
+ break;
+ }
+ swd->sw_tfm = NULL;
+ }
+ if (swd->sw_type & SW_TYPE_COMP) {
+ if (swd->u.sw_comp_buf)
+ kfree(swd->u.sw_comp_buf);
+ } else {
+ if (swd->u.hmac.sw_key)
+ kfree(swd->u.hmac.sw_key);
+ }
+ kfree(swd);
+ }
+ return 0;
+}
+
+static void swcr_process_req_complete(struct swcr_req *req)
+{
+ dprintk("%s()\n", __FUNCTION__);
+
+ if (req->sw->sw_type & SW_TYPE_INUSE) {
+ unsigned long flags;
+ spin_lock_irqsave(&req->sw->sw_tfm_lock, flags);
+ req->sw->sw_type &= ~SW_TYPE_INUSE;
+ spin_unlock_irqrestore(&req->sw->sw_tfm_lock, flags);
+ }
+
+ if (req->crp->crp_etype)
+ goto done;
+
+ switch (req->sw->sw_type & SW_TYPE_ALG_AMASK) {
+#if defined(HAVE_AHASH)
+ case SW_TYPE_AHMAC:
+ case SW_TYPE_AHASH:
+ crypto_copyback(req->crp->crp_flags, req->crp->crp_buf,
+ req->crd->crd_inject, req->sw->u.hmac.sw_mlen, req->result);
+ ahash_request_free(req->crypto_req);
+ break;
+#endif
+#if defined(HAVE_ABLKCIPHER)
+ case SW_TYPE_ABLKCIPHER:
+ ablkcipher_request_free(req->crypto_req);
+ break;
+#endif
+ case SW_TYPE_CIPHER:
+ case SW_TYPE_HMAC:
+ case SW_TYPE_HASH:
+ case SW_TYPE_COMP:
+ case SW_TYPE_BLKCIPHER:
+ break;
+ default:
+ req->crp->crp_etype = EINVAL;
+ goto done;
+ }
+
+ req->crd = req->crd->crd_next;
+ if (req->crd) {
+ swcr_process_req(req);
+ return;
+ }
+
+done:
+ dprintk("%s crypto_done %p\n", __FUNCTION__, req);
+ crypto_done(req->crp);
+ kmem_cache_free(swcr_req_cache, req);
+}
+
+#if defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH)
+static void swcr_process_callback(struct crypto_async_request *creq, int err)
+{
+ struct swcr_req *req = creq->data;
+
+ dprintk("%s()\n", __FUNCTION__);
+ if (err) {
+ if (err == -EINPROGRESS)
+ return;
+ dprintk("%s() fail %d\n", __FUNCTION__, -err);
+ req->crp->crp_etype = -err;
+ }
+
+ swcr_process_req_complete(req);
+}
+#endif /* defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) */
+
+
+static void swcr_process_req(struct swcr_req *req)
+{
+ struct swcr_data *sw;
+ struct cryptop *crp = req->crp;
+ struct cryptodesc *crd = req->crd;
+ struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
+ struct uio *uiop = (struct uio *) crp->crp_buf;
+ int sg_num, sg_len, skip;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ /*
+ * Find the crypto context.
+ *
+ * XXX Note that the logic here prevents us from having
+ * XXX the same algorithm multiple times in a session
+ * XXX (or rather, we can but it won't give us the right
+ * XXX results). To do that, we'd need some way of differentiating
+ * XXX between the various instances of an algorithm (so we can
+ * XXX locate the correct crypto context).
+ */
+ for (sw = req->sw_head; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next)
+ ;
+
+ /* No such context ? */
+ if (sw == NULL) {
+ crp->crp_etype = EINVAL;
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ goto done;
+ }
+
+ /*
+ * For some types we need to ensure only one user at a time, since state
+ * is stored in the tfm during an operation and could otherwise be corrupted.
+ */
+ switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
+#ifdef HAVE_AHASH
+ case SW_TYPE_AHMAC:
+ case SW_TYPE_AHASH:
+#endif
+ case SW_TYPE_HMAC:
+ case SW_TYPE_HASH: {
+ unsigned long flags;
+ spin_lock_irqsave(&sw->sw_tfm_lock, flags);
+ if (sw->sw_type & SW_TYPE_INUSE) {
+ spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
+ execute_later((void (*)(void *))swcr_process_req, (void *)req);
+ return;
+ }
+ sw->sw_type |= SW_TYPE_INUSE;
+ spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
+ } break;
+ }
+
+ req->sw = sw;
+ skip = crd->crd_skip;
+
+ /*
+ * set up the SG list, skipping crd_skip bytes from the start of the buffer
+ */
+ memset(req->sg, 0, sizeof(req->sg));
+ sg_init_table(req->sg, SCATTERLIST_MAX);
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ int i, len;
+
+ sg_num = 0;
+ sg_len = 0;
+
+ if (skip < skb_headlen(skb)) {
+ len = skb_headlen(skb) - skip;
+ if (len + sg_len > crd->crd_len)
+ len = crd->crd_len - sg_len;
+ sg_set_page(&req->sg[sg_num],
+ virt_to_page(skb->data + skip), len,
+ offset_in_page(skb->data + skip));
+ sg_len += len;
+ sg_num++;
+ skip = 0;
+ } else
+ skip -= skb_headlen(skb);
+
+ for (i = 0; sg_len < crd->crd_len &&
+ i < skb_shinfo(skb)->nr_frags &&
+ sg_num < SCATTERLIST_MAX; i++) {
+ if (skip < skb_shinfo(skb)->frags[i].size) {
+ len = skb_shinfo(skb)->frags[i].size - skip;
+ if (len + sg_len > crd->crd_len)
+ len = crd->crd_len - sg_len;
+ sg_set_page(&req->sg[sg_num],
+ skb_frag_page(&skb_shinfo(skb)->frags[i]),
+ len,
+ skb_shinfo(skb)->frags[i].page_offset + skip);
+ sg_len += len;
+ sg_num++;
+ skip = 0;
+ } else
+ skip -= skb_shinfo(skb)->frags[i].size;
+ }
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ int len;
+
+ sg_len = 0;
+ for (sg_num = 0; sg_len < crd->crd_len &&
+ sg_num < uiop->uio_iovcnt &&
+ sg_num < SCATTERLIST_MAX; sg_num++) {
+ if (skip <= uiop->uio_iov[sg_num].iov_len) {
+ len = uiop->uio_iov[sg_num].iov_len - skip;
+ if (len + sg_len > crd->crd_len)
+ len = crd->crd_len - sg_len;
+ sg_set_page(&req->sg[sg_num],
+ virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
+ len,
+ offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
+ sg_len += len;
+ skip = 0;
+ } else
+ skip -= uiop->uio_iov[sg_num].iov_len;
+ }
+ } else {
+ sg_len = (crp->crp_ilen - skip);
+ if (sg_len > crd->crd_len)
+ sg_len = crd->crd_len;
+ sg_set_page(&req->sg[0], virt_to_page(crp->crp_buf + skip),
+ sg_len, offset_in_page(crp->crp_buf + skip));
+ sg_num = 1;
+ }
+ if (sg_num > 0)
+ sg_mark_end(&req->sg[sg_num-1]);
+
+ switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
+
+#ifdef HAVE_AHASH
+ case SW_TYPE_AHMAC:
+ case SW_TYPE_AHASH:
+ {
+ int ret;
+
+ /* check we have room for the result */
+ if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
+ dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
+ "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
+ crd->crd_inject, sw->u.hmac.sw_mlen);
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+
+ req->crypto_req =
+ ahash_request_alloc(__crypto_ahash_cast(sw->sw_tfm),GFP_ATOMIC);
+ if (!req->crypto_req) {
+ crp->crp_etype = ENOMEM;
+ dprintk("%s,%d: ENOMEM ahash_request_alloc", __FILE__, __LINE__);
+ goto done;
+ }
+
+ ahash_request_set_callback(req->crypto_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);
+
+ memset(req->result, 0, sizeof(req->result));
+
+ if (sw->sw_type & SW_TYPE_AHMAC)
+ crypto_ahash_setkey(__crypto_ahash_cast(sw->sw_tfm),
+ sw->u.hmac.sw_key, sw->u.hmac.sw_klen);
+ ahash_request_set_crypt(req->crypto_req, req->sg, req->result, sg_len);
+ ret = crypto_ahash_digest(req->crypto_req);
+ switch (ret) {
+ case -EINPROGRESS:
+ case -EBUSY:
+ return;
+ default:
+ case 0:
+ dprintk("hash OP %s %d\n", ret ? "failed" : "success", ret);
+ crp->crp_etype = ret;
+ goto done;
+ }
+ } break;
+#endif /* HAVE_AHASH */
+
+#ifdef HAVE_ABLKCIPHER
+ case SW_TYPE_ABLKCIPHER: {
+ int ret;
+ unsigned char *ivp = req->iv;
+ int ivsize =
+ crypto_ablkcipher_ivsize(__crypto_ablkcipher_cast(sw->sw_tfm));
+
+ if (sg_len < crypto_ablkcipher_blocksize(
+ __crypto_ablkcipher_cast(sw->sw_tfm))) {
+ crp->crp_etype = EINVAL;
+ dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
+ sg_len, crypto_ablkcipher_blocksize(
+ __crypto_ablkcipher_cast(sw->sw_tfm)));
+ goto done;
+ }
+
+ if (ivsize > sizeof(req->iv)) {
+ crp->crp_etype = EINVAL;
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ goto done;
+ }
+
+ req->crypto_req = ablkcipher_request_alloc(
+ __crypto_ablkcipher_cast(sw->sw_tfm), GFP_ATOMIC);
+ if (!req->crypto_req) {
+ crp->crp_etype = ENOMEM;
+ dprintk("%s,%d: ENOMEM ablkcipher_request_alloc",
+ __FILE__, __LINE__);
+ goto done;
+ }
+
+ ablkcipher_request_set_callback(req->crypto_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);
+
+ if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
+ int i, error;
+
+ if (debug) {
+ dprintk("%s key:", __FUNCTION__);
+ for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
+ dprintk("%s0x%x", (i % 8) ? " " : "\n ",
+ crd->crd_key[i] & 0xff);
+ dprintk("\n");
+ }
+ /* OCF doesn't enforce keys */
+ crypto_ablkcipher_set_flags(__crypto_ablkcipher_cast(sw->sw_tfm),
+ CRYPTO_TFM_REQ_WEAK_KEY);
+ error = crypto_ablkcipher_setkey(
+ __crypto_ablkcipher_cast(sw->sw_tfm), crd->crd_key,
+ (crd->crd_klen + 7) / 8);
+ if (error) {
+ dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
+ error, sw->sw_tfm->crt_flags);
+ crp->crp_etype = -error;
+ }
+ }
+
+ if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
+
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT)
+ ivp = crd->crd_iv;
+ else
+ get_random_bytes(ivp, ivsize);
+ /*
+ * do we have to copy the IV back to the buffer ?
+ */
+ if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject, ivsize, (caddr_t)ivp);
+ }
+ ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
+ sg_len, ivp);
+ ret = crypto_ablkcipher_encrypt(req->crypto_req);
+
+ } else { /*decrypt */
+
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT)
+ ivp = crd->crd_iv;
+ else
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject, ivsize, (caddr_t)ivp);
+ ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
+ sg_len, ivp);
+ ret = crypto_ablkcipher_decrypt(req->crypto_req);
+ }
+
+ switch (ret) {
+ case -EINPROGRESS:
+ case -EBUSY:
+ return;
+ default:
+ case 0:
+ dprintk("crypto OP %s %d\n", ret ? "failed" : "success", ret);
+ crp->crp_etype = ret;
+ goto done;
+ }
+ } break;
+#endif /* HAVE_ABLKCIPHER */
+
+ case SW_TYPE_BLKCIPHER: {
+ unsigned char iv[EALG_MAX_BLOCK_LEN];
+ unsigned char *ivp = iv;
+ struct blkcipher_desc desc;
+ int ivsize = crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));
+
+ if (sg_len < crypto_blkcipher_blocksize(
+ crypto_blkcipher_cast(sw->sw_tfm))) {
+ crp->crp_etype = EINVAL;
+ dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
+ sg_len, crypto_blkcipher_blocksize(
+ crypto_blkcipher_cast(sw->sw_tfm)));
+ goto done;
+ }
+
+ if (ivsize > sizeof(iv)) {
+ crp->crp_etype = EINVAL;
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ goto done;
+ }
+
+ if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
+ int i, error;
+
+ if (debug) {
+ dprintk("%s key:", __FUNCTION__);
+ for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
+ dprintk("%s0x%x", (i % 8) ? " " : "\n ",
+ crd->crd_key[i] & 0xff);
+ dprintk("\n");
+ }
+ /* OCF doesn't enforce keys */
+ crypto_blkcipher_set_flags(crypto_blkcipher_cast(sw->sw_tfm),
+ CRYPTO_TFM_REQ_WEAK_KEY);
+ error = crypto_blkcipher_setkey(
+ crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
+ (crd->crd_klen + 7) / 8);
+ if (error) {
+ dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
+ error, sw->sw_tfm->crt_flags);
+ crp->crp_etype = -error;
+ }
+ }
+
+ memset(&desc, 0, sizeof(desc));
+ desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);
+
+ if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
+
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
+ ivp = crd->crd_iv;
+ } else {
+ get_random_bytes(ivp, ivsize);
+ }
+ /*
+ * do we have to copy the IV back to the buffer ?
+ */
+ if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject, ivsize, (caddr_t)ivp);
+ }
+ desc.info = ivp;
+ crypto_blkcipher_encrypt_iv(&desc, req->sg, req->sg, sg_len);
+
+ } else { /*decrypt */
+
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
+ ivp = crd->crd_iv;
+ } else {
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject, ivsize, (caddr_t)ivp);
+ }
+ desc.info = ivp;
+ crypto_blkcipher_decrypt_iv(&desc, req->sg, req->sg, sg_len);
+ }
+ } break;
+
+ case SW_TYPE_HMAC:
+ case SW_TYPE_HASH:
+ {
+ char result[HASH_MAX_LEN];
+ struct hash_desc desc;
+
+ /* check we have room for the result */
+ if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
+ dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
+ "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
+ crd->crd_inject, sw->u.hmac.sw_mlen);
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+
+ memset(&desc, 0, sizeof(desc));
+ desc.tfm = crypto_hash_cast(sw->sw_tfm);
+
+ memset(result, 0, sizeof(result));
+
+ if (sw->sw_type & SW_TYPE_HMAC) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
+ req->sg, sg_num, result);
+#else
+ crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
+ sw->u.hmac.sw_klen);
+ crypto_hash_digest(&desc, req->sg, sg_len, result);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
+
+ } else { /* SW_TYPE_HASH */
+ crypto_hash_digest(&desc, req->sg, sg_len, result);
+ }
+
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject, sw->u.hmac.sw_mlen, result);
+ }
+ break;
+
+ case SW_TYPE_COMP: {
+ void *ibuf = NULL;
+ void *obuf = sw->u.sw_comp_buf;
+ int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
+ int ret = 0;
+
+ /*
+ * we need to use an additional copy if there is more than one
+ * input chunk since the kernel comp routines do not handle
+ * SG yet. Otherwise we just use the input buffer as is.
+ * Rather than allocate another buffer we just split the tmp
+ * buffer we already have.
+ * Perhaps we should just use zlib directly ?
+ */
+ if (sg_num > 1) {
+ int blk;
+
+ ibuf = obuf;
+ for (blk = 0; blk < sg_num; blk++) {
+ memcpy(obuf, sg_virt(&req->sg[blk]),
+ req->sg[blk].length);
+ obuf += req->sg[blk].length;
+ }
+ olen -= sg_len;
+ } else
+ ibuf = sg_virt(&req->sg[0]);
+
+ if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
+ ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
+ ibuf, ilen, obuf, &olen);
+ if (!ret && olen > crd->crd_len) {
+ dprintk("cryptosoft: ERANGE compress %d into %d\n",
+ crd->crd_len, olen);
+ if (swcr_fail_if_compression_grows)
+ ret = ERANGE;
+ }
+ } else { /* decompress */
+ ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
+ ibuf, ilen, obuf, &olen);
+ if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
+ dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
+ "space for %d,at offset %d\n",
+ crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
+ ret = ETOOSMALL;
+ }
+ }
+ if (ret)
+ dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);
+
+ /*
+ * on success copy the result back; the Linux crypto API returns
+ * -errno, so convert it to a positive errno for OCF
+ */
+ crp->crp_etype = ret < 0 ? -ret : ret;
+ if (ret == 0) {
+ /* copy back the result and return its size */
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject, olen, obuf);
+ crp->crp_olen = olen;
+ }
+ } break;
+
+ default:
+ /* Unknown/unsupported algorithm */
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+
+done:
+ swcr_process_req_complete(req);
+}
+
+
+/*
+ * Process a crypto request.
+ */
+static int
+swcr_process(device_t dev, struct cryptop *crp, int hint)
+{
+ struct swcr_req *req = NULL;
+ u_int32_t lid;
+
+ dprintk("%s()\n", __FUNCTION__);
+ /* Sanity check */
+ if (crp == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ crp->crp_etype = 0;
+
+ if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+
+ lid = crp->crp_sid & 0xffffffff;
+ if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
+ swcr_sessions[lid] == NULL) {
+ crp->crp_etype = ENOENT;
+ dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
+ goto done;
+ }
+
+ /*
+ * do some error checking outside of the loop for SKB and IOV processing
+ * this leaves us with valid skb or uiop pointers for later
+ */
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
+ if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
+ printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
+ skb_shinfo(skb)->nr_frags);
+ goto done;
+ }
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ struct uio *uiop = (struct uio *) crp->crp_buf;
+ if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
+ printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
+ uiop->uio_iovcnt);
+ goto done;
+ }
+ }
+
+ /*
+ * setup a new request ready for queuing
+ */
+ req = kmem_cache_alloc(swcr_req_cache, SLAB_ATOMIC);
+ if (req == NULL) {
+ dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
+ crp->crp_etype = ENOMEM;
+ goto done;
+ }
+ memset(req, 0, sizeof(*req));
+
+ req->sw_head = swcr_sessions[lid];
+ req->crp = crp;
+ req->crd = crp->crp_desc;
+
+ swcr_process_req(req);
+ return 0;
+
+done:
+ crypto_done(crp);
+ if (req)
+ kmem_cache_free(swcr_req_cache, req);
+ return 0;
+}
+
+
+static int
+cryptosoft_init(void)
+{
+ int i, sw_type, mode;
+ char *algo;
+
+ dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);
+
+ swcr_req_cache = kmem_cache_create("cryptosoft_req",
+ sizeof(struct swcr_req), 0, SLAB_HWCACHE_ALIGN, NULL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+ , NULL
+#endif
+ );
+ if (!swcr_req_cache) {
+ printk("cryptosoft: failed to create request cache\n");
+ return -ENOENT;
+ }
+
+ softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);
+
+ swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
+ CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
+ if (swcr_id < 0) {
+ printk("cryptosoft: Software crypto device cannot initialize!");
+ return -ENODEV;
+ }
+
+#define REGISTER(alg) \
+ crypto_register(swcr_id, alg, 0,0)
+
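+ /*
+ * Probe the kernel crypto API for each table entry and only register
+ * the algorithms this kernel actually provides.
+ */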
+ for (i = 0; i < sizeof(crypto_details)/sizeof(crypto_details[0]); i++) {
+ int found;
+
+ algo = crypto_details[i].alg_name;
+ if (!algo || !*algo) {
+ dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
+ continue;
+ }
+
+ mode = crypto_details[i].mode;
+ sw_type = crypto_details[i].sw_type;
+
+ found = 0;
+ switch (sw_type & SW_TYPE_ALG_MASK) {
+ case SW_TYPE_CIPHER:
+ found = crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC);
+ break;
+ case SW_TYPE_HMAC:
+ found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0);
+ break;
+ case SW_TYPE_HASH:
+ found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0);
+ break;
+ case SW_TYPE_COMP:
+ found = crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC);
+ break;
+ case SW_TYPE_BLKCIPHER:
+ found = crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
+ if (!found && !swcr_no_ablk)
+ found = crypto_has_ablkcipher(algo, 0, 0);
+ break;
+ }
+ if (found) {
+ REGISTER(i);
+ } else {
+ dprintk("%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
+ __FUNCTION__, sw_type, i, algo);
+ }
+ }
+ return 0;
+}
+
+static void
+cryptosoft_exit(void)
+{
+ dprintk("%s()\n", __FUNCTION__);
+ crypto_unregister_all(swcr_id);
+ swcr_id = -1;
+ kmem_cache_destroy(swcr_req_cache);
+}
+
+late_initcall(cryptosoft_init);
+module_exit(cryptosoft_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");
diff --git a/target/linux/generic/files/crypto/ocf/ep80579/Makefile b/target/linux/generic/files/crypto/ocf/ep80579/Makefile
new file mode 100644
index 000000000..9aab29573
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ep80579/Makefile
@@ -0,0 +1,119 @@
+#########################################################################
+#
+# Targets supported
+# all - builds everything and installs
+# install - identical to all
+# depend - build dependencies
+# clean - clears derived objects except the .depend files
+# distclean- clears all derived objects and the .depend file
+#
+# @par
+# This file is provided under a dual BSD/GPLv2 license. When using or
+# redistributing this file, you may do so under either license.
+#
+# GPL LICENSE SUMMARY
+#
+# Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# The full GNU General Public License is included in this distribution
+# in the file called LICENSE.GPL.
+#
+# Contact Information:
+# Intel Corporation
+#
+# BSD LICENSE
+#
+# Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#
+# version: Security.L.1.0.2-229
+############################################################################
+
+
+####################Common variables and definitions########################
+
+ifndef ICP_ROOT
+$(warning ICP_ROOT is undefined. Please set the path to EP80579 release package directory \
+ "-> setenv ICP_ROOT <path>")
+all fastdep:
+ :
+else
+
+ifndef KERNEL_SOURCE_ROOT
+$(error KERNEL_SOURCE_ROOT is undefined. Please set the path to the kernel source directory \
+ "-> setenv KERNEL_SOURCE_ROOT <path>")
+endif
+
+# Ensure the ICP_ENV_DIR environment variable is defined.
+ifndef ICP_ENV_DIR
+$(error ICP_ENV_DIR is undefined. Please set the path to EP80579 driver environment.mk file \
+ "-> setenv ICP_ENV_DIR <path>")
+endif
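+
+# Example environment setup (paths below are placeholders only):
+#   setenv ICP_ROOT /path/to/EP80579_release
+#   setenv ICP_ENV_DIR <directory containing the EP80579 environment.mk>
+#   setenv KERNEL_SOURCE_ROOT /usr/src/linux
+#   make install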
+
+#Add your project environment Makefile
+include ${ICP_ENV_DIR}/environment.mk
+
+#include the makefile with all the default and common Make variable definitions
+include ${ICP_BUILDSYSTEM_PATH}/build_files/common.mk
+
+#Add the name for the executable, Library or Module output definitions
+OUTPUT_NAME= icp_ocf
+
+# List of Source Files to be compiled
+SOURCES= icp_common.c icp_sym.c icp_asym.c icp_ocf_linux.c
+
+#common includes between all supported OSes
+INCLUDES= -I ${ICP_API_DIR} -I${ICP_LAC_API} \
+-I${ICP_OCF_SRC_DIR}
+
+# The location of the os level makefile needs to be changed.
+include ${ICP_ENV_DIR}/${ICP_OS}_${ICP_OS_LEVEL}.mk
+
+# On the line directly below list the outputs you wish to build for,
+# e.g "lib_static lib_shared exe module" as shown below
+install: module
+
+###################Include rules makefiles########################
+include ${ICP_BUILDSYSTEM_PATH}/build_files/rules.mk
+###################End of Rules inclusion#########################
+
+endif
diff --git a/target/linux/generic/files/crypto/ocf/ep80579/environment.mk b/target/linux/generic/files/crypto/ocf/ep80579/environment.mk
new file mode 100644
index 000000000..1a663e587
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ep80579/environment.mk
@@ -0,0 +1,78 @@
+ ###########################################################################
+ #
+# This file is provided under a dual BSD/GPLv2 license. When using or
+# redistributing this file, you may do so under either license.
+#
+# GPL LICENSE SUMMARY
+#
+# Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# The full GNU General Public License is included in this distribution
+# in the file called LICENSE.GPL.
+#
+# Contact Information:
+# Intel Corporation
+#
+# BSD LICENSE
+#
+# Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#
+# version: Security.L.1.0.130
+ #
+ ###########################################################################
+
+
+ICP_LAC_API=$(ICP_ROOT)/Acceleration/include/lac
+ICP_BTR_API=$(ICP_ROOT)/Acceleration/include/btr
+ICP_API_DIR=$(ICP_ROOT)/Acceleration/include
+ICP_OCF_SHIM_DIR?=$(KERNEL_SOURCE_ROOT)/crypto/ocf/
+ifeq ($(wildcard $(ICP_OCF_SHIM_DIR)),)
+ICP_OCF_SHIM_DIR?=$(ROOTDIR)/modules/ocf/
+endif
+
+ICP_OS_LEVEL?=kernel_space
+
+ICP_OS?=linux_2.6
+
+ICP_CORE?=ia
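+
+# The ?= assignments above are defaults; they can be overridden from the
+# environment or on the make command line.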
+
diff --git a/target/linux/generic/files/crypto/ocf/ep80579/icp_asym.c b/target/linux/generic/files/crypto/ocf/ep80579/icp_asym.c
new file mode 100644
index 000000000..d2641c545
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ep80579/icp_asym.c
@@ -0,0 +1,1334 @@
+/***************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * version: Security.L.1.0.2-229
+ *
+ ***************************************************************************/
+
+#include "icp_ocf.h"
+
+/*The following define values (containing the word 'INDEX') are used to find
+the index of each input buffer of the cryptkop struct (see OCF cryptodev.h).
+These values were found through analysis of the OCF OpenSSL patch. If the
+calling program uses different input buffer positions, these defines will have
+to be changed.*/
+
+/*DIFFIE HELLMAN buffer index values*/
+#define ICP_DH_KRP_PARAM_PRIME_INDEX (0)
+#define ICP_DH_KRP_PARAM_BASE_INDEX (1)
+#define ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX (2)
+#define ICP_DH_KRP_PARAM_RESULT_INDEX (3)
+
+/*MOD EXP buffer index values*/
+#define ICP_MOD_EXP_KRP_PARAM_BASE_INDEX (0)
+#define ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX (1)
+#define ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX (2)
+#define ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX (3)
+
+/*MOD EXP CRT buffer index values*/
+#define ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX (0)
+#define ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX (1)
+#define ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX (2)
+#define ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX (3)
+#define ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX (4)
+#define ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX (5)
+#define ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX (6)
+
+/*DSA sign buffer index values*/
+#define ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX (0)
+#define ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX (1)
+#define ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX (2)
+#define ICP_DSA_SIGN_KRP_PARAM_G_INDEX (3)
+#define ICP_DSA_SIGN_KRP_PARAM_X_INDEX (4)
+#define ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX (5)
+#define ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX (6)
+
+/*DSA verify buffer index values*/
+#define ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX (0)
+#define ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX (1)
+#define ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX (2)
+#define ICP_DSA_VERIFY_KRP_PARAM_G_INDEX (3)
+#define ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX (4)
+#define ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX (5)
+#define ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX (6)
+
+/*DSA sign prime Q vs random number K size check values*/
+#define DONT_RUN_LESS_THAN_CHECK (0)
+#define FAIL_A_IS_GREATER_THAN_B (1)
+#define FAIL_A_IS_EQUAL_TO_B (1)
+#define SUCCESS_A_IS_LESS_THAN_B (0)
+#define DSA_SIGN_RAND_GEN_VAL_CHECK_MAX_ITERATIONS (500)
+
+/* We need to set a cryptkop success value just in case it is set or allocated
+ and not set to zero outside of this module */
+#define CRYPTO_OP_SUCCESS (0)
+
+/*Function to compute Diffie Hellman (DH) phase 1 or phase 2 key values*/
+static int icp_ocfDrvDHComputeKey(struct cryptkop *krp);
+
+/*Function to compute a Modular Exponentiation (Mod Exp)*/
+static int icp_ocfDrvModExp(struct cryptkop *krp);
+
+/*Function to compute a Mod Exp using the Chinese Remainder Theorem*/
+static int icp_ocfDrvModExpCRT(struct cryptkop *krp);
+
+/*Helper function to compute whether the first big number argument is less than
+ the second big number argument */
+static int
+icp_ocfDrvCheckALessThanB(CpaFlatBuffer * pK, CpaFlatBuffer * pQ, int *doCheck);
+
+/*Function to sign an input with DSA R and S keys*/
+static int icp_ocfDrvDsaSign(struct cryptkop *krp);
+
+/*Function to Verify a DSA buffer signature*/
+static int icp_ocfDrvDsaVerify(struct cryptkop *krp);
+
+/*Callback function for DH operation*/
+static void
+icp_ocfDrvDhP1CallBack(void *callbackTag,
+ CpaStatus status,
+ void *pOpData, CpaFlatBuffer * pLocalOctetStringPV);
+
+/*Callback function for ME operation*/
+static void
+icp_ocfDrvModExpCallBack(void *callbackTag,
+ CpaStatus status,
+ void *pOpData, CpaFlatBuffer * pResult);
+
+/*Callback function for ME CRT operation*/
+static void
+icp_ocfDrvModExpCRTCallBack(void *callbackTag,
+ CpaStatus status,
+ void *pOpData, CpaFlatBuffer * pOutputData);
+
+/*Callback function for DSA sign operation*/
+static void
+icp_ocfDrvDsaRSSignCallBack(void *callbackTag,
+ CpaStatus status,
+ void *pOpData,
+ CpaBoolean protocolStatus,
+ CpaFlatBuffer * pR, CpaFlatBuffer * pS);
+
+/*Callback function for DSA Verify operation*/
+static void
+icp_ocfDrvDsaVerifyCallBack(void *callbackTag,
+ CpaStatus status,
+ void *pOpData, CpaBoolean verifyStatus);
+
+/* Name : icp_ocfDrvPkeProcess
+ *
+ * Description : This function will choose which PKE process to follow
+ * based on the input arguments
+ */
+int icp_ocfDrvPkeProcess(icp_device_t dev, struct cryptkop *krp, int hint)
+{
+ CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+
+ if (NULL == krp) {
+ DPRINTK("%s(): Invalid input parameters, cryptkop = %p\n",
+ __FUNCTION__, krp);
+ return EINVAL;
+ }
+
+ if (CPA_TRUE == icp_atomic_read(&icp_ocfDrvIsExiting)) {
+ krp->krp_status = ECANCELED;
+ return ECANCELED;
+ }
+
+ switch (krp->krp_op) {
+ case CRK_DH_COMPUTE_KEY:
+ DPRINTK("%s() doing DH_COMPUTE_KEY\n", __FUNCTION__);
+ lacStatus = icp_ocfDrvDHComputeKey(krp);
+ if (CPA_STATUS_SUCCESS != lacStatus) {
+ EPRINTK("%s(): icp_ocfDrvDHComputeKey failed "
+ "(%d).\n", __FUNCTION__, lacStatus);
+ krp->krp_status = ECANCELED;
+ return ECANCELED;
+ }
+
+ break;
+
+ case CRK_MOD_EXP:
+ DPRINTK("%s() doing MOD_EXP \n", __FUNCTION__);
+ lacStatus = icp_ocfDrvModExp(krp);
+ if (CPA_STATUS_SUCCESS != lacStatus) {
+ EPRINTK("%s(): icp_ocfDrvModExp failed (%d).\n",
+ __FUNCTION__, lacStatus);
+ krp->krp_status = ECANCELED;
+ return ECANCELED;
+ }
+
+ break;
+
+ case CRK_MOD_EXP_CRT:
+ DPRINTK("%s() doing MOD_EXP_CRT \n", __FUNCTION__);
+ lacStatus = icp_ocfDrvModExpCRT(krp);
+ if (CPA_STATUS_SUCCESS != lacStatus) {
+ EPRINTK("%s(): icp_ocfDrvModExpCRT "
+ "failed (%d).\n", __FUNCTION__, lacStatus);
+ krp->krp_status = ECANCELED;
+ return ECANCELED;
+ }
+
+ break;
+
+ case CRK_DSA_SIGN:
+ DPRINTK("%s() doing DSA_SIGN \n", __FUNCTION__);
+ lacStatus = icp_ocfDrvDsaSign(krp);
+ if (CPA_STATUS_SUCCESS != lacStatus) {
+ EPRINTK("%s(): icp_ocfDrvDsaSign "
+ "failed (%d).\n", __FUNCTION__, lacStatus);
+ krp->krp_status = ECANCELED;
+ return ECANCELED;
+ }
+
+ break;
+
+ case CRK_DSA_VERIFY:
+ DPRINTK("%s() doing DSA_VERIFY \n", __FUNCTION__);
+ lacStatus = icp_ocfDrvDsaVerify(krp);
+ if (CPA_STATUS_SUCCESS != lacStatus) {
+ EPRINTK("%s(): icp_ocfDrvDsaVerify "
+ "failed (%d).\n", __FUNCTION__, lacStatus);
+ krp->krp_status = ECANCELED;
+ return ECANCELED;
+ }
+
+ break;
+
+ default:
+ EPRINTK("%s(): Asymettric function not "
+ "supported (%d).\n", __FUNCTION__, krp->krp_op);
+ krp->krp_status = EOPNOTSUPP;
+ return EOPNOTSUPP;
+ }
+
+ return ICP_OCF_DRV_STATUS_SUCCESS;
+}
+
+/* Name : icp_ocfDrvSwapBytes
+ *
+ * Description : This function is used to swap the byte order of a buffer.
+ * It has been seen that in general we are passed little endian byte order
+ * buffers, but LAC only accepts big endian byte order buffers.
+ */
+static void inline icp_ocfDrvSwapBytes(u_int8_t * num, u_int32_t buff_len_bytes)
+{
+
+ int i;
+ u_int8_t *end_ptr;
+ u_int8_t hold_val;
+
+ end_ptr = num + (buff_len_bytes - 1);
+ buff_len_bytes = buff_len_bytes >> 1;
+ for (i = 0; i < buff_len_bytes; i++) {
+ hold_val = *num;
+ *num = *end_ptr;
+ num++;
+ *end_ptr = hold_val;
+ end_ptr--;
+ }
+}
+
+/* Name : icp_ocfDrvDHComputeKey
+ *
+ * Description : This function will map Diffie Hellman calls from OCF
+ * to the LAC API. OCF uses this function for Diffie Hellman Phase1 and
+ * Phase2. LAC has a separate Diffie Hellman Phase2 call, however both phases
+ * break down to a modular exponentiation.
+ */
+static int icp_ocfDrvDHComputeKey(struct cryptkop *krp)
+{
+ CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+ void *callbackTag = NULL;
+ CpaCyDhPhase1KeyGenOpData *pPhase1OpData = NULL;
+ CpaFlatBuffer *pLocalOctetStringPV = NULL;
+ uint32_t dh_prime_len_bytes = 0, dh_prime_len_bits = 0;
+
+ /* Input checks - check prime is a multiple of 8 bits to allow for
+ allocation later */
+ dh_prime_len_bits =
+ (krp->krp_param[ICP_DH_KRP_PARAM_PRIME_INDEX].crp_nbits);
+
+ /* LAC can reject prime lengths based on prime key sizes, we just
+ need to make sure we can allocate space for the base and
+ exponent buffers correctly */
+ if ((dh_prime_len_bits % NUM_BITS_IN_BYTE) != 0) {
+ APRINTK("%s(): Warning Prime number buffer size is not a "
+ "multiple of 8 bits\n", __FUNCTION__);
+ }
+
+ /* Result storage space should be the same size as the prime as this
+ value can take up the same amount of storage space */
+ if (dh_prime_len_bits !=
+ krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_nbits) {
+ DPRINTK("%s(): Return Buffer must be the same size "
+ "as the Prime buffer\n", __FUNCTION__);
+ krp->krp_status = EINVAL;
+ return EINVAL;
+ }
+ /* Switch to size in bytes */
+ BITS_TO_BYTES(dh_prime_len_bytes, dh_prime_len_bits);
+
+ callbackTag = krp;
+
+/*All allocations are set to ICP_M_NOWAIT due to the possibility of getting
+called in interrupt context*/
+ pPhase1OpData = icp_kmem_cache_zalloc(drvDH_zone, ICP_M_NOWAIT);
+ if (NULL == pPhase1OpData) {
+ APRINTK("%s():Failed to get memory for key gen data\n",
+ __FUNCTION__);
+ krp->krp_status = ENOMEM;
+ return ENOMEM;
+ }
+
+ pLocalOctetStringPV =
+ icp_kmem_cache_zalloc(drvFlatBuffer_zone, ICP_M_NOWAIT);
+ if (NULL == pLocalOctetStringPV) {
+ APRINTK("%s():Failed to get memory for pLocalOctetStringPV\n",
+ __FUNCTION__);
+ ICP_CACHE_FREE(drvDH_zone, pPhase1OpData);
+ krp->krp_status = ENOMEM;
+ return ENOMEM;
+ }
+
+ /* Link parameters */
+ pPhase1OpData->primeP.pData =
+ krp->krp_param[ICP_DH_KRP_PARAM_PRIME_INDEX].crp_p;
+
+ pPhase1OpData->primeP.dataLenInBytes = dh_prime_len_bytes;
+
+ icp_ocfDrvSwapBytes(pPhase1OpData->primeP.pData, dh_prime_len_bytes);
+
+ pPhase1OpData->baseG.pData =
+ krp->krp_param[ICP_DH_KRP_PARAM_BASE_INDEX].crp_p;
+
+ BITS_TO_BYTES(pPhase1OpData->baseG.dataLenInBytes,
+ krp->krp_param[ICP_DH_KRP_PARAM_BASE_INDEX].crp_nbits);
+
+ icp_ocfDrvSwapBytes(pPhase1OpData->baseG.pData,
+ pPhase1OpData->baseG.dataLenInBytes);
+
+ pPhase1OpData->privateValueX.pData =
+ krp->krp_param[ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX].crp_p;
+
+ BITS_TO_BYTES(pPhase1OpData->privateValueX.dataLenInBytes,
+ krp->krp_param[ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX].
+ crp_nbits);
+
+ icp_ocfDrvSwapBytes(pPhase1OpData->privateValueX.pData,
+ pPhase1OpData->privateValueX.dataLenInBytes);
+
+ /* Output parameters */
+ pLocalOctetStringPV->pData =
+ krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_p;
+
+ BITS_TO_BYTES(pLocalOctetStringPV->dataLenInBytes,
+ krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_nbits);
+
+ lacStatus = cpaCyDhKeyGenPhase1(CPA_INSTANCE_HANDLE_SINGLE,
+ icp_ocfDrvDhP1CallBack,
+ callbackTag, pPhase1OpData,
+ pLocalOctetStringPV);
+
+ if (CPA_STATUS_SUCCESS != lacStatus) {
+ EPRINTK("%s(): DH Phase 1 Key Gen failed (%d).\n",
+ __FUNCTION__, lacStatus);
+ icp_ocfDrvFreeFlatBuffer(pLocalOctetStringPV);
+ ICP_CACHE_FREE(drvDH_zone, pPhase1OpData);
+ }
+
+ return lacStatus;
+}
+
+/* Name : icp_ocfDrvModExp
+ *
+ * Description : This function will map ordinary Modular Exponentiation calls
+ * from OCF to the LAC API.
+ *
+ */
+static int icp_ocfDrvModExp(struct cryptkop *krp)
+{
+ CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+ void *callbackTag = NULL;
+ CpaCyLnModExpOpData *pModExpOpData = NULL;
+ CpaFlatBuffer *pResult = NULL;
+
+ if ((krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_nbits %
+ NUM_BITS_IN_BYTE) != 0) {
+ DPRINTK("%s(): Warning - modulus buffer size (%d) is not a "
+ "multiple of 8 bits\n", __FUNCTION__,
+ krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].
+ crp_nbits);
+ }
+
+ /* Result storage space should be at least the size of the modulus, as
+ the result can take up the same amount of storage space */
+ if (krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_nbits >
+ krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].crp_nbits) {
+ APRINTK("%s(): Return Buffer size must be the same or"
+ " greater than the Modulus buffer\n", __FUNCTION__);
+ krp->krp_status = EINVAL;
+ return EINVAL;
+ }
+
+ callbackTag = krp;
+
+ pModExpOpData = icp_kmem_cache_zalloc(drvLnModExp_zone, ICP_M_NOWAIT);
+ if (NULL == pModExpOpData) {
+ APRINTK("%s():Failed to get memory for key gen data\n",
+ __FUNCTION__);
+ krp->krp_status = ENOMEM;
+ return ENOMEM;
+ }
+
+ pResult = icp_kmem_cache_zalloc(drvFlatBuffer_zone, ICP_M_NOWAIT);
+ if (NULL == pResult) {
+ APRINTK("%s():Failed to get memory for ModExp result\n",
+ __FUNCTION__);
+ ICP_CACHE_FREE(drvLnModExp_zone, pModExpOpData);
+ krp->krp_status = ENOMEM;
+ return ENOMEM;
+ }
+
+ /* Link parameters */
+ pModExpOpData->modulus.pData =
+ krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_p;
+ BITS_TO_BYTES(pModExpOpData->modulus.dataLenInBytes,
+ krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].
+ crp_nbits);
+
+ icp_ocfDrvSwapBytes(pModExpOpData->modulus.pData,
+ pModExpOpData->modulus.dataLenInBytes);
+
+ DPRINTK("%s : base (%d)\n", __FUNCTION__, krp->
+ krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_nbits);
+ pModExpOpData->base.pData =
+ krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_p;
+ BITS_TO_BYTES(pModExpOpData->base.dataLenInBytes,
+ krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
+ crp_nbits);
+ icp_ocfDrvSwapBytes(pModExpOpData->base.pData,
+ pModExpOpData->base.dataLenInBytes);
+
+ pModExpOpData->exponent.pData =
+ krp->krp_param[ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX].crp_p;
+ BITS_TO_BYTES(pModExpOpData->exponent.dataLenInBytes,
+ krp->krp_param[ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX].
+ crp_nbits);
+
+ icp_ocfDrvSwapBytes(pModExpOpData->exponent.pData,
+ pModExpOpData->exponent.dataLenInBytes);
+ /* Output parameters */
+ pResult->pData =
+ krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].crp_p,
+ BITS_TO_BYTES(pResult->dataLenInBytes,
+ krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].
+ crp_nbits);
+
+ lacStatus = cpaCyLnModExp(CPA_INSTANCE_HANDLE_SINGLE,
+ icp_ocfDrvModExpCallBack,
+ callbackTag, pModExpOpData, pResult);
+
+ if (CPA_STATUS_SUCCESS != lacStatus) {
+ EPRINTK("%s(): Mod Exp Operation failed (%d).\n",
+ __FUNCTION__, lacStatus);
+ krp->krp_status = ECANCELED;
+ icp_ocfDrvFreeFlatBuffer(pResult);
+ ICP_CACHE_FREE(drvLnModExp_zone, pModExpOpData);
+ }
+
+ return lacStatus;
+}
+
+/* Name : icp_ocfDrvModExpCRT
+ *
+ * Description : This function will map ordinary Modular Exponentiation Chinese
+ * Remainder Theorem implementation calls from OCF to the LAC API.
+ *
+ * Note : Mod Exp CRT for this driver is accelerated through LAC RSA type 2
+ * decrypt operation. Therefore P and Q input values must always be prime
+ * numbers. Although basic primality checks are done in LAC, it is up to the
+ * user to perform proper primality checking before passing the inputs.
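+ *
+ * The OCF inputs map directly onto the LAC representation-2 private key
+ * fields used below: I -> inputData, P -> prime1P, Q -> prime2Q,
+ * dP -> exponent1Dp, dQ -> exponent2Dq and qInv -> coefficientQInv.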
+ */
+static int icp_ocfDrvModExpCRT(struct cryptkop *krp)
+{
+ CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+ CpaCyRsaDecryptOpData *rsaDecryptOpData = NULL;
+ void *callbackTag = NULL;
+ CpaFlatBuffer *pOutputData = NULL;
+
+ /*Parameter input checks are all done by LAC, no need to repeat
+ them here. */
+ callbackTag = krp;
+
+ rsaDecryptOpData =
+ icp_kmem_cache_zalloc(drvRSADecrypt_zone, ICP_M_NOWAIT);
+ if (NULL == rsaDecryptOpData) {
+ APRINTK("%s():Failed to get memory"
+ " for MOD EXP CRT Op data struct\n", __FUNCTION__);
+ krp->krp_status = ENOMEM;
+ return ENOMEM;
+ }
+
+ rsaDecryptOpData->pRecipientPrivateKey
+ = icp_kmem_cache_zalloc(drvRSAPrivateKey_zone, ICP_M_NOWAIT);
+ if (NULL == rsaDecryptOpData->pRecipientPrivateKey) {
+ APRINTK("%s():Failed to get memory for MOD EXP CRT"
+ " private key values struct\n", __FUNCTION__);
+ ICP_CACHE_FREE(drvRSADecrypt_zone, rsaDecryptOpData);
+ krp->krp_status = ENOMEM;
+ return ENOMEM;
+ }
+
+ rsaDecryptOpData->pRecipientPrivateKey->
+ version = CPA_CY_RSA_VERSION_TWO_PRIME;
+ rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRepType = CPA_CY_RSA_PRIVATE_KEY_REP_TYPE_2;
+
+ pOutputData = icp_kmem_cache_zalloc(drvFlatBuffer_zone, ICP_M_NOWAIT);
+ if (NULL == pOutputData) {
+ APRINTK("%s():Failed to get memory"
+ " for MOD EXP CRT output data\n", __FUNCTION__);
+ ICP_CACHE_FREE(drvRSAPrivateKey_zone,
+ rsaDecryptOpData->pRecipientPrivateKey);
+ ICP_CACHE_FREE(drvRSADecrypt_zone, rsaDecryptOpData);
+ krp->krp_status = ENOMEM;
+ return ENOMEM;
+ }
+
+ /* Link parameters */
+ rsaDecryptOpData->inputData.pData =
+ krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX].crp_p;
+ BITS_TO_BYTES(rsaDecryptOpData->inputData.dataLenInBytes,
+ krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX].
+ crp_nbits);
+
+ icp_ocfDrvSwapBytes(rsaDecryptOpData->inputData.pData,
+ rsaDecryptOpData->inputData.dataLenInBytes);
+
+ rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.prime1P.pData =
+ krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX].crp_p;
+ BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
+ prime1P.dataLenInBytes,
+ krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX].
+ crp_nbits);
+
+ icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRep2.prime1P.pData,
+ rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRep2.prime1P.dataLenInBytes);
+
+ rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.prime2Q.pData =
+ krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX].crp_p;
+ BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
+ prime2Q.dataLenInBytes,
+ krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX].
+ crp_nbits);
+
+ icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRep2.prime2Q.pData,
+ rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRep2.prime2Q.dataLenInBytes);
+
+ rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRep2.exponent1Dp.pData =
+ krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX].crp_p;
+ BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
+ exponent1Dp.dataLenInBytes,
+ krp->
+ krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX].
+ crp_nbits);
+
+ icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRep2.exponent1Dp.pData,
+ rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRep2.exponent1Dp.dataLenInBytes);
+
+ rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRep2.exponent2Dq.pData =
+ krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX].crp_p;
+ BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRep2.exponent2Dq.dataLenInBytes,
+ krp->
+ krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX].
+ crp_nbits);
+
+ icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRep2.exponent2Dq.pData,
+ rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRep2.exponent2Dq.dataLenInBytes);
+
+ rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRep2.coefficientQInv.pData =
+ krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX].crp_p;
+ BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRep2.coefficientQInv.dataLenInBytes,
+ krp->
+ krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX].
+ crp_nbits);
+
+ icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRep2.coefficientQInv.pData,
+ rsaDecryptOpData->pRecipientPrivateKey->
+ privateKeyRep2.coefficientQInv.dataLenInBytes);
+
+ /* Output Parameter */
+ pOutputData->pData =
+ krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX].crp_p;
+ BITS_TO_BYTES(pOutputData->dataLenInBytes,
+ krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX].
+ crp_nbits);
+
+ lacStatus = cpaCyRsaDecrypt(CPA_INSTANCE_HANDLE_SINGLE,
+ icp_ocfDrvModExpCRTCallBack,
+ callbackTag, rsaDecryptOpData, pOutputData);
+
+ if (CPA_STATUS_SUCCESS != lacStatus) {
+ EPRINTK("%s(): Mod Exp CRT Operation failed (%d).\n",
+ __FUNCTION__, lacStatus);
+ krp->krp_status = ECANCELED;
+ icp_ocfDrvFreeFlatBuffer(pOutputData);
+ ICP_CACHE_FREE(drvRSAPrivateKey_zone,
+ rsaDecryptOpData->pRecipientPrivateKey);
+ ICP_CACHE_FREE(drvRSADecrypt_zone, rsaDecryptOpData);
+ }
+
+ return lacStatus;
+}
+
+/* Name : icp_ocfDrvCheckALessThanB
+ *
+ * Description : This function will check whether the first argument is less
+ * than the second. It is used to check whether the DSA RS sign Random K
+ * value is less than the Prime Q value (as defined in the specification)
+ *
+ */
+static int
+icp_ocfDrvCheckALessThanB(CpaFlatBuffer * pK, CpaFlatBuffer * pQ, int *doCheck)
+{
+
+ uint8_t *MSB_K = pK->pData;
+ uint8_t *MSB_Q = pQ->pData;
+ uint32_t buffer_lengths_in_bytes = pQ->dataLenInBytes;
+
+ if (DONT_RUN_LESS_THAN_CHECK == *doCheck) {
+ return FAIL_A_IS_GREATER_THAN_B;
+ }
+
+	/* Check MSBs:
+	   if A == B, check next MSB
+	   if A > B, return A_IS_GREATER_THAN_B
+	   if A < B, return A_IS_LESS_THAN_B (success) */
+ while (*MSB_K == *MSB_Q) {
+ MSB_K++;
+ MSB_Q++;
+
+ buffer_lengths_in_bytes--;
+ if (0 == buffer_lengths_in_bytes) {
+ DPRINTK("%s() Buffers have equal value!!\n",
+ __FUNCTION__);
+ return FAIL_A_IS_EQUAL_TO_B;
+ }
+
+ }
+
+ if (*MSB_K < *MSB_Q) {
+ return SUCCESS_A_IS_LESS_THAN_B;
+ } else {
+ return FAIL_A_IS_GREATER_THAN_B;
+ }
+
+}
+
+/* Name : icp_ocfDrvDsaSign
+ *
+ * Description : This function will map DSA RS Sign from OCF to the LAC API.
+ *
+ * NOTE: From looking at the OCF patch to OpenSSL and even the number of input
+ * parameters, OCF expects us to generate the random seed value. This value
+ * is generated and passed to LAC, however the number is discarded in the
+ * callback and not returned to the user.
+ */
+static int icp_ocfDrvDsaSign(struct cryptkop *krp)
+{
+ CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+ CpaCyDsaRSSignOpData *dsaRsSignOpData = NULL;
+ void *callbackTag = NULL;
+ CpaCyRandGenOpData randGenOpData;
+ int primeQSizeInBytes = 0;
+ int doCheck = 0;
+ CpaFlatBuffer randData;
+ CpaBoolean protocolStatus = CPA_FALSE;
+ CpaFlatBuffer *pR = NULL;
+ CpaFlatBuffer *pS = NULL;
+
+ callbackTag = krp;
+
+ BITS_TO_BYTES(primeQSizeInBytes,
+ krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].
+ crp_nbits);
+
+ if (DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES != primeQSizeInBytes) {
+ APRINTK("%s(): DSA PRIME Q size not equal to the "
+			"FIPS defined 20 bytes, = %d\n",
+ __FUNCTION__, primeQSizeInBytes);
+ krp->krp_status = EDOM;
+ return EDOM;
+ }
+
+ dsaRsSignOpData =
+ icp_kmem_cache_zalloc(drvDSARSSign_zone, ICP_M_NOWAIT);
+ if (NULL == dsaRsSignOpData) {
+ APRINTK("%s():Failed to get memory"
+ " for DSA RS Sign Op data struct\n", __FUNCTION__);
+ krp->krp_status = ENOMEM;
+ return ENOMEM;
+ }
+
+ dsaRsSignOpData->K.pData =
+ icp_kmem_cache_alloc(drvDSARSSignKValue_zone, ICP_M_NOWAIT);
+
+ if (NULL == dsaRsSignOpData->K.pData) {
+ APRINTK("%s():Failed to get memory"
+ " for DSA RS Sign Op Random value\n", __FUNCTION__);
+ ICP_CACHE_FREE(drvDSARSSign_zone, dsaRsSignOpData);
+ krp->krp_status = ENOMEM;
+ return ENOMEM;
+ }
+
+ pR = icp_kmem_cache_zalloc(drvFlatBuffer_zone, ICP_M_NOWAIT);
+ if (NULL == pR) {
+ APRINTK("%s():Failed to get memory"
+ " for DSA signature R\n", __FUNCTION__);
+ ICP_CACHE_FREE(drvDSARSSignKValue_zone,
+ dsaRsSignOpData->K.pData);
+ ICP_CACHE_FREE(drvDSARSSign_zone, dsaRsSignOpData);
+ krp->krp_status = ENOMEM;
+ return ENOMEM;
+ }
+
+ pS = icp_kmem_cache_zalloc(drvFlatBuffer_zone, ICP_M_NOWAIT);
+ if (NULL == pS) {
+ APRINTK("%s():Failed to get memory"
+ " for DSA signature S\n", __FUNCTION__);
+ icp_ocfDrvFreeFlatBuffer(pR);
+ ICP_CACHE_FREE(drvDSARSSignKValue_zone,
+ dsaRsSignOpData->K.pData);
+ ICP_CACHE_FREE(drvDSARSSign_zone, dsaRsSignOpData);
+ krp->krp_status = ENOMEM;
+ return ENOMEM;
+ }
+
+ /*link prime number parameter for ease of processing */
+ dsaRsSignOpData->P.pData =
+ krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX].crp_p;
+ BITS_TO_BYTES(dsaRsSignOpData->P.dataLenInBytes,
+ krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX].
+ crp_nbits);
+
+ icp_ocfDrvSwapBytes(dsaRsSignOpData->P.pData,
+ dsaRsSignOpData->P.dataLenInBytes);
+
+ dsaRsSignOpData->Q.pData =
+ krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].crp_p;
+ BITS_TO_BYTES(dsaRsSignOpData->Q.dataLenInBytes,
+ krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].
+ crp_nbits);
+
+ icp_ocfDrvSwapBytes(dsaRsSignOpData->Q.pData,
+ dsaRsSignOpData->Q.dataLenInBytes);
+
+	/*generate a random number with the same buffer size as Prime value Q,
+	   but with a value less than Q */
+ dsaRsSignOpData->K.dataLenInBytes = dsaRsSignOpData->Q.dataLenInBytes;
+
+ randGenOpData.generateBits = CPA_TRUE;
+ randGenOpData.lenInBytes = dsaRsSignOpData->K.dataLenInBytes;
+
+ icp_ocfDrvPtrAndLenToFlatBuffer(dsaRsSignOpData->K.pData,
+ dsaRsSignOpData->K.dataLenInBytes,
+ &randData);
+
+ doCheck = 0;
+ while (icp_ocfDrvCheckALessThanB(&(dsaRsSignOpData->K),
+ &(dsaRsSignOpData->Q), &doCheck)) {
+
+ if (CPA_STATUS_SUCCESS
+ != cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
+ NULL, NULL, &randGenOpData, &randData)) {
+ APRINTK("%s(): ERROR - Failed to generate DSA RS Sign K"
+				" value\n", __FUNCTION__);
+ icp_ocfDrvFreeFlatBuffer(pS);
+ icp_ocfDrvFreeFlatBuffer(pR);
+ ICP_CACHE_FREE(drvDSARSSignKValue_zone,
+ dsaRsSignOpData->K.pData);
+ ICP_CACHE_FREE(drvDSARSSign_zone, dsaRsSignOpData);
+ krp->krp_status = EAGAIN;
+ return EAGAIN;
+ }
+
+ doCheck++;
+ if (DSA_SIGN_RAND_GEN_VAL_CHECK_MAX_ITERATIONS == doCheck) {
+ APRINTK("%s(): ERROR - Failed to find DSA RS Sign K "
+ "value less than Q value\n", __FUNCTION__);
+ icp_ocfDrvFreeFlatBuffer(pS);
+ icp_ocfDrvFreeFlatBuffer(pR);
+ ICP_CACHE_FREE(drvDSARSSignKValue_zone,
+ dsaRsSignOpData->K.pData);
+ ICP_CACHE_FREE(drvDSARSSign_zone, dsaRsSignOpData);
+ krp->krp_status = EAGAIN;
+ return EAGAIN;
+ }
+
+ }
+ /*Rand Data - no need to swap bytes for pK */
+
+ /* Link parameters */
+ dsaRsSignOpData->G.pData =
+ krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_G_INDEX].crp_p;
+ BITS_TO_BYTES(dsaRsSignOpData->G.dataLenInBytes,
+ krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_G_INDEX].crp_nbits);
+
+ icp_ocfDrvSwapBytes(dsaRsSignOpData->G.pData,
+ dsaRsSignOpData->G.dataLenInBytes);
+
+ dsaRsSignOpData->X.pData =
+ krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_X_INDEX].crp_p;
+ BITS_TO_BYTES(dsaRsSignOpData->X.dataLenInBytes,
+ krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_X_INDEX].crp_nbits);
+ icp_ocfDrvSwapBytes(dsaRsSignOpData->X.pData,
+ dsaRsSignOpData->X.dataLenInBytes);
+
+ /*OpenSSL dgst parameter is left in big endian byte order,
+ therefore no byte swap is required */
+ dsaRsSignOpData->M.pData =
+ krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX].crp_p;
+ BITS_TO_BYTES(dsaRsSignOpData->M.dataLenInBytes,
+ krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX].
+ crp_nbits);
+
+ /* Output Parameters */
+ pS->pData = krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX].crp_p;
+ BITS_TO_BYTES(pS->dataLenInBytes,
+ krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX].
+ crp_nbits);
+
+ pR->pData = krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX].crp_p;
+ BITS_TO_BYTES(pR->dataLenInBytes,
+ krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX].
+ crp_nbits);
+
+ lacStatus = cpaCyDsaSignRS(CPA_INSTANCE_HANDLE_SINGLE,
+ icp_ocfDrvDsaRSSignCallBack,
+ callbackTag, dsaRsSignOpData,
+ &protocolStatus, pR, pS);
+
+ if (CPA_STATUS_SUCCESS != lacStatus) {
+ EPRINTK("%s(): DSA RS Sign Operation failed (%d).\n",
+ __FUNCTION__, lacStatus);
+ krp->krp_status = ECANCELED;
+ icp_ocfDrvFreeFlatBuffer(pS);
+ icp_ocfDrvFreeFlatBuffer(pR);
+ ICP_CACHE_FREE(drvDSARSSignKValue_zone,
+ dsaRsSignOpData->K.pData);
+ ICP_CACHE_FREE(drvDSARSSign_zone, dsaRsSignOpData);
+ }
+
+ return lacStatus;
+}
+
+/* Name : icp_ocfDrvDsaVerify
+ *
+ * Description : This function will map DSA RS Verify from OCF to the LAC API.
+ *
+ */
+static int icp_ocfDrvDsaVerify(struct cryptkop *krp)
+{
+ CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+ CpaCyDsaVerifyOpData *dsaVerifyOpData = NULL;
+ void *callbackTag = NULL;
+ CpaBoolean verifyStatus = CPA_FALSE;
+
+ callbackTag = krp;
+
+ dsaVerifyOpData =
+ icp_kmem_cache_zalloc(drvDSAVerify_zone, ICP_M_NOWAIT);
+ if (NULL == dsaVerifyOpData) {
+ APRINTK("%s():Failed to get memory"
+ " for DSA Verify Op data struct\n", __FUNCTION__);
+ krp->krp_status = ENOMEM;
+ return ENOMEM;
+ }
+
+ /* Link parameters */
+ dsaVerifyOpData->P.pData =
+ krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX].crp_p;
+ BITS_TO_BYTES(dsaVerifyOpData->P.dataLenInBytes,
+ krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX].
+ crp_nbits);
+ icp_ocfDrvSwapBytes(dsaVerifyOpData->P.pData,
+ dsaVerifyOpData->P.dataLenInBytes);
+
+ dsaVerifyOpData->Q.pData =
+ krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX].crp_p;
+ BITS_TO_BYTES(dsaVerifyOpData->Q.dataLenInBytes,
+ krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX].
+ crp_nbits);
+ icp_ocfDrvSwapBytes(dsaVerifyOpData->Q.pData,
+ dsaVerifyOpData->Q.dataLenInBytes);
+
+ dsaVerifyOpData->G.pData =
+ krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_G_INDEX].crp_p;
+ BITS_TO_BYTES(dsaVerifyOpData->G.dataLenInBytes,
+ krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_G_INDEX].
+ crp_nbits);
+ icp_ocfDrvSwapBytes(dsaVerifyOpData->G.pData,
+ dsaVerifyOpData->G.dataLenInBytes);
+
+ dsaVerifyOpData->Y.pData =
+ krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX].crp_p;
+ BITS_TO_BYTES(dsaVerifyOpData->Y.dataLenInBytes,
+ krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX].
+ crp_nbits);
+ icp_ocfDrvSwapBytes(dsaVerifyOpData->Y.pData,
+ dsaVerifyOpData->Y.dataLenInBytes);
+
+ /*OpenSSL dgst parameter is left in big endian byte order,
+ therefore no byte swap is required */
+ dsaVerifyOpData->M.pData =
+ krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX].crp_p;
+ BITS_TO_BYTES(dsaVerifyOpData->M.dataLenInBytes,
+ krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX].
+ crp_nbits);
+
+ dsaVerifyOpData->R.pData =
+ krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX].crp_p;
+ BITS_TO_BYTES(dsaVerifyOpData->R.dataLenInBytes,
+ krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX].
+ crp_nbits);
+ icp_ocfDrvSwapBytes(dsaVerifyOpData->R.pData,
+ dsaVerifyOpData->R.dataLenInBytes);
+
+ dsaVerifyOpData->S.pData =
+ krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX].crp_p;
+ BITS_TO_BYTES(dsaVerifyOpData->S.dataLenInBytes,
+ krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX].
+ crp_nbits);
+ icp_ocfDrvSwapBytes(dsaVerifyOpData->S.pData,
+ dsaVerifyOpData->S.dataLenInBytes);
+
+ lacStatus = cpaCyDsaVerify(CPA_INSTANCE_HANDLE_SINGLE,
+ icp_ocfDrvDsaVerifyCallBack,
+ callbackTag, dsaVerifyOpData, &verifyStatus);
+
+ if (CPA_STATUS_SUCCESS != lacStatus) {
+ EPRINTK("%s(): DSA Verify Operation failed (%d).\n",
+ __FUNCTION__, lacStatus);
+ ICP_CACHE_FREE(drvDSAVerify_zone, dsaVerifyOpData);
+ krp->krp_status = ECANCELED;
+ }
+
+ return lacStatus;
+}
+
+/* Name : icp_ocfDrvDhP1CallBack
+ *
+ * Description : When this function returns it signifies that the LAC
+ * component has completed the DH Phase 1 operation.
+ */
+static void
+icp_ocfDrvDhP1CallBack(void *callbackTag,
+ CpaStatus status,
+ void *pOpData, CpaFlatBuffer * pLocalOctetStringPV)
+{
+ struct cryptkop *krp = NULL;
+ CpaCyDhPhase1KeyGenOpData *pPhase1OpData = NULL;
+
+ if (NULL == callbackTag) {
+ DPRINTK("%s(): Invalid input parameters - "
+ "callbackTag data is NULL\n", __FUNCTION__);
+ return;
+ }
+ krp = (struct cryptkop *)callbackTag;
+
+ if (NULL == pOpData) {
+ DPRINTK("%s(): Invalid input parameters - "
+ "Operation Data is NULL\n", __FUNCTION__);
+ krp->krp_status = ECANCELED;
+ crypto_kdone(krp);
+ return;
+ }
+ pPhase1OpData = (CpaCyDhPhase1KeyGenOpData *) pOpData;
+
+ if (NULL == pLocalOctetStringPV) {
+ DPRINTK("%s(): Invalid input parameters - "
+ "pLocalOctetStringPV Data is NULL\n", __FUNCTION__);
+ memset(pPhase1OpData, 0, sizeof(CpaCyDhPhase1KeyGenOpData));
+ ICP_CACHE_FREE(drvDH_zone, pPhase1OpData);
+ krp->krp_status = ECANCELED;
+ crypto_kdone(krp);
+ return;
+ }
+
+ if (CPA_STATUS_SUCCESS == status) {
+ krp->krp_status = CRYPTO_OP_SUCCESS;
+ } else {
+ APRINTK("%s(): Diffie Hellman Phase1 Key Gen failed - "
+ "Operation Status = %d\n", __FUNCTION__, status);
+ krp->krp_status = ECANCELED;
+ }
+
+ icp_ocfDrvSwapBytes(pLocalOctetStringPV->pData,
+ pLocalOctetStringPV->dataLenInBytes);
+
+ icp_ocfDrvFreeFlatBuffer(pLocalOctetStringPV);
+ memset(pPhase1OpData, 0, sizeof(CpaCyDhPhase1KeyGenOpData));
+ ICP_CACHE_FREE(drvDH_zone, pPhase1OpData);
+
+ crypto_kdone(krp);
+
+ return;
+}
+
+/* Name : icp_ocfDrvModExpCallBack
+ *
+ * Description : When this function returns it signifies that the LAC
+ * component has completed the Mod Exp operation.
+ */
+static void
+icp_ocfDrvModExpCallBack(void *callbackTag,
+ CpaStatus status,
+ void *pOpdata, CpaFlatBuffer * pResult)
+{
+ struct cryptkop *krp = NULL;
+ CpaCyLnModExpOpData *pLnModExpOpData = NULL;
+
+ if (NULL == callbackTag) {
+ DPRINTK("%s(): Invalid input parameters - "
+ "callbackTag data is NULL\n", __FUNCTION__);
+ return;
+ }
+ krp = (struct cryptkop *)callbackTag;
+
+ if (NULL == pOpdata) {
+ DPRINTK("%s(): Invalid Mod Exp input parameters - "
+ "Operation Data is NULL\n", __FUNCTION__);
+ krp->krp_status = ECANCELED;
+ crypto_kdone(krp);
+ return;
+ }
+ pLnModExpOpData = (CpaCyLnModExpOpData *) pOpdata;
+
+ if (NULL == pResult) {
+ DPRINTK("%s(): Invalid input parameters - "
+ "pResult data is NULL\n", __FUNCTION__);
+ krp->krp_status = ECANCELED;
+ memset(pLnModExpOpData, 0, sizeof(CpaCyLnModExpOpData));
+ ICP_CACHE_FREE(drvLnModExp_zone, pLnModExpOpData);
+ crypto_kdone(krp);
+ return;
+ }
+
+ if (CPA_STATUS_SUCCESS == status) {
+ krp->krp_status = CRYPTO_OP_SUCCESS;
+ } else {
+ APRINTK("%s(): LAC Mod Exp Operation failed - "
+ "Operation Status = %d\n", __FUNCTION__, status);
+ krp->krp_status = ECANCELED;
+ }
+
+ icp_ocfDrvSwapBytes(pResult->pData, pResult->dataLenInBytes);
+
+ /*switch base size value back to original */
+ if (pLnModExpOpData->base.pData ==
+ (uint8_t *) & (krp->
+ krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
+ crp_nbits)) {
+ *((uint32_t *) pLnModExpOpData->base.pData) =
+ ntohl(*((uint32_t *) pLnModExpOpData->base.pData));
+ }
+ icp_ocfDrvFreeFlatBuffer(pResult);
+ memset(pLnModExpOpData, 0, sizeof(CpaCyLnModExpOpData));
+ ICP_CACHE_FREE(drvLnModExp_zone, pLnModExpOpData);
+
+ crypto_kdone(krp);
+
+ return;
+
+}
+
+/* Name : icp_ocfDrvModExpCRTCallBack
+ *
+ * Description : When this function returns it signifies that the LAC
+ * component has completed the Mod Exp CRT operation.
+ */
+static void
+icp_ocfDrvModExpCRTCallBack(void *callbackTag,
+ CpaStatus status,
+ void *pOpData, CpaFlatBuffer * pOutputData)
+{
+ struct cryptkop *krp = NULL;
+ CpaCyRsaDecryptOpData *pDecryptData = NULL;
+
+ if (NULL == callbackTag) {
+ DPRINTK("%s(): Invalid input parameters - "
+ "callbackTag data is NULL\n", __FUNCTION__);
+ return;
+ }
+
+ krp = (struct cryptkop *)callbackTag;
+
+ if (NULL == pOpData) {
+ DPRINTK("%s(): Invalid input parameters - "
+ "Operation Data is NULL\n", __FUNCTION__);
+ krp->krp_status = ECANCELED;
+ crypto_kdone(krp);
+ return;
+ }
+ pDecryptData = (CpaCyRsaDecryptOpData *) pOpData;
+
+ if (NULL == pOutputData) {
+ DPRINTK("%s(): Invalid input parameter - "
+ "pOutputData is NULL\n", __FUNCTION__);
+ memset(pDecryptData->pRecipientPrivateKey, 0,
+ sizeof(CpaCyRsaPrivateKey));
+ ICP_CACHE_FREE(drvRSAPrivateKey_zone,
+ pDecryptData->pRecipientPrivateKey);
+ memset(pDecryptData, 0, sizeof(CpaCyRsaDecryptOpData));
+ ICP_CACHE_FREE(drvRSADecrypt_zone, pDecryptData);
+ krp->krp_status = ECANCELED;
+ crypto_kdone(krp);
+ return;
+ }
+
+ if (CPA_STATUS_SUCCESS == status) {
+ krp->krp_status = CRYPTO_OP_SUCCESS;
+ } else {
+ APRINTK("%s(): LAC Mod Exp CRT operation failed - "
+ "Operation Status = %d\n", __FUNCTION__, status);
+ krp->krp_status = ECANCELED;
+ }
+
+ icp_ocfDrvSwapBytes(pOutputData->pData, pOutputData->dataLenInBytes);
+
+ icp_ocfDrvFreeFlatBuffer(pOutputData);
+ memset(pDecryptData->pRecipientPrivateKey, 0,
+ sizeof(CpaCyRsaPrivateKey));
+ ICP_CACHE_FREE(drvRSAPrivateKey_zone,
+ pDecryptData->pRecipientPrivateKey);
+ memset(pDecryptData, 0, sizeof(CpaCyRsaDecryptOpData));
+ ICP_CACHE_FREE(drvRSADecrypt_zone, pDecryptData);
+
+ crypto_kdone(krp);
+
+ return;
+}
+
+/* Name : icp_ocfDrvDsaRSSignCallBack
+ *
+ * Description : When this function returns it signifies that the LAC
+ * component has completed the DSA RS sign operation.
+ */
+static void
+icp_ocfDrvDsaRSSignCallBack(void *callbackTag,
+ CpaStatus status,
+ void *pOpData,
+ CpaBoolean protocolStatus,
+ CpaFlatBuffer * pR, CpaFlatBuffer * pS)
+{
+ struct cryptkop *krp = NULL;
+ CpaCyDsaRSSignOpData *pSignData = NULL;
+
+ if (NULL == callbackTag) {
+ DPRINTK("%s(): Invalid input parameters - "
+ "callbackTag data is NULL\n", __FUNCTION__);
+ return;
+ }
+
+ krp = (struct cryptkop *)callbackTag;
+
+ if (NULL == pOpData) {
+ DPRINTK("%s(): Invalid input parameters - "
+ "Operation Data is NULL\n", __FUNCTION__);
+ krp->krp_status = ECANCELED;
+ crypto_kdone(krp);
+ return;
+ }
+ pSignData = (CpaCyDsaRSSignOpData *) pOpData;
+
+ if (NULL == pR) {
+ DPRINTK("%s(): Invalid input parameter - "
+ "pR sign is NULL\n", __FUNCTION__);
+ icp_ocfDrvFreeFlatBuffer(pS);
+ ICP_CACHE_FREE(drvDSARSSign_zone, pSignData);
+ krp->krp_status = ECANCELED;
+ crypto_kdone(krp);
+ return;
+ }
+
+ if (NULL == pS) {
+ DPRINTK("%s(): Invalid input parameter - "
+ "pS sign is NULL\n", __FUNCTION__);
+ icp_ocfDrvFreeFlatBuffer(pR);
+ ICP_CACHE_FREE(drvDSARSSign_zone, pSignData);
+ krp->krp_status = ECANCELED;
+ crypto_kdone(krp);
+ return;
+ }
+
+ if (CPA_STATUS_SUCCESS != status) {
+ APRINTK("%s(): LAC DSA RS Sign operation failed - "
+ "Operation Status = %d\n", __FUNCTION__, status);
+ krp->krp_status = ECANCELED;
+ } else {
+ krp->krp_status = CRYPTO_OP_SUCCESS;
+
+ if (CPA_TRUE != protocolStatus) {
+ DPRINTK("%s(): LAC DSA RS Sign operation failed due "
+ "to protocol error\n", __FUNCTION__);
+ krp->krp_status = EIO;
+ }
+ }
+
+ /* Swap bytes only when the callback status is successful and
+ protocolStatus is set to true */
+ if (CPA_STATUS_SUCCESS == status && CPA_TRUE == protocolStatus) {
+ icp_ocfDrvSwapBytes(pR->pData, pR->dataLenInBytes);
+ icp_ocfDrvSwapBytes(pS->pData, pS->dataLenInBytes);
+ }
+
+ icp_ocfDrvFreeFlatBuffer(pR);
+ icp_ocfDrvFreeFlatBuffer(pS);
+ memset(pSignData->K.pData, 0, pSignData->K.dataLenInBytes);
+ ICP_CACHE_FREE(drvDSARSSignKValue_zone, pSignData->K.pData);
+ memset(pSignData, 0, sizeof(CpaCyDsaRSSignOpData));
+ ICP_CACHE_FREE(drvDSARSSign_zone, pSignData);
+ crypto_kdone(krp);
+
+ return;
+}
+
+/* Name : icp_ocfDrvDsaVerifyCallBack
+ *
+ * Description : When this function returns it signifies that the LAC
+ * component has completed the DSA Verify operation.
+ */
+static void
+icp_ocfDrvDsaVerifyCallBack(void *callbackTag,
+ CpaStatus status,
+ void *pOpData, CpaBoolean verifyStatus)
+{
+
+ struct cryptkop *krp = NULL;
+ CpaCyDsaVerifyOpData *pVerData = NULL;
+
+ if (NULL == callbackTag) {
+ DPRINTK("%s(): Invalid input parameters - "
+ "callbackTag data is NULL\n", __FUNCTION__);
+ return;
+ }
+
+ krp = (struct cryptkop *)callbackTag;
+
+ if (NULL == pOpData) {
+ DPRINTK("%s(): Invalid input parameters - "
+ "Operation Data is NULL\n", __FUNCTION__);
+ krp->krp_status = ECANCELED;
+ crypto_kdone(krp);
+ return;
+ }
+ pVerData = (CpaCyDsaVerifyOpData *) pOpData;
+
+ if (CPA_STATUS_SUCCESS != status) {
+ APRINTK("%s(): LAC DSA Verify operation failed - "
+ "Operation Status = %d\n", __FUNCTION__, status);
+ krp->krp_status = ECANCELED;
+ } else {
+ krp->krp_status = CRYPTO_OP_SUCCESS;
+
+ if (CPA_TRUE != verifyStatus) {
+ DPRINTK("%s(): DSA signature invalid\n", __FUNCTION__);
+ krp->krp_status = EIO;
+ }
+ }
+
+ /* Swap bytes only when the callback status is successful and
+ verifyStatus is set to true */
+ /*Just swapping back the key values for now. Possibly all
+ swapped buffers need to be reverted */
+ if (CPA_STATUS_SUCCESS == status && CPA_TRUE == verifyStatus) {
+ icp_ocfDrvSwapBytes(pVerData->R.pData,
+ pVerData->R.dataLenInBytes);
+ icp_ocfDrvSwapBytes(pVerData->S.pData,
+ pVerData->S.dataLenInBytes);
+ }
+
+ memset(pVerData, 0, sizeof(CpaCyDsaVerifyOpData));
+ ICP_CACHE_FREE(drvDSAVerify_zone, pVerData);
+ crypto_kdone(krp);
+
+ return;
+}
diff --git a/target/linux/generic/files/crypto/ocf/ep80579/icp_common.c b/target/linux/generic/files/crypto/ocf/ep80579/icp_common.c
new file mode 100644
index 000000000..5d46c0adc
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ep80579/icp_common.c
@@ -0,0 +1,773 @@
+/*************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * version: Security.L.1.0.2-229
+ *
+ ***************************************************************************/
+
+/*
+ * An OCF module that uses Intel® QuickAssist Integrated Accelerator to do the
+ * crypto.
+ *
+ * This driver requires the ICP Access Library that is available from Intel in
+ * order to operate.
+ */
+
+#include "icp_ocf.h"
+
+#define ICP_OCF_COMP_NAME "ICP_OCF"
+#define ICP_OCF_VER_MAIN (2)
+#define ICP_OCF_VER_MJR (1)
+#define ICP_OCF_VER_MNR (0)
+
+#define MAX_DEREG_RETRIES (100)
+#define DEFAULT_DEREG_RETRIES (10)
+#define DEFAULT_DEREG_DELAY_IN_JIFFIES (10)
+
+/* This defines the maximum number of sessions possible between OCF
+ and the OCF EP80579 Driver. If set to zero, there is no limit. */
+#define DEFAULT_OCF_TO_DRV_MAX_SESSION_COUNT (0)
+#define NUM_SUPPORTED_CAPABILITIES (21)
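+/* 15 symmetric algorithms, 5 asymmetric operations and the random number
+   generator registered in icp_ocfDrvInit() below */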
+
+/*Slab zone names*/
+#define ICP_SESSION_DATA_NAME "icp_ocf.SesDat"
+#define ICP_OP_DATA_NAME "icp_ocf.OpDat"
+#define ICP_DH_NAME "icp_ocf.DH"
+#define ICP_MODEXP_NAME "icp_ocf.ModExp"
+#define ICP_RSA_DECRYPT_NAME "icp_ocf.RSAdec"
+#define ICP_RSA_PKEY_NAME "icp_ocf.RSApk"
+#define ICP_DSA_SIGN_NAME "icp_ocf.DSAsg"
+#define ICP_DSA_VER_NAME "icp_ocf.DSAver"
+#define ICP_RAND_VAL_NAME "icp_ocf.DSArnd"
+#define ICP_FLAT_BUFF_NAME "icp_ocf.FB"
+
+/*Slabs zones*/
+icp_kmem_cache drvSessionData_zone = NULL;
+icp_kmem_cache drvOpData_zone = NULL;
+icp_kmem_cache drvDH_zone = NULL;
+icp_kmem_cache drvLnModExp_zone = NULL;
+icp_kmem_cache drvRSADecrypt_zone = NULL;
+icp_kmem_cache drvRSAPrivateKey_zone = NULL;
+icp_kmem_cache drvDSARSSign_zone = NULL;
+icp_kmem_cache drvDSARSSignKValue_zone = NULL;
+icp_kmem_cache drvDSAVerify_zone = NULL;
+
+/*Slab zones for flatbuffers and bufferlist*/
+icp_kmem_cache drvFlatBuffer_zone = NULL;
+
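+/* Name : icp_cache_null_check
+ *
+ * Description : Returns a non-zero value only if every slab cache above was
+ * created successfully, and zero if any creation failed.
+ */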
+static inline int icp_cache_null_check(void)
+{
+ return (drvSessionData_zone && drvOpData_zone
+ && drvDH_zone && drvLnModExp_zone && drvRSADecrypt_zone
+ && drvRSAPrivateKey_zone && drvDSARSSign_zone
+	    && drvDSARSSignKValue_zone
+ && drvDSAVerify_zone && drvFlatBuffer_zone);
+}
+
+/*Function to free all allocated slab caches before exiting the module*/
+static void icp_ocfDrvFreeCaches(void);
+
+int32_t icp_ocfDrvDriverId = INVALID_DRIVER_ID;
+
+/* Module parameter - gives the number of times LAC deregistration shall be
+ re-tried */
+int num_dereg_retries = DEFAULT_DEREG_RETRIES;
+
+/* Module parameter - gives the delay time in jiffies before another attempt
+   is made to deregister a LAC session */
+int dereg_retry_delay_in_jiffies = DEFAULT_DEREG_DELAY_IN_JIFFIES;
+
+/* Module parameter - gives the maximum number of sessions possible between
+ OCF and the OCF EP80579 Driver. If set to zero, there is no limit.*/
+int max_sessions = DEFAULT_OCF_TO_DRV_MAX_SESSION_COUNT;
+
+/* This is set when the module is removed from the system; no further
+   processing can take place once it is set */
+icp_atomic_t icp_ocfDrvIsExiting = ICP_ATOMIC_INIT(0);
+
+/* This is used to show how many LAC sessions were not deregistered */
+icp_atomic_t lac_session_failed_dereg_count = ICP_ATOMIC_INIT(0);
+
+/* This is used to track the number of registered sessions between OCF and
+ * the OCF EP80579 driver, when max_sessions is set to a value other than
+ * zero. This ensures that the max_sessions limit agreed between OCF and the
+ * driver matches the number of sessions registered with LAC */
+icp_atomic_t num_ocf_to_drv_registered_sessions = ICP_ATOMIC_INIT(0);
+
+/* Head of linked list used to store session data */
+icp_drvSessionListHead_t icp_ocfDrvGlobalSymListHead;
+icp_drvSessionListHead_t icp_ocfDrvGlobalSymListHead_FreeMemList;
+
+icp_spinlock_t icp_ocfDrvSymSessInfoListSpinlock;
+
+/*The pointer below is only used in Linux; FreeBSD uses the name to
+create its own variable*/
+icp_workqueue *icp_ocfDrvFreeLacSessionWorkQ = NULL;
+ICP_WORKQUEUE_DEFINE_THREAD(icp_ocfDrvFreeLacSessionWorkQ);
+
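+/* Default BufferList sizing information (number of flat buffers and meta
+   data size), filled in once at init time and reused when allocating the
+   meta data region that follows each icp_drvOpData allocation */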
+struct icp_drvBuffListInfo defBuffListInfo;
+
+/* Name : icp_ocfDrvInit
+ *
+ * Description : This function will register all the symmetric and asymmetric
+ * functionality that will be accelerated by the hardware. It will also
+ * get a unique driver ID from the OCF and initialise all slab caches
+ */
+ICP_MODULE_INIT_FUNC(icp_ocfDrvInit)
+{
+ int ocfStatus = 0;
+
+ IPRINTK("=== %s ver %d.%d.%d ===\n", ICP_OCF_COMP_NAME,
+ ICP_OCF_VER_MAIN, ICP_OCF_VER_MJR, ICP_OCF_VER_MNR);
+
+ if (MAX_DEREG_RETRIES < num_dereg_retries) {
+		EPRINTK("Session deregistration retry count set to greater "
+			"than %d\n", MAX_DEREG_RETRIES);
+		return icp_module_return_code(EINVAL);
+ }
+
+ /* Initialize and Start the Cryptographic component */
+ if (CPA_STATUS_SUCCESS !=
+ cpaCyStartInstance(CPA_INSTANCE_HANDLE_SINGLE)) {
+ EPRINTK("Failed to initialize and start the instance "
+ "of the Cryptographic component.\n");
+ return icp_module_return_code(EINVAL);
+ }
+
+ icp_spin_lock_init(&icp_ocfDrvSymSessInfoListSpinlock);
+
+ /* Set the default size of BufferList to allocate */
+ memset(&defBuffListInfo, 0, sizeof(struct icp_drvBuffListInfo));
+ if (ICP_OCF_DRV_STATUS_SUCCESS !=
+ icp_ocfDrvBufferListMemInfo(ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS,
+ &defBuffListInfo)) {
+ EPRINTK("Failed to get bufferlist memory info.\n");
+ return icp_module_return_code(ENOMEM);
+ }
+
+ /*Register OCF EP80579 Driver with OCF */
+ icp_ocfDrvDriverId = ICP_CRYPTO_GET_DRIVERID();
+
+ if (icp_ocfDrvDriverId < 0) {
+ EPRINTK("%s : ICP driver failed to register with OCF!\n",
+ __FUNCTION__);
+ return icp_module_return_code(ENODEV);
+ }
+
+ /*Create all the slab caches used by the OCF EP80579 Driver */
+ drvSessionData_zone =
+ ICP_CACHE_CREATE(ICP_SESSION_DATA_NAME, struct icp_drvSessionData);
+
+ /*
+ * Allocation of the OpData includes the allocation space for meta data.
+ * The memory after the opData structure is reserved for this meta data.
+ */
+ drvOpData_zone =
+ icp_kmem_cache_create(ICP_OP_DATA_NAME,
+ sizeof(struct icp_drvOpData) +
+ defBuffListInfo.metaSize,
+ ICP_KERNEL_CACHE_ALIGN,
+ ICP_KERNEL_CACHE_NOINIT);
+
+ drvDH_zone = ICP_CACHE_CREATE(ICP_DH_NAME, CpaCyDhPhase1KeyGenOpData);
+
+ drvLnModExp_zone =
+ ICP_CACHE_CREATE(ICP_MODEXP_NAME, CpaCyLnModExpOpData);
+
+ drvRSADecrypt_zone =
+ ICP_CACHE_CREATE(ICP_RSA_DECRYPT_NAME, CpaCyRsaDecryptOpData);
+
+ drvRSAPrivateKey_zone =
+ ICP_CACHE_CREATE(ICP_RSA_PKEY_NAME, CpaCyRsaPrivateKey);
+
+ drvDSARSSign_zone =
+ ICP_CACHE_CREATE(ICP_DSA_SIGN_NAME, CpaCyDsaRSSignOpData);
+
+ /*too awkward to use a macro here */
+ drvDSARSSignKValue_zone =
+ ICP_CACHE_CREATE(ICP_RAND_VAL_NAME,
+ DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES);
+
+ drvDSAVerify_zone =
+ ICP_CACHE_CREATE(ICP_DSA_VER_NAME, CpaCyDsaVerifyOpData);
+
+ drvFlatBuffer_zone =
+ ICP_CACHE_CREATE(ICP_FLAT_BUFF_NAME, CpaFlatBuffer);
+
+ if (0 == icp_cache_null_check()) {
+ icp_ocfDrvFreeCaches();
+ EPRINTK("%s() line %d: Not enough memory!\n",
+ __FUNCTION__, __LINE__);
+		return icp_module_return_code(ENOMEM);
+ }
+
+ /* Register the ICP symmetric crypto support. */
+ ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_NULL_CBC, ocfStatus);
+ ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_DES_CBC, ocfStatus);
+ ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_3DES_CBC, ocfStatus);
+ ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_AES_CBC, ocfStatus);
+ ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_ARC4, ocfStatus);
+ ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_MD5, ocfStatus);
+ ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_MD5_HMAC, ocfStatus);
+ ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA1, ocfStatus);
+ ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA1_HMAC, ocfStatus);
+ ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA2_256, ocfStatus);
+ ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA2_256_HMAC,
+ ocfStatus);
+ ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA2_384, ocfStatus);
+ ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA2_384_HMAC,
+ ocfStatus);
+ ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA2_512, ocfStatus);
+ ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA2_512_HMAC,
+ ocfStatus);
+
+ /* Register the ICP asymmetric algorithm support */
+ ICP_REG_ASYM_WITH_OCF(icp_ocfDrvDriverId, CRK_DH_COMPUTE_KEY,
+ ocfStatus);
+ ICP_REG_ASYM_WITH_OCF(icp_ocfDrvDriverId, CRK_MOD_EXP, ocfStatus);
+ ICP_REG_ASYM_WITH_OCF(icp_ocfDrvDriverId, CRK_MOD_EXP_CRT, ocfStatus);
+ ICP_REG_ASYM_WITH_OCF(icp_ocfDrvDriverId, CRK_DSA_SIGN, ocfStatus);
+ ICP_REG_ASYM_WITH_OCF(icp_ocfDrvDriverId, CRK_DSA_VERIFY, ocfStatus);
+
+ /* Register the ICP random number generator support */
+ ICP_REG_RAND_WITH_OCF(icp_ocfDrvDriverId,
+ icp_ocfDrvReadRandom, NULL, ocfStatus);
+
+ if (OCF_ZERO_FUNCTIONALITY_REGISTERED == ocfStatus) {
+ DPRINTK("%s: Failed to register any device capabilities\n",
+ __FUNCTION__);
+ icp_ocfDrvFreeCaches();
+ icp_ocfDrvDriverId = INVALID_DRIVER_ID;
+ return icp_module_return_code(ECANCELED);
+ }
+
+ DPRINTK("%s: Registered %d of %d device capabilities\n",
+ __FUNCTION__, ocfStatus, NUM_SUPPORTED_CAPABILITIES);
+
+ /*Session data linked list used during module exit */
+ ICP_INIT_LIST_HEAD(&icp_ocfDrvGlobalSymListHead);
+ ICP_INIT_LIST_HEAD(&icp_ocfDrvGlobalSymListHead_FreeMemList);
+
+ ICP_WORKQUEUE_CREATE(icp_ocfDrvFreeLacSessionWorkQ, "icpwq");
+ if (ICP_WORKQUEUE_NULL_CHECK(icp_ocfDrvFreeLacSessionWorkQ)) {
+ EPRINTK("%s: Failed to create single "
+ "thread workqueue\n", __FUNCTION__);
+ icp_ocfDrvFreeCaches();
+ icp_ocfDrvDriverId = INVALID_DRIVER_ID;
+ return icp_module_return_code(ENOMEM);
+ }
+
+ return icp_module_return_code(0);
+}
+
+/* Name : icp_ocfDrvExit
+ *
+ * Description : This function will deregister all the symmetric sessions
+ * registered with the LAC component. It will also deregister all symmetric
+ * and asymmetric functionality that can be accelerated by the hardware via OCF
+ * and random number generation if it is enabled.
+ */
+ICP_MODULE_EXIT_FUNC(icp_ocfDrvExit)
+{
+ CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+ struct icp_drvSessionData *sessionData = NULL;
+ struct icp_drvSessionData *tempSessionData = NULL;
+ int i, remaining_delay_time_in_jiffies = 0;
+
+	/* For FreeBSD the invariant macro below makes the function return */
+	/* EBUSY if any session that has been registered with LAC has not */
+	/* been deregistered. */
+	/* The Linux implementation is empty since it is purely to compensate */
+	/* for a limitation of the FreeBSD 7.1 Opencrypto framework. */
+
+ ICP_MODULE_EXIT_INV();
+
+ /* There is a possibility of a process or new session command being */
+ /* sent before this variable is incremented. The aim of this variable */
+ /* is to stop a loop of calls creating a deadlock situation which */
+ /* would prevent the driver from exiting. */
+ icp_atomic_set(&icp_ocfDrvIsExiting, 1);
+
+ /*Existing sessions will be routed to another driver after these calls */
+ crypto_unregister_all(icp_ocfDrvDriverId);
+ crypto_runregister_all(icp_ocfDrvDriverId);
+
+ if (ICP_WORKQUEUE_NULL_CHECK(icp_ocfDrvFreeLacSessionWorkQ)) {
+ DPRINTK("%s: workqueue already "
+ "destroyed, therefore module exit "
+			"function already called. Exiting.\n", __FUNCTION__);
+ return ICP_MODULE_EXIT_FUNC_RETURN_VAL;
+ }
+ /*If any sessions are waiting to be deregistered, do that. This also
+ flushes the work queue */
+ ICP_WORKQUEUE_DESTROY(icp_ocfDrvFreeLacSessionWorkQ);
+
+ /*ENTER CRITICAL SECTION */
+ icp_spin_lockbh_lock(&icp_ocfDrvSymSessInfoListSpinlock);
+
+ ICP_LIST_FOR_EACH_ENTRY_SAFE(tempSessionData, sessionData,
+ &icp_ocfDrvGlobalSymListHead, listNode) {
+ for (i = 0; i < num_dereg_retries; i++) {
+ /*No harm if bad input - LAC will handle error cases */
+ if (ICP_SESSION_RUNNING == tempSessionData->inUse) {
+ lacStatus =
+ cpaCySymRemoveSession
+ (CPA_INSTANCE_HANDLE_SINGLE,
+ tempSessionData->sessHandle);
+ if (CPA_STATUS_SUCCESS == lacStatus) {
+					/* Successfully deregistered */
+ break;
+ } else if (CPA_STATUS_RETRY != lacStatus) {
+ icp_atomic_inc
+ (&lac_session_failed_dereg_count);
+ break;
+ }
+
+				/*schedule_timeout returns the time left for completion if
+ * this task is set to TASK_INTERRUPTIBLE */
+ remaining_delay_time_in_jiffies =
+ dereg_retry_delay_in_jiffies;
+				while (0 < remaining_delay_time_in_jiffies) {
+ remaining_delay_time_in_jiffies =
+ icp_schedule_timeout
+ (&icp_ocfDrvSymSessInfoListSpinlock,
+ remaining_delay_time_in_jiffies);
+ }
+
+ DPRINTK
+				    ("%s(): Retry %d to deregister the session\n",
+ __FUNCTION__, i);
+ }
+ }
+
+ /*remove from current list */
+ ICP_LIST_DEL(tempSessionData, listNode);
+ /*add to free mem linked list */
+ ICP_LIST_ADD(tempSessionData,
+ &icp_ocfDrvGlobalSymListHead_FreeMemList,
+ listNode);
+
+ }
+
+ /*EXIT CRITICAL SECTION */
+ icp_spin_lockbh_unlock(&icp_ocfDrvSymSessInfoListSpinlock);
+
+ /*set back to initial values */
+ sessionData = NULL;
+ /*still have a reference in our list! */
+ tempSessionData = NULL;
+ /*free memory */
+
+ ICP_LIST_FOR_EACH_ENTRY_SAFE(tempSessionData, sessionData,
+ &icp_ocfDrvGlobalSymListHead_FreeMemList,
+ listNode) {
+
+ ICP_LIST_DEL(tempSessionData, listNode);
+ /* Free allocated CpaCySymSessionCtx */
+ if (NULL != tempSessionData->sessHandle) {
+ icp_kfree(tempSessionData->sessHandle);
+ }
+ memset(tempSessionData, 0, sizeof(struct icp_drvSessionData));
+ ICP_CACHE_FREE(drvSessionData_zone, tempSessionData);
+ }
+
+ if (0 != icp_atomic_read(&lac_session_failed_dereg_count)) {
+ DPRINTK("%s(): %d LAC sessions were not deregistered "
+ "correctly. This is not a clean exit! \n",
+ __FUNCTION__,
+ icp_atomic_read(&lac_session_failed_dereg_count));
+ }
+
+ icp_ocfDrvFreeCaches();
+ icp_ocfDrvDriverId = INVALID_DRIVER_ID;
+
+ icp_spin_lock_destroy(&icp_ocfDrvSymSessInfoListSpinlock);
+
+ /* Shutdown the Cryptographic component */
+ lacStatus = cpaCyStopInstance(CPA_INSTANCE_HANDLE_SINGLE);
+ if (CPA_STATUS_SUCCESS != lacStatus) {
+ DPRINTK("%s(): Failed to stop instance of the "
+ "Cryptographic component.(status == %d)\n",
+ __FUNCTION__, lacStatus);
+ }
+
+ return ICP_MODULE_EXIT_FUNC_RETURN_VAL;
+}
+
+/* Name : icp_ocfDrvFreeCaches
+ *
+ * Description : This function destroys all slab caches
+ */
+static void icp_ocfDrvFreeCaches(void)
+{
+ icp_atomic_set(&icp_ocfDrvIsExiting, 1);
+
+ /*Sym Zones */
+ ICP_CACHE_DESTROY(drvSessionData_zone);
+ ICP_CACHE_DESTROY(drvOpData_zone);
+
+ /*Asym zones */
+ ICP_CACHE_DESTROY(drvDH_zone);
+ ICP_CACHE_DESTROY(drvLnModExp_zone);
+ ICP_CACHE_DESTROY(drvRSADecrypt_zone);
+ ICP_CACHE_DESTROY(drvRSAPrivateKey_zone);
+ ICP_CACHE_DESTROY(drvDSARSSignKValue_zone);
+ ICP_CACHE_DESTROY(drvDSARSSign_zone);
+ ICP_CACHE_DESTROY(drvDSAVerify_zone);
+
+ /*FlatBuffer and BufferList Zones */
+ ICP_CACHE_DESTROY(drvFlatBuffer_zone);
+
+}
+
+/* Name : icp_ocfDrvDeregRetry
+ *
+ * Description : This function will try to farm the session deregistration
+ * off to a work queue. If it fails, nothing more can be done and it
+ * returns an error
+ */
+int icp_ocfDrvDeregRetry(CpaCySymSessionCtx sessionToDeregister)
+{
+ struct icp_ocfDrvFreeLacSession *workstore = NULL;
+
+ DPRINTK("%s(): Retry - Deregistering session (%p)\n",
+ __FUNCTION__, sessionToDeregister);
+
+ /*make sure the session is not available to be allocated during this
+ process */
+ icp_atomic_inc(&lac_session_failed_dereg_count);
+
+ /*Farm off to work queue */
+ workstore =
+ icp_kmalloc(sizeof(struct icp_ocfDrvFreeLacSession), ICP_M_NOWAIT);
+ if (NULL == workstore) {
+ DPRINTK("%s(): unable to free session - no memory available "
+ "for work queue\n", __FUNCTION__);
+ return ENOMEM;
+ }
+
+ workstore->sessionToDeregister = sessionToDeregister;
+
+ icp_init_work(&(workstore->work),
+ icp_ocfDrvDeferedFreeLacSessionTaskFn, workstore);
+
+ ICP_WORKQUEUE_ENQUEUE(icp_ocfDrvFreeLacSessionWorkQ,
+ &(workstore->work));
+
+ return ICP_OCF_DRV_STATUS_SUCCESS;
+
+}
+
+/* Name : icp_ocfDrvDeferedFreeLacSessionProcess
+ *
+ * Description : This function will retry (module input parameter)
+ * 'num_dereg_retries' times to deregister any symmetric session that receives
+ * a CPA_STATUS_RETRY message from the LAC component. This function runs in
+ * thread context because it is called from a worker thread
+ */
+void icp_ocfDrvDeferedFreeLacSessionProcess(void *arg)
+{
+ struct icp_ocfDrvFreeLacSession *workstore = NULL;
+ CpaCySymSessionCtx sessionToDeregister = NULL;
+ int i = 0;
+ int remaining_delay_time_in_jiffies = 0;
+ CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+
+ workstore = (struct icp_ocfDrvFreeLacSession *)arg;
+ if (NULL == workstore) {
+ DPRINTK("%s() function called with null parameter \n",
+ __FUNCTION__);
+ return;
+ }
+
+ sessionToDeregister = workstore->sessionToDeregister;
+ icp_kfree(workstore);
+
+ /*if exiting, give deregistration one more blast only */
+ if (icp_atomic_read(&icp_ocfDrvIsExiting) == CPA_TRUE) {
+ lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
+ sessionToDeregister);
+
+ if (lacStatus != CPA_STATUS_SUCCESS) {
+ DPRINTK("%s() Failed to Dereg LAC session %p "
+ "during module exit\n", __FUNCTION__,
+ sessionToDeregister);
+ return;
+ }
+
+ icp_atomic_dec(&lac_session_failed_dereg_count);
+ return;
+ }
+
+ for (i = 0; i <= num_dereg_retries; i++) {
+ lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
+ sessionToDeregister);
+
+ if (lacStatus == CPA_STATUS_SUCCESS) {
+ icp_atomic_dec(&lac_session_failed_dereg_count);
+ return;
+ }
+ if (lacStatus != CPA_STATUS_RETRY) {
+ DPRINTK("%s() Failed to deregister session - lacStatus "
+ " = %d", __FUNCTION__, lacStatus);
+ break;
+ }
+
+		/*schedule_timeout returns the time left for completion if this
+ task is set to TASK_INTERRUPTIBLE */
+ remaining_delay_time_in_jiffies = dereg_retry_delay_in_jiffies;
+ while (0 < remaining_delay_time_in_jiffies) {
+ remaining_delay_time_in_jiffies =
+ icp_schedule_timeout(NULL,
+ remaining_delay_time_in_jiffies);
+ }
+
+ }
+
+ DPRINTK("%s(): Unable to deregister session\n", __FUNCTION__);
+ DPRINTK("%s(): Number of unavailable LAC sessions = %d\n", __FUNCTION__,
+ icp_atomic_read(&lac_session_failed_dereg_count));
+}
+
+/* Name : icp_ocfDrvPtrAndLenToFlatBuffer
+ *
+ * Description : This function converts a "pointer and length" buffer
+ * structure to Fredericksburg Flat Buffer (CpaFlatBuffer) format.
+ *
+ * This function assumes that the data passed in are valid.
+ */
+inline void
+icp_ocfDrvPtrAndLenToFlatBuffer(void *pData, uint32_t len,
+ CpaFlatBuffer * pFlatBuffer)
+{
+ pFlatBuffer->pData = pData;
+ pFlatBuffer->dataLenInBytes = len;
+}
+
+/* Name : icp_ocfDrvPtrAndLenToBufferList
+ *
+ * Description : This function converts a "pointer and length" buffer
+ * structure to Fredericksburg Scatter/Gather Buffer (CpaBufferList) format.
+ *
+ * This function assumes that the data passed in are valid.
+ */
+inline void
+icp_ocfDrvPtrAndLenToBufferList(void *pDataIn, uint32_t length,
+ CpaBufferList * pBufferList)
+{
+ pBufferList->numBuffers = 1;
+ pBufferList->pBuffers->pData = pDataIn;
+ pBufferList->pBuffers->dataLenInBytes = length;
+}
+
+/* Name : icp_ocfDrvBufferListToPtrAndLen
+ *
+ * Description : This function converts Fredericksburg Scatter/Gather Buffer
+ * (CpaBufferList) format to a "pointer and length" buffer structure.
+ *
+ * This function assumes that the data passed in are valid.
+ */
+inline void
+icp_ocfDrvBufferListToPtrAndLen(CpaBufferList * pBufferList,
+ void **ppDataOut, uint32_t * pLength)
+{
+ *ppDataOut = pBufferList->pBuffers->pData;
+ *pLength = pBufferList->pBuffers->dataLenInBytes;
+}
+
+/* Name : icp_ocfDrvBufferListMemInfo
+ *
+ * Description : This function will set the number of flat buffers in the
+ * buffer list and the size of memory to allocate for the pPrivateMetaData
+ * member of the CpaBufferList.
+ */
+int
+icp_ocfDrvBufferListMemInfo(uint16_t numBuffers,
+ struct icp_drvBuffListInfo *buffListInfo)
+{
+ buffListInfo->numBuffers = numBuffers;
+
+ if (CPA_STATUS_SUCCESS !=
+ cpaCyBufferListGetMetaSize(CPA_INSTANCE_HANDLE_SINGLE,
+ buffListInfo->numBuffers,
+ &(buffListInfo->metaSize))) {
+ EPRINTK("%s() Failed to get buffer list meta size.\n",
+ __FUNCTION__);
+ return ICP_OCF_DRV_STATUS_FAIL;
+ }
+
+ return ICP_OCF_DRV_STATUS_SUCCESS;
+}
+
+/* Name : icp_ocfDrvFreeFlatBuffer
+ *
+ * Description : This function will deallocate a flat buffer.
+ */
+inline void icp_ocfDrvFreeFlatBuffer(CpaFlatBuffer * pFlatBuffer)
+{
+ if (pFlatBuffer != NULL) {
+ memset(pFlatBuffer, 0, sizeof(CpaFlatBuffer));
+ ICP_CACHE_FREE(drvFlatBuffer_zone, pFlatBuffer);
+ }
+}
+
+/* Name : icp_ocfDrvAllocMetaData
+ *
+ * Description : This function will allocate memory for the
+ * pPrivateMetaData member of CpaBufferList.
+ */
+inline int
+icp_ocfDrvAllocMetaData(CpaBufferList * pBufferList,
+ struct icp_drvOpData *pOpData)
+{
+ Cpa32U metaSize = 0;
+
+ if (pBufferList->numBuffers <= ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) {
+ uint8_t *pOpDataStartAddr = (uint8_t *) pOpData;
+
+ if (0 == defBuffListInfo.metaSize) {
+ pBufferList->pPrivateMetaData = NULL;
+ return ICP_OCF_DRV_STATUS_SUCCESS;
+ }
+ /*
+ * The meta data allocation has been included as part of the
+ * op data. It has been pre-allocated in memory just after the
+ * icp_drvOpData structure.
+ */
+ pBufferList->pPrivateMetaData = (void *)(pOpDataStartAddr +
+ sizeof(struct
+ icp_drvOpData));
+ } else {
+ if (CPA_STATUS_SUCCESS !=
+ cpaCyBufferListGetMetaSize(CPA_INSTANCE_HANDLE_SINGLE,
+ pBufferList->numBuffers,
+ &metaSize)) {
+ EPRINTK("%s() Failed to get buffer list meta size.\n",
+ __FUNCTION__);
+ return ICP_OCF_DRV_STATUS_FAIL;
+ }
+
+ if (0 == metaSize) {
+ pBufferList->pPrivateMetaData = NULL;
+ return ICP_OCF_DRV_STATUS_SUCCESS;
+ }
+
+ pBufferList->pPrivateMetaData =
+ icp_kmalloc(metaSize, ICP_M_NOWAIT);
+ }
+ if (NULL == pBufferList->pPrivateMetaData) {
+ EPRINTK("%s() Failed to allocate pPrivateMetaData.\n",
+ __FUNCTION__);
+ return ICP_OCF_DRV_STATUS_FAIL;
+ }
+
+ return ICP_OCF_DRV_STATUS_SUCCESS;
+}
+
+/* Name : icp_ocfDrvFreeMetaData
+ *
+ * Description : This function will deallocate pPrivateMetaData memory.
+ */
+inline void icp_ocfDrvFreeMetaData(CpaBufferList * pBufferList)
+{
+ if (NULL == pBufferList->pPrivateMetaData) {
+ return;
+ }
+
+ /*
+ * Only free the meta data if the BufferList has more than
+ * ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS number of buffers.
+ * Otherwise, the meta data shall be freed when the icp_drvOpData is
+ * freed.
+ */
+ if (ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS < pBufferList->numBuffers) {
+ icp_kfree(pBufferList->pPrivateMetaData);
+ }
+}
+
+/* Module declaration, init and exit functions */
+ICP_DECLARE_MODULE(icp_ocf, icp_ocfDrvInit, icp_ocfDrvExit);
+ICP_MODULE_DESCRIPTION("OCF Driver for Intel Quick Assist crypto acceleration");
+ICP_MODULE_VERSION(icp_ocf, ICP_OCF_VER_MJR);
+ICP_MODULE_LICENSE("Dual BSD/GPL");
+ICP_MODULE_AUTHOR("Intel");
+
+/* Module parameters */
+ICP_MODULE_PARAM_INT(icp_ocf, num_dereg_retries,
+ "Number of times to retry LAC Sym Session Deregistration. "
+ "Default 10, Max 100");
+ICP_MODULE_PARAM_INT(icp_ocf, dereg_retry_delay_in_jiffies, "Delay in jiffies "
+ "(added to a schedule() function call) before a LAC Sym "
+ "Session Dereg is retried. Default 10");
+ICP_MODULE_PARAM_INT(icp_ocf, max_sessions,
+ "This sets the maximum number of sessions "
+		     "between OCF and this driver. If this value is set to zero, "
+		     "max session count checking is disabled. Default is zero (0)");
+
+/* Module dependencies */
+#define MODULE_MIN_VER 1
+#define CRYPTO_MAX_VER 3
+#define LAC_MAX_VER 2
+
+ICP_MODULE_DEPEND(icp_ocf, crypto, MODULE_MIN_VER, MODULE_MIN_VER,
+ CRYPTO_MAX_VER);
+ICP_MODULE_DEPEND(icp_ocf, cryptodev, MODULE_MIN_VER, MODULE_MIN_VER,
+ CRYPTO_MAX_VER);
+ICP_MODULE_DEPEND(icp_ocf, icp_crypto, MODULE_MIN_VER, MODULE_MIN_VER,
+ LAC_MAX_VER);
diff --git a/target/linux/generic/files/crypto/ocf/ep80579/icp_ocf.h b/target/linux/generic/files/crypto/ocf/ep80579/icp_ocf.h
new file mode 100644
index 000000000..d9dde8740
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ep80579/icp_ocf.h
@@ -0,0 +1,376 @@
+/***************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * version: Security.L.1.0.2-229
+ *
+ ***************************************************************************/
+
+/*
+ * OCF driver header file for the Intel ICP processor.
+ */
+
+#ifndef ICP_OCF_H_
+#define ICP_OCF_H_
+
+#include <cpa.h>
+#include <cpa_cy_im.h>
+#include <cpa_cy_sym.h>
+#include <cpa_cy_rand.h>
+#include <cpa_cy_dh.h>
+#include <cpa_cy_rsa.h>
+#include <cpa_cy_ln.h>
+#include <cpa_cy_common.h>
+#include <cpa_cy_dsa.h>
+
+#include "icp_os.h"
+
+#define NUM_BITS_IN_BYTE (8)
+#define NUM_BITS_IN_BYTE_MINUS_ONE (NUM_BITS_IN_BYTE - 1)
+#define INVALID_DRIVER_ID (-1)
+#define RETURN_RAND_NUM_GEN_FAILED (-1)
+
+/*This is the max block cipher initialisation vector length*/
+#define MAX_IV_LEN_IN_BYTES (20)
+/*This is used to check whether the OCF-to-driver session limit has
+ been disabled*/
+#define NO_OCF_TO_DRV_MAX_SESSIONS (0)
+
+/*OCF values mapped here*/
+#define ICP_SHA1_DIGEST_SIZE_IN_BYTES (SHA1_HASH_LEN)
+#define ICP_SHA256_DIGEST_SIZE_IN_BYTES (SHA2_256_HASH_LEN)
+#define ICP_SHA384_DIGEST_SIZE_IN_BYTES (SHA2_384_HASH_LEN)
+#define ICP_SHA512_DIGEST_SIZE_IN_BYTES (SHA2_512_HASH_LEN)
+#define ICP_MD5_DIGEST_SIZE_IN_BYTES (MD5_HASH_LEN)
+#define ARC4_COUNTER_LEN (ARC4_BLOCK_LEN)
+
+#define OCF_REGISTRATION_STATUS_SUCCESS (0)
+#define OCF_ZERO_FUNCTIONALITY_REGISTERED (0)
+#define ICP_OCF_DRV_NO_CRYPTO_PROCESS_ERROR (0)
+#define ICP_OCF_DRV_STATUS_SUCCESS (0)
+#define ICP_OCF_DRV_STATUS_FAIL (1)
+
+/*Turn on/off debug options*/
+#define ICP_OCF_PRINT_DEBUG_MESSAGES (0)
+#define ICP_OCF_PRINT_KERN_ALERT (1)
+#define ICP_OCF_PRINT_KERN_ERRS (1)
+
+#if ICP_OCF_PRINT_DEBUG_MESSAGES == 1
+#define DPRINTK(args...) \
+{ \
+ ICP_IPRINTK(args); \
+}
+
+#else //ICP_OCF_PRINT_DEBUG_MESSAGES == 1
+
+#define DPRINTK(args...)
+
+#endif //ICP_OCF_PRINT_DEBUG_MESSAGES == 1
+
+#if ICP_OCF_PRINT_KERN_ALERT == 1
+#define APRINTK(args...) \
+{ \
+ ICP_APRINTK(args); \
+}
+
+#else //ICP_OCF_PRINT_KERN_ALERT == 1
+
+#define APRINTK(args...)
+
+#endif //ICP_OCF_PRINT_KERN_ALERT == 1
+
+#if ICP_OCF_PRINT_KERN_ERRS == 1
+#define EPRINTK(args...) \
+{ \
+ ICP_EPRINTK(args); \
+}
+
+#else //ICP_OCF_PRINT_KERN_ERRS == 1
+
+#define EPRINTK(args...)
+
+#endif //ICP_OCF_PRINT_KERN_ERRS == 1
+
+#define IPRINTK(args...) \
+{ \
+ ICP_IPRINTK(args); \
+}
+
+/*DSA Prime Q size in bytes (as defined in the standard) */
+#define DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES (20)
+
+#define BITS_TO_BYTES(bytes, bits) \
+ bytes = (bits + NUM_BITS_IN_BYTE_MINUS_ONE) / NUM_BITS_IN_BYTE
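A quick worked illustration of the rounding that BITS_TO_BYTES() performs; this fragment is editorial and not part of the patch, it simply exercises the macro defined above.

/* Illustrative only: BITS_TO_BYTES() rounds a bit count up to whole bytes. */
uint32_t bytes;
BITS_TO_BYTES(bytes, 1024);	/* 1024 bits -> 128 bytes (exact multiple) */
BITS_TO_BYTES(bytes, 1025);	/* 1025 bits -> 129 bytes (rounded up)     */
BITS_TO_BYTES(bytes, 7);	/*    7 bits ->   1 byte  (rounded up)     */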
+
+typedef enum {
+ ICP_OCF_DRV_ALG_CIPHER = 0,
+ ICP_OCF_DRV_ALG_HASH
+} icp_ocf_drv_alg_type_t;
+
+typedef ICP_LIST_HEAD(icp_drvSessionListHead_s,
+ icp_drvSessionData) icp_drvSessionListHead_t;
+
+/*Values used to reduce the chances of perform requests being executed against
+deregistered sessions (for which the slab page has been reclaimed).
+This is not a complete fix - since page frames are reclaimed from a slab, one
+cannot rely on that memory not being re-used by another application.*/
+typedef enum {
+ ICP_SESSION_INITIALISED = 0x5C5C5C,
+ ICP_SESSION_RUNNING = 0x005C00,
+ ICP_SESSION_DEREGISTERED = 0xC5C5C5
+} usage_derisk;
+
+/* This struct is required for deferred session
+ deregistration as a work queue function can
+ only have one argument*/
+struct icp_ocfDrvFreeLacSession {
+ CpaCySymSessionCtx sessionToDeregister;
+ icp_workstruct work;
+};
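The wrapper struct above exists because a work-queue callback receives only a single argument, so any extra state has to travel inside the structure that contains the work item. Below is a minimal sketch of the same pattern written against the stock Linux workqueue API rather than the driver's icp_* wrappers; the names are hypothetical and not taken from the patch.

#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_dereg {
	void *session_to_deregister;	/* stands in for CpaCySymSessionCtx */
	struct work_struct work;
};

static void deferred_dereg_fn(struct work_struct *work)
{
	/* Recover the enclosing struct from the single work_struct argument. */
	struct deferred_dereg *req =
		container_of(work, struct deferred_dereg, work);

	/* ... retry the deregistration using req->session_to_deregister ... */
	kfree(req);
}

static int schedule_deferred_dereg(void *session)
{
	struct deferred_dereg *req = kmalloc(sizeof(*req), GFP_ATOMIC);

	if (!req)
		return -ENOMEM;
	req->session_to_deregister = session;
	INIT_WORK(&req->work, deferred_dereg_fn);
	schedule_work(&req->work);
	return 0;
}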
+
+/*
+This is the OCF<->OCF_DRV session object:
+
+1.listNode
+ The first member is a listNode. These session objects are added to a linked
+ list in order to make it easier to remove them all at session exit time.
+
+2.inUse
+ The second member is used to give the session object state and derisk the
+ possibility of OCF batch calls executing against a deregistered session (as
+ described above).
+
+3.sessHandle
+ The third member is a LAC<->OCF_DRV session handle (initialised with the first
+ perform request for that session).
+
+4.lacSessCtx
+ The fourth is the LAC session context. All the parameters for this structure
+ are only known when the first perform request for this session occurs. That is
+ why the OCF EP80579 Driver only registers a new LAC session at perform time.
+*/
+struct icp_drvSessionData {
+ ICP_LIST_ENTRY(icp_drvSessionData) listNode;
+ usage_derisk inUse;
+ CpaCySymSessionCtx sessHandle;
+ CpaCySymSessionSetupData lacSessCtx;
+};
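Putting the comment block and struct above together, the lifecycle of one session object runs INITIALISED -> RUNNING -> DEREGISTERED. The sketch below mirrors how icp_sym.c drives these helpers, but it is illustrative only, with locking and error handling omitted.

struct icp_drvSessionData *sd;

sd = icp_kmem_cache_zalloc(drvSessionData_zone, ICP_M_NOWAIT);
ICP_LIST_ADD(sd, &icp_ocfDrvGlobalSymListHead, listNode);
sd->inUse = ICP_SESSION_INITIALISED;	/* icp_ocfDrvNewSession()              */

sd->inUse = ICP_SESSION_RUNNING;	/* first perform registers the LAC ctx */

sd->inUse = ICP_SESSION_DEREGISTERED;	/* teardown marks the object stale     */
ICP_LIST_DEL(sd, listNode);
ICP_CACHE_FREE(drvSessionData_zone, sd);	/* back to the slab            */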
+
+/* These are all defined in icp_common.c */
+extern icp_atomic_t lac_session_failed_dereg_count;
+extern icp_atomic_t icp_ocfDrvIsExiting;
+extern icp_atomic_t num_ocf_to_drv_registered_sessions;
+
+extern int32_t icp_ocfDrvDriverId;
+
+extern icp_drvSessionListHead_t icp_ocfDrvGlobalSymListHead;
+extern icp_drvSessionListHead_t icp_ocfDrvGlobalSymListHead_FreeMemList;
+extern icp_workqueue *icp_ocfDrvFreeLacSessionWorkQ;
+extern icp_spinlock_t icp_ocfDrvSymSessInfoListSpinlock;
+
+/*Slab zones for symmetric functionality, instantiated in icp_common.c*/
+extern icp_kmem_cache drvSessionData_zone;
+extern icp_kmem_cache drvOpData_zone;
+
+/*Slab zones for asymmetric functionality, instantiated in icp_common.c*/
+extern icp_kmem_cache drvDH_zone;
+extern icp_kmem_cache drvLnModExp_zone;
+extern icp_kmem_cache drvRSADecrypt_zone;
+extern icp_kmem_cache drvRSAPrivateKey_zone;
+extern icp_kmem_cache drvDSARSSign_zone;
+extern icp_kmem_cache drvDSARSSignKValue_zone;
+extern icp_kmem_cache drvDSAVerify_zone;
+
+/* Module parameters defined in icp_common.c*/
+
+/* Module parameter - gives the number of times LAC deregistration shall be
+   retried */
+extern int num_dereg_retries;
+
+/* Module parameter - gives the delay time in jiffies before another attempt
+   is made to deregister a LAC session */
+extern int dereg_retry_delay_in_jiffies;
+
+/* Module parameter - gives the maximum number of sessions possible between
+ OCF and the OCF EP80579 Driver. If set to zero, there is no limit.*/
+extern int max_sessions;
+
+/*Slab zones for flatbuffers and bufferlist*/
+extern icp_kmem_cache drvFlatBuffer_zone;
+
+#define ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS (16)
+
+struct icp_drvBuffListInfo {
+ Cpa16U numBuffers;
+ Cpa32U metaSize;
+ Cpa32U metaOffset;
+ Cpa32U buffListSize;
+};
+
+extern struct icp_drvBuffListInfo defBuffListInfo;
+
+/* This struct is used to keep a reference to the relevant node in the list
+ of sessionData structs, to the buffer type required by OCF and to the OCF
+ provided crp struct that needs to be returned. All this info is needed in
+ the callback function.*/
+struct icp_drvOpData {
+ CpaCySymOpData lacOpData;
+ uint32_t digestSizeInBytes;
+ struct cryptop *crp;
+ uint8_t bufferType;
+ uint8_t ivData[MAX_IV_LEN_IN_BYTES];
+ uint16_t numBufferListArray;
+ CpaBufferList srcBuffer;
+ CpaFlatBuffer bufferListArray[ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS];
+ CpaBoolean verifyResult;
+};
+
+/* Create a new session between OCF and this driver*/
+int icp_ocfDrvNewSession(icp_device_t dev, uint32_t * sild,
+ struct cryptoini *cri);
+
+/* Free a session between this driver and the Quick Assist Framework*/
+int icp_ocfDrvFreeLACSession(icp_device_t dev, uint64_t sid);
+
+/* Defer freeing a Quick Assist session*/
+void icp_ocfDrvDeferedFreeLacSessionProcess(void *arg);
+
+/* Process OCF cryptographic request for a symmetric algorithm*/
+int icp_ocfDrvSymProcess(icp_device_t dev, struct cryptop *crp, int hint);
+
+/* Process OCF cryptographic request for an asymmetric algorithm*/
+int icp_ocfDrvPkeProcess(icp_device_t dev, struct cryptkop *krp, int hint);
+
+/* Populate a buffer with random data*/
+int icp_ocfDrvReadRandom(void *arg, uint32_t * buf, int maxwords);
+
+/* Retry Quick Assist session deregistration*/
+int icp_ocfDrvDeregRetry(CpaCySymSessionCtx sessionToDeregister);
+
+/* Convert an OS scatter gather list to a CPA buffer list*/
+int icp_ocfDrvPacketBuffToBufferList(icp_packet_buffer_t * pPacketBuffer,
+ CpaBufferList * bufferList);
+
+/* Convert a CPA buffer list to an OS scatter gather list*/
+int icp_ocfDrvBufferListToPacketBuff(CpaBufferList * bufferList,
+ icp_packet_buffer_t ** pPacketBuffer);
+
+/* Get the number of buffers in an OS scatter gather list*/
+uint16_t icp_ocfDrvGetPacketBuffFrags(icp_packet_buffer_t * pPacketBuffer);
+
+/* Convert a single OS buffer to a CPA Flat Buffer*/
+void icp_ocfDrvSinglePacketBuffToFlatBuffer(icp_packet_buffer_t * pPacketBuffer,
+ CpaFlatBuffer * pFlatBuffer);
+
+/* Add pointer and length to a CPA Flat Buffer structure*/
+void icp_ocfDrvPtrAndLenToFlatBuffer(void *pData, uint32_t len,
+ CpaFlatBuffer * pFlatBuffer);
+
+/* Convert pointer and length values to a CPA buffer list*/
+void icp_ocfDrvPtrAndLenToBufferList(void *pDataIn, uint32_t length,
+ CpaBufferList * pBufferList);
+
+/* Convert a CPA buffer list to pointer and length values*/
+void icp_ocfDrvBufferListToPtrAndLen(CpaBufferList * pBufferList,
+ void **ppDataOut, uint32_t * pLength);
+
+/* Set the number of flat buffers in bufferlist and the size of memory
+ to allocate for the pPrivateMetaData member of the CpaBufferList.*/
+int icp_ocfDrvBufferListMemInfo(uint16_t numBuffers,
+ struct icp_drvBuffListInfo *buffListInfo);
+
+/* Find pointer position of the digest within an OS scatter gather list*/
+uint8_t *icp_ocfDrvPacketBufferDigestPointerFind(struct icp_drvOpData
+ *drvOpData,
+ int offsetInBytes,
+ uint32_t digestSizeInBytes);
+
+/*This top level function is used to find a pointer to where a digest is
+ stored/needs to be inserted. */
+uint8_t *icp_ocfDrvDigestPointerFind(struct icp_drvOpData *drvOpData,
+ struct cryptodesc *crp_desc);
+
+/* Free a CPA flat buffer*/
+void icp_ocfDrvFreeFlatBuffer(CpaFlatBuffer * pFlatBuffer);
+
+/* This function will allocate memory for the pPrivateMetaData
+ member of CpaBufferList. */
+int icp_ocfDrvAllocMetaData(CpaBufferList * pBufferList,
+ struct icp_drvOpData *pOpData);
+
+/* Free data allocated for the pPrivateMetaData
+ member of CpaBufferList.*/
+void icp_ocfDrvFreeMetaData(CpaBufferList * pBufferList);
+
+#define ICP_CACHE_CREATE(cache_ID, cache_name) \
+ icp_kmem_cache_create(cache_ID, sizeof(cache_name),ICP_KERNEL_CACHE_ALIGN,\
+ ICP_KERNEL_CACHE_NOINIT)
+
+#define ICP_CACHE_FREE(args...) \
+ icp_kmem_cache_free (args)
+
+#define ICP_CACHE_DESTROY(slab_zone)\
+{\
+ if(NULL != slab_zone){\
+ icp_kmem_cache_destroy(slab_zone);\
+ slab_zone = NULL;\
+ }\
+}
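A sketch of how the cache helpers above pair up over a module's lifetime. The real create/destroy calls live in icp_common.c, which is not part of this hunk, so the cache name string used here is an assumption.

/* Module init: carve out a slab zone sized for the session object. */
drvSessionData_zone = ICP_CACHE_CREATE("drvSessionData",
				       struct icp_drvSessionData);

/* Per-object alloc/free then uses icp_kmem_cache_zalloc()/ICP_CACHE_FREE(). */

/* Module exit: ICP_CACHE_DESTROY() is NULL-safe and resets the pointer. */
ICP_CACHE_DESTROY(drvSessionData_zone);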
+
+#endif
+/* ICP_OCF_H_ */
diff --git a/target/linux/generic/files/crypto/ocf/ep80579/icp_sym.c b/target/linux/generic/files/crypto/ocf/ep80579/icp_sym.c
new file mode 100644
index 000000000..e1c71484a
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ep80579/icp_sym.c
@@ -0,0 +1,1153 @@
+/***************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * version: Security.L.1.0.2-229
+ *
+ ***************************************************************************/
+/*
+ * An OCF module that uses the API for Intel® QuickAssist Technology to do the
+ * cryptography.
+ *
+ * This driver requires the ICP Access Library that is available from Intel in
+ * order to operate.
+ */
+
+#include "icp_ocf.h"
+
+/*This is the callback function for all symmetric cryptographic processes.
+ Its main functionality is to free the driver crypto operation structure and
+ to call back to OCF*/
+static void
+icp_ocfDrvSymCallBack(void *callbackTag,
+ CpaStatus status,
+ const CpaCySymOp operationType,
+ void *pOpData,
+ CpaBufferList * pDstBuffer, CpaBoolean verifyResult);
+
+/*This function is used to extract crypto processing information from the OCF
+ inputs, so that it may be passed on to LAC*/
+static int
+icp_ocfDrvProcessDataSetup(struct icp_drvOpData *drvOpData,
+ struct cryptodesc *crp_desc);
+
+/*This function checks whether the crp_desc argument pertains to a digest or a
+ cipher operation*/
+static int icp_ocfDrvAlgCheck(struct cryptodesc *crp_desc);
+
+/*This function copies all the passed in session context information and stores
+ it in a LAC context structure*/
+static int
+icp_ocfDrvAlgorithmSetup(struct cryptoini *cri,
+ CpaCySymSessionSetupData * lacSessCtx);
+
+/*This function is used to free an OCF->OCF_DRV session object*/
+static void icp_ocfDrvFreeOCFSession(struct icp_drvSessionData *sessionData);
+
+/*max IOV buffers supported in a UIO structure*/
+#define NUM_IOV_SUPPORTED (1)
+
+/* Name : icp_ocfDrvSymCallBack
+ *
+ * Description : When this function is called, it signifies that the LAC
+ * component has completed the relevant symmetric operation.
+ *
+ * Notes : The callbackTag is a pointer to an icp_drvOpData. This memory
+ * object was passed to LAC for the cryptographic processing and contains all
+ * the relevant information for cleaning up buffer handles etc. so that the
+ * OCF EP80579 Driver portion of this crypto operation can be fully completed.
+ */
+static void
+icp_ocfDrvSymCallBack(void *callbackTag,
+ CpaStatus status,
+ const CpaCySymOp operationType,
+ void *pOpData,
+ CpaBufferList * pDstBuffer, CpaBoolean verifyResult)
+{
+ struct cryptop *crp = NULL;
+ struct icp_drvOpData *temp_drvOpData =
+ (struct icp_drvOpData *)callbackTag;
+ uint64_t *tempBasePtr = NULL;
+ uint32_t tempLen = 0;
+
+ if (NULL == temp_drvOpData) {
+ DPRINTK("%s(): The callback from the LAC component"
+ " has failed due to Null userOpaque data"
+ "(status == %d).\n", __FUNCTION__, status);
+ DPRINTK("%s(): Unable to call OCF back! \n", __FUNCTION__);
+ return;
+ }
+
+ crp = temp_drvOpData->crp;
+ crp->crp_etype = ICP_OCF_DRV_NO_CRYPTO_PROCESS_ERROR;
+
+ if (NULL == pOpData) {
+ DPRINTK("%s(): The callback from the LAC component"
+ " has failed due to Null Symmetric Op data"
+ "(status == %d).\n", __FUNCTION__, status);
+ crp->crp_etype = ECANCELED;
+ crypto_done(crp);
+ return;
+ }
+
+ if (NULL == pDstBuffer) {
+ DPRINTK("%s(): The callback from the LAC component"
+ " has failed due to Null Dst Bufferlist data"
+ "(status == %d).\n", __FUNCTION__, status);
+ crp->crp_etype = ECANCELED;
+ crypto_done(crp);
+ return;
+ }
+
+ if (CPA_STATUS_SUCCESS == status) {
+
+ if (temp_drvOpData->bufferType == ICP_CRYPTO_F_PACKET_BUF) {
+ if (ICP_OCF_DRV_STATUS_SUCCESS !=
+ icp_ocfDrvBufferListToPacketBuff(pDstBuffer,
+ (icp_packet_buffer_t
+ **)
+ & (crp->crp_buf))) {
+ EPRINTK("%s(): BufferList to SkBuff "
+ "conversion error.\n", __FUNCTION__);
+ crp->crp_etype = EPERM;
+ }
+ } else {
+ icp_ocfDrvBufferListToPtrAndLen(pDstBuffer,
+ (void **)&tempBasePtr,
+ &tempLen);
+ crp->crp_olen = (int)tempLen;
+ }
+
+ } else {
+ DPRINTK("%s(): The callback from the LAC component has failed"
+ "(status == %d).\n", __FUNCTION__, status);
+
+ crp->crp_etype = ECANCELED;
+ }
+
+ if (temp_drvOpData->numBufferListArray >
+ ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) {
+ icp_kfree(pDstBuffer->pBuffers);
+ }
+ icp_ocfDrvFreeMetaData(pDstBuffer);
+ ICP_CACHE_FREE(drvOpData_zone, temp_drvOpData);
+
+ /* Invoke the OCF callback function */
+ crypto_done(crp);
+
+ return;
+}
+
+/* Name : icp_ocfDrvNewSession
+ *
+ * Description : This function will create a new Driver<->OCF session
+ *
+ * Notes : LAC session registration happens during the first perform call.
+ * That is the first time we know all information about a given session.
+ */
+int icp_ocfDrvNewSession(icp_device_t dev, uint32_t * sid,
+ struct cryptoini *cri)
+{
+ struct icp_drvSessionData *sessionData = NULL;
+ uint32_t delete_session = 0;
+
+ /* The SID passed in should be our driver ID. We can return the */
+ /* local ID (LID) which is a unique identifier which we can use */
+ /* to differentiate between the encrypt/decrypt LAC session handles */
+ if (NULL == sid) {
+ EPRINTK("%s(): Invalid input parameters - NULL sid.\n",
+ __FUNCTION__);
+ return EINVAL;
+ }
+
+ if (NULL == cri) {
+ EPRINTK("%s(): Invalid input parameters - NULL cryptoini.\n",
+ __FUNCTION__);
+ return EINVAL;
+ }
+
+ if (icp_ocfDrvDriverId != *sid) {
+ EPRINTK("%s(): Invalid input parameters - bad driver ID\n",
+ __FUNCTION__);
+ EPRINTK("\t sid = 0x08%p \n \t cri = 0x08%p \n", sid, cri);
+ return EINVAL;
+ }
+
+ sessionData = icp_kmem_cache_zalloc(drvSessionData_zone, ICP_M_NOWAIT);
+ if (NULL == sessionData) {
+ DPRINTK("%s():No memory for Session Data\n", __FUNCTION__);
+ return ENOMEM;
+ }
+
+ /*ENTER CRITICAL SECTION */
+ icp_spin_lockbh_lock(&icp_ocfDrvSymSessInfoListSpinlock);
+ /*put this check in the spinlock so no new sessions can be added to the
+ linked list when we are exiting */
+ if (CPA_TRUE == icp_atomic_read(&icp_ocfDrvIsExiting)) {
+ delete_session++;
+
+ } else if (NO_OCF_TO_DRV_MAX_SESSIONS != max_sessions) {
+ if (icp_atomic_read(&num_ocf_to_drv_registered_sessions) >=
+ (max_sessions -
+ icp_atomic_read(&lac_session_failed_dereg_count))) {
+ delete_session++;
+ } else {
+ icp_atomic_inc(&num_ocf_to_drv_registered_sessions);
+ /* Add to session data linked list */
+ ICP_LIST_ADD(sessionData, &icp_ocfDrvGlobalSymListHead,
+ listNode);
+ }
+
+ } else if (NO_OCF_TO_DRV_MAX_SESSIONS == max_sessions) {
+ ICP_LIST_ADD(sessionData, &icp_ocfDrvGlobalSymListHead,
+ listNode);
+ }
+
+ sessionData->inUse = ICP_SESSION_INITIALISED;
+
+ /*EXIT CRITICAL SECTION */
+ icp_spin_lockbh_unlock(&icp_ocfDrvSymSessInfoListSpinlock);
+
+ if (delete_session) {
+ DPRINTK("%s():No Session handles available\n", __FUNCTION__);
+ ICP_CACHE_FREE(drvSessionData_zone, sessionData);
+ return EPERM;
+ }
+
+ if (ICP_OCF_DRV_STATUS_SUCCESS !=
+ icp_ocfDrvAlgorithmSetup(cri, &(sessionData->lacSessCtx))) {
+ DPRINTK("%s():algorithm not supported\n", __FUNCTION__);
+ icp_ocfDrvFreeOCFSession(sessionData);
+ return EINVAL;
+ }
+
+ if (cri->cri_next) {
+ if (cri->cri_next->cri_next != NULL) {
+ DPRINTK("%s():only two chained algorithms supported\n",
+ __FUNCTION__);
+ icp_ocfDrvFreeOCFSession(sessionData);
+ return EPERM;
+ }
+
+ if (ICP_OCF_DRV_STATUS_SUCCESS !=
+ icp_ocfDrvAlgorithmSetup(cri->cri_next,
+ &(sessionData->lacSessCtx))) {
+ DPRINTK("%s():second algorithm not supported\n",
+ __FUNCTION__);
+ icp_ocfDrvFreeOCFSession(sessionData);
+ return EINVAL;
+ }
+
+ sessionData->lacSessCtx.symOperation =
+ CPA_CY_SYM_OP_ALGORITHM_CHAINING;
+ }
+
+ *sid = (uint32_t) sessionData;
+
+ return ICP_OCF_DRV_STATUS_SUCCESS;
+}
+
+/* Name : icp_ocfDrvAlgorithmSetup
+ *
+ * Description : This function builds the session context data from the
+ * information supplied through OCF. However, the algorithm chain order and
+ * whether the session is Encrypt/Decrypt can only be determined at perform
+ * time, so the session is registered with LAC at that time.
+ */
+static int
+icp_ocfDrvAlgorithmSetup(struct cryptoini *cri,
+ CpaCySymSessionSetupData * lacSessCtx)
+{
+
+ lacSessCtx->sessionPriority = CPA_CY_PRIORITY_NORMAL;
+
+ switch (cri->cri_alg) {
+
+ case CRYPTO_NULL_CBC:
+ DPRINTK("%s(): NULL CBC\n", __FUNCTION__);
+ lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
+ lacSessCtx->cipherSetupData.cipherAlgorithm =
+ CPA_CY_SYM_CIPHER_NULL;
+ lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
+ cri->cri_klen / NUM_BITS_IN_BYTE;
+ lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
+ break;
+
+ case CRYPTO_DES_CBC:
+ DPRINTK("%s(): DES CBC\n", __FUNCTION__);
+ lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
+ lacSessCtx->cipherSetupData.cipherAlgorithm =
+ CPA_CY_SYM_CIPHER_DES_CBC;
+ lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
+ cri->cri_klen / NUM_BITS_IN_BYTE;
+ lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
+ break;
+
+ case CRYPTO_3DES_CBC:
+ DPRINTK("%s(): 3DES CBC\n", __FUNCTION__);
+ lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
+ lacSessCtx->cipherSetupData.cipherAlgorithm =
+ CPA_CY_SYM_CIPHER_3DES_CBC;
+ lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
+ cri->cri_klen / NUM_BITS_IN_BYTE;
+ lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
+ break;
+
+ case CRYPTO_AES_CBC:
+ DPRINTK("%s(): AES CBC\n", __FUNCTION__);
+ lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
+ lacSessCtx->cipherSetupData.cipherAlgorithm =
+ CPA_CY_SYM_CIPHER_AES_CBC;
+ lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
+ cri->cri_klen / NUM_BITS_IN_BYTE;
+ lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
+ break;
+
+ case CRYPTO_ARC4:
+ DPRINTK("%s(): ARC4\n", __FUNCTION__);
+ lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
+ lacSessCtx->cipherSetupData.cipherAlgorithm =
+ CPA_CY_SYM_CIPHER_ARC4;
+ lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
+ cri->cri_klen / NUM_BITS_IN_BYTE;
+ lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
+ break;
+
+ case CRYPTO_SHA1:
+ DPRINTK("%s(): SHA1\n", __FUNCTION__);
+ lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+ lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
+ lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
+ lacSessCtx->hashSetupData.digestResultLenInBytes =
+ (cri->cri_mlen ?
+ cri->cri_mlen : ICP_SHA1_DIGEST_SIZE_IN_BYTES);
+
+ break;
+
+ case CRYPTO_SHA1_HMAC:
+ DPRINTK("%s(): SHA1_HMAC\n", __FUNCTION__);
+ lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+ lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
+ lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
+ lacSessCtx->hashSetupData.digestResultLenInBytes =
+ (cri->cri_mlen ?
+ cri->cri_mlen : ICP_SHA1_DIGEST_SIZE_IN_BYTES);
+ lacSessCtx->hashSetupData.authModeSetupData.authKey =
+ cri->cri_key;
+ lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
+ cri->cri_klen / NUM_BITS_IN_BYTE;
+ lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
+
+ break;
+
+ case CRYPTO_SHA2_256:
+ DPRINTK("%s(): SHA256\n", __FUNCTION__);
+ lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+ lacSessCtx->hashSetupData.hashAlgorithm =
+ CPA_CY_SYM_HASH_SHA256;
+ lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
+ lacSessCtx->hashSetupData.digestResultLenInBytes =
+ (cri->cri_mlen ?
+ cri->cri_mlen : ICP_SHA256_DIGEST_SIZE_IN_BYTES);
+
+ break;
+
+ case CRYPTO_SHA2_256_HMAC:
+ DPRINTK("%s(): SHA256_HMAC\n", __FUNCTION__);
+ lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+ lacSessCtx->hashSetupData.hashAlgorithm =
+ CPA_CY_SYM_HASH_SHA256;
+ lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
+ lacSessCtx->hashSetupData.digestResultLenInBytes =
+ (cri->cri_mlen ?
+ cri->cri_mlen : ICP_SHA256_DIGEST_SIZE_IN_BYTES);
+ lacSessCtx->hashSetupData.authModeSetupData.authKey =
+ cri->cri_key;
+ lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
+ cri->cri_klen / NUM_BITS_IN_BYTE;
+ lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
+
+ break;
+
+ case CRYPTO_SHA2_384:
+ DPRINTK("%s(): SHA384\n", __FUNCTION__);
+ lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+ lacSessCtx->hashSetupData.hashAlgorithm =
+ CPA_CY_SYM_HASH_SHA384;
+ lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
+ lacSessCtx->hashSetupData.digestResultLenInBytes =
+ (cri->cri_mlen ?
+ cri->cri_mlen : ICP_SHA384_DIGEST_SIZE_IN_BYTES);
+
+ break;
+
+ case CRYPTO_SHA2_384_HMAC:
+ DPRINTK("%s(): SHA384_HMAC\n", __FUNCTION__);
+ lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+ lacSessCtx->hashSetupData.hashAlgorithm =
+ CPA_CY_SYM_HASH_SHA384;
+ lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
+ lacSessCtx->hashSetupData.digestResultLenInBytes =
+ (cri->cri_mlen ?
+ cri->cri_mlen : ICP_SHA384_DIGEST_SIZE_IN_BYTES);
+ lacSessCtx->hashSetupData.authModeSetupData.authKey =
+ cri->cri_key;
+ lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
+ cri->cri_klen / NUM_BITS_IN_BYTE;
+ lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
+
+ break;
+
+ case CRYPTO_SHA2_512:
+ DPRINTK("%s(): SHA512\n", __FUNCTION__);
+ lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+ lacSessCtx->hashSetupData.hashAlgorithm =
+ CPA_CY_SYM_HASH_SHA512;
+ lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
+ lacSessCtx->hashSetupData.digestResultLenInBytes =
+ (cri->cri_mlen ?
+ cri->cri_mlen : ICP_SHA512_DIGEST_SIZE_IN_BYTES);
+
+ break;
+
+ case CRYPTO_SHA2_512_HMAC:
+ DPRINTK("%s(): SHA512_HMAC\n", __FUNCTION__);
+ lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+ lacSessCtx->hashSetupData.hashAlgorithm =
+ CPA_CY_SYM_HASH_SHA512;
+ lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
+ lacSessCtx->hashSetupData.digestResultLenInBytes =
+ (cri->cri_mlen ?
+ cri->cri_mlen : ICP_SHA512_DIGEST_SIZE_IN_BYTES);
+ lacSessCtx->hashSetupData.authModeSetupData.authKey =
+ cri->cri_key;
+ lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
+ cri->cri_klen / NUM_BITS_IN_BYTE;
+ lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
+
+ break;
+
+ case CRYPTO_MD5:
+ DPRINTK("%s(): MD5\n", __FUNCTION__);
+ lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+ lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5;
+ lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
+ lacSessCtx->hashSetupData.digestResultLenInBytes =
+ (cri->cri_mlen ?
+ cri->cri_mlen : ICP_MD5_DIGEST_SIZE_IN_BYTES);
+
+ break;
+
+ case CRYPTO_MD5_HMAC:
+ DPRINTK("%s(): MD5_HMAC\n", __FUNCTION__);
+ lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+ lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5;
+ lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
+ lacSessCtx->hashSetupData.digestResultLenInBytes =
+ (cri->cri_mlen ?
+ cri->cri_mlen : ICP_MD5_DIGEST_SIZE_IN_BYTES);
+ lacSessCtx->hashSetupData.authModeSetupData.authKey =
+ cri->cri_key;
+ lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
+ cri->cri_klen / NUM_BITS_IN_BYTE;
+ lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
+
+ break;
+
+ default:
+ DPRINTK("%s(): ALG Setup FAIL\n", __FUNCTION__);
+ return ICP_OCF_DRV_STATUS_FAIL;
+ }
+
+ return ICP_OCF_DRV_STATUS_SUCCESS;
+}
+
+/* Name : icp_ocfDrvFreeOCFSession
+ *
+ * Description : This function deletes all existing Session data representing
+ * the Cryptographic session established between OCF and this driver. This
+ * also includes freeing the memory allocated for the session context. The
+ * session object is also removed from the session linked list.
+ */
+static void icp_ocfDrvFreeOCFSession(struct icp_drvSessionData *sessionData)
+{
+
+ sessionData->inUse = ICP_SESSION_DEREGISTERED;
+
+ /*ENTER CRITICAL SECTION */
+ icp_spin_lockbh_lock(&icp_ocfDrvSymSessInfoListSpinlock);
+
+ if (CPA_TRUE == icp_atomic_read(&icp_ocfDrvIsExiting)) {
+ /*If the Driver is exiting, allow that process to
+ handle any deletions */
+ /*EXIT CRITICAL SECTION */
+ icp_spin_lockbh_unlock(&icp_ocfDrvSymSessInfoListSpinlock);
+ return;
+ }
+
+ icp_atomic_dec(&num_ocf_to_drv_registered_sessions);
+
+ ICP_LIST_DEL(sessionData, listNode);
+
+ /*EXIT CRITICAL SECTION */
+ icp_spin_lockbh_unlock(&icp_ocfDrvSymSessInfoListSpinlock);
+
+ if (NULL != sessionData->sessHandle) {
+ icp_kfree(sessionData->sessHandle);
+ }
+ ICP_CACHE_FREE(drvSessionData_zone, sessionData);
+}
+
+/* Name : icp_ocfDrvFreeLACSession
+ *
+ * Description : This attempts to deregister a LAC session. If it fails, the
+ * deregistration retry function is called.
+ */
+int icp_ocfDrvFreeLACSession(icp_device_t dev, uint64_t sid)
+{
+ CpaCySymSessionCtx sessionToDeregister = NULL;
+ struct icp_drvSessionData *sessionData = NULL;
+ CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+ int retval = 0;
+
+ sessionData = (struct icp_drvSessionData *)CRYPTO_SESID2LID(sid);
+ if (NULL == sessionData) {
+ EPRINTK("%s(): OCF Free session called with Null Session ID.\n",
+ __FUNCTION__);
+ return EINVAL;
+ }
+
+ sessionToDeregister = sessionData->sessHandle;
+
+ if ((ICP_SESSION_INITIALISED != sessionData->inUse) &&
+ (ICP_SESSION_RUNNING != sessionData->inUse) &&
+ (ICP_SESSION_DEREGISTERED != sessionData->inUse)) {
+ DPRINTK("%s() Session not initialised.\n", __FUNCTION__);
+ return EINVAL;
+ }
+
+ if (ICP_SESSION_RUNNING == sessionData->inUse) {
+ lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
+ sessionToDeregister);
+ if (CPA_STATUS_RETRY == lacStatus) {
+ if (ICP_OCF_DRV_STATUS_SUCCESS !=
+ icp_ocfDrvDeregRetry(&sessionToDeregister)) {
+ /* the retry function increments the
+ dereg failed count */
+ DPRINTK("%s(): LAC failed to deregister the "
+ "session. (localSessionId= %p)\n",
+ __FUNCTION__, sessionToDeregister);
+ retval = EPERM;
+ }
+
+ } else if (CPA_STATUS_SUCCESS != lacStatus) {
+ DPRINTK("%s(): LAC failed to deregister the session. "
+ "localSessionId= %p, lacStatus = %d\n",
+ __FUNCTION__, sessionToDeregister, lacStatus);
+ icp_atomic_inc(&lac_session_failed_dereg_count);
+ retval = EPERM;
+ }
+ } else {
+ DPRINTK("%s() Session not registered with LAC.\n",
+ __FUNCTION__);
+ }
+
+ icp_ocfDrvFreeOCFSession(sessionData);
+ return retval;
+
+}
+
+/* Name : icp_ocfDrvAlgCheck
+ *
+ * Description : This function checks whether the cryptodesc argument pertains
+ * to a cipher or a hash operation
+ */
+static int icp_ocfDrvAlgCheck(struct cryptodesc *crp_desc)
+{
+
+ if (crp_desc->crd_alg == CRYPTO_3DES_CBC ||
+ crp_desc->crd_alg == CRYPTO_AES_CBC ||
+ crp_desc->crd_alg == CRYPTO_DES_CBC ||
+ crp_desc->crd_alg == CRYPTO_NULL_CBC ||
+ crp_desc->crd_alg == CRYPTO_ARC4) {
+ return ICP_OCF_DRV_ALG_CIPHER;
+ }
+
+ return ICP_OCF_DRV_ALG_HASH;
+}
+
+/* Name : icp_ocfDrvSymProcess
+ *
+ * Description : This function will map symmetric functionality calls from OCF
+ * to the LAC API. It will also allocate memory to store the session context.
+ *
+ * Notes: If it is the first perform call for a given session, then a LAC
+ * session is registered. After the session is registered, no checks as
+ * to whether session parameters have changed (e.g. alg chain order) are
+ * done.
+ */
+int icp_ocfDrvSymProcess(icp_device_t dev, struct cryptop *crp, int hint)
+{
+ struct icp_drvSessionData *sessionData = NULL;
+ struct icp_drvOpData *drvOpData = NULL;
+ CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+ Cpa32U sessionCtxSizeInBytes = 0;
+
+ if (NULL == crp) {
+ DPRINTK("%s(): Invalid input parameters, cryptop is NULL\n",
+ __FUNCTION__);
+ return EINVAL;
+ }
+
+ if (NULL == crp->crp_desc) {
+ DPRINTK("%s(): Invalid input parameters, no crp_desc attached "
+ "to crp\n", __FUNCTION__);
+ crp->crp_etype = EINVAL;
+ return EINVAL;
+ }
+
+ if (NULL == crp->crp_buf) {
+ DPRINTK("%s(): Invalid input parameters, no buffer attached "
+ "to crp\n", __FUNCTION__);
+ crp->crp_etype = EINVAL;
+ return EINVAL;
+ }
+
+ if (CPA_TRUE == icp_atomic_read(&icp_ocfDrvIsExiting)) {
+ crp->crp_etype = EFAULT;
+ return EFAULT;
+ }
+
+ sessionData = (struct icp_drvSessionData *)
+ (CRYPTO_SESID2LID(crp->crp_sid));
+ if (NULL == sessionData) {
+ DPRINTK("%s(): Invalid input parameters, Null Session ID \n",
+ __FUNCTION__);
+ crp->crp_etype = EINVAL;
+ return EINVAL;
+ }
+
+/*If we get a request against a deregistered session, cancel the operation*/
+ if (ICP_SESSION_DEREGISTERED == sessionData->inUse) {
+ DPRINTK("%s(): Session ID %d was deregistered \n",
+ __FUNCTION__, (int)(CRYPTO_SESID2LID(crp->crp_sid)));
+ crp->crp_etype = EFAULT;
+ return EFAULT;
+ }
+
+/*If none of the session states are set, then the session structure was either
+ not initialised properly or we are reading from a freed memory area (possible
+ due to OCF batch mode not removing queued requests against deregistered
+ sessions)*/
+ if (ICP_SESSION_INITIALISED != sessionData->inUse &&
+ ICP_SESSION_RUNNING != sessionData->inUse) {
+ DPRINTK("%s(): Session - ID %d - not properly initialised or "
+ "memory freed back to the kernel \n",
+ __FUNCTION__, (int)(CRYPTO_SESID2LID(crp->crp_sid)));
+ crp->crp_etype = EINVAL;
+ return EINVAL;
+ }
+
+ /*For the below checks, remember error checking is already done in LAC.
+ We're not validating inputs subsequent to registration */
+ if (sessionData->inUse == ICP_SESSION_INITIALISED) {
+ DPRINTK("%s(): Initialising session\n", __FUNCTION__);
+
+ if (NULL != crp->crp_desc->crd_next) {
+ if (ICP_OCF_DRV_ALG_CIPHER ==
+ icp_ocfDrvAlgCheck(crp->crp_desc)) {
+
+ sessionData->lacSessCtx.algChainOrder =
+ CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
+
+ if (crp->crp_desc->crd_flags & CRD_F_ENCRYPT) {
+ sessionData->lacSessCtx.cipherSetupData.
+ cipherDirection =
+ CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
+ } else {
+ sessionData->lacSessCtx.cipherSetupData.
+ cipherDirection =
+ CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
+ }
+ } else {
+ sessionData->lacSessCtx.algChainOrder =
+ CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
+
+ if (crp->crp_desc->crd_next->crd_flags &
+ CRD_F_ENCRYPT) {
+ sessionData->lacSessCtx.cipherSetupData.
+ cipherDirection =
+ CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
+ } else {
+ sessionData->lacSessCtx.cipherSetupData.
+ cipherDirection =
+ CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
+ }
+
+ }
+
+ } else if (ICP_OCF_DRV_ALG_CIPHER ==
+ icp_ocfDrvAlgCheck(crp->crp_desc)) {
+ if (crp->crp_desc->crd_flags & CRD_F_ENCRYPT) {
+ sessionData->lacSessCtx.cipherSetupData.
+ cipherDirection =
+ CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
+ } else {
+ sessionData->lacSessCtx.cipherSetupData.
+ cipherDirection =
+ CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
+ }
+
+ }
+
+ /*No action required for standalone Auth here */
+
+ /* Allocate memory for SymSessionCtx before the Session Registration */
+ lacStatus =
+ cpaCySymSessionCtxGetSize(CPA_INSTANCE_HANDLE_SINGLE,
+ &(sessionData->lacSessCtx),
+ &sessionCtxSizeInBytes);
+ if (CPA_STATUS_SUCCESS != lacStatus) {
+ EPRINTK("%s(): cpaCySymSessionCtxGetSize failed - %d\n",
+ __FUNCTION__, lacStatus);
+ crp->crp_etype = EINVAL;
+ return EINVAL;
+ }
+ sessionData->sessHandle =
+ icp_kmalloc(sessionCtxSizeInBytes, ICP_M_NOWAIT);
+ if (NULL == sessionData->sessHandle) {
+ EPRINTK
+ ("%s(): Failed to get memory for SymSessionCtx\n",
+ __FUNCTION__);
+ crp->crp_etype = ENOMEM;
+ return ENOMEM;
+ }
+
+ lacStatus = cpaCySymInitSession(CPA_INSTANCE_HANDLE_SINGLE,
+ icp_ocfDrvSymCallBack,
+ &(sessionData->lacSessCtx),
+ sessionData->sessHandle);
+
+ if (CPA_STATUS_SUCCESS != lacStatus) {
+ EPRINTK("%s(): cpaCySymInitSession failed -%d \n",
+ __FUNCTION__, lacStatus);
+ crp->crp_etype = EFAULT;
+ return EFAULT;
+ }
+
+ sessionData->inUse = ICP_SESSION_RUNNING;
+ }
+
+ drvOpData = icp_kmem_cache_zalloc(drvOpData_zone, ICP_M_NOWAIT);
+ if (NULL == drvOpData) {
+ EPRINTK("%s():Failed to get memory for drvOpData\n",
+ __FUNCTION__);
+ crp->crp_etype = ENOMEM;
+ return ENOMEM;
+ }
+
+ drvOpData->lacOpData.pSessionCtx = sessionData->sessHandle;
+ drvOpData->digestSizeInBytes = sessionData->lacSessCtx.hashSetupData.
+ digestResultLenInBytes;
+ drvOpData->crp = crp;
+
+ /* Set the default buffer list array memory allocation */
+ drvOpData->srcBuffer.pBuffers = drvOpData->bufferListArray;
+ drvOpData->numBufferListArray = ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS;
+
+ if (ICP_OCF_DRV_STATUS_SUCCESS !=
+ icp_ocfDrvProcessDataSetup(drvOpData, drvOpData->crp->crp_desc)) {
+ crp->crp_etype = EINVAL;
+ goto err;
+ }
+
+ if (drvOpData->crp->crp_desc->crd_next != NULL) {
+ if (icp_ocfDrvProcessDataSetup(drvOpData, drvOpData->crp->
+ crp_desc->crd_next)) {
+ crp->crp_etype = EINVAL;
+ goto err;
+ }
+
+ }
+
+ /*
+	 * Allocate buffer list array memory if the number of data fragments is
+	 * more than the default (ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) and has
+	 * not been calculated already
+ */
+ if (crp->crp_flags & ICP_CRYPTO_F_PACKET_BUF) {
+ if (NULL == drvOpData->lacOpData.pDigestResult) {
+ drvOpData->numBufferListArray =
+ icp_ocfDrvGetPacketBuffFrags((icp_packet_buffer_t *)
+ crp->crp_buf);
+ }
+
+ if (ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS <
+ drvOpData->numBufferListArray) {
+ DPRINTK("%s() numBufferListArray more than default\n",
+ __FUNCTION__);
+ drvOpData->srcBuffer.pBuffers = NULL;
+ drvOpData->srcBuffer.pBuffers =
+ icp_kmalloc(drvOpData->numBufferListArray *
+ sizeof(CpaFlatBuffer), ICP_M_NOWAIT);
+ if (NULL == drvOpData->srcBuffer.pBuffers) {
+ EPRINTK("%s() Failed to get memory for "
+ "pBuffers\n", __FUNCTION__);
+ ICP_CACHE_FREE(drvOpData_zone, drvOpData);
+ crp->crp_etype = ENOMEM;
+ return ENOMEM;
+ }
+ }
+ }
+
+ /*
+ * Check the type of buffer structure we got and convert it into
+ * CpaBufferList format.
+ */
+ if (crp->crp_flags & ICP_CRYPTO_F_PACKET_BUF) {
+ if (ICP_OCF_DRV_STATUS_SUCCESS !=
+ icp_ocfDrvPacketBuffToBufferList((icp_packet_buffer_t *)
+ crp->crp_buf,
+ &(drvOpData->srcBuffer))) {
+ EPRINTK("%s():Failed to translate from packet buffer "
+ "to bufferlist\n", __FUNCTION__);
+ crp->crp_etype = EINVAL;
+ goto err;
+ }
+
+ drvOpData->bufferType = ICP_CRYPTO_F_PACKET_BUF;
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ /* OCF only supports IOV of one entry. */
+ if (NUM_IOV_SUPPORTED ==
+ ((struct uio *)(crp->crp_buf))->uio_iovcnt) {
+
+ icp_ocfDrvPtrAndLenToBufferList(((struct uio *)(crp->
+ crp_buf))->
+ uio_iov[0].iov_base,
+ ((struct uio *)(crp->
+ crp_buf))->
+ uio_iov[0].iov_len,
+ &(drvOpData->
+ srcBuffer));
+
+ drvOpData->bufferType = CRYPTO_F_IOV;
+
+ } else {
+ DPRINTK("%s():Unable to handle IOVs with lengths of "
+ "greater than one!\n", __FUNCTION__);
+ crp->crp_etype = EINVAL;
+ goto err;
+ }
+
+ } else {
+ icp_ocfDrvPtrAndLenToBufferList(crp->crp_buf,
+ crp->crp_ilen,
+ &(drvOpData->srcBuffer));
+
+ drvOpData->bufferType = CRYPTO_BUF_CONTIG;
+ }
+
+ /* Allocate srcBuffer's private meta data */
+ if (ICP_OCF_DRV_STATUS_SUCCESS !=
+ icp_ocfDrvAllocMetaData(&(drvOpData->srcBuffer), drvOpData)) {
+ EPRINTK("%s() icp_ocfDrvAllocMetaData failed\n", __FUNCTION__);
+ memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
+ crp->crp_etype = EINVAL;
+ goto err;
+ }
+
+ /* Perform "in-place" crypto operation */
+ lacStatus = cpaCySymPerformOp(CPA_INSTANCE_HANDLE_SINGLE,
+ (void *)drvOpData,
+ &(drvOpData->lacOpData),
+ &(drvOpData->srcBuffer),
+ &(drvOpData->srcBuffer),
+ &(drvOpData->verifyResult));
+ if (CPA_STATUS_RETRY == lacStatus) {
+ DPRINTK("%s(): cpaCySymPerformOp retry, lacStatus = %d\n",
+ __FUNCTION__, lacStatus);
+ memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
+ crp->crp_etype = ERESTART;
+ goto err;
+ }
+ if (CPA_STATUS_SUCCESS != lacStatus) {
+ EPRINTK("%s(): cpaCySymPerformOp failed, lacStatus = %d\n",
+ __FUNCTION__, lacStatus);
+ memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
+ crp->crp_etype = EINVAL;
+ goto err;
+ }
+
+ return 0; //OCF success status value
+
+ err:
+ if (drvOpData->numBufferListArray > ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) {
+ icp_kfree(drvOpData->srcBuffer.pBuffers);
+ }
+ icp_ocfDrvFreeMetaData(&(drvOpData->srcBuffer));
+ ICP_CACHE_FREE(drvOpData_zone, drvOpData);
+
+ return crp->crp_etype;
+}
+
+/* Name : icp_ocfDrvProcessDataSetup
+ *
+ * Description : This function will set up all the cryptographic operation data
+ * that is required by LAC to execute the operation.
+ */
+static int icp_ocfDrvProcessDataSetup(struct icp_drvOpData *drvOpData,
+ struct cryptodesc *crp_desc)
+{
+ CpaCyRandGenOpData randGenOpData;
+ CpaFlatBuffer randData;
+
+ drvOpData->lacOpData.packetType = CPA_CY_SYM_PACKET_TYPE_FULL;
+
+ /* Convert from the cryptop to the ICP LAC crypto parameters */
+ switch (crp_desc->crd_alg) {
+ case CRYPTO_NULL_CBC:
+ drvOpData->lacOpData.
+ cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
+ drvOpData->lacOpData.
+ messageLenToCipherInBytes = crp_desc->crd_len;
+ drvOpData->verifyResult = CPA_FALSE;
+ drvOpData->lacOpData.ivLenInBytes = NULL_BLOCK_LEN;
+ break;
+ case CRYPTO_DES_CBC:
+ drvOpData->lacOpData.
+ cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
+ drvOpData->lacOpData.
+ messageLenToCipherInBytes = crp_desc->crd_len;
+ drvOpData->verifyResult = CPA_FALSE;
+ drvOpData->lacOpData.ivLenInBytes = DES_BLOCK_LEN;
+ break;
+ case CRYPTO_3DES_CBC:
+ drvOpData->lacOpData.
+ cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
+ drvOpData->lacOpData.
+ messageLenToCipherInBytes = crp_desc->crd_len;
+ drvOpData->verifyResult = CPA_FALSE;
+ drvOpData->lacOpData.ivLenInBytes = DES3_BLOCK_LEN;
+ break;
+ case CRYPTO_ARC4:
+ drvOpData->lacOpData.
+ cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
+ drvOpData->lacOpData.
+ messageLenToCipherInBytes = crp_desc->crd_len;
+ drvOpData->verifyResult = CPA_FALSE;
+ drvOpData->lacOpData.ivLenInBytes = ARC4_COUNTER_LEN;
+ break;
+ case CRYPTO_AES_CBC:
+ drvOpData->lacOpData.
+ cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
+ drvOpData->lacOpData.
+ messageLenToCipherInBytes = crp_desc->crd_len;
+ drvOpData->verifyResult = CPA_FALSE;
+ drvOpData->lacOpData.ivLenInBytes = RIJNDAEL128_BLOCK_LEN;
+ break;
+ case CRYPTO_SHA1:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512:
+ case CRYPTO_SHA2_512_HMAC:
+ case CRYPTO_MD5:
+ case CRYPTO_MD5_HMAC:
+ drvOpData->lacOpData.
+ hashStartSrcOffsetInBytes = crp_desc->crd_skip;
+ drvOpData->lacOpData.
+ messageLenToHashInBytes = crp_desc->crd_len;
+ drvOpData->lacOpData.
+ pDigestResult =
+ icp_ocfDrvDigestPointerFind(drvOpData, crp_desc);
+
+ if (NULL == drvOpData->lacOpData.pDigestResult) {
+ DPRINTK("%s(): ERROR - could not calculate "
+ "Digest Result memory address\n", __FUNCTION__);
+ return ICP_OCF_DRV_STATUS_FAIL;
+ }
+
+ drvOpData->lacOpData.digestVerify = CPA_FALSE;
+ break;
+ default:
+ DPRINTK("%s(): Crypto process error - algorithm not "
+ "found \n", __FUNCTION__);
+ return ICP_OCF_DRV_STATUS_FAIL;
+ }
+
+ /* Figure out what the IV is supposed to be */
+ if ((crp_desc->crd_alg == CRYPTO_DES_CBC) ||
+ (crp_desc->crd_alg == CRYPTO_3DES_CBC) ||
+ (crp_desc->crd_alg == CRYPTO_AES_CBC)) {
+ /*ARC4 doesn't use an IV */
+ if (crp_desc->crd_flags & CRD_F_IV_EXPLICIT) {
+ /* Explicit IV provided to OCF */
+ drvOpData->lacOpData.pIv = crp_desc->crd_iv;
+ } else {
+ /* IV is not explicitly provided to OCF */
+
+ /* Point the LAC OP Data IV pointer to our allocated
+ storage location for this session. */
+ drvOpData->lacOpData.pIv = drvOpData->ivData;
+
+ if ((crp_desc->crd_flags & CRD_F_ENCRYPT) &&
+ ((crp_desc->crd_flags & CRD_F_IV_PRESENT) == 0)) {
+
+ /* Encrypting - need to create IV */
+ randGenOpData.generateBits = CPA_TRUE;
+ randGenOpData.lenInBytes = MAX_IV_LEN_IN_BYTES;
+
+ icp_ocfDrvPtrAndLenToFlatBuffer((Cpa8U *)
+ drvOpData->
+ ivData,
+ MAX_IV_LEN_IN_BYTES,
+ &randData);
+
+ if (CPA_STATUS_SUCCESS !=
+ cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
+ NULL, NULL,
+ &randGenOpData, &randData)) {
+ DPRINTK("%s(): ERROR - Failed to"
+ " generate"
+ " Initialisation Vector\n",
+ __FUNCTION__);
+ return ICP_OCF_DRV_STATUS_FAIL;
+ }
+
+ crypto_copyback(drvOpData->crp->
+ crp_flags,
+ drvOpData->crp->crp_buf,
+ crp_desc->crd_inject,
+ drvOpData->lacOpData.
+ ivLenInBytes,
+ (caddr_t) (drvOpData->lacOpData.
+ pIv));
+ } else {
+ /* Reading IV from buffer */
+ crypto_copydata(drvOpData->crp->
+ crp_flags,
+ drvOpData->crp->crp_buf,
+ crp_desc->crd_inject,
+ drvOpData->lacOpData.
+ ivLenInBytes,
+ (caddr_t) (drvOpData->lacOpData.
+ pIv));
+ }
+
+ }
+
+ }
+
+ return ICP_OCF_DRV_STATUS_SUCCESS;
+}
+
+/* Name : icp_ocfDrvDigestPointerFind
+ *
+ * Description : This function is used to find the memory address where the
+ * digest information shall be stored. Input buffer types are an skbuff, iov
+ * or flat buffer. The address is found using the buffer data start address and
+ * an offset.
+ *
+ * Note: In the case of a linux skbuff, the digest address may exist within
+ * a memory space linked to from the start buffer. These linked memory spaces
+ * must be traversed by the data length offset in order to find the digest start
+ * address. Whether there is enough space for the digest must also be checked.
+ */
+uint8_t *icp_ocfDrvDigestPointerFind(struct icp_drvOpData * drvOpData,
+ struct cryptodesc * crp_desc)
+{
+
+ int offsetInBytes = crp_desc->crd_inject;
+ uint32_t digestSizeInBytes = drvOpData->digestSizeInBytes;
+ uint8_t *flat_buffer_base = NULL;
+ int flat_buffer_length = 0;
+
+ if (drvOpData->crp->crp_flags & ICP_CRYPTO_F_PACKET_BUF) {
+
+ return icp_ocfDrvPacketBufferDigestPointerFind(drvOpData,
+ offsetInBytes,
+ digestSizeInBytes);
+
+ } else {
+ /* IOV or flat buffer */
+ if (drvOpData->crp->crp_flags & CRYPTO_F_IOV) {
+ /*single IOV check has already been done */
+ flat_buffer_base = ((struct uio *)
+ (drvOpData->crp->crp_buf))->
+ uio_iov[0].iov_base;
+ flat_buffer_length = ((struct uio *)
+ (drvOpData->crp->crp_buf))->
+ uio_iov[0].iov_len;
+ } else {
+ flat_buffer_base = (uint8_t *) drvOpData->crp->crp_buf;
+ flat_buffer_length = drvOpData->crp->crp_ilen;
+ }
+
+ if (flat_buffer_length < (offsetInBytes + digestSizeInBytes)) {
+ DPRINTK("%s() Not enough space for Digest "
+ "(IOV/Flat Buffer) \n", __FUNCTION__);
+ return NULL;
+ } else {
+ return (uint8_t *) (flat_buffer_base + offsetInBytes);
+ }
+ }
+ DPRINTK("%s() Should not reach this point\n", __FUNCTION__);
+ return NULL;
+}
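The IOV/flat-buffer branch above reduces to a bounds-checked offset calculation. A standalone, hypothetical restatement of just that arithmetic (not part of the patch):

static uint8_t *digest_ptr_in_flat_buffer(uint8_t *base, int buf_len,
					  int offset, uint32_t digest_len)
{
	/* The digest sits offset bytes into the buffer, provided it fits. */
	if (buf_len < (int)(offset + digest_len))
		return NULL;
	return base + offset;
}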
diff --git a/target/linux/generic/files/crypto/ocf/ep80579/linux_2.6_kernel_space.mk b/target/linux/generic/files/crypto/ocf/ep80579/linux_2.6_kernel_space.mk
new file mode 100644
index 000000000..96afa9a45
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ep80579/linux_2.6_kernel_space.mk
@@ -0,0 +1,69 @@
+###################
+# @par
+# This file is provided under a dual BSD/GPLv2 license. When using or
+# redistributing this file, you may do so under either license.
+#
+# GPL LICENSE SUMMARY
+#
+# Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# The full GNU General Public License is included in this distribution
+# in the file called LICENSE.GPL.
+#
+# Contact Information:
+# Intel Corporation
+#
+# BSD LICENSE
+#
+# Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#
+# version: Security.L.1.0.130
+###################
+
+#specific include directories in kernel space
+INCLUDES+=#e.g. -I$(OSAL_DIR)/include \
+
+# Extra flags specific to kernel space, e.g. include paths or debug flags.
+# For example, to add an include path: EXTRA_CFLAGS += -I$(src)/../include
+EXTRA_CFLAGS += $(INCLUDES) -O2 -Wall
+EXTRA_LDFLAGS +=-whole-archive
+
diff --git a/target/linux/generic/files/crypto/ocf/hifn/Makefile b/target/linux/generic/files/crypto/ocf/hifn/Makefile
new file mode 100644
index 000000000..163fed054
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/hifn/Makefile
@@ -0,0 +1,13 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_HIFN) += hifn7751.o
+obj-$(CONFIG_OCF_HIFNHIPP) += hifnHIPP.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
+
diff --git a/target/linux/generic/files/crypto/ocf/hifn/hifn7751.c b/target/linux/generic/files/crypto/ocf/hifn/hifn7751.c
new file mode 100644
index 000000000..d554f16f4
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/hifn/hifn7751.c
@@ -0,0 +1,2954 @@
+/* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */
+
+/*-
+ * Invertex AEON / Hifn 7751 driver
+ * Copyright (c) 1999 Invertex Inc. All rights reserved.
+ * Copyright (c) 1999 Theo de Raadt
+ * Copyright (c) 2000-2001 Network Security Technologies, Inc.
+ * http://www.netsec.net
+ * Copyright (c) 2003 Hifn Inc.
+ *
+ * This driver is based on a previous driver by Invertex, for which they
+ * requested: Please send any comments, feedback, bug-fixes, or feature
+ * requests to software@invertex.com.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ *
+__FBSDID("$FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.40 2007/03/21 03:42:49 sam Exp $");
+ */
+
+/*
+ * Driver for various Hifn encryption processors.
+ */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <asm/io.h>
+
+#include <cryptodev.h>
+#include <uio.h>
+#include <hifn/hifn7751reg.h>
+#include <hifn/hifn7751var.h>
+
+#if 1
+#define DPRINTF(a...) if (hifn_debug) { \
+ printk("%s: ", sc ? \
+ device_get_nameunit(sc->sc_dev) : "hifn"); \
+ printk(a); \
+ } else
+#else
+#define DPRINTF(a...)
+#endif
+
+static inline int
+pci_get_revid(struct pci_dev *dev)
+{
+ u8 rid = 0;
+ pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
+ return rid;
+}
+
+static struct hifn_stats hifnstats;
+
+#define debug hifn_debug
+int hifn_debug = 0;
+module_param(hifn_debug, int, 0644);
+MODULE_PARM_DESC(hifn_debug, "Enable debug");
+
+int hifn_maxbatch = 1;
+module_param(hifn_maxbatch, int, 0644);
+MODULE_PARM_DESC(hifn_maxbatch, "max ops to batch w/o interrupt");
+
+int hifn_cache_linesize = 0x10;
+module_param(hifn_cache_linesize, int, 0444);
+MODULE_PARM_DESC(hifn_cache_linesize, "PCI config cache line size");
+
+#ifdef MODULE_PARM
+char *hifn_pllconfig = NULL;
+MODULE_PARM(hifn_pllconfig, "s");
+#else
+char hifn_pllconfig[32]; /* This setting is RO after loading */
+module_param_string(hifn_pllconfig, hifn_pllconfig, 32, 0444);
+#endif
+MODULE_PARM_DESC(hifn_pllconfig, "PLL config, ie., pci66, ext33, ...");
+
+#ifdef HIFN_VULCANDEV
+#include <sys/conf.h>
+#include <sys/uio.h>
+
+static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
+#endif
+
+/*
+ * Prototypes and count for the pci_device structure
+ */
+static int hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void hifn_remove(struct pci_dev *dev);
+
+static int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
+static int hifn_freesession(device_t, u_int64_t);
+static int hifn_process(device_t, struct cryptop *, int);
+
+static device_method_t hifn_methods = {
+ /* crypto device methods */
+ DEVMETHOD(cryptodev_newsession, hifn_newsession),
+ DEVMETHOD(cryptodev_freesession,hifn_freesession),
+ DEVMETHOD(cryptodev_process, hifn_process),
+};
+
+static void hifn_reset_board(struct hifn_softc *, int);
+static void hifn_reset_puc(struct hifn_softc *);
+static void hifn_puc_wait(struct hifn_softc *);
+static int hifn_enable_crypto(struct hifn_softc *);
+static void hifn_set_retry(struct hifn_softc *sc);
+static void hifn_init_dma(struct hifn_softc *);
+static void hifn_init_pci_registers(struct hifn_softc *);
+static int hifn_sramsize(struct hifn_softc *);
+static int hifn_dramsize(struct hifn_softc *);
+static int hifn_ramtype(struct hifn_softc *);
+static void hifn_sessions(struct hifn_softc *);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+static irqreturn_t hifn_intr(int irq, void *arg);
+#else
+static irqreturn_t hifn_intr(int irq, void *arg, struct pt_regs *regs);
+#endif
+static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
+static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
+static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
+static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
+static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
+static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
+static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
+static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
+static int hifn_init_pubrng(struct hifn_softc *);
+static void hifn_tick(unsigned long arg);
+static void hifn_abort(struct hifn_softc *);
+static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
+
+static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
+static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
+
+#ifdef CONFIG_OCF_RANDOMHARVEST
+static int hifn_read_random(void *arg, u_int32_t *buf, int len);
+#endif
+
+#define HIFN_MAX_CHIPS 8
+static struct hifn_softc *hifn_chip_idx[HIFN_MAX_CHIPS];
+
+static __inline u_int32_t
+READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
+{
+ u_int32_t v = readl(sc->sc_bar0 + reg);
+ sc->sc_bar0_lastreg = (bus_size_t) -1;
+ return (v);
+}
+#define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val)
+
+static __inline u_int32_t
+READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
+{
+ u_int32_t v = readl(sc->sc_bar1 + reg);
+ sc->sc_bar1_lastreg = (bus_size_t) -1;
+ return (v);
+}
+#define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val)
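+
+/*
+ * NB: reads reset sc_bar0_lastreg/sc_bar1_lastreg to -1.  The
+ * hifn_write_reg_0/1 helpers (defined later in this file) appear to
+ * use the *_lastreg tracking to decide when an extra read is needed
+ * between consecutive writes; after an explicit read no such fix-up
+ * is required.
+ */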
+
+/*
+ * map in a given buffer (great on some arches :-)
+ */
+
+static int
+pci_map_uio(struct hifn_softc *sc, struct hifn_operand *buf, struct uio *uio)
+{
+ struct iovec *iov = uio->uio_iov;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ buf->mapsize = 0;
+ for (buf->nsegs = 0; buf->nsegs < uio->uio_iovcnt; ) {
+ buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
+ iov->iov_base, iov->iov_len,
+ PCI_DMA_BIDIRECTIONAL);
+ buf->segs[buf->nsegs].ds_len = iov->iov_len;
+ buf->mapsize += iov->iov_len;
+ iov++;
+ buf->nsegs++;
+ }
+ /* identify this buffer by the first segment */
+ buf->map = (void *) buf->segs[0].ds_addr;
+ return(0);
+}
+
+/*
+ * map in a given sk_buff
+ */
+
+static int
+pci_map_skb(struct hifn_softc *sc,struct hifn_operand *buf,struct sk_buff *skb)
+{
+ int i;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ buf->mapsize = 0;
+
+ buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
+ skb->data, skb_headlen(skb), PCI_DMA_BIDIRECTIONAL);
+ buf->segs[0].ds_len = skb_headlen(skb);
+ buf->mapsize += buf->segs[0].ds_len;
+
+ buf->nsegs = 1;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ) {
+ buf->segs[buf->nsegs].ds_len = skb_shinfo(skb)->frags[i].size;
+ buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
+ page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) +
+ skb_shinfo(skb)->frags[i].page_offset,
+ buf->segs[buf->nsegs].ds_len, PCI_DMA_BIDIRECTIONAL);
+ buf->mapsize += buf->segs[buf->nsegs].ds_len;
+ buf->nsegs++;
+ }
+
+ /* identify this buffer by the first segment */
+ buf->map = (void *) buf->segs[0].ds_addr;
+ return(0);
+}
+
+/*
+ * map in a given contiguous buffer
+ */
+
+static int
+pci_map_buf(struct hifn_softc *sc,struct hifn_operand *buf, void *b, int len)
+{
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ buf->mapsize = 0;
+ buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
+ b, len, PCI_DMA_BIDIRECTIONAL);
+ buf->segs[0].ds_len = len;
+ buf->mapsize += buf->segs[0].ds_len;
+ buf->nsegs = 1;
+
+ /* identify this buffer by the first segment */
+ buf->map = (void *) buf->segs[0].ds_addr;
+ return(0);
+}
+
+#if 0 /* not needed at this time */
+static void
+pci_sync_iov(struct hifn_softc *sc, struct hifn_operand *buf)
+{
+ int i;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+ for (i = 0; i < buf->nsegs; i++)
+ pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
+ buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
+}
+#endif
+
+static void
+pci_unmap_buf(struct hifn_softc *sc, struct hifn_operand *buf)
+{
+ int i;
+ DPRINTF("%s()\n", __FUNCTION__);
+ for (i = 0; i < buf->nsegs; i++) {
+ pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
+ buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
+ buf->segs[i].ds_addr = 0;
+ buf->segs[i].ds_len = 0;
+ }
+ buf->nsegs = 0;
+ buf->mapsize = 0;
+ buf->map = 0;
+}
+
+static const char*
+hifn_partname(struct hifn_softc *sc)
+{
+ /* XXX sprintf numbers when not decoded */
+ switch (pci_get_vendor(sc->sc_pcidev)) {
+ case PCI_VENDOR_HIFN:
+ switch (pci_get_device(sc->sc_pcidev)) {
+ case PCI_PRODUCT_HIFN_6500: return "Hifn 6500";
+ case PCI_PRODUCT_HIFN_7751: return "Hifn 7751";
+ case PCI_PRODUCT_HIFN_7811: return "Hifn 7811";
+ case PCI_PRODUCT_HIFN_7951: return "Hifn 7951";
+ case PCI_PRODUCT_HIFN_7955: return "Hifn 7955";
+ case PCI_PRODUCT_HIFN_7956: return "Hifn 7956";
+ }
+ return "Hifn unknown-part";
+ case PCI_VENDOR_INVERTEX:
+ switch (pci_get_device(sc->sc_pcidev)) {
+ case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
+ }
+ return "Invertex unknown-part";
+ case PCI_VENDOR_NETSEC:
+ switch (pci_get_device(sc->sc_pcidev)) {
+ case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751";
+ }
+ return "NetSec unknown-part";
+ }
+ return "Unknown-vendor unknown-part";
+}
+
+static u_int
+checkmaxmin(struct pci_dev *dev, const char *what, u_int v, u_int min, u_int max)
+{
+ struct hifn_softc *sc = pci_get_drvdata(dev);
+ if (v > max) {
+ device_printf(sc->sc_dev, "Warning, %s %u out of range, "
+ "using max %u\n", what, v, max);
+ v = max;
+ } else if (v < min) {
+ device_printf(sc->sc_dev, "Warning, %s %u out of range, "
+ "using min %u\n", what, v, min);
+ v = min;
+ }
+ return v;
+}
+
+/*
+ * Select PLL configuration for 795x parts. This is complicated in
+ * that we cannot determine the optimal parameters without user input.
+ * The reference clock is derived from an external clock through a
+ * multiplier. The external clock is either the host bus (i.e. PCI)
+ * or an external clock generator. When using the PCI bus we assume
+ * the clock is either 33 or 66 MHz; for an external source we cannot
+ * tell the speed.
+ *
+ * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
+ * for an external source, followed by the frequency. We calculate
+ * the appropriate multiplier and PLL register contents accordingly.
+ * When no configuration is given we default to "ext66" because,
+ * according to Mike Ham of HiFn, almost every board in existence has
+ * an external crystal populated at 66MHz. A "pci66" setting will also
+ * always allow the card to work, but a card driven by the PCI bus
+ * clock in a 33MHz slot would then run at half speed until the
+ * correct information is provided, and using PCI can be a problem on
+ * modern motherboards: PCI33 clocks can range anywhere from 0 to
+ * 33MHz, and some boards use non-PCI-compliant spread-spectrum
+ * clocks, which can confuse the PLL.
+ */
+static void
+hifn_getpllconfig(struct pci_dev *dev, u_int *pll)
+{
+ const char *pllspec = hifn_pllconfig;
+ u_int freq, mul, fl, fh;
+ u_int32_t pllconfig;
+ char *nxt;
+
+ if (pllspec == NULL)
+ pllspec = "ext66";
+ fl = 33, fh = 66;
+ pllconfig = 0;
+ if (strncmp(pllspec, "ext", 3) == 0) {
+ pllspec += 3;
+ pllconfig |= HIFN_PLL_REF_SEL;
+ switch (pci_get_device(dev)) {
+ case PCI_PRODUCT_HIFN_7955:
+ case PCI_PRODUCT_HIFN_7956:
+ fl = 20, fh = 100;
+ break;
+#ifdef notyet
+ case PCI_PRODUCT_HIFN_7954:
+ fl = 20, fh = 66;
+ break;
+#endif
+ }
+ } else if (strncmp(pllspec, "pci", 3) == 0)
+ pllspec += 3;
+ freq = strtoul(pllspec, &nxt, 10);
+ if (nxt == pllspec)
+ freq = 66;
+ else
+ freq = checkmaxmin(dev, "frequency", freq, fl, fh);
+ /*
+ * Calculate multiplier. We target a Fck of 266 MHz,
+ * allowing only even values, possibly rounded down.
+ * Multipliers > 8 must set the charge pump current.
+ */
+ mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
+ pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
+ if (mul > 8)
+ pllconfig |= HIFN_PLL_IS;
+ *pll = pllconfig;
+}
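+
+/*
+ * Worked example: the default "ext66" gives freq = 66, so
+ * mul = (266 / 66) & ~1 = 4, the ND field becomes mul/2 - 1 = 1 and
+ * HIFN_PLL_REF_SEL is set.  "pci33" gives mul = 8 (ND = 3), which does
+ * not exceed the charge-pump threshold, while "ext20" on a 7955/7956
+ * gives mul = 12 and therefore also sets HIFN_PLL_IS.
+ */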
+
+/*
+ * Attach an interface that successfully probed.
+ */
+static int
+hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+ struct hifn_softc *sc = NULL;
+ char rbase;
+ u_int16_t ena, rev;
+ int rseg, rc;
+ unsigned long mem_start, mem_len;
+ static int num_chips = 0;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ if (pci_enable_device(dev) < 0)
+ return(-ENODEV);
+
+ if (pci_set_mwi(dev))
+ return(-ENODEV);
+
+ if (!dev->irq) {
+ printk("hifn: found device with no IRQ assigned. check BIOS settings!");
+ pci_disable_device(dev);
+ return(-ENODEV);
+ }
+
+ sc = (struct hifn_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
+ if (!sc)
+ return(-ENOMEM);
+ memset(sc, 0, sizeof(*sc));
+
+ softc_device_init(sc, "hifn", num_chips, hifn_methods);
+
+ sc->sc_pcidev = dev;
+ sc->sc_irq = -1;
+ sc->sc_cid = -1;
+ sc->sc_num = num_chips++;
+ if (sc->sc_num < HIFN_MAX_CHIPS)
+ hifn_chip_idx[sc->sc_num] = sc;
+
+ pci_set_drvdata(sc->sc_pcidev, sc);
+
+ spin_lock_init(&sc->sc_mtx);
+
+ /* XXX handle power management */
+
+ /*
+ * The 7951 and 795x have a random number generator and
+ * public key support; note this.
+ */
+ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
+ (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
+ pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
+ pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
+ sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
+ /*
+ * The 7811 has a random number generator and
+ * we also note its identity because of some quirks.
+ */
+ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
+ pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
+ sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
+
+ /*
+ * The 795x parts support AES.
+ */
+ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
+ (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
+ pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
+ sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
+ /*
+ * Select PLL configuration. This depends on the
+ * bus and board design and must be manually configured
+ * if the default setting is unacceptable.
+ */
+ hifn_getpllconfig(dev, &sc->sc_pllconfig);
+ }
+
+ /*
+ * Set up PCI resources. Note that we record the bus
+ * tag and handle for each register mapping; these are
+ * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
+ * and WRITE_REG_1 macros throughout the driver.
+ */
+ mem_start = pci_resource_start(sc->sc_pcidev, 0);
+ mem_len = pci_resource_len(sc->sc_pcidev, 0);
+ sc->sc_bar0 = (ocf_iomem_t) ioremap(mem_start, mem_len);
+ if (!sc->sc_bar0) {
+ device_printf(sc->sc_dev, "cannot map bar%d register space\n", 0);
+ goto fail;
+ }
+ sc->sc_bar0_lastreg = (bus_size_t) -1;
+
+ mem_start = pci_resource_start(sc->sc_pcidev, 1);
+ mem_len = pci_resource_len(sc->sc_pcidev, 1);
+ sc->sc_bar1 = (ocf_iomem_t) ioremap(mem_start, mem_len);
+ if (!sc->sc_bar1) {
+ device_printf(sc->sc_dev, "cannot map bar%d register space\n", 1);
+ goto fail;
+ }
+ sc->sc_bar1_lastreg = (bus_size_t) -1;
+
+ /* fix up the bus size */
+ if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
+ device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
+ goto fail;
+ }
+ if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
+ device_printf(sc->sc_dev,
+ "No usable consistent DMA configuration, aborting.\n");
+ goto fail;
+ }
+
+ hifn_set_retry(sc);
+
+ /*
+ * Set up the area where the Hifn DMA descriptors
+ * and associated data structures will live.
+ */
+ sc->sc_dma = (struct hifn_dma *) pci_alloc_consistent(dev,
+ sizeof(*sc->sc_dma),
+ &sc->sc_dma_physaddr);
+ if (!sc->sc_dma) {
+ device_printf(sc->sc_dev, "cannot alloc sc_dma\n");
+ goto fail;
+ }
+ bzero(sc->sc_dma, sizeof(*sc->sc_dma));
+
+ /*
+ * Reset the board and do the ``secret handshake''
+ * to enable the crypto support. Then complete the
+ * initialization procedure by setting up the interrupt
+ * and hooking into the system crypto support so we will
+ * be used for system services like the crypto device,
+ * IPsec, the RNG device, etc.
+ */
+ hifn_reset_board(sc, 0);
+
+ if (hifn_enable_crypto(sc) != 0) {
+ device_printf(sc->sc_dev, "crypto enabling failed\n");
+ goto fail;
+ }
+ hifn_reset_puc(sc);
+
+ hifn_init_dma(sc);
+ hifn_init_pci_registers(sc);
+
+ pci_set_master(sc->sc_pcidev);
+
+ /* XXX can't dynamically determine ram type for 795x; force dram */
+ if (sc->sc_flags & HIFN_IS_7956)
+ sc->sc_drammodel = 1;
+ else if (hifn_ramtype(sc))
+ goto fail;
+
+ if (sc->sc_drammodel == 0)
+ hifn_sramsize(sc);
+ else
+ hifn_dramsize(sc);
+
+ /*
+ * Workaround for NetSec 7751 rev A: half ram size because two
+ * of the address lines were left floating
+ */
+ if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
+ pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
+ pci_get_revid(dev) == 0x61) /*XXX???*/
+ sc->sc_ramsize >>= 1;
+
+ /*
+ * Arrange the interrupt line.
+ */
+ rc = request_irq(dev->irq, hifn_intr, IRQF_SHARED, "hifn", sc);
+ if (rc) {
+ device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
+ goto fail;
+ }
+ sc->sc_irq = dev->irq;
+
+ hifn_sessions(sc);
+
+ /*
+ * NB: Keep only the low 16 bits; this masks the chip id
+ * from the 7951.
+ */
+ rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
+
+ rseg = sc->sc_ramsize / 1024;
+ rbase = 'K';
+ if (sc->sc_ramsize >= (1024 * 1024)) {
+ rbase = 'M';
+ rseg /= 1024;
+ }
+ device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
+ hifn_partname(sc), rev,
+ rseg, rbase, sc->sc_drammodel ? 'd' : 's');
+ if (sc->sc_flags & HIFN_IS_7956)
+ printf(", pll=0x%x<%s clk, %ux mult>",
+ sc->sc_pllconfig,
+ sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
+ 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
+ printf("\n");
+
+ sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
+ if (sc->sc_cid < 0) {
+ device_printf(sc->sc_dev, "could not get crypto driver id\n");
+ goto fail;
+ }
+
+ WRITE_REG_0(sc, HIFN_0_PUCNFG,
+ READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
+ ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
+
+ switch (ena) {
+ case HIFN_PUSTAT_ENA_2:
+ crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
+ if (sc->sc_flags & HIFN_HAS_AES)
+ crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
+ /*FALLTHROUGH*/
+ case HIFN_PUSTAT_ENA_1:
+ crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
+ break;
+ }
+
+ if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
+ hifn_init_pubrng(sc);
+
+ init_timer(&sc->sc_tickto);
+ sc->sc_tickto.function = hifn_tick;
+ sc->sc_tickto.data = (unsigned long) sc->sc_num;
+ mod_timer(&sc->sc_tickto, jiffies + HZ);
+
+ return (0);
+
+fail:
+ if (sc->sc_cid >= 0)
+ crypto_unregister_all(sc->sc_cid);
+ if (sc->sc_irq != -1)
+ free_irq(sc->sc_irq, sc);
+ if (sc->sc_dma) {
+ /* Turn off DMA polling */
+ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+ HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+
+ pci_free_consistent(sc->sc_pcidev,
+ sizeof(*sc->sc_dma),
+ sc->sc_dma, sc->sc_dma_physaddr);
+ }
+ kfree(sc);
+ return (-ENXIO);
+}
+
+/*
+ * Detach an interface that successfully probed.
+ */
+static void
+hifn_remove(struct pci_dev *dev)
+{
+ struct hifn_softc *sc = pci_get_drvdata(dev);
+ unsigned long l_flags;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
+
+ /* disable interrupts */
+ HIFN_LOCK(sc);
+ WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
+ HIFN_UNLOCK(sc);
+
+ /*XXX other resources */
+ del_timer_sync(&sc->sc_tickto);
+
+ /* Turn off DMA polling */
+ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+ HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+
+ crypto_unregister_all(sc->sc_cid);
+
+ free_irq(sc->sc_irq, sc);
+
+ pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
+ sc->sc_dma, sc->sc_dma_physaddr);
+}
+
+
+static int
+hifn_init_pubrng(struct hifn_softc *sc)
+{
+ int i;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ if ((sc->sc_flags & HIFN_IS_7811) == 0) {
+ /* Reset 7951 public key/rng engine */
+ WRITE_REG_1(sc, HIFN_1_PUB_RESET,
+ READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
+
+ for (i = 0; i < 100; i++) {
+ DELAY(1000);
+ if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
+ HIFN_PUBRST_RESET) == 0)
+ break;
+ }
+
+ if (i == 100) {
+ device_printf(sc->sc_dev, "public key init failed\n");
+ return (1);
+ }
+ }
+
+ /* Enable the rng, if available */
+#ifdef CONFIG_OCF_RANDOMHARVEST
+ if (sc->sc_flags & HIFN_HAS_RNG) {
+ if (sc->sc_flags & HIFN_IS_7811) {
+ u_int32_t r;
+ r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
+ if (r & HIFN_7811_RNGENA_ENA) {
+ r &= ~HIFN_7811_RNGENA_ENA;
+ WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
+ }
+ WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
+ HIFN_7811_RNGCFG_DEFL);
+ r |= HIFN_7811_RNGENA_ENA;
+ WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
+ } else
+ WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
+ READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
+ HIFN_RNGCFG_ENA);
+
+ sc->sc_rngfirst = 1;
+ crypto_rregister(sc->sc_cid, hifn_read_random, sc);
+ }
+#endif
+
+ /* Enable public key engine, if available */
+ if (sc->sc_flags & HIFN_HAS_PUBLIC) {
+ WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
+ sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
+ WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
+#ifdef HIFN_VULCANDEV
+ sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
+ UID_ROOT, GID_WHEEL, 0666,
+ "vulcanpk");
+ sc->sc_pkdev->si_drv1 = sc;
+#endif
+ }
+
+ return (0);
+}
+
+#ifdef CONFIG_OCF_RANDOMHARVEST
+static int
+hifn_read_random(void *arg, u_int32_t *buf, int len)
+{
+ struct hifn_softc *sc = (struct hifn_softc *) arg;
+ u_int32_t sts;
+ int i, rc = 0;
+
+ if (len <= 0)
+ return rc;
+
+ if (sc->sc_flags & HIFN_IS_7811) {
+ /* ONLY VALID ON 7811!!!! */
+ for (i = 0; i < 5; i++) {
+ sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
+ if (sts & HIFN_7811_RNGSTS_UFL) {
+ device_printf(sc->sc_dev,
+ "RNG underflow: disabling\n");
+ /* DAVIDM perhaps return -1 */
+ break;
+ }
+ if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
+ break;
+
+ /*
+ * There are at least two words in the RNG FIFO
+ * at this point.
+ */
+ if (rc < len)
+ buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
+ if (rc < len)
+ buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
+ }
+ } else
+ buf[rc++] = READ_REG_1(sc, HIFN_1_RNG_DATA);
+
+ /* NB: discard first data read */
+ if (sc->sc_rngfirst) {
+ sc->sc_rngfirst = 0;
+ rc = 0;
+ }
+
+ return(rc);
+}
+#endif /* CONFIG_OCF_RANDOMHARVEST */
+
+static void
+hifn_puc_wait(struct hifn_softc *sc)
+{
+ int i;
+ int reg = HIFN_0_PUCTRL;
+
+ if (sc->sc_flags & HIFN_IS_7956) {
+ reg = HIFN_0_PUCTRL2;
+ }
+
+ for (i = 5000; i > 0; i--) {
+ DELAY(1);
+ if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
+ break;
+ }
+ if (!i)
+ device_printf(sc->sc_dev, "proc unit did not reset(0x%x)\n",
+ READ_REG_0(sc, HIFN_0_PUCTRL));
+}
+
+/*
+ * Reset the processing unit.
+ */
+static void
+hifn_reset_puc(struct hifn_softc *sc)
+{
+ /* Reset processing unit */
+ int reg = HIFN_0_PUCTRL;
+
+ if (sc->sc_flags & HIFN_IS_7956) {
+ reg = HIFN_0_PUCTRL2;
+ }
+ WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
+
+ hifn_puc_wait(sc);
+}
+
+/*
+ * Set the Retry and TRDY registers; note that we set them to
+ * zero because the 7811 locks up when forced to retry (section
+ * 3.6 of "Specification Update SU-0014-04". Not clear if we
+ * should do this for all Hifn parts, but it doesn't seem to hurt.
+ */
+static void
+hifn_set_retry(struct hifn_softc *sc)
+{
+ DPRINTF("%s()\n", __FUNCTION__);
+ /* NB: RETRY only responds to 8-bit reads/writes */
+ pci_write_config_byte(sc->sc_pcidev, HIFN_RETRY_TIMEOUT, 0);
+ pci_write_config_byte(sc->sc_pcidev, HIFN_TRDY_TIMEOUT, 0);
+ /* piggy back the cache line setting here */
+ pci_write_config_byte(sc->sc_pcidev, PCI_CACHE_LINE_SIZE, hifn_cache_linesize);
+}
+
+/*
+ * Resets the board. Values in the registers are left as is
+ * from the reset (i.e. initial values are assigned elsewhere).
+ */
+static void
+hifn_reset_board(struct hifn_softc *sc, int full)
+{
+ u_int32_t reg;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+ /*
+ * Set polling in the DMA configuration register to zero. 0x7 avoids
+ * resetting the board and zeros out the other fields.
+ */
+ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+ HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+
+ /*
+ * Now that polling has been disabled, we have to wait 1 ms
+ * before resetting the board.
+ */
+ DELAY(1000);
+
+ /* Reset the DMA unit */
+ if (full) {
+ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
+ DELAY(1000);
+ } else {
+ WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
+ HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
+ hifn_reset_puc(sc);
+ }
+
+ KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
+ bzero(sc->sc_dma, sizeof(*sc->sc_dma));
+
+ /* Bring dma unit out of reset */
+ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+ HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+
+ hifn_puc_wait(sc);
+ hifn_set_retry(sc);
+
+ if (sc->sc_flags & HIFN_IS_7811) {
+ for (reg = 0; reg < 1000; reg++) {
+ if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
+ HIFN_MIPSRST_CRAMINIT)
+ break;
+ DELAY(1000);
+ }
+ if (reg == 1000)
+ device_printf(sc->sc_dev, ": cram init timeout\n");
+ } else {
+ /* set up DMA configuration register #2 */
+ /* turn off all PK and BAR0 swaps */
+ WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
+ (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
+ (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
+ (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
+ (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
+ }
+}
+
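+/*
+ * Step the unlock "signature": each iteration feeds the parity of the
+ * bits selected by 0x80080125 back into bit 0 while shifting left,
+ * i.e. a 32-bit LFSR-style sequence.  hifn_enable_crypto() below uses
+ * it to derive the values written to HIFN_UNLOCK_SECRET2 during the
+ * unlock handshake.
+ */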
+static u_int32_t
+hifn_next_signature(u_int32_t a, u_int cnt)
+{
+ int i;
+ u_int32_t v;
+
+ for (i = 0; i < cnt; i++) {
+
+ /* get the parity */
+ v = a & 0x80080125;
+ v ^= v >> 16;
+ v ^= v >> 8;
+ v ^= v >> 4;
+ v ^= v >> 2;
+ v ^= v >> 1;
+
+ a = (v & 1) ^ (a << 1);
+ }
+
+ return a;
+}
+
+
+/*
+ * Checks to see if crypto is already enabled. If it isn't, the unlock
+ * sequence is run to enable it. The check is important, as running
+ * the unlock sequence twice will lock the board.
+ */
+static int
+hifn_enable_crypto(struct hifn_softc *sc)
+{
+ u_int32_t dmacfg, ramcfg, encl, addr, i;
+ char offtbl[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 };
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
+ dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
+
+ /*
+ * The RAM config register's encrypt level bit needs to be set before
+ * every read performed on the encryption level register.
+ */
+ WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
+
+ encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
+
+ /*
+ * Make sure we don't re-unlock. Two unlocks kill the chip until the
+ * next reboot.
+ */
+ if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
+#ifdef HIFN_DEBUG
+ if (hifn_debug)
+ device_printf(sc->sc_dev,
+ "Strong crypto already enabled!\n");
+#endif
+ goto report;
+ }
+
+ if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
+#ifdef HIFN_DEBUG
+ if (hifn_debug)
+ device_printf(sc->sc_dev,
+ "Unknown encryption level 0x%x\n", encl);
+#endif
+ return 1;
+ }
+
+ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
+ HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+ DELAY(1000);
+ addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
+ DELAY(1000);
+ WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
+ DELAY(1000);
+
+ for (i = 0; i <= 12; i++) {
+ addr = hifn_next_signature(addr, offtbl[i] + 0x101);
+ WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
+
+ DELAY(1000);
+ }
+
+ WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
+ encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
+
+#ifdef HIFN_DEBUG
+ if (hifn_debug) {
+ if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
+ device_printf(sc->sc_dev, "Engine is permanently "
+ "locked until next system reset!\n");
+ else
+ device_printf(sc->sc_dev, "Engine enabled "
+ "successfully!\n");
+ }
+#endif
+
+report:
+ WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
+ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
+
+ switch (encl) {
+ case HIFN_PUSTAT_ENA_1:
+ case HIFN_PUSTAT_ENA_2:
+ break;
+ case HIFN_PUSTAT_ENA_0:
+ default:
+ device_printf(sc->sc_dev, "disabled\n");
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Give initial values to the registers listed in the "Register Space"
+ * section of the HIFN Software Development reference manual.
+ */
+static void
+hifn_init_pci_registers(struct hifn_softc *sc)
+{
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ /* write fixed values needed by the Initialization registers */
+ WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
+ WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
+ WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
+
+ /* write all 4 ring address registers */
+ WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
+ offsetof(struct hifn_dma, cmdr[0]));
+ WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
+ offsetof(struct hifn_dma, srcr[0]));
+ WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
+ offsetof(struct hifn_dma, dstr[0]));
+ WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
+ offsetof(struct hifn_dma, resr[0]));
+
+ DELAY(2000);
+
+ /* write status register */
+ WRITE_REG_1(sc, HIFN_1_DMA_CSR,
+ HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
+ HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
+ HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
+ HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
+ HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
+ HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
+ HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
+ HIFN_DMACSR_S_WAIT |
+ HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
+ HIFN_DMACSR_C_WAIT |
+ HIFN_DMACSR_ENGINE |
+ ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
+ HIFN_DMACSR_PUBDONE : 0) |
+ ((sc->sc_flags & HIFN_IS_7811) ?
+ HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
+
+ sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
+ sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
+ HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
+ HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
+ ((sc->sc_flags & HIFN_IS_7811) ?
+ HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
+ sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
+ WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
+
+
+ if (sc->sc_flags & HIFN_IS_7956) {
+ u_int32_t pll;
+
+ WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
+ HIFN_PUCNFG_TCALLPHASES |
+ HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
+
+ /* turn off the clocks and ensure bypass is set */
+ pll = READ_REG_1(sc, HIFN_1_PLL);
+ pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
+ | HIFN_PLL_BP | HIFN_PLL_MBSET;
+ WRITE_REG_1(sc, HIFN_1_PLL, pll);
+ DELAY(10*1000); /* 10ms */
+
+ /* change configuration */
+ pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
+ WRITE_REG_1(sc, HIFN_1_PLL, pll);
+ DELAY(10*1000); /* 10ms */
+
+ /* disable bypass */
+ pll &= ~HIFN_PLL_BP;
+ WRITE_REG_1(sc, HIFN_1_PLL, pll);
+ /* enable clocks with new configuration */
+ pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
+ WRITE_REG_1(sc, HIFN_1_PLL, pll);
+ } else {
+ WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
+ HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
+ HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
+ (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
+ }
+
+ WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
+ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+ HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
+ ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
+ ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
+}
+
+/*
+ * The maximum number of sessions supported by the card
+ * is dependent on the amount of context ram, which
+ * encryption algorithms are enabled, and how compression
+ * is configured. This should be configured before this
+ * routine is called.
+ */
+static void
+hifn_sessions(struct hifn_softc *sc)
+{
+ u_int32_t pucnfg;
+ int ctxsize;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
+
+ if (pucnfg & HIFN_PUCNFG_COMPSING) {
+ if (pucnfg & HIFN_PUCNFG_ENCCNFG)
+ ctxsize = 128;
+ else
+ ctxsize = 512;
+ /*
+ * 7955/7956 has internal context memory of 32K
+ */
+ if (sc->sc_flags & HIFN_IS_7956)
+ sc->sc_maxses = 32768 / ctxsize;
+ else
+ sc->sc_maxses = 1 +
+ ((sc->sc_ramsize - 32768) / ctxsize);
+ } else
+ sc->sc_maxses = sc->sc_ramsize / 16384;
+
+ if (sc->sc_maxses > 2048)
+ sc->sc_maxses = 2048;
+}
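+
+/*
+ * For example, a 7955/7956 with its fixed 32KB of context memory
+ * allows 32768/512 = 64 sessions with a 512-byte context
+ * (HIFN_PUCNFG_ENCCNFG clear) or 32768/128 = 256 sessions with the
+ * 128-byte context; older parts scale with external RAM, and the
+ * result is always capped at 2048.
+ */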
+
+/*
+ * Determine ram type (sram or dram). Board should be just out of a reset
+ * state when this is called.
+ */
+static int
+hifn_ramtype(struct hifn_softc *sc)
+{
+ u_int8_t data[8], dataexpect[8];
+ int i;
+
+ for (i = 0; i < sizeof(data); i++)
+ data[i] = dataexpect[i] = 0x55;
+ if (hifn_writeramaddr(sc, 0, data))
+ return (-1);
+ if (hifn_readramaddr(sc, 0, data))
+ return (-1);
+ if (bcmp(data, dataexpect, sizeof(data)) != 0) {
+ sc->sc_drammodel = 1;
+ return (0);
+ }
+
+ for (i = 0; i < sizeof(data); i++)
+ data[i] = dataexpect[i] = 0xaa;
+ if (hifn_writeramaddr(sc, 0, data))
+ return (-1);
+ if (hifn_readramaddr(sc, 0, data))
+ return (-1);
+ if (bcmp(data, dataexpect, sizeof(data)) != 0) {
+ sc->sc_drammodel = 1;
+ return (0);
+ }
+
+ return (0);
+}
+
+#define HIFN_SRAM_MAX (32 << 20)
+#define HIFN_SRAM_STEP_SIZE 16384
+#define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
+
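+/*
+ * Probe the SRAM size: tag every 16KB step (working down from the
+ * 32MB maximum) with its index, then read the tags back from the
+ * bottom up, growing sc_ramsize until a tag fails to verify.
+ */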
+static int
+hifn_sramsize(struct hifn_softc *sc)
+{
+ u_int32_t a;
+ u_int8_t data[8];
+ u_int8_t dataexpect[sizeof(data)];
+ int32_t i;
+
+ for (i = 0; i < sizeof(data); i++)
+ data[i] = dataexpect[i] = i ^ 0x5a;
+
+ for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
+ a = i * HIFN_SRAM_STEP_SIZE;
+ bcopy(&i, data, sizeof(i));
+ hifn_writeramaddr(sc, a, data);
+ }
+
+ for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
+ a = i * HIFN_SRAM_STEP_SIZE;
+ bcopy(&i, dataexpect, sizeof(i));
+ if (hifn_readramaddr(sc, a, data) < 0)
+ return (0);
+ if (bcmp(data, dataexpect, sizeof(data)) != 0)
+ return (0);
+ sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
+ }
+
+ return (0);
+}
+
+/*
+ * XXX For dram boards, one should really try all of the
+ * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
+ * is already set up correctly.
+ */
+static int
+hifn_dramsize(struct hifn_softc *sc)
+{
+ u_int32_t cnfg;
+
+ if (sc->sc_flags & HIFN_IS_7956) {
+ /*
+ * 7955/7956 have a fixed internal ram of only 32K.
+ */
+ sc->sc_ramsize = 32768;
+ } else {
+ cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
+ HIFN_PUCNFG_DRAMMASK;
+ sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
+ }
+ return (0);
+}
+
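+/*
+ * Reserve the next slot in each of the four descriptor rings
+ * (command, source, destination, result).  When an index reaches the
+ * end of its ring the JUMP descriptor at slot RSIZE is marked valid
+ * so the chip wraps back to slot 0.
+ */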
+static void
+hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
+{
+ struct hifn_dma *dma = sc->sc_dma;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ if (dma->cmdi == HIFN_D_CMD_RSIZE) {
+ dma->cmdi = 0;
+ dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
+ wmb();
+ dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
+ HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+ }
+ *cmdp = dma->cmdi++;
+ dma->cmdk = dma->cmdi;
+
+ if (dma->srci == HIFN_D_SRC_RSIZE) {
+ dma->srci = 0;
+ dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
+ wmb();
+ dma->srcr[HIFN_D_SRC_RSIZE].l |= htole32(HIFN_D_VALID);
+ HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+ }
+ *srcp = dma->srci++;
+ dma->srck = dma->srci;
+
+ if (dma->dsti == HIFN_D_DST_RSIZE) {
+ dma->dsti = 0;
+ dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
+ wmb();
+ dma->dstr[HIFN_D_DST_RSIZE].l |= htole32(HIFN_D_VALID);
+ HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+ }
+ *dstp = dma->dsti++;
+ dma->dstk = dma->dsti;
+
+ if (dma->resi == HIFN_D_RES_RSIZE) {
+ dma->resi = 0;
+ dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
+ wmb();
+ dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
+ HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+ }
+ *resp = dma->resi++;
+ dma->resk = dma->resi;
+}
+
+static int
+hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
+{
+ struct hifn_dma *dma = sc->sc_dma;
+ hifn_base_command_t wc;
+ const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
+ int r, cmdi, resi, srci, dsti;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ wc.masks = htole16(3 << 13);
+ wc.session_num = htole16(addr >> 14);
+ wc.total_source_count = htole16(8);
+ wc.total_dest_count = htole16(addr & 0x3fff);
+
+ hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
+
+ WRITE_REG_1(sc, HIFN_1_DMA_CSR,
+ HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
+ HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
+
+ /* build write command */
+ bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
+ *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
+ bcopy(data, &dma->test_src, sizeof(dma->test_src));
+
+ dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
+ + offsetof(struct hifn_dma, test_src));
+ dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
+ + offsetof(struct hifn_dma, test_dst));
+
+ dma->cmdr[cmdi].l = htole32(16 | masks);
+ dma->srcr[srci].l = htole32(8 | masks);
+ dma->dstr[dsti].l = htole32(4 | masks);
+ dma->resr[resi].l = htole32(4 | masks);
+
+ for (r = 10000; r >= 0; r--) {
+ DELAY(10);
+ if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
+ break;
+ }
+ if (r == 0) {
+ device_printf(sc->sc_dev, "writeramaddr -- "
+ "result[%d](addr %d) still valid\n", resi, addr);
+ r = -1;
+ return (-1);
+ } else
+ r = 0;
+
+ WRITE_REG_1(sc, HIFN_1_DMA_CSR,
+ HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
+ HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
+
+ return (r);
+}
+
+static int
+hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
+{
+ struct hifn_dma *dma = sc->sc_dma;
+ hifn_base_command_t rc;
+ const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
+ int r, cmdi, srci, dsti, resi;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ rc.masks = htole16(2 << 13);
+ rc.session_num = htole16(addr >> 14);
+ rc.total_source_count = htole16(addr & 0x3fff);
+ rc.total_dest_count = htole16(8);
+
+ hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
+
+ WRITE_REG_1(sc, HIFN_1_DMA_CSR,
+ HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
+ HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
+
+ bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
+ *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
+
+ dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
+ offsetof(struct hifn_dma, test_src));
+ dma->test_src = 0;
+ dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr +
+ offsetof(struct hifn_dma, test_dst));
+ dma->test_dst = 0;
+ dma->cmdr[cmdi].l = htole32(8 | masks);
+ dma->srcr[srci].l = htole32(8 | masks);
+ dma->dstr[dsti].l = htole32(8 | masks);
+ dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
+
+ for (r = 10000; r >= 0; r--) {
+ DELAY(10);
+ if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
+ break;
+ }
+ if (r == 0) {
+ device_printf(sc->sc_dev, "readramaddr -- "
+ "result[%d](addr %d) still valid\n", resi, addr);
+ r = -1;
+ } else {
+ r = 0;
+ bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
+ }
+
+ WRITE_REG_1(sc, HIFN_1_DMA_CSR,
+ HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
+ HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
+
+ return (r);
+}
+
+/*
+ * Initialize the descriptor rings.
+ */
+static void
+hifn_init_dma(struct hifn_softc *sc)
+{
+ struct hifn_dma *dma = sc->sc_dma;
+ int i;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ hifn_set_retry(sc);
+
+ /* initialize static pointer values */
+ for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
+ dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
+ offsetof(struct hifn_dma, command_bufs[i][0]));
+ for (i = 0; i < HIFN_D_RES_RSIZE; i++)
+ dma->resr[i].p = htole32(sc->sc_dma_physaddr +
+ offsetof(struct hifn_dma, result_bufs[i][0]));
+
+ dma->cmdr[HIFN_D_CMD_RSIZE].p =
+ htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
+ dma->srcr[HIFN_D_SRC_RSIZE].p =
+ htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
+ dma->dstr[HIFN_D_DST_RSIZE].p =
+ htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
+ dma->resr[HIFN_D_RES_RSIZE].p =
+ htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
+
+ dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
+ dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
+ dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
+}
+
+/*
+ * Writes out the raw command buffer space. Returns the
+ * command buffer size.
+ */
+static u_int
+hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
+{
+ struct hifn_softc *sc = NULL;
+ u_int8_t *buf_pos;
+ hifn_base_command_t *base_cmd;
+ hifn_mac_command_t *mac_cmd;
+ hifn_crypt_command_t *cry_cmd;
+ int using_mac, using_crypt, len, ivlen;
+ u_int32_t dlen, slen;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ buf_pos = buf;
+ using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
+ using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
+
+ base_cmd = (hifn_base_command_t *)buf_pos;
+ base_cmd->masks = htole16(cmd->base_masks);
+ slen = cmd->src_mapsize;
+ if (cmd->sloplen)
+ dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
+ else
+ dlen = cmd->dst_mapsize;
+ base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
+ base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
+ dlen >>= 16;
+ slen >>= 16;
+ base_cmd->session_num = htole16(
+ ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
+ ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
+ buf_pos += sizeof(hifn_base_command_t);
+
+ if (using_mac) {
+ mac_cmd = (hifn_mac_command_t *)buf_pos;
+ dlen = cmd->maccrd->crd_len;
+ mac_cmd->source_count = htole16(dlen & 0xffff);
+ dlen >>= 16;
+ mac_cmd->masks = htole16(cmd->mac_masks |
+ ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
+ mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
+ mac_cmd->reserved = 0;
+ buf_pos += sizeof(hifn_mac_command_t);
+ }
+
+ if (using_crypt) {
+ cry_cmd = (hifn_crypt_command_t *)buf_pos;
+ dlen = cmd->enccrd->crd_len;
+ cry_cmd->source_count = htole16(dlen & 0xffff);
+ dlen >>= 16;
+ cry_cmd->masks = htole16(cmd->cry_masks |
+ ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
+ cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
+ cry_cmd->reserved = 0;
+ buf_pos += sizeof(hifn_crypt_command_t);
+ }
+
+ if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
+ bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
+ buf_pos += HIFN_MAC_KEY_LENGTH;
+ }
+
+ if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
+ switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
+ case HIFN_CRYPT_CMD_ALG_3DES:
+ bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
+ buf_pos += HIFN_3DES_KEY_LENGTH;
+ break;
+ case HIFN_CRYPT_CMD_ALG_DES:
+ bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
+ buf_pos += HIFN_DES_KEY_LENGTH;
+ break;
+ case HIFN_CRYPT_CMD_ALG_RC4:
+ len = 256;
+ do {
+ int clen;
+
+ clen = MIN(cmd->cklen, len);
+ bcopy(cmd->ck, buf_pos, clen);
+ len -= clen;
+ buf_pos += clen;
+ } while (len > 0);
+ bzero(buf_pos, 4);
+ buf_pos += 4;
+ break;
+ case HIFN_CRYPT_CMD_ALG_AES:
+ /*
+ * AES keys are variable length: 128, 192 or
+ * 256 bits (16, 24 or 32 bytes).
+ */
+ bcopy(cmd->ck, buf_pos, cmd->cklen);
+ buf_pos += cmd->cklen;
+ break;
+ }
+ }
+
+ if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
+ switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
+ case HIFN_CRYPT_CMD_ALG_AES:
+ ivlen = HIFN_AES_IV_LENGTH;
+ break;
+ default:
+ ivlen = HIFN_IV_LENGTH;
+ break;
+ }
+ bcopy(cmd->iv, buf_pos, ivlen);
+ buf_pos += ivlen;
+ }
+
+ if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
+ bzero(buf_pos, 8);
+ buf_pos += 8;
+ }
+
+ return (buf_pos - buf);
+}
+
+static int
+hifn_dmamap_aligned(struct hifn_operand *op)
+{
+ struct hifn_softc *sc = NULL;
+ int i;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ for (i = 0; i < op->nsegs; i++) {
+ if (op->segs[i].ds_addr & 3)
+ return (0);
+ if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
+ return (0);
+ }
+ return (1);
+}
+
+static __inline int
+hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
+{
+ struct hifn_dma *dma = sc->sc_dma;
+
+ if (++idx == HIFN_D_DST_RSIZE) {
+ dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
+ HIFN_D_MASKDONEIRQ);
+ HIFN_DSTR_SYNC(sc, idx,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ idx = 0;
+ }
+ return (idx);
+}
+
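+/*
+ * Fill the destination ring for a command.  If the mapping does not
+ * end on a 32-bit boundary (cmd->sloplen != 0), the trailing partial
+ * word is redirected into the shared slop[] buffer (indexed by
+ * cmd->slopidx) and the in-place descriptor is shortened accordingly.
+ */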
+static int
+hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
+{
+ struct hifn_dma *dma = sc->sc_dma;
+ struct hifn_operand *dst = &cmd->dst;
+ u_int32_t p, l;
+ int idx, used = 0, i;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ idx = dma->dsti;
+ for (i = 0; i < dst->nsegs - 1; i++) {
+ dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
+ dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
+ wmb();
+ dma->dstr[idx].l |= htole32(HIFN_D_VALID);
+ HIFN_DSTR_SYNC(sc, idx,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ used++;
+
+ idx = hifn_dmamap_dstwrap(sc, idx);
+ }
+
+ if (cmd->sloplen == 0) {
+ p = dst->segs[i].ds_addr;
+ l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
+ dst->segs[i].ds_len;
+ } else {
+ p = sc->sc_dma_physaddr +
+ offsetof(struct hifn_dma, slop[cmd->slopidx]);
+ l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
+ sizeof(u_int32_t);
+
+ if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
+ dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
+ dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ |
+ (dst->segs[i].ds_len - cmd->sloplen));
+ wmb();
+ dma->dstr[idx].l |= htole32(HIFN_D_VALID);
+ HIFN_DSTR_SYNC(sc, idx,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ used++;
+
+ idx = hifn_dmamap_dstwrap(sc, idx);
+ }
+ }
+ dma->dstr[idx].p = htole32(p);
+ dma->dstr[idx].l = htole32(l);
+ wmb();
+ dma->dstr[idx].l |= htole32(HIFN_D_VALID);
+ HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ used++;
+
+ idx = hifn_dmamap_dstwrap(sc, idx);
+
+ dma->dsti = idx;
+ dma->dstu += used;
+ return (idx);
+}
+
+static __inline int
+hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
+{
+ struct hifn_dma *dma = sc->sc_dma;
+
+ if (++idx == HIFN_D_SRC_RSIZE) {
+ dma->srcr[idx].l = htole32(HIFN_D_VALID |
+ HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
+ HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+ idx = 0;
+ }
+ return (idx);
+}
+
+static int
+hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
+{
+ struct hifn_dma *dma = sc->sc_dma;
+ struct hifn_operand *src = &cmd->src;
+ int idx, i;
+ u_int32_t last = 0;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ idx = dma->srci;
+ for (i = 0; i < src->nsegs; i++) {
+ if (i == src->nsegs - 1)
+ last = HIFN_D_LAST;
+
+ dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
+ dma->srcr[idx].l = htole32(src->segs[i].ds_len |
+ HIFN_D_MASKDONEIRQ | last);
+ wmb();
+ dma->srcr[idx].l |= htole32(HIFN_D_VALID);
+ HIFN_SRCR_SYNC(sc, idx,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+
+ idx = hifn_dmamap_srcwrap(sc, idx);
+ }
+ dma->srci = idx;
+ dma->srcu += src->nsegs;
+ return (idx);
+}
+
+
+static int
+hifn_crypto(
+ struct hifn_softc *sc,
+ struct hifn_command *cmd,
+ struct cryptop *crp,
+ int hint)
+{
+ struct hifn_dma *dma = sc->sc_dma;
+ u_int32_t cmdlen, csr;
+ int cmdi, resi, err = 0;
+ unsigned long l_flags;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ /*
+ * need 1 cmd, and 1 res
+ *
+ * NB: check this first since it's easy.
+ */
+ HIFN_LOCK(sc);
+ if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
+ (dma->resu + 1) > HIFN_D_RES_RSIZE) {
+#ifdef HIFN_DEBUG
+ if (hifn_debug) {
+ device_printf(sc->sc_dev,
+ "cmd/result exhaustion, cmdu %u resu %u\n",
+ dma->cmdu, dma->resu);
+ }
+#endif
+ hifnstats.hst_nomem_cr++;
+ sc->sc_needwakeup |= CRYPTO_SYMQ;
+ HIFN_UNLOCK(sc);
+ return (ERESTART);
+ }
+
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ if (pci_map_skb(sc, &cmd->src, cmd->src_skb)) {
+ hifnstats.hst_nomem_load++;
+ err = ENOMEM;
+ goto err_srcmap1;
+ }
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ if (pci_map_uio(sc, &cmd->src, cmd->src_io)) {
+ hifnstats.hst_nomem_load++;
+ err = ENOMEM;
+ goto err_srcmap1;
+ }
+ } else {
+ if (pci_map_buf(sc, &cmd->src, cmd->src_buf, crp->crp_ilen)) {
+ hifnstats.hst_nomem_load++;
+ err = ENOMEM;
+ goto err_srcmap1;
+ }
+ }
+
+ if (hifn_dmamap_aligned(&cmd->src)) {
+ cmd->sloplen = cmd->src_mapsize & 3;
+ cmd->dst = cmd->src;
+ } else {
+ if (crp->crp_flags & CRYPTO_F_IOV) {
+ DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+ err = EINVAL;
+ goto err_srcmap;
+ } else if (crp->crp_flags & CRYPTO_F_SKBUF) {
+#ifdef NOTYET
+ int totlen, len;
+ struct mbuf *m, *m0, *mlast;
+
+ KASSERT(cmd->dst_m == cmd->src_m,
+ ("hifn_crypto: dst_m initialized improperly"));
+ hifnstats.hst_unaligned++;
+ /*
+ * Source is not aligned on a longword boundary.
+ * Copy the data to ensure alignment. If we fail
+ * to allocate mbufs or clusters while doing this
+ * we return ERESTART so the operation is requeued
+ * at the crypto layer later, but only if there are
+ * ops already posted to the hardware; otherwise we
+ * have no guarantee that we'll be re-entered.
+ */
+ totlen = cmd->src_mapsize;
+ if (cmd->src_m->m_flags & M_PKTHDR) {
+ len = MHLEN;
+ MGETHDR(m0, M_DONTWAIT, MT_DATA);
+ if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
+ m_free(m0);
+ m0 = NULL;
+ }
+ } else {
+ len = MLEN;
+ MGET(m0, M_DONTWAIT, MT_DATA);
+ }
+ if (m0 == NULL) {
+ hifnstats.hst_nomem_mbuf++;
+ err = dma->cmdu ? ERESTART : ENOMEM;
+ goto err_srcmap;
+ }
+ if (totlen >= MINCLSIZE) {
+ MCLGET(m0, M_DONTWAIT);
+ if ((m0->m_flags & M_EXT) == 0) {
+ hifnstats.hst_nomem_mcl++;
+ err = dma->cmdu ? ERESTART : ENOMEM;
+ m_freem(m0);
+ goto err_srcmap;
+ }
+ len = MCLBYTES;
+ }
+ totlen -= len;
+ m0->m_pkthdr.len = m0->m_len = len;
+ mlast = m0;
+
+ while (totlen > 0) {
+ MGET(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ hifnstats.hst_nomem_mbuf++;
+ err = dma->cmdu ? ERESTART : ENOMEM;
+ m_freem(m0);
+ goto err_srcmap;
+ }
+ len = MLEN;
+ if (totlen >= MINCLSIZE) {
+ MCLGET(m, M_DONTWAIT);
+ if ((m->m_flags & M_EXT) == 0) {
+ hifnstats.hst_nomem_mcl++;
+ err = dma->cmdu ? ERESTART : ENOMEM;
+ mlast->m_next = m;
+ m_freem(m0);
+ goto err_srcmap;
+ }
+ len = MCLBYTES;
+ }
+
+ m->m_len = len;
+ m0->m_pkthdr.len += len;
+ totlen -= len;
+
+ mlast->m_next = m;
+ mlast = m;
+ }
+ cmd->dst_m = m0;
+#else
+ device_printf(sc->sc_dev,
+ "%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
+ __FILE__, __LINE__);
+ err = EINVAL;
+ goto err_srcmap;
+#endif
+ } else {
+ device_printf(sc->sc_dev,
+ "%s,%d: unaligned contig buffers not implemented\n",
+ __FILE__, __LINE__);
+ err = EINVAL;
+ goto err_srcmap;
+ }
+ }
+
+ if (cmd->dst_map == NULL) {
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ if (pci_map_skb(sc, &cmd->dst, cmd->dst_skb)) {
+ hifnstats.hst_nomem_map++;
+ err = ENOMEM;
+ goto err_dstmap1;
+ }
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ if (pci_map_uio(sc, &cmd->dst, cmd->dst_io)) {
+ hifnstats.hst_nomem_load++;
+ err = ENOMEM;
+ goto err_dstmap1;
+ }
+ } else {
+ if (pci_map_buf(sc, &cmd->dst, cmd->dst_buf, crp->crp_ilen)) {
+ hifnstats.hst_nomem_load++;
+ err = ENOMEM;
+ goto err_dstmap1;
+ }
+ }
+ }
+
+#ifdef HIFN_DEBUG
+ if (hifn_debug) {
+ device_printf(sc->sc_dev,
+ "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
+ READ_REG_1(sc, HIFN_1_DMA_CSR),
+ READ_REG_1(sc, HIFN_1_DMA_IER),
+ dma->cmdu, dma->srcu, dma->dstu, dma->resu,
+ cmd->src_nsegs, cmd->dst_nsegs);
+ }
+#endif
+
+#if 0
+ if (cmd->src_map == cmd->dst_map) {
+ bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
+ BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
+ } else {
+ bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
+ BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
+ BUS_DMASYNC_PREREAD);
+ }
+#endif
+
+ /*
+ * need N src, and N dst
+ */
+ if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
+ (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
+#ifdef HIFN_DEBUG
+ if (hifn_debug) {
+ device_printf(sc->sc_dev,
+ "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
+ dma->srcu, cmd->src_nsegs,
+ dma->dstu, cmd->dst_nsegs);
+ }
+#endif
+ hifnstats.hst_nomem_sd++;
+ err = ERESTART;
+ goto err_dstmap;
+ }
+
+ if (dma->cmdi == HIFN_D_CMD_RSIZE) {
+ dma->cmdi = 0;
+ dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
+ wmb();
+ dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
+ HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+ }
+ cmdi = dma->cmdi++;
+ cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
+ HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
+
+ /* .p for command/result already set */
+ dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_LAST |
+ HIFN_D_MASKDONEIRQ);
+ wmb();
+ dma->cmdr[cmdi].l |= htole32(HIFN_D_VALID);
+ HIFN_CMDR_SYNC(sc, cmdi,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+ dma->cmdu++;
+
+ /*
+ * We don't worry about missing an interrupt (which a "command wait"
+ * interrupt salvages us from), unless there is more than one command
+ * in the queue.
+ */
+ if (dma->cmdu > 1) {
+ sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
+ WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
+ }
+
+ hifnstats.hst_ipackets++;
+ hifnstats.hst_ibytes += cmd->src_mapsize;
+
+ hifn_dmamap_load_src(sc, cmd);
+
+ /*
+ * Unlike the other descriptors, we don't mask the done interrupt
+ * on the result descriptor.
+ */
+#ifdef HIFN_DEBUG
+ if (hifn_debug)
+ device_printf(sc->sc_dev, "load res\n");
+#endif
+ if (dma->resi == HIFN_D_RES_RSIZE) {
+ dma->resi = 0;
+ dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
+ wmb();
+ dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
+ HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ }
+ resi = dma->resi++;
+ KASSERT(dma->hifn_commands[resi] == NULL,
+ ("hifn_crypto: command slot %u busy", resi));
+ dma->hifn_commands[resi] = cmd;
+ HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
+ if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
+ dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
+ HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
+ wmb();
+ dma->resr[resi].l |= htole32(HIFN_D_VALID);
+ sc->sc_curbatch++;
+ if (sc->sc_curbatch > hifnstats.hst_maxbatch)
+ hifnstats.hst_maxbatch = sc->sc_curbatch;
+ hifnstats.hst_totbatch++;
+ } else {
+ dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_LAST);
+ wmb();
+ dma->resr[resi].l |= htole32(HIFN_D_VALID);
+ sc->sc_curbatch = 0;
+ }
+ HIFN_RESR_SYNC(sc, resi,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ dma->resu++;
+
+ if (cmd->sloplen)
+ cmd->slopidx = resi;
+
+ hifn_dmamap_load_dst(sc, cmd);
+
+ csr = 0;
+ if (sc->sc_c_busy == 0) {
+ csr |= HIFN_DMACSR_C_CTRL_ENA;
+ sc->sc_c_busy = 1;
+ }
+ if (sc->sc_s_busy == 0) {
+ csr |= HIFN_DMACSR_S_CTRL_ENA;
+ sc->sc_s_busy = 1;
+ }
+ if (sc->sc_r_busy == 0) {
+ csr |= HIFN_DMACSR_R_CTRL_ENA;
+ sc->sc_r_busy = 1;
+ }
+ if (sc->sc_d_busy == 0) {
+ csr |= HIFN_DMACSR_D_CTRL_ENA;
+ sc->sc_d_busy = 1;
+ }
+ if (csr)
+ WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
+
+#ifdef HIFN_DEBUG
+ if (hifn_debug) {
+ device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
+ READ_REG_1(sc, HIFN_1_DMA_CSR),
+ READ_REG_1(sc, HIFN_1_DMA_IER));
+ }
+#endif
+
+ sc->sc_active = 5;
+ HIFN_UNLOCK(sc);
+ KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
+ return (err); /* success */
+
+err_dstmap:
+ if (cmd->src_map != cmd->dst_map)
+ pci_unmap_buf(sc, &cmd->dst);
+err_dstmap1:
+err_srcmap:
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ if (cmd->src_skb != cmd->dst_skb)
+#ifdef NOTYET
+ m_freem(cmd->dst_m);
+#else
+ device_printf(sc->sc_dev,
+ "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
+ __FILE__, __LINE__);
+#endif
+ }
+ pci_unmap_buf(sc, &cmd->src);
+err_srcmap1:
+ HIFN_UNLOCK(sc);
+ return (err);
+}
+
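+/*
+ * One-second housekeeping timer: sc_active is set to 5 whenever new
+ * work is queued and counts down here; once it reaches zero any DMA
+ * engine whose ring has drained is disabled, and the timer re-arms
+ * itself.
+ */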
+static void
+hifn_tick(unsigned long arg)
+{
+ struct hifn_softc *sc;
+ unsigned long l_flags;
+
+ if (arg >= HIFN_MAX_CHIPS)
+ return;
+ sc = hifn_chip_idx[arg];
+ if (!sc)
+ return;
+
+ HIFN_LOCK(sc);
+ if (sc->sc_active == 0) {
+ struct hifn_dma *dma = sc->sc_dma;
+ u_int32_t r = 0;
+
+ if (dma->cmdu == 0 && sc->sc_c_busy) {
+ sc->sc_c_busy = 0;
+ r |= HIFN_DMACSR_C_CTRL_DIS;
+ }
+ if (dma->srcu == 0 && sc->sc_s_busy) {
+ sc->sc_s_busy = 0;
+ r |= HIFN_DMACSR_S_CTRL_DIS;
+ }
+ if (dma->dstu == 0 && sc->sc_d_busy) {
+ sc->sc_d_busy = 0;
+ r |= HIFN_DMACSR_D_CTRL_DIS;
+ }
+ if (dma->resu == 0 && sc->sc_r_busy) {
+ sc->sc_r_busy = 0;
+ r |= HIFN_DMACSR_R_CTRL_DIS;
+ }
+ if (r)
+ WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
+ } else
+ sc->sc_active--;
+ HIFN_UNLOCK(sc);
+ mod_timer(&sc->sc_tickto, jiffies + HZ);
+}
+
+static irqreturn_t
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+hifn_intr(int irq, void *arg)
+#else
+hifn_intr(int irq, void *arg, struct pt_regs *regs)
+#endif
+{
+ struct hifn_softc *sc = arg;
+ struct hifn_dma *dma;
+ u_int32_t dmacsr, restart;
+ int i, u;
+ unsigned long l_flags;
+
+ dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
+
+ /* Nothing in the DMA unit interrupted */
+ if ((dmacsr & sc->sc_dmaier) == 0)
+ return IRQ_NONE;
+
+ HIFN_LOCK(sc);
+
+ dma = sc->sc_dma;
+
+#ifdef HIFN_DEBUG
+ if (hifn_debug) {
+ device_printf(sc->sc_dev,
+ "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
+ dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
+ dma->cmdi, dma->srci, dma->dsti, dma->resi,
+ dma->cmdk, dma->srck, dma->dstk, dma->resk,
+ dma->cmdu, dma->srcu, dma->dstu, dma->resu);
+ }
+#endif
+
+ WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
+
+ if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
+ (dmacsr & HIFN_DMACSR_PUBDONE))
+ WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
+ READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
+
+ restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
+ if (restart)
+ device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
+
+ if (sc->sc_flags & HIFN_IS_7811) {
+ if (dmacsr & HIFN_DMACSR_ILLR)
+ device_printf(sc->sc_dev, "illegal read\n");
+ if (dmacsr & HIFN_DMACSR_ILLW)
+ device_printf(sc->sc_dev, "illegal write\n");
+ }
+
+ restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
+ HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
+ if (restart) {
+ device_printf(sc->sc_dev, "abort, resetting.\n");
+ hifnstats.hst_abort++;
+ hifn_abort(sc);
+ HIFN_UNLOCK(sc);
+ return IRQ_HANDLED;
+ }
+
+ if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
+ /*
+ * If there are no command slots left to process and we receive a
+ * "waiting on command" interrupt, disable that interrupt
+ * (by clearing its enable bit).
+ */
+ sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
+ WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
+ }
+
+ /*
+ * Clear the rings: walk each ring from its 'k' index while 'u'
+ * entries are outstanding, stopping at the first descriptor the chip
+ * still owns (HIFN_D_VALID); index HIFN_D_*_RSIZE is the jump
+ * descriptor and carries no work of its own.
+ */
+ i = dma->resk; u = dma->resu;
+ while (u != 0) {
+ HIFN_RESR_SYNC(sc, i,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
+ HIFN_RESR_SYNC(sc, i,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ break;
+ }
+
+ if (i != HIFN_D_RES_RSIZE) {
+ struct hifn_command *cmd;
+ u_int8_t *macbuf = NULL;
+
+ HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
+ cmd = dma->hifn_commands[i];
+ KASSERT(cmd != NULL,
+ ("hifn_intr: null command slot %u", i));
+ dma->hifn_commands[i] = NULL;
+
+ if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
+ macbuf = dma->result_bufs[i];
+ macbuf += 12;
+ }
+
+ hifn_callback(sc, cmd, macbuf);
+ hifnstats.hst_opackets++;
+ u--;
+ }
+
+ if (++i == (HIFN_D_RES_RSIZE + 1))
+ i = 0;
+ }
+ dma->resk = i; dma->resu = u;
+
+ i = dma->srck; u = dma->srcu;
+ while (u != 0) {
+ if (i == HIFN_D_SRC_RSIZE)
+ i = 0;
+ HIFN_SRCR_SYNC(sc, i,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
+ HIFN_SRCR_SYNC(sc, i,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ break;
+ }
+ i++, u--;
+ }
+ dma->srck = i; dma->srcu = u;
+
+ i = dma->cmdk; u = dma->cmdu;
+ while (u != 0) {
+ HIFN_CMDR_SYNC(sc, i,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
+ HIFN_CMDR_SYNC(sc, i,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ break;
+ }
+ if (i != HIFN_D_CMD_RSIZE) {
+ u--;
+ HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
+ }
+ if (++i == (HIFN_D_CMD_RSIZE + 1))
+ i = 0;
+ }
+ dma->cmdk = i; dma->cmdu = u;
+
+ HIFN_UNLOCK(sc);
+
+ if (sc->sc_needwakeup) { /* XXX check high watermark */
+ int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
+#ifdef HIFN_DEBUG
+ if (hifn_debug)
+ device_printf(sc->sc_dev,
+ "wakeup crypto (%x) u %d/%d/%d/%d\n",
+ sc->sc_needwakeup,
+ dma->cmdu, dma->srcu, dma->dstu, dma->resu);
+#endif
+ sc->sc_needwakeup &= ~wakeup;
+ crypto_unblock(sc->sc_cid, wakeup);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Allocate a new 'session' and return an encoded session id. 'sidp'
+ * contains our registration id, and should contain an encoded session
+ * id on successful allocation.
+ */
+static int
+hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
+{
+ struct hifn_softc *sc = device_get_softc(dev);
+ struct cryptoini *c;
+ int mac = 0, cry = 0, sesn;
+ struct hifn_session *ses = NULL;
+ unsigned long l_flags;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ KASSERT(sc != NULL, ("hifn_newsession: null softc"));
+ if (sidp == NULL || cri == NULL || sc == NULL) {
+ DPRINTF("%s,%d: %s - EINVAL\n", __FILE__, __LINE__, __FUNCTION__);
+ return (EINVAL);
+ }
+
+ HIFN_LOCK(sc);
+ if (sc->sc_sessions == NULL) {
+ ses = sc->sc_sessions = (struct hifn_session *)kmalloc(sizeof(*ses),
+ SLAB_ATOMIC);
+ if (ses == NULL) {
+ HIFN_UNLOCK(sc);
+ return (ENOMEM);
+ }
+ sesn = 0;
+ sc->sc_nsessions = 1;
+ } else {
+ for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
+ if (!sc->sc_sessions[sesn].hs_used) {
+ ses = &sc->sc_sessions[sesn];
+ break;
+ }
+ }
+
+ if (ses == NULL) {
+ sesn = sc->sc_nsessions;
+ ses = (struct hifn_session *)kmalloc((sesn + 1) * sizeof(*ses),
+ SLAB_ATOMIC);
+ if (ses == NULL) {
+ HIFN_UNLOCK(sc);
+ return (ENOMEM);
+ }
+ bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
+ bzero(sc->sc_sessions, sesn * sizeof(*ses));
+ kfree(sc->sc_sessions);
+ sc->sc_sessions = ses;
+ ses = &sc->sc_sessions[sesn];
+ sc->sc_nsessions++;
+ }
+ }
+ HIFN_UNLOCK(sc);
+
+ bzero(ses, sizeof(*ses));
+ ses->hs_used = 1;
+
+ for (c = cri; c != NULL; c = c->cri_next) {
+ switch (c->cri_alg) {
+ case CRYPTO_MD5:
+ case CRYPTO_SHA1:
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ if (mac) {
+ DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+ return (EINVAL);
+ }
+ mac = 1;
+ ses->hs_mlen = c->cri_mlen;
+ if (ses->hs_mlen == 0) {
+ switch (c->cri_alg) {
+ case CRYPTO_MD5:
+ case CRYPTO_MD5_HMAC:
+ ses->hs_mlen = 16;
+ break;
+ case CRYPTO_SHA1:
+ case CRYPTO_SHA1_HMAC:
+ ses->hs_mlen = 20;
+ break;
+ }
+ }
+ break;
+ case CRYPTO_DES_CBC:
+ case CRYPTO_3DES_CBC:
+ case CRYPTO_AES_CBC:
+ case CRYPTO_ARC4:
+ if (cry) {
+ DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+ return (EINVAL);
+ }
+ cry = 1;
+ break;
+ default:
+ DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+ return (EINVAL);
+ }
+ }
+ if (mac == 0 && cry == 0) {
+ DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+ return (EINVAL);
+ }
+
+ *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
+
+ return (0);
+}
+
+/*
+ * Deallocate a session.
+ * XXX this routine should run a zero'd mac/encrypt key into context ram.
+ * XXX to blow away any keys already stored there.
+ */
+static int
+hifn_freesession(device_t dev, u_int64_t tid)
+{
+ struct hifn_softc *sc = device_get_softc(dev);
+ int session, error;
+ u_int32_t sid = CRYPTO_SESID2LID(tid);
+ unsigned long l_flags;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ KASSERT(sc != NULL, ("hifn_freesession: null softc"));
+ if (sc == NULL) {
+ DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+ return (EINVAL);
+ }
+
+ HIFN_LOCK(sc);
+ session = HIFN_SESSION(sid);
+ if (session < sc->sc_nsessions) {
+ bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
+ error = 0;
+ } else {
+ DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+ error = EINVAL;
+ }
+ HIFN_UNLOCK(sc);
+
+ return (error);
+}
+
+static int
+hifn_process(device_t dev, struct cryptop *crp, int hint)
+{
+ struct hifn_softc *sc = device_get_softc(dev);
+ struct hifn_command *cmd = NULL;
+ int session, err, ivlen;
+ struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ if (crp == NULL || crp->crp_callback == NULL) {
+ hifnstats.hst_invalid++;
+ DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+ return (EINVAL);
+ }
+ session = HIFN_SESSION(crp->crp_sid);
+
+ if (sc == NULL || session >= sc->sc_nsessions) {
+ DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+ err = EINVAL;
+ goto errout;
+ }
+
+ cmd = kmalloc(sizeof(struct hifn_command), SLAB_ATOMIC);
+ if (cmd == NULL) {
+ hifnstats.hst_nomem++;
+ err = ENOMEM;
+ goto errout;
+ }
+ memset(cmd, 0, sizeof(*cmd));
+
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ cmd->src_skb = (struct sk_buff *)crp->crp_buf;
+ cmd->dst_skb = (struct sk_buff *)crp->crp_buf;
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ cmd->src_io = (struct uio *)crp->crp_buf;
+ cmd->dst_io = (struct uio *)crp->crp_buf;
+ } else {
+ cmd->src_buf = crp->crp_buf;
+ cmd->dst_buf = crp->crp_buf;
+ }
+
+ crd1 = crp->crp_desc;
+ if (crd1 == NULL) {
+ DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+ err = EINVAL;
+ goto errout;
+ }
+ crd2 = crd1->crd_next;
+
+ if (crd2 == NULL) {
+ if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
+ crd1->crd_alg == CRYPTO_SHA1_HMAC ||
+ crd1->crd_alg == CRYPTO_SHA1 ||
+ crd1->crd_alg == CRYPTO_MD5) {
+ maccrd = crd1;
+ enccrd = NULL;
+ } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
+ crd1->crd_alg == CRYPTO_3DES_CBC ||
+ crd1->crd_alg == CRYPTO_AES_CBC ||
+ crd1->crd_alg == CRYPTO_ARC4) {
+ if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
+ cmd->base_masks |= HIFN_BASE_CMD_DECODE;
+ maccrd = NULL;
+ enccrd = crd1;
+ } else {
+ DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+ err = EINVAL;
+ goto errout;
+ }
+ } else {
+ if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
+ crd1->crd_alg == CRYPTO_SHA1_HMAC ||
+ crd1->crd_alg == CRYPTO_MD5 ||
+ crd1->crd_alg == CRYPTO_SHA1) &&
+ (crd2->crd_alg == CRYPTO_DES_CBC ||
+ crd2->crd_alg == CRYPTO_3DES_CBC ||
+ crd2->crd_alg == CRYPTO_AES_CBC ||
+ crd2->crd_alg == CRYPTO_ARC4) &&
+ ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
+ cmd->base_masks = HIFN_BASE_CMD_DECODE;
+ maccrd = crd1;
+ enccrd = crd2;
+ } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
+ crd1->crd_alg == CRYPTO_ARC4 ||
+ crd1->crd_alg == CRYPTO_3DES_CBC ||
+ crd1->crd_alg == CRYPTO_AES_CBC) &&
+ (crd2->crd_alg == CRYPTO_MD5_HMAC ||
+ crd2->crd_alg == CRYPTO_SHA1_HMAC ||
+ crd2->crd_alg == CRYPTO_MD5 ||
+ crd2->crd_alg == CRYPTO_SHA1) &&
+ (crd1->crd_flags & CRD_F_ENCRYPT)) {
+ enccrd = crd1;
+ maccrd = crd2;
+ } else {
+ /*
+ * We cannot order the 7751 as requested
+ */
+ DPRINTF("%s,%d: %s %d,%d,%d - EINVAL\n",__FILE__,__LINE__,__FUNCTION__, crd1->crd_alg, crd2->crd_alg, crd1->crd_flags & CRD_F_ENCRYPT);
+ err = EINVAL;
+ goto errout;
+ }
+ }
+
+ if (enccrd) {
+ cmd->enccrd = enccrd;
+ cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
+ switch (enccrd->crd_alg) {
+ case CRYPTO_ARC4:
+ cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
+ break;
+ case CRYPTO_DES_CBC:
+ cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
+ HIFN_CRYPT_CMD_MODE_CBC |
+ HIFN_CRYPT_CMD_NEW_IV;
+ break;
+ case CRYPTO_3DES_CBC:
+ cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
+ HIFN_CRYPT_CMD_MODE_CBC |
+ HIFN_CRYPT_CMD_NEW_IV;
+ break;
+ case CRYPTO_AES_CBC:
+ cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
+ HIFN_CRYPT_CMD_MODE_CBC |
+ HIFN_CRYPT_CMD_NEW_IV;
+ break;
+ default:
+ DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+ err = EINVAL;
+ goto errout;
+ }
+ if (enccrd->crd_alg != CRYPTO_ARC4) {
+ ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
+ HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
+ if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+ if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+ bcopy(enccrd->crd_iv, cmd->iv, ivlen);
+ else
+ read_random(cmd->iv, ivlen);
+
+ if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
+ == 0) {
+ crypto_copyback(crp->crp_flags,
+ crp->crp_buf, enccrd->crd_inject,
+ ivlen, cmd->iv);
+ }
+ } else {
+ if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+ bcopy(enccrd->crd_iv, cmd->iv, ivlen);
+ else {
+ crypto_copydata(crp->crp_flags,
+ crp->crp_buf, enccrd->crd_inject,
+ ivlen, cmd->iv);
+ }
+ }
+ }
+
+ if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
+ cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
+ cmd->ck = enccrd->crd_key;
+ cmd->cklen = enccrd->crd_klen >> 3;
+ cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
+
+ /*
+ * Need to specify the size for the AES key in the masks.
+ */
+ if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
+ HIFN_CRYPT_CMD_ALG_AES) {
+ switch (cmd->cklen) {
+ case 16:
+ cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
+ break;
+ case 24:
+ cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
+ break;
+ case 32:
+ cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
+ break;
+ default:
+ DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+ err = EINVAL;
+ goto errout;
+ }
+ }
+ }
+
+ if (maccrd) {
+ cmd->maccrd = maccrd;
+ cmd->base_masks |= HIFN_BASE_CMD_MAC;
+
+ switch (maccrd->crd_alg) {
+ case CRYPTO_MD5:
+ cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
+ HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
+ HIFN_MAC_CMD_POS_IPSEC;
+ break;
+ case CRYPTO_MD5_HMAC:
+ cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
+ HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
+ HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
+ break;
+ case CRYPTO_SHA1:
+ cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
+ HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
+ HIFN_MAC_CMD_POS_IPSEC;
+ break;
+ case CRYPTO_SHA1_HMAC:
+ cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
+ HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
+ HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
+ break;
+ }
+
+ if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
+ maccrd->crd_alg == CRYPTO_MD5_HMAC) {
+ cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
+ bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
+ bzero(cmd->mac + (maccrd->crd_klen >> 3),
+ HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
+ }
+ }
+
+ cmd->crp = crp;
+ cmd->session_num = session;
+ cmd->softc = sc;
+
+ err = hifn_crypto(sc, cmd, crp, hint);
+ if (!err) {
+ return 0;
+ } else if (err == ERESTART) {
+ /*
+ * There weren't enough resources to dispatch the request
+ * to the part. Notify the caller so they'll requeue this
+ * request and resubmit it again soon.
+ */
+#ifdef HIFN_DEBUG
+ if (hifn_debug)
+ device_printf(sc->sc_dev, "requeue request\n");
+#endif
+ kfree(cmd);
+ sc->sc_needwakeup |= CRYPTO_SYMQ;
+ return (err);
+ }
+
+errout:
+ if (cmd != NULL)
+ kfree(cmd);
+ if (err == EINVAL)
+ hifnstats.hst_invalid++;
+ else
+ hifnstats.hst_nomem++;
+ crp->crp_etype = err;
+ crypto_done(crp);
+ return (err);
+}
+
+static void
+hifn_abort(struct hifn_softc *sc)
+{
+ struct hifn_dma *dma = sc->sc_dma;
+ struct hifn_command *cmd;
+ struct cryptop *crp;
+ int i, u;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ i = dma->resk; u = dma->resu;
+ while (u != 0) {
+ cmd = dma->hifn_commands[i];
+ KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
+ dma->hifn_commands[i] = NULL;
+ crp = cmd->crp;
+
+ if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
+ /* Salvage what we can. */
+ u_int8_t *macbuf;
+
+ if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
+ macbuf = dma->result_bufs[i];
+ macbuf += 12;
+ } else
+ macbuf = NULL;
+ hifnstats.hst_opackets++;
+ hifn_callback(sc, cmd, macbuf);
+ } else {
+#if 0
+ if (cmd->src_map == cmd->dst_map) {
+ bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
+ BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ } else {
+ bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
+ BUS_DMASYNC_POSTREAD);
+ }
+#endif
+
+ if (cmd->src_skb != cmd->dst_skb) {
+#ifdef NOTYET
+ m_freem(cmd->src_m);
+ crp->crp_buf = (caddr_t)cmd->dst_m;
+#else
+ device_printf(sc->sc_dev,
+ "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
+ __FILE__, __LINE__);
+#endif
+ }
+
+ /* non-shared buffers cannot be restarted */
+ if (cmd->src_map != cmd->dst_map) {
+ /*
+ * XXX should be EAGAIN, delayed until
+ * after the reset.
+ */
+ crp->crp_etype = ENOMEM;
+ pci_unmap_buf(sc, &cmd->dst);
+ } else
+ crp->crp_etype = ENOMEM;
+
+ pci_unmap_buf(sc, &cmd->src);
+
+ kfree(cmd);
+ if (crp->crp_etype != EAGAIN)
+ crypto_done(crp);
+ }
+
+ if (++i == HIFN_D_RES_RSIZE)
+ i = 0;
+ u--;
+ }
+ dma->resk = i; dma->resu = u;
+
+ hifn_reset_board(sc, 1);
+ hifn_init_dma(sc);
+ hifn_init_pci_registers(sc);
+}
+
+static void
+hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
+{
+ struct hifn_dma *dma = sc->sc_dma;
+ struct cryptop *crp = cmd->crp;
+ struct cryptodesc *crd;
+ int i, u;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+#if 0
+ if (cmd->src_map == cmd->dst_map) {
+ bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
+ BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
+ } else {
+ bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
+ BUS_DMASYNC_POSTREAD);
+ }
+#endif
+
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ if (cmd->src_skb != cmd->dst_skb) {
+#ifdef NOTYET
+ crp->crp_buf = (caddr_t)cmd->dst_m;
+ totlen = cmd->src_mapsize;
+ for (m = cmd->dst_m; m != NULL; m = m->m_next) {
+ if (totlen < m->m_len) {
+ m->m_len = totlen;
+ totlen = 0;
+ } else
+ totlen -= m->m_len;
+ }
+ cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
+ m_freem(cmd->src_m);
+#else
+ device_printf(sc->sc_dev,
+ "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
+ __FILE__, __LINE__);
+#endif
+ }
+ }
+
+ if (cmd->sloplen != 0) {
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
+ (caddr_t)&dma->slop[cmd->slopidx]);
+ }
+
+ i = dma->dstk; u = dma->dstu;
+ while (u != 0) {
+ if (i == HIFN_D_DST_RSIZE)
+ i = 0;
+#if 0
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+#endif
+ if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
+#if 0
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+#endif
+ break;
+ }
+ i++, u--;
+ }
+ dma->dstk = i; dma->dstu = u;
+
+ hifnstats.hst_obytes += cmd->dst_mapsize;
+
+ if (macbuf != NULL) {
+ for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+ int len;
+
+ if (crd->crd_alg != CRYPTO_MD5 &&
+ crd->crd_alg != CRYPTO_SHA1 &&
+ crd->crd_alg != CRYPTO_MD5_HMAC &&
+ crd->crd_alg != CRYPTO_SHA1_HMAC) {
+ continue;
+ }
+ len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject, len, macbuf);
+ break;
+ }
+ }
+
+ if (cmd->src_map != cmd->dst_map)
+ pci_unmap_buf(sc, &cmd->dst);
+ pci_unmap_buf(sc, &cmd->src);
+ kfree(cmd);
+ crypto_done(crp);
+}
+
+/*
+ * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
+ * and Group 1 registers; avoid conditions that could create
+ * burst writes by doing a read in between the writes.
+ *
+ * NB: The read we interpose is always to the same register;
+ * we do this because reading from an arbitrary (e.g. last)
+ * register may not always work.
+ */
+static void
+hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
+{
+ if (sc->sc_flags & HIFN_IS_7811) {
+ if (sc->sc_bar0_lastreg == reg - 4)
+ readl(sc->sc_bar0 + HIFN_0_PUCNFG);
+ sc->sc_bar0_lastreg = reg;
+ }
+ writel(val, sc->sc_bar0 + reg);
+}
+
+static void
+hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
+{
+ if (sc->sc_flags & HIFN_IS_7811) {
+ if (sc->sc_bar1_lastreg == reg - 4)
+ readl(sc->sc_bar1 + HIFN_1_REVID);
+ sc->sc_bar1_lastreg = reg;
+ }
+ writel(val, sc->sc_bar1 + reg);
+}
+
+
+static struct pci_device_id hifn_pci_tbl[] = {
+ { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ /*
+ * Other vendors share this PCI ID as well, such as
+ * http://www.powercrypt.com, and obviously they also
+ * use the same key.
+ */
+ { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ { 0, 0, 0, 0, 0, 0, }
+};
+MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
+
+static struct pci_driver hifn_driver = {
+ .name = "hifn",
+ .id_table = hifn_pci_tbl,
+ .probe = hifn_probe,
+ .remove = hifn_remove,
+ /* add PM stuff here one day */
+};
+
+static int __init hifn_init (void)
+{
+ struct hifn_softc *sc = NULL;
+ int rc;
+
+ DPRINTF("%s(%p)\n", __FUNCTION__, hifn_init);
+
+ rc = pci_register_driver(&hifn_driver);
+ pci_register_driver_compat(&hifn_driver, rc);
+
+ return rc;
+}
+
+static void __exit hifn_exit (void)
+{
+ pci_unregister_driver(&hifn_driver);
+}
+
+module_init(hifn_init);
+module_exit(hifn_exit);
+
+MODULE_LICENSE("BSD");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("OCF driver for hifn PCI crypto devices");
diff --git a/target/linux/generic/files/crypto/ocf/hifn/hifn7751reg.h b/target/linux/generic/files/crypto/ocf/hifn/hifn7751reg.h
new file mode 100644
index 000000000..ccf54f9c4
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/hifn/hifn7751reg.h
@@ -0,0 +1,540 @@
+/* $FreeBSD: src/sys/dev/hifn/hifn7751reg.h,v 1.7 2007/03/21 03:42:49 sam Exp $ */
+/* $OpenBSD: hifn7751reg.h,v 1.35 2002/04/08 17:49:42 jason Exp $ */
+
+/*-
+ * Invertex AEON / Hifn 7751 driver
+ * Copyright (c) 1999 Invertex Inc. All rights reserved.
+ * Copyright (c) 1999 Theo de Raadt
+ * Copyright (c) 2000-2001 Network Security Technologies, Inc.
+ * http://www.netsec.net
+ *
+ * Please send any comments, feedback, bug-fixes, or feature requests to
+ * software@invertex.com.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+#ifndef __HIFN_H__
+#define __HIFN_H__
+
+/*
+ * Some PCI configuration space offset defines. The names were made
+ * identical to the names used by the Linux kernel.
+ */
+#define HIFN_BAR0 PCIR_BAR(0) /* PUC register map */
+#define HIFN_BAR1 PCIR_BAR(1) /* DMA register map */
+#define HIFN_TRDY_TIMEOUT 0x40
+#define HIFN_RETRY_TIMEOUT 0x41
+
+/*
+ * PCI vendor and device identifiers
+ * (the names are preserved from their OpenBSD source).
+ */
+#define PCI_VENDOR_HIFN 0x13a3 /* Hifn */
+#define PCI_PRODUCT_HIFN_7751 0x0005 /* 7751 */
+#define PCI_PRODUCT_HIFN_6500 0x0006 /* 6500 */
+#define PCI_PRODUCT_HIFN_7811 0x0007 /* 7811 */
+#define PCI_PRODUCT_HIFN_7855 0x001f /* 7855 */
+#define PCI_PRODUCT_HIFN_7951 0x0012 /* 7951 */
+#define PCI_PRODUCT_HIFN_7955 0x0020 /* 7954/7955 */
+#define PCI_PRODUCT_HIFN_7956 0x001d /* 7956 */
+
+#define PCI_VENDOR_INVERTEX 0x14e1 /* Invertex */
+#define PCI_PRODUCT_INVERTEX_AEON 0x0005 /* AEON */
+
+#define PCI_VENDOR_NETSEC 0x1660 /* NetSec */
+#define PCI_PRODUCT_NETSEC_7751 0x7751 /* 7751 */
+
+/*
+ * The values below should be multiples of 4 -- and large enough to handle
+ * any command the driver implements.
+ *
+ * MAX_COMMAND = base command + mac command + encrypt command +
+ * mac-key + rc4-key
+ * MAX_RESULT = base result + mac result + mac + encrypt result
+ *
+ *
+ */
+#define HIFN_MAX_COMMAND (8 + 8 + 8 + 64 + 260)
+#define HIFN_MAX_RESULT (8 + 4 + 20 + 4)
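+/*
+ * For reference, the component sizes above work out to
+ * HIFN_MAX_COMMAND = 8 + 8 + 8 + 64 + 260 = 348 bytes and
+ * HIFN_MAX_RESULT  = 8 + 4 + 20 + 4 = 36 bytes, both multiples of 4.
+ */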
+
+/*
+ * hifn_desc_t
+ *
+ * Holds an individual descriptor for any of the rings.
+ */
+typedef struct hifn_desc {
+ volatile u_int32_t l; /* length and status bits */
+ volatile u_int32_t p;
+} hifn_desc_t;
+
+/*
+ * Masks for the "length" field of struct hifn_desc.
+ */
+#define HIFN_D_LENGTH 0x0000ffff /* length bit mask */
+#define HIFN_D_MASKDONEIRQ 0x02000000 /* mask the done interrupt */
+#define HIFN_D_DESTOVER 0x04000000 /* destination overflow */
+#define HIFN_D_OVER 0x08000000 /* overflow */
+#define HIFN_D_LAST 0x20000000 /* last descriptor in chain */
+#define HIFN_D_JUMP 0x40000000 /* jump descriptor */
+#define HIFN_D_VALID 0x80000000 /* valid bit */
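+/*
+ * Illustrative sketch (not a definitive recipe -- the exact flag combination
+ * depends on the ring and the descriptor's position): the driver composes a
+ * descriptor's "l" word from a byte count plus the flag bits above, e.g.
+ *
+ *	desc->l = htole32(len | HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
+ */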
+
+
+/*
+ * Processing Unit Registers (offset from BASEREG0)
+ */
+#define HIFN_0_PUDATA 0x00 /* Processing Unit Data */
+#define HIFN_0_PUCTRL 0x04 /* Processing Unit Control */
+#define HIFN_0_PUISR 0x08 /* Processing Unit Interrupt Status */
+#define HIFN_0_PUCNFG 0x0c /* Processing Unit Configuration */
+#define HIFN_0_PUIER 0x10 /* Processing Unit Interrupt Enable */
+#define HIFN_0_PUSTAT 0x14 /* Processing Unit Status/Chip ID */
+#define HIFN_0_FIFOSTAT 0x18 /* FIFO Status */
+#define HIFN_0_FIFOCNFG 0x1c /* FIFO Configuration */
+#define HIFN_0_PUCTRL2 0x28 /* Processing Unit Control (2nd map) */
+#define HIFN_0_MUTE1 0x80
+#define HIFN_0_MUTE2 0x90
+#define HIFN_0_SPACESIZE 0x100 /* Register space size */
+
+/* Processing Unit Control Register (HIFN_0_PUCTRL) */
+#define HIFN_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */
+#define HIFN_PUCTRL_STOP 0x0008 /* stop pu */
+#define HIFN_PUCTRL_LOCKRAM 0x0004 /* lock ram */
+#define HIFN_PUCTRL_DMAENA 0x0002 /* enable dma */
+#define HIFN_PUCTRL_RESET 0x0001 /* Reset processing unit */
+
+/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
+#define HIFN_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */
+#define HIFN_PUISR_DATAERR 0x4000 /* Data error interrupt */
+#define HIFN_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
+#define HIFN_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
+#define HIFN_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */
+#define HIFN_PUISR_SRCCMD 0x0080 /* Source command interrupt */
+#define HIFN_PUISR_SRCCTX 0x0040 /* Source context interrupt */
+#define HIFN_PUISR_SRCDATA 0x0020 /* Source data interrupt */
+#define HIFN_PUISR_DSTDATA 0x0010 /* Destination data interrupt */
+#define HIFN_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */
+
+/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
+#define HIFN_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */
+#define HIFN_PUCNFG_DSZ_256K 0x0000 /* 256k dram */
+#define HIFN_PUCNFG_DSZ_512K 0x2000 /* 512k dram */
+#define HIFN_PUCNFG_DSZ_1M 0x4000 /* 1m dram */
+#define HIFN_PUCNFG_DSZ_2M 0x6000 /* 2m dram */
+#define HIFN_PUCNFG_DSZ_4M 0x8000 /* 4m dram */
+#define HIFN_PUCNFG_DSZ_8M 0xa000 /* 8m dram */
+#define HIFN_PUNCFG_DSZ_16M 0xc000 /* 16m dram */
+#define HIFN_PUCNFG_DSZ_32M 0xe000 /* 32m dram */
+#define HIFN_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */
+#define HIFN_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */
+#define HIFN_PUCNFG_DRFR_256 0x0800 /* 256 divisor of ECLK */
+#define HIFN_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */
+#define HIFN_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */
+#define HIFN_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... */
+#define HIFN_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */
+#define HIFN_PUCNFG_BUS32 0x0040 /* Bus width 32bits */
+#define HIFN_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */
+#define HIFN_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */
+#define HIFN_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */
+#define HIFN_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */
+#define HIFN_PUCNFG_COMPSING 0x0004 /* Enable single compression context */
+#define HIFN_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */
+
+/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
+#define HIFN_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */
+#define HIFN_PUIER_DATAERR 0x4000 /* Data error interrupt */
+#define HIFN_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
+#define HIFN_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
+#define HIFN_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */
+#define HIFN_PUIER_SRCCMD 0x0080 /* Source command interrupt */
+#define HIFN_PUIER_SRCCTX 0x0040 /* Source context interrupt */
+#define HIFN_PUIER_SRCDATA 0x0020 /* Source data interrupt */
+#define HIFN_PUIER_DSTDATA 0x0010 /* Destination data interrupt */
+#define HIFN_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */
+
+/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
+#define HIFN_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */
+#define HIFN_PUSTAT_DATAERR 0x4000 /* Data error interrupt */
+#define HIFN_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
+#define HIFN_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
+#define HIFN_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */
+#define HIFN_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */
+#define HIFN_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */
+#define HIFN_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */
+#define HIFN_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */
+#define HIFN_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */
+#define HIFN_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */
+#define HIFN_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */
+#define HIFN_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */
+#define HIFN_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */
+#define HIFN_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */
+#define HIFN_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */
+#define HIFN_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */
+
+/* FIFO Status Register (HIFN_0_FIFOSTAT) */
+#define HIFN_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */
+#define HIFN_FIFOSTAT_DST 0x007f /* Destination FIFO available */
+
+/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
+#define HIFN_FIFOCNFG_THRESHOLD 0x0400 /* must be written as this value */
+
+/*
+ * DMA Interface Registers (offset from BASEREG1)
+ */
+#define HIFN_1_DMA_CRAR 0x0c /* DMA Command Ring Address */
+#define HIFN_1_DMA_SRAR 0x1c /* DMA Source Ring Address */
+#define HIFN_1_DMA_RRAR 0x2c /* DMA Result Ring Address */
+#define HIFN_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */
+#define HIFN_1_DMA_CSR 0x40 /* DMA Status and Control */
+#define HIFN_1_DMA_IER 0x44 /* DMA Interrupt Enable */
+#define HIFN_1_DMA_CNFG 0x48 /* DMA Configuration */
+#define HIFN_1_PLL 0x4c /* 7955/7956: PLL config */
+#define HIFN_1_7811_RNGENA 0x60 /* 7811: rng enable */
+#define HIFN_1_7811_RNGCFG 0x64 /* 7811: rng config */
+#define HIFN_1_7811_RNGDAT 0x68 /* 7811: rng data */
+#define HIFN_1_7811_RNGSTS 0x6c /* 7811: rng status */
+#define HIFN_1_DMA_CNFG2 0x6c /* 7955/7956: dma config #2 */
+#define HIFN_1_7811_MIPSRST 0x94 /* 7811: MIPS reset */
+#define HIFN_1_REVID 0x98 /* Revision ID */
+
+#define HIFN_1_PUB_RESET 0x204 /* Public/RNG Reset */
+#define HIFN_1_PUB_BASE 0x300 /* Public Base Address */
+#define HIFN_1_PUB_OPLEN 0x304 /* 7951-compat Public Operand Length */
+#define HIFN_1_PUB_OP 0x308 /* 7951-compat Public Operand */
+#define HIFN_1_PUB_STATUS 0x30c /* 7951-compat Public Status */
+#define HIFN_1_PUB_IEN 0x310 /* Public Interrupt enable */
+#define HIFN_1_RNG_CONFIG 0x314 /* RNG config */
+#define HIFN_1_RNG_DATA 0x318 /* RNG data */
+#define HIFN_1_PUB_MODE 0x320 /* PK mode */
+#define HIFN_1_PUB_FIFO_OPLEN 0x380 /* first element of oplen fifo */
+#define HIFN_1_PUB_FIFO_OP 0x384 /* first element of op fifo */
+#define HIFN_1_PUB_MEM 0x400 /* start of Public key memory */
+#define HIFN_1_PUB_MEMEND 0xbff /* end of Public key memory */
+
+/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
+#define	HIFN_DMACSR_D_CTRLMASK	0xc0000000	/* Destination Ring Control */
+#define HIFN_DMACSR_D_CTRL_NOP 0x00000000 /* Dest. Control: no-op */
+#define HIFN_DMACSR_D_CTRL_DIS 0x40000000 /* Dest. Control: disable */
+#define HIFN_DMACSR_D_CTRL_ENA 0x80000000 /* Dest. Control: enable */
+#define	HIFN_DMACSR_D_ABORT	0x20000000	/* Destination Ring PCI Abort */
+#define	HIFN_DMACSR_D_DONE	0x10000000	/* Destination Ring Done */
+#define	HIFN_DMACSR_D_LAST	0x08000000	/* Destination Ring Last */
+#define	HIFN_DMACSR_D_WAIT	0x04000000	/* Destination Ring Waiting */
+#define	HIFN_DMACSR_D_OVER	0x02000000	/* Destination Ring Overflow */
+#define HIFN_DMACSR_R_CTRL 0x00c00000 /* Result Ring Control */
+#define HIFN_DMACSR_R_CTRL_NOP 0x00000000 /* Result Control: no-op */
+#define HIFN_DMACSR_R_CTRL_DIS 0x00400000 /* Result Control: disable */
+#define HIFN_DMACSR_R_CTRL_ENA 0x00800000 /* Result Control: enable */
+#define HIFN_DMACSR_R_ABORT 0x00200000 /* Result Ring PCI Abort */
+#define HIFN_DMACSR_R_DONE 0x00100000 /* Result Ring Done */
+#define HIFN_DMACSR_R_LAST 0x00080000 /* Result Ring Last */
+#define HIFN_DMACSR_R_WAIT 0x00040000 /* Result Ring Waiting */
+#define HIFN_DMACSR_R_OVER 0x00020000 /* Result Ring Overflow */
+#define HIFN_DMACSR_S_CTRL 0x0000c000 /* Source Ring Control */
+#define HIFN_DMACSR_S_CTRL_NOP 0x00000000 /* Source Control: no-op */
+#define HIFN_DMACSR_S_CTRL_DIS 0x00004000 /* Source Control: disable */
+#define HIFN_DMACSR_S_CTRL_ENA 0x00008000 /* Source Control: enable */
+#define HIFN_DMACSR_S_ABORT 0x00002000 /* Source Ring PCI Abort */
+#define HIFN_DMACSR_S_DONE 0x00001000 /* Source Ring Done */
+#define HIFN_DMACSR_S_LAST 0x00000800 /* Source Ring Last */
+#define HIFN_DMACSR_S_WAIT 0x00000400 /* Source Ring Waiting */
+#define HIFN_DMACSR_ILLW 0x00000200 /* Illegal write (7811 only) */
+#define HIFN_DMACSR_ILLR 0x00000100 /* Illegal read (7811 only) */
+#define HIFN_DMACSR_C_CTRL 0x000000c0 /* Command Ring Control */
+#define HIFN_DMACSR_C_CTRL_NOP 0x00000000 /* Command Control: no-op */
+#define HIFN_DMACSR_C_CTRL_DIS 0x00000040 /* Command Control: disable */
+#define HIFN_DMACSR_C_CTRL_ENA 0x00000080 /* Command Control: enable */
+#define HIFN_DMACSR_C_ABORT 0x00000020 /* Command Ring PCI Abort */
+#define HIFN_DMACSR_C_DONE 0x00000010 /* Command Ring Done */
+#define HIFN_DMACSR_C_LAST 0x00000008 /* Command Ring Last */
+#define HIFN_DMACSR_C_WAIT 0x00000004 /* Command Ring Waiting */
+#define HIFN_DMACSR_PUBDONE 0x00000002 /* Public op done (7951 only) */
+#define HIFN_DMACSR_ENGINE 0x00000001 /* Command Ring Engine IRQ */
+
+/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
+#define HIFN_DMAIER_D_ABORT 0x20000000 /* Destination Ring PCIAbort */
+#define HIFN_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */
+#define HIFN_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */
+#define HIFN_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */
+#define HIFN_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */
+#define HIFN_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */
+#define HIFN_DMAIER_R_DONE 0x00100000 /* Result Ring Done */
+#define HIFN_DMAIER_R_LAST 0x00080000 /* Result Ring Last */
+#define HIFN_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */
+#define HIFN_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */
+#define HIFN_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */
+#define HIFN_DMAIER_S_DONE 0x00001000 /* Source Ring Done */
+#define HIFN_DMAIER_S_LAST 0x00000800 /* Source Ring Last */
+#define HIFN_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */
+#define HIFN_DMAIER_ILLW 0x00000200 /* Illegal write (7811 only) */
+#define HIFN_DMAIER_ILLR 0x00000100 /* Illegal read (7811 only) */
+#define HIFN_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */
+#define HIFN_DMAIER_C_DONE 0x00000010 /* Command Ring Done */
+#define HIFN_DMAIER_C_LAST 0x00000008 /* Command Ring Last */
+#define HIFN_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */
+#define HIFN_DMAIER_PUBDONE 0x00000002 /* public op done (7951 only) */
+#define HIFN_DMAIER_ENGINE 0x00000001 /* Engine IRQ */
+
+/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
+#define HIFN_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */
+#define HIFN_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */
+#define HIFN_DMACNFG_UNLOCK 0x00000800
+#define HIFN_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */
+#define HIFN_DMACNFG_LAST 0x00000010 /* Host control LAST bit */
+#define HIFN_DMACNFG_MODE 0x00000004 /* DMA mode */
+#define HIFN_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */
+#define HIFN_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */
+
+/* DMA Configuration Register (HIFN_1_DMA_CNFG2) */
+#define HIFN_DMACNFG2_PKSWAP32 (1 << 19) /* swap the OPLEN/OP reg */
+#define HIFN_DMACNFG2_PKSWAP8 (1 << 18) /* swap the bits of OPLEN/OP */
+#define HIFN_DMACNFG2_BAR0_SWAP32 (1<<17) /* swap the bytes of BAR0 */
+#define	HIFN_DMACNFG2_BAR1_SWAP8	(1<<16)	/* swap the bits of BAR1 */
+#define HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT 12
+#define HIFN_DMACNFG2_INIT_READ_BURST_SHIFT 8
+#define HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT 4
+#define HIFN_DMACNFG2_TGT_READ_BURST_SHIFT 0
+
+/* 7811 RNG Enable Register (HIFN_1_7811_RNGENA) */
+#define HIFN_7811_RNGENA_ENA 0x00000001 /* enable RNG */
+
+/* 7811 RNG Config Register (HIFN_1_7811_RNGCFG) */
+#define HIFN_7811_RNGCFG_PRE1 0x00000f00 /* first prescalar */
+#define HIFN_7811_RNGCFG_OPRE 0x00000080 /* output prescalar */
+#define HIFN_7811_RNGCFG_DEFL 0x00000f80 /* 2 words/ 1/100 sec */
+
+/* 7811 RNG Status Register (HIFN_1_7811_RNGSTS) */
+#define HIFN_7811_RNGSTS_RDY 0x00004000 /* two numbers in FIFO */
+#define HIFN_7811_RNGSTS_UFL 0x00001000 /* rng underflow */
+
+/* 7811 MIPS Reset Register (HIFN_1_7811_MIPSRST) */
+#define HIFN_MIPSRST_BAR2SIZE 0xffff0000 /* sdram size */
+#define HIFN_MIPSRST_GPRAMINIT 0x00008000 /* gpram can be accessed */
+#define HIFN_MIPSRST_CRAMINIT 0x00004000 /* ctxram can be accessed */
+#define HIFN_MIPSRST_LED2 0x00000400 /* external LED2 */
+#define HIFN_MIPSRST_LED1 0x00000200 /* external LED1 */
+#define HIFN_MIPSRST_LED0 0x00000100 /* external LED0 */
+#define HIFN_MIPSRST_MIPSDIS 0x00000004 /* disable MIPS */
+#define HIFN_MIPSRST_MIPSRST 0x00000002 /* warm reset MIPS */
+#define HIFN_MIPSRST_MIPSCOLD 0x00000001 /* cold reset MIPS */
+
+/* Public key reset register (HIFN_1_PUB_RESET) */
+#define HIFN_PUBRST_RESET 0x00000001 /* reset public/rng unit */
+
+/* Public operation register (HIFN_1_PUB_OP) */
+#define HIFN_PUBOP_AOFFSET 0x0000003e /* A offset */
+#define HIFN_PUBOP_BOFFSET 0x00000fc0 /* B offset */
+#define HIFN_PUBOP_MOFFSET 0x0003f000 /* M offset */
+#define HIFN_PUBOP_OP_MASK 0x003c0000 /* Opcode: */
+#define HIFN_PUBOP_OP_NOP 0x00000000 /* NOP */
+#define HIFN_PUBOP_OP_ADD 0x00040000 /* ADD */
+#define HIFN_PUBOP_OP_ADDC 0x00080000 /* ADD w/carry */
+#define HIFN_PUBOP_OP_SUB 0x000c0000 /* SUB */
+#define HIFN_PUBOP_OP_SUBC 0x00100000 /* SUB w/carry */
+#define HIFN_PUBOP_OP_MODADD 0x00140000 /* Modular ADD */
+#define HIFN_PUBOP_OP_MODSUB 0x00180000 /* Modular SUB */
+#define HIFN_PUBOP_OP_INCA 0x001c0000 /* INC A */
+#define HIFN_PUBOP_OP_DECA 0x00200000 /* DEC A */
+#define HIFN_PUBOP_OP_MULT 0x00240000 /* MULT */
+#define HIFN_PUBOP_OP_MODMULT 0x00280000 /* Modular MULT */
+#define HIFN_PUBOP_OP_MODRED 0x002c0000 /* Modular Red */
+#define HIFN_PUBOP_OP_MODEXP 0x00300000 /* Modular Exp */
+
+/* Public operand length register (HIFN_1_PUB_OPLEN) */
+#define HIFN_PUBOPLEN_MODLEN 0x0000007f
+#define HIFN_PUBOPLEN_EXPLEN 0x0003ff80
+#define HIFN_PUBOPLEN_REDLEN 0x003c0000
+
+/* Public status register (HIFN_1_PUB_STATUS) */
+#define HIFN_PUBSTS_DONE 0x00000001 /* operation done */
+#define HIFN_PUBSTS_CARRY 0x00000002 /* carry */
+#define HIFN_PUBSTS_FIFO_EMPTY 0x00000100 /* fifo empty */
+#define HIFN_PUBSTS_FIFO_FULL 0x00000200 /* fifo full */
+#define HIFN_PUBSTS_FIFO_OVFL 0x00000400 /* fifo overflow */
+#define HIFN_PUBSTS_FIFO_WRITE 0x000f0000 /* fifo write */
+#define HIFN_PUBSTS_FIFO_READ 0x0f000000 /* fifo read */
+
+/* Public interrupt enable register (HIFN_1_PUB_IEN) */
+#define HIFN_PUBIEN_DONE 0x00000001 /* operation done interrupt */
+
+/* Random number generator config register (HIFN_1_RNG_CONFIG) */
+#define HIFN_RNGCFG_ENA 0x00000001 /* enable rng */
+
+/*
+ * Register offsets in register set 1
+ */
+
+#define HIFN_UNLOCK_SECRET1 0xf4
+#define HIFN_UNLOCK_SECRET2 0xfc
+
+/*
+ * PLL config register
+ *
+ * This register is present only on 7954/7955/7956 parts. It must be
+ * programmed according to the bus interface method used by the h/w.
+ * Note that the parts require a stable clock. Since the PCI clock
+ * may vary, the reference clock must usually be used.  To avoid
+ * overclocking the core logic, setup must be done carefully; refer
+ * to the driver for details.  The exact multiplier required varies
+ * by part and system configuration; refer to the Hifn documentation.
+ */
+#define HIFN_PLL_REF_SEL 0x00000001 /* REF/HBI clk selection */
+#define HIFN_PLL_BP 0x00000002 /* bypass (used during setup) */
+/* bit 2 reserved */
+#define HIFN_PLL_PK_CLK_SEL 0x00000008 /* public key clk select */
+#define HIFN_PLL_PE_CLK_SEL 0x00000010 /* packet engine clk select */
+/* bits 5-9 reserved */
+#define HIFN_PLL_MBSET 0x00000400 /* must be set to 1 */
+#define HIFN_PLL_ND 0x00003800 /* Fpll_ref multiplier select */
+#define HIFN_PLL_ND_SHIFT 11
+#define HIFN_PLL_ND_2 0x00000000 /* 2x */
+#define HIFN_PLL_ND_4 0x00000800 /* 4x */
+#define HIFN_PLL_ND_6 0x00001000 /* 6x */
+#define HIFN_PLL_ND_8 0x00001800 /* 8x */
+#define HIFN_PLL_ND_10 0x00002000 /* 10x */
+#define HIFN_PLL_ND_12 0x00002800 /* 12x */
+/* bits 14-15 reserved */
+#define HIFN_PLL_IS 0x00010000 /* charge pump current select */
+/* bits 17-31 reserved */
+
+/*
+ * Board configuration specifies only these bits.
+ */
+#define HIFN_PLL_CONFIG (HIFN_PLL_IS|HIFN_PLL_ND|HIFN_PLL_REF_SEL)
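+/*
+ * Hypothetical example (the correct multiplier is board-specific -- consult
+ * the Hifn documentation): a board running the PLL from the external
+ * reference clock with a 6x multiplier would supply
+ * (HIFN_PLL_REF_SEL | HIFN_PLL_ND_6) as its PLL configuration here.
+ */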
+
+/*
+ * Public Key Engine Mode Register
+ */
+#define HIFN_PKMODE_HOSTINVERT (1 << 0) /* HOST INVERT */
+#define HIFN_PKMODE_ENHANCED (1 << 1) /* Enable enhanced mode */
+
+
+/*********************************************************************
+ * Structs for board commands
+ *
+ *********************************************************************/
+
+/*
+ * Structure to help build up the command data structure.
+ */
+typedef struct hifn_base_command {
+ volatile u_int16_t masks;
+ volatile u_int16_t session_num;
+ volatile u_int16_t total_source_count;
+ volatile u_int16_t total_dest_count;
+} hifn_base_command_t;
+
+#define HIFN_BASE_CMD_MAC 0x0400
+#define HIFN_BASE_CMD_CRYPT 0x0800
+#define HIFN_BASE_CMD_DECODE 0x2000
+#define HIFN_BASE_CMD_SRCLEN_M 0xc000
+#define HIFN_BASE_CMD_SRCLEN_S 14
+#define HIFN_BASE_CMD_DSTLEN_M 0x3000
+#define HIFN_BASE_CMD_DSTLEN_S 12
+#define HIFN_BASE_CMD_LENMASK_HI 0x30000
+#define HIFN_BASE_CMD_LENMASK_LO 0x0ffff
+
+/*
+ * Structure to help build up the command data structure.
+ */
+typedef struct hifn_crypt_command {
+ volatile u_int16_t masks;
+ volatile u_int16_t header_skip;
+ volatile u_int16_t source_count;
+ volatile u_int16_t reserved;
+} hifn_crypt_command_t;
+
+#define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */
+#define HIFN_CRYPT_CMD_ALG_DES 0x0000 /* DES */
+#define HIFN_CRYPT_CMD_ALG_3DES 0x0001 /* 3DES */
+#define HIFN_CRYPT_CMD_ALG_RC4 0x0002 /* RC4 */
+#define HIFN_CRYPT_CMD_ALG_AES 0x0003 /* AES */
+#define HIFN_CRYPT_CMD_MODE_MASK 0x0018 /* Encrypt mode: */
+#define HIFN_CRYPT_CMD_MODE_ECB 0x0000 /* ECB */
+#define HIFN_CRYPT_CMD_MODE_CBC 0x0008 /* CBC */
+#define HIFN_CRYPT_CMD_MODE_CFB 0x0010 /* CFB */
+#define HIFN_CRYPT_CMD_MODE_OFB 0x0018 /* OFB */
+#define HIFN_CRYPT_CMD_CLR_CTX 0x0040 /* clear context */
+#define HIFN_CRYPT_CMD_NEW_KEY 0x0800 /* expect new key */
+#define HIFN_CRYPT_CMD_NEW_IV 0x1000 /* expect new iv */
+
+#define HIFN_CRYPT_CMD_SRCLEN_M 0xc000
+#define HIFN_CRYPT_CMD_SRCLEN_S 14
+
+#define HIFN_CRYPT_CMD_KSZ_MASK 0x0600 /* AES key size: */
+#define HIFN_CRYPT_CMD_KSZ_128 0x0000 /* 128 bit */
+#define HIFN_CRYPT_CMD_KSZ_192 0x0200 /* 192 bit */
+#define HIFN_CRYPT_CMD_KSZ_256 0x0400 /* 256 bit */
+
+/*
+ * Structure to help build up the command data structure.
+ */
+typedef struct hifn_mac_command {
+ volatile u_int16_t masks;
+ volatile u_int16_t header_skip;
+ volatile u_int16_t source_count;
+ volatile u_int16_t reserved;
+} hifn_mac_command_t;
+
+#define HIFN_MAC_CMD_ALG_MASK 0x0001
+#define HIFN_MAC_CMD_ALG_SHA1 0x0000
+#define HIFN_MAC_CMD_ALG_MD5 0x0001
+#define HIFN_MAC_CMD_MODE_MASK 0x000c
+#define HIFN_MAC_CMD_MODE_HMAC 0x0000
+#define HIFN_MAC_CMD_MODE_SSL_MAC 0x0004
+#define HIFN_MAC_CMD_MODE_HASH 0x0008
+#define HIFN_MAC_CMD_MODE_FULL 0x0004
+#define HIFN_MAC_CMD_TRUNC 0x0010
+#define HIFN_MAC_CMD_RESULT 0x0020
+#define HIFN_MAC_CMD_APPEND 0x0040
+#define HIFN_MAC_CMD_SRCLEN_M 0xc000
+#define HIFN_MAC_CMD_SRCLEN_S 14
+
+/*
+ * MAC POS IPsec initiates authentication after encryption on encodes
+ * and before decryption on decodes.
+ */
+#define HIFN_MAC_CMD_POS_IPSEC 0x0200
+#define HIFN_MAC_CMD_NEW_KEY 0x0800
+
+/*
+ * The poll frequency and poll scalar defines are unshifted values used
+ * to set fields in the DMA Configuration Register.
+ */
+#ifndef HIFN_POLL_FREQUENCY
+#define HIFN_POLL_FREQUENCY 0x1
+#endif
+
+#ifndef HIFN_POLL_SCALAR
+#define HIFN_POLL_SCALAR 0x0
+#endif
+
+#define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */
+#define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */
+#endif /* __HIFN_H__ */
diff --git a/target/linux/generic/files/crypto/ocf/hifn/hifn7751var.h b/target/linux/generic/files/crypto/ocf/hifn/hifn7751var.h
new file mode 100644
index 000000000..c5d30f962
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/hifn/hifn7751var.h
@@ -0,0 +1,368 @@
+/* $FreeBSD: src/sys/dev/hifn/hifn7751var.h,v 1.9 2007/03/21 03:42:49 sam Exp $ */
+/* $OpenBSD: hifn7751var.h,v 1.42 2002/04/08 17:49:42 jason Exp $ */
+
+/*-
+ * Invertex AEON / Hifn 7751 driver
+ * Copyright (c) 1999 Invertex Inc. All rights reserved.
+ * Copyright (c) 1999 Theo de Raadt
+ * Copyright (c) 2000-2001 Network Security Technologies, Inc.
+ * http://www.netsec.net
+ *
+ * Please send any comments, feedback, bug-fixes, or feature requests to
+ * software@invertex.com.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+#ifndef __HIFN7751VAR_H__
+#define __HIFN7751VAR_H__
+
+#ifdef __KERNEL__
+
+/*
+ * Some configurable values for the driver. By default command+result
+ * descriptor rings are the same size. The src+dst descriptor rings
+ * are sized at 3.5x the number of potential commands.  Slower parts
+ * (e.g. 7951) tend to run out of src descriptors; faster parts (7811)
+ * tend to run out of src+cmd/result descriptors.  It's not clear that
+ * increasing the size
+ * of the descriptor rings helps performance significantly as other
+ * factors tend to come into play (e.g. copying misaligned packets).
+ */
+#define HIFN_D_CMD_RSIZE 24 /* command descriptors */
+#define HIFN_D_SRC_RSIZE ((HIFN_D_CMD_RSIZE * 7) / 2) /* source descriptors */
+#define HIFN_D_RES_RSIZE HIFN_D_CMD_RSIZE /* result descriptors */
+#define HIFN_D_DST_RSIZE HIFN_D_SRC_RSIZE /* destination descriptors */
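+/*
+ * With the default HIFN_D_CMD_RSIZE of 24 this works out to 24 command and
+ * 24 result descriptors, and (24 * 7) / 2 = 84 source and destination
+ * descriptors per ring (each ring also carries one extra jump descriptor,
+ * see struct hifn_dma below).
+ */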
+
+/*
+ * Length values for cryptography
+ */
+#define HIFN_DES_KEY_LENGTH 8
+#define HIFN_3DES_KEY_LENGTH 24
+#define HIFN_MAX_CRYPT_KEY_LENGTH HIFN_3DES_KEY_LENGTH
+#define HIFN_IV_LENGTH 8
+#define HIFN_AES_IV_LENGTH 16
+#define HIFN_MAX_IV_LENGTH HIFN_AES_IV_LENGTH
+
+/*
+ * Length values for authentication
+ */
+#define HIFN_MAC_KEY_LENGTH 64
+#define HIFN_MD5_LENGTH 16
+#define HIFN_SHA1_LENGTH 20
+#define HIFN_MAC_TRUNC_LENGTH 12
+
+#define MAX_SCATTER 64
+
+/*
+ * Data structure to hold all 4 rings and any other ring related data.
+ */
+struct hifn_dma {
+ /*
+	 * Descriptor rings.  We add +1 to the size to accommodate the
+ * jump descriptor.
+ */
+ struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1];
+ struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1];
+ struct hifn_desc dstr[HIFN_D_DST_RSIZE+1];
+ struct hifn_desc resr[HIFN_D_RES_RSIZE+1];
+
+ struct hifn_command *hifn_commands[HIFN_D_RES_RSIZE];
+
+ u_char command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
+ u_char result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
+ u_int32_t slop[HIFN_D_CMD_RSIZE];
+
+ u_int64_t test_src, test_dst;
+
+ /*
+	 * Our current positions for insertion and removal from the descriptor
+ * rings.
+ */
+ int cmdi, srci, dsti, resi;
+ volatile int cmdu, srcu, dstu, resu;
+ int cmdk, srck, dstk, resk;
+};
+
+struct hifn_session {
+ int hs_used;
+ int hs_mlen;
+};
+
+#define HIFN_RING_SYNC(sc, r, i, f) \
+ /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
+
+#define HIFN_CMDR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), cmdr, (i), (f))
+#define HIFN_RESR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), resr, (i), (f))
+#define HIFN_SRCR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), srcr, (i), (f))
+#define HIFN_DSTR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), dstr, (i), (f))
+
+#define HIFN_CMD_SYNC(sc, i, f) \
+ /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
+
+#define HIFN_RES_SYNC(sc, i, f) \
+ /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
+
+typedef int bus_size_t;
+
+/*
+ * Holds data specific to a single HIFN board.
+ */
+struct hifn_softc {
+ softc_device_decl sc_dev;
+
+ struct pci_dev *sc_pcidev; /* PCI device pointer */
+ spinlock_t sc_mtx; /* per-instance lock */
+
+ int sc_num; /* for multiple devs */
+
+ ocf_iomem_t sc_bar0;
+ bus_size_t sc_bar0_lastreg;/* bar0 last reg written */
+ ocf_iomem_t sc_bar1;
+ bus_size_t sc_bar1_lastreg;/* bar1 last reg written */
+
+ int sc_irq;
+
+ u_int32_t sc_dmaier;
+ u_int32_t sc_drammodel; /* 1=dram, 0=sram */
+ u_int32_t sc_pllconfig; /* 7954/7955/7956 PLL config */
+
+ struct hifn_dma *sc_dma;
+ dma_addr_t sc_dma_physaddr;/* physical address of sc_dma */
+
+ int sc_dmansegs;
+ int32_t sc_cid;
+ int sc_maxses;
+ int sc_nsessions;
+ struct hifn_session *sc_sessions;
+ int sc_ramsize;
+ int sc_flags;
+#define HIFN_HAS_RNG 0x1 /* includes random number generator */
+#define HIFN_HAS_PUBLIC 0x2 /* includes public key support */
+#define HIFN_HAS_AES 0x4 /* includes AES support */
+#define HIFN_IS_7811 0x8 /* Hifn 7811 part */
+#define HIFN_IS_7956 0x10 /* Hifn 7956/7955 don't have SDRAM */
+
+ struct timer_list sc_tickto; /* for managing DMA */
+
+ int sc_rngfirst;
+ int sc_rnghz; /* RNG polling frequency */
+
+ int sc_c_busy; /* command ring busy */
+ int sc_s_busy; /* source data ring busy */
+ int sc_d_busy; /* destination data ring busy */
+ int sc_r_busy; /* result ring busy */
+ int sc_active; /* for initial countdown */
+	int			sc_needwakeup;	/* ops queued waiting on resources */
+ int sc_curbatch; /* # ops submitted w/o int */
+ int sc_suspended;
+#ifdef HIFN_VULCANDEV
+ struct cdev *sc_pkdev;
+#endif
+};
+
+#define HIFN_LOCK(_sc) spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
+#define HIFN_UNLOCK(_sc) spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
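+/*
+ * Note that these lock macros expect an "unsigned long l_flags" local
+ * variable to be declared in every function that uses them (as the
+ * hifn_newsession() and hifn_freesession() callers above do).
+ */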
+
+/*
+ * hifn_command_t
+ *
+ * This is the control structure used to pass commands to hifn_encrypt().
+ *
+ * flags
+ * -----
+ * Flags is the bitwise "or" values for command configuration. A single
+ * encrypt direction needs to be set:
+ *
+ * HIFN_ENCODE or HIFN_DECODE
+ *
+ * To use cryptography, a single crypto algorithm must be included:
+ *
+ * HIFN_CRYPT_3DES or HIFN_CRYPT_DES
+ *
+ * To use authentication, a single MAC algorithm must be included:
+ *
+ * HIFN_MAC_MD5 or HIFN_MAC_SHA1
+ *
+ * By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash.
+ * If the value below is set, hash values are truncated or assumed
+ * truncated to 12 bytes:
+ *
+ * HIFN_MAC_TRUNC
+ *
+ * Keys for encryption and authentication can be sent as part of a command,
+ * or the last key value used with a particular session can be retrieved
+ * and used again if either of these flags are not specified.
+ *
+ * HIFN_CRYPT_NEW_KEY, HIFN_MAC_NEW_KEY
+ *
+ * session_num
+ * -----------
+ * A number between 0 and 2048 (for DRAM models) or a number between
+ * 0 and 768 (for SRAM models). Those who don't want to use session
+ * numbers should leave value at zero and send a new crypt key and/or
+ * new MAC key on every command. If you use session numbers and
+ * don't send a key with a command, the last key sent for that same
+ * session number will be used.
+ *
+ * Warning: Using session numbers and multiboard at the same time
+ * is currently broken.
+ *
+ * mbuf
+ * ----
+ * Either fill in the mbuf pointer and npa=0 or
+ * fill packp[] and packl[] and set npa to > 0
+ *
+ * mac_header_skip
+ * ---------------
+ * The number of bytes of the source_buf that are skipped over before
+ * authentication begins. This must be a number between 0 and 2^16-1
+ * and can be used by IPsec implementers to skip over IP headers.
+ * *** Value ignored if authentication not used ***
+ *
+ * crypt_header_skip
+ * -----------------
+ * The number of bytes of the source_buf that are skipped over before
+ * the cryptographic operation begins. This must be a number between 0
+ * and 2^16-1. For IPsec, this number will always be 8 bytes larger
+ * than the auth_header_skip (to skip over the ESP header).
+ * *** Value ignored if cryptography not used ***
+ *
+ */
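+/*
+ * Illustrative example using the field names described above (hypothetical
+ * values, not taken from any spec): for an IPv4 packet with a 20-byte IP
+ * header followed by an 8-byte ESP header, authentication starts right
+ * after the IP header and encryption starts after the ESP header, i.e.
+ * mac_header_skip = 20 and crypt_header_skip = 20 + 8 = 28.
+ */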
+struct hifn_operand {
+ union {
+ struct sk_buff *skb;
+ struct uio *io;
+ unsigned char *buf;
+ } u;
+ void *map;
+ bus_size_t mapsize;
+ int nsegs;
+ struct {
+ dma_addr_t ds_addr;
+ int ds_len;
+ } segs[MAX_SCATTER];
+};
+
+struct hifn_command {
+ u_int16_t session_num;
+ u_int16_t base_masks, cry_masks, mac_masks;
+ u_int8_t iv[HIFN_MAX_IV_LENGTH], *ck, mac[HIFN_MAC_KEY_LENGTH];
+ int cklen;
+ int sloplen, slopidx;
+
+ struct hifn_operand src;
+ struct hifn_operand dst;
+
+ struct hifn_softc *softc;
+ struct cryptop *crp;
+ struct cryptodesc *enccrd, *maccrd;
+};
+
+#define src_skb src.u.skb
+#define src_io src.u.io
+#define src_map src.map
+#define src_mapsize src.mapsize
+#define src_segs src.segs
+#define src_nsegs src.nsegs
+#define src_buf src.u.buf
+
+#define dst_skb dst.u.skb
+#define dst_io dst.u.io
+#define dst_map dst.map
+#define dst_mapsize dst.mapsize
+#define dst_segs dst.segs
+#define dst_nsegs dst.nsegs
+#define dst_buf dst.u.buf
+
+/*
+ * Return values for hifn_crypto()
+ */
+#define HIFN_CRYPTO_SUCCESS 0
+#define HIFN_CRYPTO_BAD_INPUT (-1)
+#define HIFN_CRYPTO_RINGS_FULL (-2)
+
+/**************************************************************************
+ *
+ * Function: hifn_crypto
+ *
+ * Purpose: Called by external drivers to begin an encryption on the
+ * HIFN board.
+ *
+ * Blocking/Non-blocking Issues
+ * ============================
+ * The driver cannot block in hifn_crypto (no calls to tsleep) currently.
+ * hifn_crypto() returns HIFN_CRYPTO_RINGS_FULL if there is not enough
+ * room in any of the rings for the request to proceed.
+ *
+ * Return Values
+ * =============
+ * 0 for success, negative values on error
+ *
+ * Defines for negative error codes are:
+ *
+ * HIFN_CRYPTO_BAD_INPUT : The passed in command had invalid settings.
+ * HIFN_CRYPTO_RINGS_FULL : All DMA rings were full and non-blocking
+ * behaviour was requested.
+ *
+ *************************************************************************/
+
+/*
+ * Convert back and forth from 'sid' to 'card' and 'session'
+ */
+#define HIFN_CARD(sid) (((sid) & 0xf0000000) >> 28)
+#define HIFN_SESSION(sid) ((sid) & 0x000007ff)
+#define HIFN_SID(crd,ses) (((crd) << 28) | ((ses) & 0x7ff))
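+/*
+ * For example (illustrative only): a session in slot 5 on card 2 encodes as
+ * HIFN_SID(2, 5) = (2 << 28) | 5 = 0x20000005, from which HIFN_CARD() and
+ * HIFN_SESSION() recover 2 and 5 respectively.
+ */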
+
+#endif /* __KERNEL__ */
+
+struct hifn_stats {
+ u_int64_t hst_ibytes;
+ u_int64_t hst_obytes;
+ u_int32_t hst_ipackets;
+ u_int32_t hst_opackets;
+ u_int32_t hst_invalid;
+ u_int32_t hst_nomem; /* malloc or one of hst_nomem_* */
+ u_int32_t hst_abort;
+ u_int32_t hst_noirq; /* IRQ for no reason */
+ u_int32_t hst_totbatch; /* ops submitted w/o interrupt */
+ u_int32_t hst_maxbatch; /* max ops submitted together */
+ u_int32_t hst_unaligned; /* unaligned src caused copy */
+ /*
+ * The following divides hst_nomem into more specific buckets.
+ */
+ u_int32_t hst_nomem_map; /* bus_dmamap_create failed */
+ u_int32_t hst_nomem_load; /* bus_dmamap_load_* failed */
+ u_int32_t hst_nomem_mbuf; /* MGET* failed */
+ u_int32_t hst_nomem_mcl; /* MCLGET* failed */
+ u_int32_t hst_nomem_cr; /* out of command/result descriptor */
+ u_int32_t hst_nomem_sd; /* out of src/dst descriptors */
+};
+
+#endif /* __HIFN7751VAR_H__ */
diff --git a/target/linux/generic/files/crypto/ocf/hifn/hifnHIPP.c b/target/linux/generic/files/crypto/ocf/hifn/hifnHIPP.c
new file mode 100644
index 000000000..a69e630e9
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/hifn/hifnHIPP.c
@@ -0,0 +1,421 @@
+/*-
+ * Driver for Hifn HIPP-I/II chipset
+ * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored by Hifn Inc.
+ *
+ */
+
+/*
+ * Driver for various Hifn encryption processors.
+ */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/uio.h>
+#include <linux/sysfs.h>
+#include <linux/miscdevice.h>
+#include <asm/io.h>
+
+#include <cryptodev.h>
+
+#include "hifnHIPPreg.h"
+#include "hifnHIPPvar.h"
+
+#if 1
+#define DPRINTF(a...) if (hipp_debug) { \
+ printk("%s: ", sc ? \
+ device_get_nameunit(sc->sc_dev) : "hifn"); \
+ printk(a); \
+ } else
+#else
+#define DPRINTF(a...)
+#endif
+
+typedef int bus_size_t;
+
+static inline int
+pci_get_revid(struct pci_dev *dev)
+{
+ u8 rid = 0;
+ pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
+ return rid;
+}
+
+#define debug hipp_debug
+int hipp_debug = 0;
+module_param(hipp_debug, int, 0644);
+MODULE_PARM_DESC(hipp_debug, "Enable debug");
+
+int hipp_maxbatch = 1;
+module_param(hipp_maxbatch, int, 0644);
+MODULE_PARM_DESC(hipp_maxbatch, "max ops to batch w/o interrupt");
+
+static int hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void hipp_remove(struct pci_dev *dev);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+static irqreturn_t hipp_intr(int irq, void *arg);
+#else
+static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs);
+#endif
+
+static int hipp_num_chips = 0;
+static struct hipp_softc *hipp_chip_idx[HIPP_MAX_CHIPS];
+
+static int hipp_newsession(device_t, u_int32_t *, struct cryptoini *);
+static int hipp_freesession(device_t, u_int64_t);
+static int hipp_process(device_t, struct cryptop *, int);
+
+static device_method_t hipp_methods = {
+ /* crypto device methods */
+ DEVMETHOD(cryptodev_newsession, hipp_newsession),
+ DEVMETHOD(cryptodev_freesession,hipp_freesession),
+ DEVMETHOD(cryptodev_process, hipp_process),
+};
+
+static __inline u_int32_t
+READ_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg)
+{
+ u_int32_t v = readl(sc->sc_bar[barno] + reg);
+ //sc->sc_bar0_lastreg = (bus_size_t) -1;
+ return (v);
+}
+static __inline void
+WRITE_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg, u_int32_t val)
+{
+ writel(val, sc->sc_bar[barno] + reg);
+}
+
+#define READ_REG_0(sc, reg) READ_REG(sc, 0, reg)
+#define WRITE_REG_0(sc, reg, val) WRITE_REG(sc,0, reg, val)
+#define READ_REG_1(sc, reg) READ_REG(sc, 1, reg)
+#define WRITE_REG_1(sc, reg, val) WRITE_REG(sc,1, reg, val)
+
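+/*
+ * OCF entry points; these are stubs for now and simply return EINVAL
+ * until the HIPP datapath is implemented.
+ */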
+static int
+hipp_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
+{
+ return EINVAL;
+}
+
+static int
+hipp_freesession(device_t dev, u_int64_t tid)
+{
+ return EINVAL;
+}
+
+static int
+hipp_process(device_t dev, struct cryptop *crp, int hint)
+{
+ return EINVAL;
+}
+
+static const char*
+hipp_partname(struct hipp_softc *sc, char buf[128], size_t blen)
+{
+ char *n = NULL;
+
+ switch (pci_get_vendor(sc->sc_pcidev)) {
+ case PCI_VENDOR_HIFN:
+ switch (pci_get_device(sc->sc_pcidev)) {
+		case PCI_PRODUCT_HIFN_7855: n = "Hifn 7855"; break;
+		case PCI_PRODUCT_HIFN_8155: n = "Hifn 8155"; break;
+		case PCI_PRODUCT_HIFN_6500: n = "Hifn 6500"; break;
+ }
+ }
+
+ if(n==NULL) {
+ snprintf(buf, blen, "VID=%02x,PID=%02x",
+ pci_get_vendor(sc->sc_pcidev),
+ pci_get_device(sc->sc_pcidev));
+ } else {
+ buf[0]='\0';
+		strncat(buf, n, blen - 1);
+ }
+ return buf;
+}
+
+struct hipp_fs_entry {
+ struct attribute attr;
+ /* other stuff */
+};
+
+
+static ssize_t
+cryptoid_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hipp_softc *sc;
+
+ sc = pci_get_drvdata(to_pci_dev (dev));
+ return sprintf (buf, "%d\n", sc->sc_cid);
+}
+
+struct device_attribute hipp_dev_cryptoid = __ATTR_RO(cryptoid);
+
+/*
+ * Attach an interface that successfully probed.
+ */
+static int
+hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+ struct hipp_softc *sc = NULL;
+ int i;
+ //char rbase;
+ //u_int16_t ena;
+ int rev;
+ //int rseg;
+ int rc;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ if (pci_enable_device(dev) < 0)
+ return(-ENODEV);
+
+ if (pci_set_mwi(dev))
+ return(-ENODEV);
+
+ if (!dev->irq) {
+		printk("hifn: found device with no IRQ assigned. check BIOS settings!\n");
+ pci_disable_device(dev);
+ return(-ENODEV);
+ }
+
+ sc = (struct hipp_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
+ if (!sc)
+ return(-ENOMEM);
+ memset(sc, 0, sizeof(*sc));
+
+ softc_device_init(sc, "hifn-hipp", hipp_num_chips, hipp_methods);
+
+ sc->sc_pcidev = dev;
+ sc->sc_irq = -1;
+ sc->sc_cid = -1;
+ sc->sc_num = hipp_num_chips++;
+
+ if (sc->sc_num < HIPP_MAX_CHIPS)
+ hipp_chip_idx[sc->sc_num] = sc;
+
+ pci_set_drvdata(sc->sc_pcidev, sc);
+
+ spin_lock_init(&sc->sc_mtx);
+
+ /*
+ * Setup PCI resources.
+ * The READ_REG_0, WRITE_REG_0, READ_REG_1,
+ * and WRITE_REG_1 macros throughout the driver are used
+ * to permit better debugging.
+ */
+ for(i=0; i<4; i++) {
+ unsigned long mem_start, mem_len;
+ mem_start = pci_resource_start(sc->sc_pcidev, i);
+ mem_len = pci_resource_len(sc->sc_pcidev, i);
+ sc->sc_barphy[i] = (caddr_t)mem_start;
+ sc->sc_bar[i] = (ocf_iomem_t) ioremap(mem_start, mem_len);
+ if (!sc->sc_bar[i]) {
+ device_printf(sc->sc_dev, "cannot map bar%d register space\n", i);
+ goto fail;
+ }
+ }
+
+ //hipp_reset_board(sc, 0);
+ pci_set_master(sc->sc_pcidev);
+
+ /*
+ * Arrange the interrupt line.
+ */
+ rc = request_irq(dev->irq, hipp_intr, IRQF_SHARED, "hifn", sc);
+ if (rc) {
+ device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
+ goto fail;
+ }
+ sc->sc_irq = dev->irq;
+
+ rev = READ_REG_1(sc, HIPP_1_REVID) & 0xffff;
+
+ {
+ char b[32];
+ device_printf(sc->sc_dev, "%s, rev %u",
+ hipp_partname(sc, b, sizeof(b)), rev);
+ }
+
+#if 0
+ if (sc->sc_flags & HIFN_IS_7956)
+ printf(", pll=0x%x<%s clk, %ux mult>",
+ sc->sc_pllconfig,
+ sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
+ 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
+#endif
+ printf("\n");
+
+ sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
+ if (sc->sc_cid < 0) {
+ device_printf(sc->sc_dev, "could not get crypto driver id\n");
+ goto fail;
+ }
+
+#if 0 /* cannot work with a non-GPL module */
+ /* make a sysfs entry to let the world know what entry we got */
+ sysfs_create_file(&sc->sc_pcidev->dev.kobj, &hipp_dev_cryptoid.attr);
+#endif
+
+#if 0
+ init_timer(&sc->sc_tickto);
+ sc->sc_tickto.function = hifn_tick;
+ sc->sc_tickto.data = (unsigned long) sc->sc_num;
+ mod_timer(&sc->sc_tickto, jiffies + HZ);
+#endif
+
+#if 0 /* no code here yet ?? */
+ crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
+#endif
+
+ return (0);
+
+fail:
+ if (sc->sc_cid >= 0)
+ crypto_unregister_all(sc->sc_cid);
+ if (sc->sc_irq != -1)
+ free_irq(sc->sc_irq, sc);
+
+#if 0
+ if (sc->sc_dma) {
+ /* Turn off DMA polling */
+ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+ HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+
+ pci_free_consistent(sc->sc_pcidev,
+ sizeof(*sc->sc_dma),
+ sc->sc_dma, sc->sc_dma_physaddr);
+ }
+#endif
+ kfree(sc);
+ return (-ENXIO);
+}
+
+/*
+ * Detach an interface that successfully probed.
+ */
+static void
+hipp_remove(struct pci_dev *dev)
+{
+ struct hipp_softc *sc = pci_get_drvdata(dev);
+ unsigned long l_flags;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ /* disable interrupts */
+ HIPP_LOCK(sc);
+
+#if 0
+ WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
+ HIFN_UNLOCK(sc);
+
+ /*XXX other resources */
+ del_timer_sync(&sc->sc_tickto);
+
+ /* Turn off DMA polling */
+ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+ HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+#endif
+	HIPP_UNLOCK(sc);
+
+ crypto_unregister_all(sc->sc_cid);
+
+ free_irq(sc->sc_irq, sc);
+
+#if 0
+ pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
+ sc->sc_dma, sc->sc_dma_physaddr);
+#endif
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+static irqreturn_t hipp_intr(int irq, void *arg)
+#else
+static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs)
+#endif
+{
+ struct hipp_softc *sc = arg;
+
+ sc = sc; /* shut up compiler */
+
+ return IRQ_HANDLED;
+}
+
+static struct pci_device_id hipp_pci_tbl[] = {
+ { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7855,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_8155,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ { 0 }, /* terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, hipp_pci_tbl);
+
+static struct pci_driver hipp_driver = {
+ .name = "hipp",
+ .id_table = hipp_pci_tbl,
+ .probe = hipp_probe,
+ .remove = hipp_remove,
+ /* add PM stuff here one day */
+};
+
+static int __init hipp_init (void)
+{
+ struct hipp_softc *sc = NULL;
+ int rc;
+
+ DPRINTF("%s(%p)\n", __FUNCTION__, hipp_init);
+
+ rc = pci_register_driver(&hipp_driver);
+ pci_register_driver_compat(&hipp_driver, rc);
+
+ return rc;
+}
+
+static void __exit hipp_exit (void)
+{
+ pci_unregister_driver(&hipp_driver);
+}
+
+module_init(hipp_init);
+module_exit(hipp_exit);
+
+MODULE_LICENSE("BSD");
+MODULE_AUTHOR("Michael Richardson <mcr@xelerance.com>");
+MODULE_DESCRIPTION("OCF driver for hifn HIPP-I/II PCI crypto devices");
diff --git a/target/linux/generic/files/crypto/ocf/hifn/hifnHIPPreg.h b/target/linux/generic/files/crypto/ocf/hifn/hifnHIPPreg.h
new file mode 100644
index 000000000..8c0e72038
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/hifn/hifnHIPPreg.h
@@ -0,0 +1,46 @@
+/*-
+ * Hifn HIPP-I/HIPP-II (7855/8155) driver.
+ * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored by Hifn inc.
+ *
+ */
+
+#ifndef __HIFNHIPP_H__
+#define __HIFNHIPP_H__
+
+/*
+ * PCI vendor and device identifiers
+ */
+#define PCI_VENDOR_HIFN 0x13a3 /* Hifn */
+#define PCI_PRODUCT_HIFN_6500 0x0006 /* 6500 */
+#define PCI_PRODUCT_HIFN_7855 0x001f /* 7855 */
+#define PCI_PRODUCT_HIFN_8155 0x999 /* XXX 8155 */
+
+#define HIPP_1_REVID 0x01 /* BOGUS */
+
+#endif /* __HIFNHIPP_H__ */
diff --git a/target/linux/generic/files/crypto/ocf/hifn/hifnHIPPvar.h b/target/linux/generic/files/crypto/ocf/hifn/hifnHIPPvar.h
new file mode 100644
index 000000000..61d292fb5
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/hifn/hifnHIPPvar.h
@@ -0,0 +1,93 @@
+/*
+ * Hifn HIPP-I/HIPP-II (7855/8155) driver.
+ * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored by Hifn inc.
+ *
+ */
+
+#ifndef __HIFNHIPPVAR_H__
+#define __HIFNHIPPVAR_H__
+
+#define HIPP_MAX_CHIPS 8
+
+/*
+ * Holds data specific to a single Hifn HIPP-I board.
+ */
+struct hipp_softc {
+ softc_device_decl sc_dev;
+
+ struct pci_dev *sc_pcidev; /* device backpointer */
+ ocf_iomem_t sc_bar[5];
+ caddr_t sc_barphy[5]; /* physical address */
+ int sc_num; /* for multiple devs */
+ spinlock_t sc_mtx; /* per-instance lock */
+ int32_t sc_cid;
+ int sc_irq;
+
+#if 0
+
+ u_int32_t sc_dmaier;
+ u_int32_t sc_drammodel; /* 1=dram, 0=sram */
+ u_int32_t sc_pllconfig; /* 7954/7955/7956 PLL config */
+
+ struct hifn_dma *sc_dma;
+ dma_addr_t sc_dma_physaddr;/* physical address of sc_dma */
+
+ int sc_dmansegs;
+ int sc_maxses;
+ int sc_nsessions;
+ struct hifn_session *sc_sessions;
+ int sc_ramsize;
+ int sc_flags;
+#define HIFN_HAS_RNG 0x1 /* includes random number generator */
+#define HIFN_HAS_PUBLIC 0x2 /* includes public key support */
+#define HIFN_HAS_AES 0x4 /* includes AES support */
+#define HIFN_IS_7811 0x8 /* Hifn 7811 part */
+#define HIFN_IS_7956 0x10 /* Hifn 7956/7955 don't have SDRAM */
+
+ struct timer_list sc_tickto; /* for managing DMA */
+
+ int sc_rngfirst;
+ int sc_rnghz; /* RNG polling frequency */
+
+ int sc_c_busy; /* command ring busy */
+ int sc_s_busy; /* source data ring busy */
+ int sc_d_busy; /* destination data ring busy */
+ int sc_r_busy; /* result ring busy */
+ int sc_active; /* for initial countdown */
+	int sc_needwakeup;	/* ops q'd waiting on resources */
+ int sc_curbatch; /* # ops submitted w/o int */
+ int sc_suspended;
+ struct miscdevice sc_miscdev;
+#endif
+};
+
+#define HIPP_LOCK(_sc) spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
+#define HIPP_UNLOCK(_sc) spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
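+/* NB: both macros expect an "unsigned long l_flags" local in the calling function */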
+
+#endif /* __HIFNHIPPVAR_H__ */
diff --git a/target/linux/generic/files/crypto/ocf/ixp4xx/Makefile b/target/linux/generic/files/crypto/ocf/ixp4xx/Makefile
new file mode 100644
index 000000000..d94a3b79f
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ixp4xx/Makefile
@@ -0,0 +1,104 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+#
+# You will need to point this at your Intel ixp425 includes; this portion
+# of the Makefile only really works under SGLinux with the appropriate libs
+# installed. They can be downloaded from http://www.snapgear.org/
+#
+ifeq ($(CONFIG_CPU_IXP46X),y)
+IXPLATFORM = ixp46X
+else
+ifeq ($(CONFIG_CPU_IXP43X),y)
+IXPLATFORM = ixp43X
+else
+IXPLATFORM = ixp42X
+endif
+endif
+
+ifdef CONFIG_IXP400_LIB_2_4
+IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp400_xscale_sw
+OSAL_DIR = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp_osal
+endif
+ifdef CONFIG_IXP400_LIB_2_1
+IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp400_xscale_sw
+OSAL_DIR = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp_osal
+endif
+ifdef CONFIG_IXP400_LIB_2_0
+IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp400_xscale_sw
+OSAL_DIR = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp_osal
+endif
+ifdef IX_XSCALE_SW
+ifdef CONFIG_IXP400_LIB_2_4
+IXP_CFLAGS = \
+ -I$(ROOTDIR)/. \
+ -I$(IX_XSCALE_SW)/src/include \
+ -I$(OSAL_DIR)/common/include/ \
+ -I$(OSAL_DIR)/common/include/modules/ \
+ -I$(OSAL_DIR)/common/include/modules/ddk/ \
+ -I$(OSAL_DIR)/common/include/modules/bufferMgt/ \
+ -I$(OSAL_DIR)/common/include/modules/ioMem/ \
+ -I$(OSAL_DIR)/common/os/linux/include/ \
+ -I$(OSAL_DIR)/common/os/linux/include/core/ \
+ -I$(OSAL_DIR)/common/os/linux/include/modules/ \
+ -I$(OSAL_DIR)/common/os/linux/include/modules/ddk/ \
+ -I$(OSAL_DIR)/common/os/linux/include/modules/bufferMgt/ \
+ -I$(OSAL_DIR)/common/os/linux/include/modules/ioMem/ \
+ -I$(OSAL_DIR)/platforms/$(IXPLATFORM)/include/ \
+ -I$(OSAL_DIR)/platforms/$(IXPLATFORM)/os/linux/include/ \
+ -DENABLE_IOMEM -DENABLE_BUFFERMGT -DENABLE_DDK \
+ -DUSE_IXP4XX_CRYPTO
+else
+IXP_CFLAGS = \
+ -I$(ROOTDIR)/. \
+ -I$(IX_XSCALE_SW)/src/include \
+ -I$(OSAL_DIR)/ \
+ -I$(OSAL_DIR)/os/linux/include/ \
+ -I$(OSAL_DIR)/os/linux/include/modules/ \
+ -I$(OSAL_DIR)/os/linux/include/modules/ioMem/ \
+ -I$(OSAL_DIR)/os/linux/include/modules/bufferMgt/ \
+ -I$(OSAL_DIR)/os/linux/include/core/ \
+ -I$(OSAL_DIR)/os/linux/include/platforms/ \
+ -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ \
+ -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp425 \
+ -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp465 \
+ -I$(OSAL_DIR)/os/linux/include/core/ \
+ -I$(OSAL_DIR)/include/ \
+ -I$(OSAL_DIR)/include/modules/ \
+ -I$(OSAL_DIR)/include/modules/bufferMgt/ \
+ -I$(OSAL_DIR)/include/modules/ioMem/ \
+ -I$(OSAL_DIR)/include/platforms/ \
+ -I$(OSAL_DIR)/include/platforms/ixp400/ \
+ -DUSE_IXP4XX_CRYPTO
+endif
+endif
+ifdef CONFIG_IXP400_LIB_1_4
+IXP_CFLAGS = \
+ -I$(ROOTDIR)/. \
+ -I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/include \
+ -I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/linux \
+ -DUSE_IXP4XX_CRYPTO
+endif
+ifndef IXPDIR
+IXPDIR = ixp-version-is-not-supported
+endif
+
+ifeq ($(CONFIG_CPU_IXP46X),y)
+IXP_CFLAGS += -D__ixp46X
+else
+ifeq ($(CONFIG_CPU_IXP43X),y)
+IXP_CFLAGS += -D__ixp43X
+else
+IXP_CFLAGS += -D__ixp42X
+endif
+endif
+
+obj-$(CONFIG_OCF_IXP4XX) += ixp4xx.o
+
+obj ?= .
+EXTRA_CFLAGS += $(IXP_CFLAGS) -I$(obj)/.. -I$(obj)/.
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
+
diff --git a/target/linux/generic/files/crypto/ocf/ixp4xx/ixp4xx.c b/target/linux/generic/files/crypto/ocf/ixp4xx/ixp4xx.c
new file mode 100644
index 000000000..ede598fa5
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ixp4xx/ixp4xx.c
@@ -0,0 +1,1339 @@
+/*
+ * An OCF module that uses Intel's IXP CryptoAcc API to do the crypto.
+ * This driver requires the IXP400 Access Library that is available
+ * from Intel in order to operate (or compile).
+ *
+ * Written by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2011 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ * 1. distributions of this source code include the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ *
+ * 2. distributions in binary form include the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other associated materials;
+ *
+ * 3. the copyright holder's name is not used to endorse products
+ * built using this software without specific written permission.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this product
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <asm/scatterlist.h>
+
+#include <IxTypes.h>
+#include <IxOsBuffMgt.h>
+#include <IxNpeDl.h>
+#include <IxCryptoAcc.h>
+#include <IxQMgr.h>
+#include <IxOsServices.h>
+#include <IxOsCacheMMU.h>
+
+#include <cryptodev.h>
+#include <uio.h>
+
+#ifndef IX_MBUF_PRIV
+#define IX_MBUF_PRIV(x) ((x)->priv)
+#endif
+
+struct ixp_data;
+
+struct ixp_q {
+ struct list_head ixp_q_list;
+ struct ixp_data *ixp_q_data;
+ struct cryptop *ixp_q_crp;
+ struct cryptodesc *ixp_q_ccrd;
+ struct cryptodesc *ixp_q_acrd;
+ IX_MBUF ixp_q_mbuf;
+ UINT8 *ixp_hash_dest; /* Location for hash in client buffer */
+ UINT8 *ixp_hash_src; /* Location of hash in internal buffer */
+ unsigned char ixp_q_iv_data[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH];
+ unsigned char *ixp_q_iv;
+};
+
+struct ixp_data {
+ int ixp_registered; /* is the context registered */
+ int ixp_crd_flags; /* detect direction changes */
+
+ int ixp_cipher_alg;
+ int ixp_auth_alg;
+
+ UINT32 ixp_ctx_id;
+ UINT32 ixp_hash_key_id; /* used when hashing */
+ IxCryptoAccCtx ixp_ctx;
+ IX_MBUF ixp_pri_mbuf;
+ IX_MBUF ixp_sec_mbuf;
+
+ struct work_struct ixp_pending_work;
+ struct work_struct ixp_registration_work;
+ struct list_head ixp_q; /* unprocessed requests */
+};
+
+#ifdef __ixp46X
+
+#define MAX_IOP_SIZE 64 /* words */
+#define MAX_OOP_SIZE 128
+
+#define MAX_PARAMS 3
+
+struct ixp_pkq {
+ struct list_head pkq_list;
+ struct cryptkop *pkq_krp;
+
+ IxCryptoAccPkeEauInOperands pkq_op;
+ IxCryptoAccPkeEauOpResult pkq_result;
+
+ UINT32 pkq_ibuf0[MAX_IOP_SIZE];
+ UINT32 pkq_ibuf1[MAX_IOP_SIZE];
+ UINT32 pkq_ibuf2[MAX_IOP_SIZE];
+ UINT32 pkq_obuf[MAX_OOP_SIZE];
+};
+
+static LIST_HEAD(ixp_pkq); /* current PK wait list */
+static struct ixp_pkq *ixp_pk_cur;
+static spinlock_t ixp_pkq_lock;
+
+#endif /* __ixp46X */
+
+static int ixp_blocked = 0;
+
+static int32_t ixp_id = -1;
+static struct ixp_data **ixp_sessions = NULL;
+static u_int32_t ixp_sesnum = 0;
+
+static int ixp_process(device_t, struct cryptop *, int);
+static int ixp_newsession(device_t, u_int32_t *, struct cryptoini *);
+static int ixp_freesession(device_t, u_int64_t);
+#ifdef __ixp46X
+static int ixp_kprocess(device_t, struct cryptkop *krp, int hint);
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+static kmem_cache_t *qcache;
+#else
+static struct kmem_cache *qcache;
+#endif
+
+#define debug ixp_debug
+static int ixp_debug = 0;
+module_param(ixp_debug, int, 0644);
+MODULE_PARM_DESC(ixp_debug, "Enable debug");
+
+static int ixp_init_crypto = 1;
+module_param(ixp_init_crypto, int, 0444); /* RO after load/boot */
+MODULE_PARM_DESC(ixp_init_crypto, "Call ixCryptoAccInit (default is 1)");
+
+static void ixp_process_pending(void *arg);
+static void ixp_registration(void *arg);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void ixp_process_pending_wq(struct work_struct *work);
+static void ixp_registration_wq(struct work_struct *work);
+#endif
+
+/*
+ * dummy device structure
+ */
+
+static struct {
+ softc_device_decl sc_dev;
+} ixpdev;
+
+static device_method_t ixp_methods = {
+ /* crypto device methods */
+ DEVMETHOD(cryptodev_newsession, ixp_newsession),
+ DEVMETHOD(cryptodev_freesession,ixp_freesession),
+ DEVMETHOD(cryptodev_process, ixp_process),
+#ifdef __ixp46X
+ DEVMETHOD(cryptodev_kprocess, ixp_kprocess),
+#endif
+};
+
+/*
+ * Generate a new software session.
+ */
+static int
+ixp_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
+{
+ struct ixp_data *ixp;
+ u_int32_t i;
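+	/* AUTH_LEN: honour a caller-requested MAC length, else use the algorithm's full digest length */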
+#define AUTH_LEN(cri, def) \
+ (cri->cri_mlen ? cri->cri_mlen : (def))
+
+ dprintk("%s():alg %d\n", __FUNCTION__,cri->cri_alg);
+ if (sid == NULL || cri == NULL) {
+ dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ if (ixp_sessions) {
+ for (i = 1; i < ixp_sesnum; i++)
+ if (ixp_sessions[i] == NULL)
+ break;
+ } else
+ i = 1; /* NB: to silence compiler warning */
+
+ if (ixp_sessions == NULL || i == ixp_sesnum) {
+ struct ixp_data **ixpd;
+
+ if (ixp_sessions == NULL) {
+ i = 1; /* We leave ixp_sessions[0] empty */
+ ixp_sesnum = CRYPTO_SW_SESSIONS;
+ } else
+ ixp_sesnum *= 2;
+
+ ixpd = kmalloc(ixp_sesnum * sizeof(struct ixp_data *), SLAB_ATOMIC);
+ if (ixpd == NULL) {
+ /* Reset session number */
+ if (ixp_sesnum == CRYPTO_SW_SESSIONS)
+ ixp_sesnum = 0;
+ else
+ ixp_sesnum /= 2;
+ dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+ return ENOBUFS;
+ }
+ memset(ixpd, 0, ixp_sesnum * sizeof(struct ixp_data *));
+
+ /* Copy existing sessions */
+ if (ixp_sessions) {
+ memcpy(ixpd, ixp_sessions,
+ (ixp_sesnum / 2) * sizeof(struct ixp_data *));
+ kfree(ixp_sessions);
+ }
+
+ ixp_sessions = ixpd;
+ }
+
+ ixp_sessions[i] = (struct ixp_data *) kmalloc(sizeof(struct ixp_data),
+ SLAB_ATOMIC);
+ if (ixp_sessions[i] == NULL) {
+ ixp_freesession(NULL, i);
+			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+ return ENOBUFS;
+ }
+
+ *sid = i;
+
+ ixp = ixp_sessions[i];
+ memset(ixp, 0, sizeof(*ixp));
+
+ ixp->ixp_cipher_alg = -1;
+ ixp->ixp_auth_alg = -1;
+ ixp->ixp_ctx_id = -1;
+ INIT_LIST_HEAD(&ixp->ixp_q);
+
+ ixp->ixp_ctx.useDifferentSrcAndDestMbufs = 0;
+
+ while (cri) {
+ switch (cri->cri_alg) {
+ case CRYPTO_DES_CBC:
+ ixp->ixp_cipher_alg = cri->cri_alg;
+ ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_DES;
+ ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
+ ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
+ ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
+ ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
+ IX_CRYPTO_ACC_DES_IV_64;
+ memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
+ cri->cri_key, (cri->cri_klen + 7) / 8);
+ break;
+
+ case CRYPTO_3DES_CBC:
+ ixp->ixp_cipher_alg = cri->cri_alg;
+ ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
+ ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
+ ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
+ ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
+ ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
+ IX_CRYPTO_ACC_DES_IV_64;
+ memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
+ cri->cri_key, (cri->cri_klen + 7) / 8);
+ break;
+
+ case CRYPTO_RIJNDAEL128_CBC:
+ ixp->ixp_cipher_alg = cri->cri_alg;
+ ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_AES;
+ ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
+ ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
+ ixp->ixp_ctx.cipherCtx.cipherBlockLen = 16;
+ ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen = 16;
+ memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
+ cri->cri_key, (cri->cri_klen + 7) / 8);
+ break;
+
+ case CRYPTO_MD5:
+ case CRYPTO_MD5_HMAC:
+ ixp->ixp_auth_alg = cri->cri_alg;
+ ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_MD5;
+ ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, MD5_HASH_LEN);
+ ixp->ixp_ctx.authCtx.aadLen = 0;
+ /* Only MD5_HMAC needs a key */
+ if (cri->cri_alg == CRYPTO_MD5_HMAC) {
+ ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
+ if (ixp->ixp_ctx.authCtx.authKeyLen >
+ sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
+ printk(
+ "ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
+ cri->cri_klen);
+ ixp_freesession(NULL, i);
+ return EINVAL;
+ }
+ memcpy(ixp->ixp_ctx.authCtx.key.authKey,
+ cri->cri_key, (cri->cri_klen + 7) / 8);
+ }
+ break;
+
+ case CRYPTO_SHA1:
+ case CRYPTO_SHA1_HMAC:
+ ixp->ixp_auth_alg = cri->cri_alg;
+ ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
+ ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, SHA1_HASH_LEN);
+ ixp->ixp_ctx.authCtx.aadLen = 0;
+ /* Only SHA1_HMAC needs a key */
+ if (cri->cri_alg == CRYPTO_SHA1_HMAC) {
+ ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
+ if (ixp->ixp_ctx.authCtx.authKeyLen >
+ sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
+ printk(
+ "ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
+ cri->cri_klen);
+ ixp_freesession(NULL, i);
+ return EINVAL;
+ }
+ memcpy(ixp->ixp_ctx.authCtx.key.authKey,
+ cri->cri_key, (cri->cri_klen + 7) / 8);
+ }
+ break;
+
+ default:
+ printk("ixp: unknown algo 0x%x\n", cri->cri_alg);
+ ixp_freesession(NULL, i);
+ return EINVAL;
+ }
+ cri = cri->cri_next;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending_wq);
+ INIT_WORK(&ixp->ixp_registration_work, ixp_registration_wq);
+#else
+ INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending, ixp);
+ INIT_WORK(&ixp->ixp_registration_work, ixp_registration, ixp);
+#endif
+
+ return 0;
+}
+
+
+/*
+ * Free a session.
+ */
+static int
+ixp_freesession(device_t dev, u_int64_t tid)
+{
+ u_int32_t sid = CRYPTO_SESID2LID(tid);
+
+ dprintk("%s()\n", __FUNCTION__);
+	if (sid >= ixp_sesnum || ixp_sessions == NULL ||
+ ixp_sessions[sid] == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ /* Silently accept and return */
+ if (sid == 0)
+ return 0;
+
+ if (ixp_sessions[sid]) {
+ if (ixp_sessions[sid]->ixp_ctx_id != -1) {
+ ixCryptoAccCtxUnregister(ixp_sessions[sid]->ixp_ctx_id);
+ ixp_sessions[sid]->ixp_ctx_id = -1;
+ }
+ kfree(ixp_sessions[sid]);
+ }
+ ixp_sessions[sid] = NULL;
+ if (ixp_blocked) {
+ ixp_blocked = 0;
+ crypto_unblock(ixp_id, CRYPTO_SYMQ);
+ }
+ return 0;
+}
+
+
+/*
+ * callback for when hash processing is complete
+ */
+
+static void
+ixp_hash_perform_cb(
+ UINT32 hash_key_id,
+ IX_MBUF *bufp,
+ IxCryptoAccStatus status)
+{
+ struct ixp_q *q;
+
+ dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__, hash_key_id, bufp, status);
+
+ if (bufp == NULL) {
+ printk("ixp: NULL buf in %s\n", __FUNCTION__);
+ return;
+ }
+
+ q = IX_MBUF_PRIV(bufp);
+ if (q == NULL) {
+ printk("ixp: NULL priv in %s\n", __FUNCTION__);
+ return;
+ }
+
+ if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
+ /* On success, need to copy hash back into original client buffer */
+ memcpy(q->ixp_hash_dest, q->ixp_hash_src,
+ (q->ixp_q_data->ixp_auth_alg == CRYPTO_SHA1) ?
+ SHA1_HASH_LEN : MD5_HASH_LEN);
+ }
+ else {
+ printk("ixp: hash perform failed status=%d\n", status);
+ q->ixp_q_crp->crp_etype = EINVAL;
+ }
+
+ /* Free internal buffer used for hashing */
+ kfree(IX_MBUF_MDATA(&q->ixp_q_mbuf));
+
+ crypto_done(q->ixp_q_crp);
+ kmem_cache_free(qcache, q);
+}
+
+/*
+ * setup a request and perform it
+ */
+static void
+ixp_q_process(struct ixp_q *q)
+{
+ IxCryptoAccStatus status;
+ struct ixp_data *ixp = q->ixp_q_data;
+ int auth_off = 0;
+ int auth_len = 0;
+ int crypt_off = 0;
+ int crypt_len = 0;
+ int icv_off = 0;
+ char *crypt_func;
+
+ dprintk("%s(%p)\n", __FUNCTION__, q);
+
+ if (q->ixp_q_ccrd) {
+ if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
+ if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT) {
+ q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
+ } else {
+ q->ixp_q_iv = q->ixp_q_iv_data;
+ read_random(q->ixp_q_iv, ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen);
+ }
+ if ((q->ixp_q_ccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
+ crypto_copyback(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
+ q->ixp_q_ccrd->crd_inject,
+ ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
+ (caddr_t) q->ixp_q_iv);
+ } else {
+ if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT)
+ q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
+ else {
+ q->ixp_q_iv = q->ixp_q_iv_data;
+ crypto_copydata(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
+ q->ixp_q_ccrd->crd_inject,
+ ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
+ (caddr_t) q->ixp_q_iv);
+ }
+ }
+
+ if (q->ixp_q_acrd) {
+ auth_off = q->ixp_q_acrd->crd_skip;
+ auth_len = q->ixp_q_acrd->crd_len;
+ icv_off = q->ixp_q_acrd->crd_inject;
+ }
+
+ crypt_off = q->ixp_q_ccrd->crd_skip;
+ crypt_len = q->ixp_q_ccrd->crd_len;
+ } else { /* if (q->ixp_q_acrd) */
+ auth_off = q->ixp_q_acrd->crd_skip;
+ auth_len = q->ixp_q_acrd->crd_len;
+ icv_off = q->ixp_q_acrd->crd_inject;
+ }
+
+ if (q->ixp_q_crp->crp_flags & CRYPTO_F_SKBUF) {
+ struct sk_buff *skb = (struct sk_buff *) q->ixp_q_crp->crp_buf;
+ if (skb_shinfo(skb)->nr_frags) {
+ /*
+ * DAVIDM fix this limitation one day by using
+			 * a buffer pool and chaining; it is not currently
+			 * needed for user/kernel space acceleration
+ */
+ printk("ixp: Cannot handle fragmented skb's yet !\n");
+ q->ixp_q_crp->crp_etype = ENOENT;
+ goto done;
+ }
+ IX_MBUF_MLEN(&q->ixp_q_mbuf) =
+ IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = skb->len;
+ IX_MBUF_MDATA(&q->ixp_q_mbuf) = skb->data;
+ } else if (q->ixp_q_crp->crp_flags & CRYPTO_F_IOV) {
+ struct uio *uiop = (struct uio *) q->ixp_q_crp->crp_buf;
+ if (uiop->uio_iovcnt != 1) {
+ /*
+ * DAVIDM fix this limitation one day by using
+			 * a buffer pool and chaining; it is not currently
+			 * needed for user/kernel space acceleration
+ */
+ printk("ixp: Cannot handle more than 1 iovec yet !\n");
+ q->ixp_q_crp->crp_etype = ENOENT;
+ goto done;
+ }
+ IX_MBUF_MLEN(&q->ixp_q_mbuf) =
+ IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_len;
+ IX_MBUF_MDATA(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_base;
+ } else /* contig buffer */ {
+ IX_MBUF_MLEN(&q->ixp_q_mbuf) =
+ IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_ilen;
+ IX_MBUF_MDATA(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_buf;
+ }
+
+ IX_MBUF_PRIV(&q->ixp_q_mbuf) = q;
+
+ if (ixp->ixp_auth_alg == CRYPTO_SHA1 || ixp->ixp_auth_alg == CRYPTO_MD5) {
+ /*
+ * For SHA1 and MD5 hash, need to create an internal buffer that is big
+ * enough to hold the original data + the appropriate padding for the
+ * hash algorithm.
+ */
+ UINT8 *tbuf = NULL;
+
+ IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
+ ((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
+ tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);
+
+		if (tbuf == NULL) {
+ printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
+ IX_MBUF_MLEN(&q->ixp_q_mbuf));
+ q->ixp_q_crp->crp_etype = ENOMEM;
+ goto done;
+ }
+ memcpy(tbuf, &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off], auth_len);
+
+ /* Set location in client buffer to copy hash into */
+ q->ixp_hash_dest =
+ &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off + auth_len];
+
+ IX_MBUF_MDATA(&q->ixp_q_mbuf) = tbuf;
+
+ /* Set location in internal buffer for where hash starts */
+ q->ixp_hash_src = &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_len];
+
+ crypt_func = "ixCryptoAccHashPerform";
+ status = ixCryptoAccHashPerform(ixp->ixp_ctx.authCtx.authAlgo,
+ &q->ixp_q_mbuf, ixp_hash_perform_cb, 0, auth_len, auth_len,
+ &ixp->ixp_hash_key_id);
+ }
+ else {
+ crypt_func = "ixCryptoAccAuthCryptPerform";
+ status = ixCryptoAccAuthCryptPerform(ixp->ixp_ctx_id, &q->ixp_q_mbuf,
+ NULL, auth_off, auth_len, crypt_off, crypt_len, icv_off,
+ q->ixp_q_iv);
+ }
+
+ if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
+ return;
+
+ if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL == status) {
+ q->ixp_q_crp->crp_etype = ENOMEM;
+ goto done;
+ }
+
+ printk("ixp: %s failed %u\n", crypt_func, status);
+ q->ixp_q_crp->crp_etype = EINVAL;
+
+done:
+ crypto_done(q->ixp_q_crp);
+ kmem_cache_free(qcache, q);
+}
+
+
+/*
+ * because we cannot process the Q from the Register callback,
+ * we do it here on a task Q.
+ */
+
+static void
+ixp_process_pending(void *arg)
+{
+ struct ixp_data *ixp = arg;
+ struct ixp_q *q = NULL;
+
+ dprintk("%s(%p)\n", __FUNCTION__, arg);
+
+ if (!ixp)
+ return;
+
+ while (!list_empty(&ixp->ixp_q)) {
+ q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
+ list_del(&q->ixp_q_list);
+ ixp_q_process(q);
+ }
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void
+ixp_process_pending_wq(struct work_struct *work)
+{
+ struct ixp_data *ixp = container_of(work, struct ixp_data, ixp_pending_work);
+ ixp_process_pending(ixp);
+}
+#endif
+
+/*
+ * callback for when context registration is complete
+ */
+
+static void
+ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
+{
+ int i;
+ struct ixp_data *ixp;
+ struct ixp_q *q;
+
+ dprintk("%s(%d, %p, %d)\n", __FUNCTION__, ctx_id, bufp, status);
+
+ /*
+ * free any buffer passed in to this routine
+ */
+ if (bufp) {
+ IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
+ kfree(IX_MBUF_MDATA(bufp));
+ IX_MBUF_MDATA(bufp) = NULL;
+ }
+
+ for (i = 0; i < ixp_sesnum; i++) {
+ ixp = ixp_sessions[i];
+ if (ixp && ixp->ixp_ctx_id == ctx_id)
+ break;
+ }
+ if (i >= ixp_sesnum) {
+ printk("ixp: invalid context id %d\n", ctx_id);
+ return;
+ }
+
+ if (IX_CRYPTO_ACC_STATUS_WAIT == status) {
+ /* this is normal to free the first of two buffers */
+ dprintk("ixp: register not finished yet.\n");
+ return;
+ }
+
+ if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
+ printk("ixp: register failed 0x%x\n", status);
+ while (!list_empty(&ixp->ixp_q)) {
+ q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
+ list_del(&q->ixp_q_list);
+ q->ixp_q_crp->crp_etype = EINVAL;
+ crypto_done(q->ixp_q_crp);
+ kmem_cache_free(qcache, q);
+ }
+ return;
+ }
+
+ /*
+	 * we are now registered, but we cannot start processing the Q here
+ * or we get strange errors with AES (DES/3DES seem to be ok).
+ */
+ ixp->ixp_registered = 1;
+ schedule_work(&ixp->ixp_pending_work);
+}
+
+
+/*
+ * callback for when data processing is complete
+ */
+
+static void
+ixp_perform_cb(
+ UINT32 ctx_id,
+ IX_MBUF *sbufp,
+ IX_MBUF *dbufp,
+ IxCryptoAccStatus status)
+{
+ struct ixp_q *q;
+
+ dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__, ctx_id, sbufp,
+ dbufp, status);
+
+ if (sbufp == NULL) {
+ printk("ixp: NULL sbuf in ixp_perform_cb\n");
+ return;
+ }
+
+ q = IX_MBUF_PRIV(sbufp);
+ if (q == NULL) {
+ printk("ixp: NULL priv in ixp_perform_cb\n");
+ return;
+ }
+
+ if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
+ printk("ixp: perform failed status=%d\n", status);
+ q->ixp_q_crp->crp_etype = EINVAL;
+ }
+
+ crypto_done(q->ixp_q_crp);
+ kmem_cache_free(qcache, q);
+}
+
+
+/*
+ * registration is not callable at IRQ time, so we defer
+ * to a task queue; this routine completes the registration for us
+ * when the task queue runs
+ *
+ * Unfortunately this means we cannot tell OCF that the driver is blocked;
+ * we do that on the next request.
+ */
+
+static void
+ixp_registration(void *arg)
+{
+ struct ixp_data *ixp = arg;
+ struct ixp_q *q = NULL;
+ IX_MBUF *pri = NULL, *sec = NULL;
+ int status = IX_CRYPTO_ACC_STATUS_SUCCESS;
+
+ if (!ixp) {
+ printk("ixp: ixp_registration with no arg\n");
+ return;
+ }
+
+ if (ixp->ixp_ctx_id != -1) {
+ ixCryptoAccCtxUnregister(ixp->ixp_ctx_id);
+ ixp->ixp_ctx_id = -1;
+ }
+
+ if (list_empty(&ixp->ixp_q)) {
+ printk("ixp: ixp_registration with no Q\n");
+ return;
+ }
+
+ /*
+ * setup the primary and secondary buffers
+ */
+ q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
+ if (q->ixp_q_acrd) {
+ pri = &ixp->ixp_pri_mbuf;
+ sec = &ixp->ixp_sec_mbuf;
+ IX_MBUF_MLEN(pri) = IX_MBUF_PKT_LEN(pri) = 128;
+ IX_MBUF_MDATA(pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
+ IX_MBUF_MLEN(sec) = IX_MBUF_PKT_LEN(sec) = 128;
+ IX_MBUF_MDATA(sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
+ }
+
+ /* Only need to register if a crypt op or HMAC op */
+ if (!(ixp->ixp_auth_alg == CRYPTO_SHA1 ||
+ ixp->ixp_auth_alg == CRYPTO_MD5)) {
+ status = ixCryptoAccCtxRegister(
+ &ixp->ixp_ctx,
+ pri, sec,
+ ixp_register_cb,
+ ixp_perform_cb,
+ &ixp->ixp_ctx_id);
+ }
+ else {
+ /* Otherwise we start processing pending q */
+ schedule_work(&ixp->ixp_pending_work);
+ }
+
+ if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
+ return;
+
+ if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS == status) {
+ printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
+ ixp_blocked = 1;
+		/* perhaps we should return EAGAIN on queued ops ? */
+ return;
+ }
+
+ printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
+ ixp->ixp_ctx_id = -1;
+
+ /*
+ * everything waiting is toasted
+ */
+ while (!list_empty(&ixp->ixp_q)) {
+ q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
+ list_del(&q->ixp_q_list);
+ q->ixp_q_crp->crp_etype = ENOENT;
+ crypto_done(q->ixp_q_crp);
+ kmem_cache_free(qcache, q);
+ }
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void
+ixp_registration_wq(struct work_struct *work)
+{
+ struct ixp_data *ixp = container_of(work, struct ixp_data,
+ ixp_registration_work);
+ ixp_registration(ixp);
+}
+#endif
+
+/*
+ * Process a request.
+ */
+static int
+ixp_process(device_t dev, struct cryptop *crp, int hint)
+{
+ struct ixp_data *ixp;
+ unsigned int lid;
+ struct ixp_q *q = NULL;
+ int status;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ /* Sanity check */
+ if (crp == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ crp->crp_etype = 0;
+
+ if (ixp_blocked)
+ return ERESTART;
+
+ if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+
+ /*
+ * find the session we are using
+ */
+
+ lid = crp->crp_sid & 0xffffffff;
+ if (lid >= ixp_sesnum || lid == 0 || ixp_sessions == NULL ||
+ ixp_sessions[lid] == NULL) {
+ crp->crp_etype = ENOENT;
+ dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
+ goto done;
+ }
+ ixp = ixp_sessions[lid];
+
+ /*
+ * setup a new request ready for queuing
+ */
+ q = kmem_cache_alloc(qcache, SLAB_ATOMIC);
+ if (q == NULL) {
+ dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
+ crp->crp_etype = ENOMEM;
+ goto done;
+ }
+ /*
+ * save some cycles by only zeroing the important bits
+ */
+ memset(&q->ixp_q_mbuf, 0, sizeof(q->ixp_q_mbuf));
+ q->ixp_q_ccrd = NULL;
+ q->ixp_q_acrd = NULL;
+ q->ixp_q_crp = crp;
+ q->ixp_q_data = ixp;
+
+ /*
+ * point the cipher and auth descriptors appropriately
+ * check that we have something to do
+ */
+ if (crp->crp_desc->crd_alg == ixp->ixp_cipher_alg)
+ q->ixp_q_ccrd = crp->crp_desc;
+ else if (crp->crp_desc->crd_alg == ixp->ixp_auth_alg)
+ q->ixp_q_acrd = crp->crp_desc;
+ else {
+ crp->crp_etype = ENOENT;
+ dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
+ goto done;
+ }
+ if (crp->crp_desc->crd_next) {
+ if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_cipher_alg)
+ q->ixp_q_ccrd = crp->crp_desc->crd_next;
+ else if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_auth_alg)
+ q->ixp_q_acrd = crp->crp_desc->crd_next;
+ else {
+ crp->crp_etype = ENOENT;
+ dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
+ goto done;
+ }
+ }
+
+ /*
+ * If there is a direction change for this context then we mark it as
+	 * unregistered and re-register it for the new direction. This is not
+	 * a very expensive operation and currently only tends to happen when
+	 * user-space applications are doing benchmarks.
+ *
+ * DM - we should be checking for pending requests before unregistering.
+ */
+ if (q->ixp_q_ccrd && ixp->ixp_registered &&
+ ixp->ixp_crd_flags != (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT)) {
+ dprintk("%s - detected direction change on session\n", __FUNCTION__);
+ ixp->ixp_registered = 0;
+ }
+
+ /*
+ * if we are registered, call straight into the perform code
+ */
+ if (ixp->ixp_registered) {
+ ixp_q_process(q);
+ return 0;
+ }
+
+ /*
+ * the only part of the context not set in newsession is the direction
+ * dependent parts
+ */
+ if (q->ixp_q_ccrd) {
+ ixp->ixp_crd_flags = (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT);
+ if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
+ ixp->ixp_ctx.operation = q->ixp_q_acrd ?
+ IX_CRYPTO_ACC_OP_ENCRYPT_AUTH : IX_CRYPTO_ACC_OP_ENCRYPT;
+ } else {
+ ixp->ixp_ctx.operation = q->ixp_q_acrd ?
+ IX_CRYPTO_ACC_OP_AUTH_DECRYPT : IX_CRYPTO_ACC_OP_DECRYPT;
+ }
+ } else {
+ /* q->ixp_q_acrd must be set if we are here */
+ ixp->ixp_ctx.operation = IX_CRYPTO_ACC_OP_AUTH_CALC;
+ }
+
+ status = list_empty(&ixp->ixp_q);
+ list_add_tail(&q->ixp_q_list, &ixp->ixp_q);
+ if (status)
+ schedule_work(&ixp->ixp_registration_work);
+ return 0;
+
+done:
+ if (q)
+ kmem_cache_free(qcache, q);
+ crypto_done(crp);
+ return 0;
+}
+
+
+#ifdef __ixp46X
+/*
+ * key processing support for the ixp465
+ */
+
+
+/*
+ * copy a BN (LE) into a buffer (BE) and fill out the op appropriately
+ * assume zeroed and only copy bits that are significant
+ */
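+/*
+ * Illustration (assuming the LE layout above): crp_p = { 0x01, 0x02, 0x03 }
+ * with crp_nbits = 24 needs one UINT32 and is stored as the bytes
+ * 00 03 02 01, i.e. the big-endian form of the value.
+ */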
+
+static int
+ixp_copy_ibuf(struct crparam *p, IxCryptoAccPkeEauOperand *op, UINT32 *buf)
+{
+ unsigned char *src = (unsigned char *) p->crp_p;
+ unsigned char *dst;
+ int len, bits = p->crp_nbits;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ if (bits > MAX_IOP_SIZE * sizeof(UINT32) * 8) {
+ dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__,
+ bits, MAX_IOP_SIZE * sizeof(UINT32) * 8);
+ return -1;
+ }
+
+	len = (bits + 31) / 32; /* the number of UINT32s needed */
+
+ dst = (unsigned char *) &buf[len];
+ dst--;
+
+ while (bits > 0) {
+ *dst-- = *src++;
+ bits -= 8;
+ }
+
+#if 0 /* no need to zero remaining bits as it is done during request alloc */
+ while (dst > (unsigned char *) buf)
+ *dst-- = '\0';
+#endif
+
+ op->pData = buf;
+ op->dataLen = len;
+ return 0;
+}
+
+/*
+ * copy out the result, be as forgiving as we can about small output buffers
+ */
+
+static int
+ixp_copy_obuf(struct crparam *p, IxCryptoAccPkeEauOpResult *op, UINT32 *buf)
+{
+ unsigned char *dst = (unsigned char *) p->crp_p;
+ unsigned char *src = (unsigned char *) buf;
+ int len, z, bits = p->crp_nbits;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ len = op->dataLen * sizeof(UINT32);
+
+ /* skip leading zeroes to be small buffer friendly */
+ z = 0;
+ while (z < len && src[z] == '\0')
+ z++;
+
+ src += len;
+ src--;
+ len -= z;
+
+ while (len > 0 && bits > 0) {
+ *dst++ = *src--;
+ len--;
+ bits -= 8;
+ }
+
+ while (bits > 0) {
+ *dst++ = '\0';
+ bits -= 8;
+ }
+
+ if (len > 0) {
+ dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
+ __FUNCTION__, len, z, p->crp_nbits / 8);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/*
+ * the parameter offsets for exp_mod
+ */
+
+#define IXP_PARAM_BASE 0
+#define IXP_PARAM_EXP 1
+#define IXP_PARAM_MOD 2
+#define IXP_PARAM_RES 3
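+/* krp_param[] layout for a CRK_MOD_EXP request: base, exponent, modulus, result */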
+
+/*
+ * key processing completion callback; it is also used to start processing
+ * by passing NULL for pResult
+ */
+
+static void
+ixp_kperform_cb(
+ IxCryptoAccPkeEauOperation operation,
+ IxCryptoAccPkeEauOpResult *pResult,
+ BOOL carryOrBorrow,
+ IxCryptoAccStatus status)
+{
+ struct ixp_pkq *q, *tmp;
+ unsigned long flags;
+
+ dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__, operation, pResult,
+ carryOrBorrow, status);
+
+ /* handle a completed request */
+ if (pResult) {
+ if (ixp_pk_cur && &ixp_pk_cur->pkq_result == pResult) {
+ q = ixp_pk_cur;
+ if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
+ dprintk("%s() - op failed 0x%x\n", __FUNCTION__, status);
+ q->pkq_krp->krp_status = ERANGE; /* could do better */
+ } else {
+ /* copy out the result */
+ if (ixp_copy_obuf(&q->pkq_krp->krp_param[IXP_PARAM_RES],
+ &q->pkq_result, q->pkq_obuf))
+ q->pkq_krp->krp_status = ERANGE;
+ }
+ crypto_kdone(q->pkq_krp);
+ kfree(q);
+ ixp_pk_cur = NULL;
+ } else
+ printk("%s - callback with invalid result pointer\n", __FUNCTION__);
+ }
+
+ spin_lock_irqsave(&ixp_pkq_lock, flags);
+ if (ixp_pk_cur || list_empty(&ixp_pkq)) {
+ spin_unlock_irqrestore(&ixp_pkq_lock, flags);
+ return;
+ }
+
+ list_for_each_entry_safe(q, tmp, &ixp_pkq, pkq_list) {
+
+ list_del(&q->pkq_list);
+ ixp_pk_cur = q;
+
+ spin_unlock_irqrestore(&ixp_pkq_lock, flags);
+
+ status = ixCryptoAccPkeEauPerform(
+ IX_CRYPTO_ACC_OP_EAU_MOD_EXP,
+ &q->pkq_op,
+ ixp_kperform_cb,
+ &q->pkq_result);
+
+ if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
+ dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
+			return; /* the callback will re-enter this routine when the operation completes */
+ } else if (status == IX_CRYPTO_ACC_STATUS_RETRY) {
+ printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__);
+ } else {
+ printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
+ __FUNCTION__, status);
+ }
+ q->pkq_krp->krp_status = ERANGE; /* could do better */
+ crypto_kdone(q->pkq_krp);
+ kfree(q);
+ spin_lock_irqsave(&ixp_pkq_lock, flags);
+ }
+ spin_unlock_irqrestore(&ixp_pkq_lock, flags);
+}
+
+
+static int
+ixp_kprocess(device_t dev, struct cryptkop *krp, int hint)
+{
+ struct ixp_pkq *q;
+ int rc = 0;
+ unsigned long flags;
+
+ dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__,
+ krp->krp_param[IXP_PARAM_BASE].crp_nbits,
+ krp->krp_param[IXP_PARAM_EXP].crp_nbits,
+ krp->krp_param[IXP_PARAM_MOD].crp_nbits,
+ krp->krp_param[IXP_PARAM_RES].crp_nbits);
+
+
+ if (krp->krp_op != CRK_MOD_EXP) {
+ krp->krp_status = EOPNOTSUPP;
+ goto err;
+ }
+
+ q = (struct ixp_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
+ if (q == NULL) {
+ krp->krp_status = ENOMEM;
+ goto err;
+ }
+
+ /*
+ * The PKE engine does not appear to zero the output buffer
+ * appropriately, so we need to do it all here.
+ */
+ memset(q, 0, sizeof(*q));
+
+ q->pkq_krp = krp;
+ INIT_LIST_HEAD(&q->pkq_list);
+
+ if (ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_BASE], &q->pkq_op.modExpOpr.M,
+ q->pkq_ibuf0))
+ rc = 1;
+ if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_EXP],
+ &q->pkq_op.modExpOpr.e, q->pkq_ibuf1))
+ rc = 2;
+ if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_MOD],
+ &q->pkq_op.modExpOpr.N, q->pkq_ibuf2))
+ rc = 3;
+
+ if (rc) {
+ kfree(q);
+ krp->krp_status = ERANGE;
+ goto err;
+ }
+
+ q->pkq_result.pData = q->pkq_obuf;
+ q->pkq_result.dataLen =
+ (krp->krp_param[IXP_PARAM_RES].crp_nbits + 31) / 32;
+
+ spin_lock_irqsave(&ixp_pkq_lock, flags);
+ list_add_tail(&q->pkq_list, &ixp_pkq);
+ spin_unlock_irqrestore(&ixp_pkq_lock, flags);
+
+ if (!ixp_pk_cur)
+ ixp_kperform_cb(0, NULL, 0, 0);
+ return (0);
+
+err:
+ crypto_kdone(krp);
+ return (0);
+}
+
+
+
+#ifdef CONFIG_OCF_RANDOMHARVEST
+/*
+ * We run the random number generator output through SHA so that it
+ * is FIPS compliant.
+ */
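+/*
+ * Flow: fetch PRNG words with ixCryptoAccPkePseudoRandomNumberGet(), then
+ * whiten them a digest-sized chunk at a time via ixCryptoAccPkeHashPerform()
+ * and return the SHA1 digests to the caller.
+ */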
+
+static volatile int sha_done = 0;
+static unsigned char sha_digest[20];
+
+static void
+ixp_hash_cb(UINT8 *digest, IxCryptoAccStatus status)
+{
+ dprintk("%s(%p, %d)\n", __FUNCTION__, digest, status);
+ if (sha_digest != digest)
+ printk("digest error\n");
+ if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
+ sha_done = 1;
+ else
+ sha_done = -status;
+}
+
+static int
+ixp_read_random(void *arg, u_int32_t *buf, int maxwords)
+{
+ IxCryptoAccStatus status;
+ int i, n, rc;
+
+ dprintk("%s(%p, %d)\n", __FUNCTION__, buf, maxwords);
+ memset(buf, 0, maxwords * sizeof(*buf));
+ status = ixCryptoAccPkePseudoRandomNumberGet(maxwords, buf);
+ if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
+ dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
+ __FUNCTION__, status);
+ return 0;
+ }
+
+ /*
+ * run the random data through SHA to make it look more random
+ */
+
+ n = sizeof(sha_digest); /* process digest bytes at a time */
+
+ rc = 0;
+ for (i = 0; i < maxwords; i += n / sizeof(*buf)) {
+ if ((maxwords - i) * sizeof(*buf) < n)
+ n = (maxwords - i) * sizeof(*buf);
+ sha_done = 0;
+ status = ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1,
+ (UINT8 *) &buf[i], n, ixp_hash_cb, sha_digest);
+ if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
+ dprintk("ixCryptoAccPkeHashPerform failed %d\n", status);
+ return -EIO;
+ }
+ while (!sha_done)
+ schedule();
+ if (sha_done < 0) {
+ dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done);
+ return 0;
+ }
+ memcpy(&buf[i], sha_digest, n);
+		rc += n / sizeof(*buf);
+ }
+
+ return rc;
+}
+#endif /* CONFIG_OCF_RANDOMHARVEST */
+
+#endif /* __ixp46X */
+
+
+
+/*
+ * our driver startup and shutdown routines
+ */
+
+static int
+ixp_init(void)
+{
+ dprintk("%s(%p)\n", __FUNCTION__, ixp_init);
+
+ if (ixp_init_crypto && ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS)
+ printk("ixCryptoAccInit failed, assuming already initialised!\n");
+
+ qcache = kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q), 0,
+ SLAB_HWCACHE_ALIGN, NULL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+ , NULL
+#endif
+ );
+ if (!qcache) {
+ printk("failed to create Qcache\n");
+ return -ENOENT;
+ }
+
+ memset(&ixpdev, 0, sizeof(ixpdev));
+ softc_device_init(&ixpdev, "ixp4xx", 0, ixp_methods);
+
+ ixp_id = crypto_get_driverid(softc_get_device(&ixpdev),
+ CRYPTOCAP_F_HARDWARE);
+ if (ixp_id < 0)
+ panic("IXP/OCF crypto device cannot initialize!");
+
+#define REGISTER(alg) \
+ crypto_register(ixp_id,alg,0,0)
+
+ REGISTER(CRYPTO_DES_CBC);
+ REGISTER(CRYPTO_3DES_CBC);
+ REGISTER(CRYPTO_RIJNDAEL128_CBC);
+#ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
+ REGISTER(CRYPTO_MD5);
+ REGISTER(CRYPTO_SHA1);
+#endif
+ REGISTER(CRYPTO_MD5_HMAC);
+ REGISTER(CRYPTO_SHA1_HMAC);
+#undef REGISTER
+
+#ifdef __ixp46X
+ spin_lock_init(&ixp_pkq_lock);
+ /*
+ * we do not enable the go fast options here as they can potentially
+ * allow timing based attacks
+ *
+ * http://www.openssl.org/news/secadv_20030219.txt
+ */
+ ixCryptoAccPkeEauExpConfig(0, 0);
+ crypto_kregister(ixp_id, CRK_MOD_EXP, 0);
+#ifdef CONFIG_OCF_RANDOMHARVEST
+ crypto_rregister(ixp_id, ixp_read_random, NULL);
+#endif
+#endif
+
+ return 0;
+}
+
+static void
+ixp_exit(void)
+{
+ dprintk("%s()\n", __FUNCTION__);
+ crypto_unregister_all(ixp_id);
+ ixp_id = -1;
+ kmem_cache_destroy(qcache);
+ qcache = NULL;
+}
+
+module_init(ixp_init);
+module_exit(ixp_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("David McCullough <dmccullough@cyberguard.com>");
+MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/Makefile b/target/linux/generic/files/crypto/ocf/kirkwood/Makefile
new file mode 100644
index 000000000..6dafd0048
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/Makefile
@@ -0,0 +1,19 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_KIRKWOOD) += mv_cesa.o
+
+mv_cesa-y := cesa/mvCesa.o cesa/mvLru.o cesa/mvMD5.o cesa/mvSHA1.o cesa/AES/mvAesAlg.o cesa/AES/mvAesApi.o cesa/mvCesaDebug.o cesa_ocf_drv.o
+
+# Extra objects required by the CESA driver
+mv_cesa-y += mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.o mvHal/kw_family/boardEnv/mvBoardEnvLib.o mvHal/mv_hal/twsi/mvTwsi.o mvHal/kw_family/ctrlEnv/sys/mvCpuIf.o mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.o mvHal/kw_family/ctrlEnv/sys/mvSysDram.o mvHal/linux_oss/mvOs.o mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.o mvHal/mv_hal/gpp/mvGpp.o mvHal/kw_family/ctrlEnv/sys/mvSysPex.o mvHal/mv_hal/pex/mvPex.o mvHal/kw_family/boardEnv/mvBoardEnvSpec.o mvHal/common/mvCommon.o mvHal/common/mvDebug.o mvHal/kw_family/ctrlEnv/sys/mvSysCesa.o
+
+ifdef src
+EXTRA_CFLAGS += -I$(src)/.. -I$(src)/cesa -I$(src)/mvHal -I$(src)/mvHal/common -I$(src)/mvHal/kw_family -I$(src)/mvHal/mv_hal -I$(src)/mvHal/linux_oss -I$(src)
+endif
+
+EXTRA_CFLAGS += -DMV_LINUX -DMV_CPU_LE -DMV_ARM -DMV_INCLUDE_CESA -DMV_INCLUDE_PEX -DMV_CACHE_COHERENCY=3
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAes.h b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAes.h
new file mode 100644
index 000000000..07a8601f8
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAes.h
@@ -0,0 +1,62 @@
+/* mvAes.h v2.0 August '99
+ * Reference ANSI C code
+ */
+
+/* AES Cipher header file for ANSI C Submissions
+ Lawrence E. Bassham III
+ Computer Security Division
+ National Institute of Standards and Technology
+
+ April 15, 1998
+
+ This sample is to assist implementers developing to the Cryptographic
+API Profile for AES Candidate Algorithm Submissions. Please consult this
+document as a cross-reference.
+
+ ANY CHANGES, WHERE APPROPRIATE, TO INFORMATION PROVIDED IN THIS FILE
+MUST BE DOCUMENTED. CHANGES ARE ONLY APPROPRIATE WHERE SPECIFIED WITH
+THE STRING "CHANGE POSSIBLE". FUNCTION CALLS AND THEIR PARAMETERS CANNOT
+BE CHANGED. STRUCTURES CAN BE ALTERED TO ALLOW IMPLEMENTERS TO INCLUDE
+IMPLEMENTATION SPECIFIC INFORMATION.
+*/
+
+/* Includes:
+ Standard include files
+*/
+
+#include "mvOs.h"
+
+
+/* Error Codes - CHANGE POSSIBLE: inclusion of additional error codes */
+
+/* Key direction is invalid, e.g., unknown value */
+#define AES_BAD_KEY_DIR -1
+
+/* Key material not of correct length */
+#define AES_BAD_KEY_MAT -2
+
+/* Key passed is not valid */
+#define AES_BAD_KEY_INSTANCE -3
+
+/* Params struct passed to cipherInit invalid */
+#define AES_BAD_CIPHER_MODE -4
+
+/* Cipher in wrong state (e.g., not initialized) */
+#define AES_BAD_CIPHER_STATE -5
+
+#define AES_BAD_CIPHER_INSTANCE -7
+
+
+/* Function prototypes */
+/* CHANGED: makeKey(): parameter blockLen added
+ this parameter is absolutely necessary if you want to
+ setup the round keys in a variable block length setting
+ cipherInit(): parameter blockLen added (for obvious reasons)
+ */
+int aesMakeKey(MV_U8 *expandedKey, MV_U8 *keyMaterial, int keyLen, int blockLen);
+int aesBlockEncrypt128(MV_U8 mode, MV_U8 *IV, MV_U8 *expandedKey, int keyLen,
+ MV_U32 *plain, int numBlocks, MV_U32 *cipher);
+int aesBlockDecrypt128(MV_U8 mode, MV_U8 *IV, MV_U8 *expandedKey, int keyLen,
+ MV_U32 *plain, int numBlocks, MV_U32 *cipher);
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesAlg.c b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesAlg.c
new file mode 100644
index 000000000..a65dc2893
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesAlg.c
@@ -0,0 +1,317 @@
+/* rijndael-alg-ref.c v2.0 August '99
+ * Reference ANSI C code
+ * authors: Paulo Barreto
+ * Vincent Rijmen, K.U.Leuven
+ *
+ * This code is placed in the public domain.
+ */
+
+#include "mvOs.h"
+
+#include "mvAesAlg.h"
+
+#include "mvAesBoxes.dat"
+
+
+MV_U8 mul1(MV_U8 aa, MV_U8 bb);
+void KeyAddition(MV_U8 a[4][MAXBC], MV_U8 rk[4][MAXBC], MV_U8 BC);
+void ShiftRow128Enc(MV_U8 a[4][MAXBC]);
+void ShiftRow128Dec(MV_U8 a[4][MAXBC]);
+void Substitution(MV_U8 a[4][MAXBC], MV_U8 box[256]);
+void MixColumn(MV_U8 a[4][MAXBC], MV_U8 rk[4][MAXBC]);
+void InvMixColumn(MV_U8 a[4][MAXBC]);
+
+
+#define mul(aa, bb) (mask[bb] & Alogtable[aa + Logtable[bb]])
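+/*
+ * The first operand of mul() is already a Logtable index: 25 = log(0x02) and
+ * 1 = log(0x03), so mul(25, x) is 0x02*x and mul(1, x) is 0x03*x in GF(2^8).
+ */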
+
+MV_U8 mul1(MV_U8 aa, MV_U8 bb)
+{
+ return mask[bb] & Alogtable[aa + Logtable[bb]];
+}
+
+
+void KeyAddition(MV_U8 a[4][MAXBC], MV_U8 rk[4][MAXBC], MV_U8 BC)
+{
+ /* Exor corresponding text input and round key input bytes
+ */
+ ((MV_U32*)(&(a[0][0])))[0] ^= ((MV_U32*)(&(rk[0][0])))[0];
+ ((MV_U32*)(&(a[1][0])))[0] ^= ((MV_U32*)(&(rk[1][0])))[0];
+ ((MV_U32*)(&(a[2][0])))[0] ^= ((MV_U32*)(&(rk[2][0])))[0];
+ ((MV_U32*)(&(a[3][0])))[0] ^= ((MV_U32*)(&(rk[3][0])))[0];
+
+}
+
+void ShiftRow128Enc(MV_U8 a[4][MAXBC]) {
+ /* Row 0 remains unchanged
+ * The other three rows are shifted a variable amount
+ */
+ MV_U8 tmp[MAXBC];
+
+ tmp[0] = a[1][1];
+ tmp[1] = a[1][2];
+ tmp[2] = a[1][3];
+ tmp[3] = a[1][0];
+
+ ((MV_U32*)(&(a[1][0])))[0] = ((MV_U32*)(&(tmp[0])))[0];
+ /*
+ a[1][0] = tmp[0];
+ a[1][1] = tmp[1];
+ a[1][2] = tmp[2];
+ a[1][3] = tmp[3];
+ */
+ tmp[0] = a[2][2];
+ tmp[1] = a[2][3];
+ tmp[2] = a[2][0];
+ tmp[3] = a[2][1];
+
+ ((MV_U32*)(&(a[2][0])))[0] = ((MV_U32*)(&(tmp[0])))[0];
+ /*
+ a[2][0] = tmp[0];
+ a[2][1] = tmp[1];
+ a[2][2] = tmp[2];
+ a[2][3] = tmp[3];
+ */
+ tmp[0] = a[3][3];
+ tmp[1] = a[3][0];
+ tmp[2] = a[3][1];
+ tmp[3] = a[3][2];
+
+ ((MV_U32*)(&(a[3][0])))[0] = ((MV_U32*)(&(tmp[0])))[0];
+ /*
+ a[3][0] = tmp[0];
+ a[3][1] = tmp[1];
+ a[3][2] = tmp[2];
+ a[3][3] = tmp[3];
+ */
+}
+
+void ShiftRow128Dec(MV_U8 a[4][MAXBC]) {
+ /* Row 0 remains unchanged
+ * The other three rows are shifted a variable amount
+ */
+ MV_U8 tmp[MAXBC];
+
+ tmp[0] = a[1][3];
+ tmp[1] = a[1][0];
+ tmp[2] = a[1][1];
+ tmp[3] = a[1][2];
+
+ ((MV_U32*)(&(a[1][0])))[0] = ((MV_U32*)(&(tmp[0])))[0];
+ /*
+ a[1][0] = tmp[0];
+ a[1][1] = tmp[1];
+ a[1][2] = tmp[2];
+ a[1][3] = tmp[3];
+ */
+
+ tmp[0] = a[2][2];
+ tmp[1] = a[2][3];
+ tmp[2] = a[2][0];
+ tmp[3] = a[2][1];
+
+ ((MV_U32*)(&(a[2][0])))[0] = ((MV_U32*)(&(tmp[0])))[0];
+ /*
+ a[2][0] = tmp[0];
+ a[2][1] = tmp[1];
+ a[2][2] = tmp[2];
+ a[2][3] = tmp[3];
+ */
+
+ tmp[0] = a[3][1];
+ tmp[1] = a[3][2];
+ tmp[2] = a[3][3];
+ tmp[3] = a[3][0];
+
+ ((MV_U32*)(&(a[3][0])))[0] = ((MV_U32*)(&(tmp[0])))[0];
+ /*
+ a[3][0] = tmp[0];
+ a[3][1] = tmp[1];
+ a[3][2] = tmp[2];
+ a[3][3] = tmp[3];
+ */
+}
+
+void Substitution(MV_U8 a[4][MAXBC], MV_U8 box[256]) {
+ /* Replace every byte of the input by the byte at that place
+ * in the nonlinear S-box
+ */
+ int i, j;
+
+ for(i = 0; i < 4; i++)
+ for(j = 0; j < 4; j++) a[i][j] = box[a[i][j]] ;
+}
+
+void MixColumn(MV_U8 a[4][MAXBC], MV_U8 rk[4][MAXBC]) {
+ /* Mix the four bytes of every column in a linear way
+ */
+ MV_U8 b[4][MAXBC];
+ int i, j;
+
+ for(j = 0; j < 4; j++){
+ b[0][j] = mul(25,a[0][j]) ^ mul(1,a[1][j]) ^ a[2][j] ^ a[3][j];
+ b[1][j] = mul(25,a[1][j]) ^ mul(1,a[2][j]) ^ a[3][j] ^ a[0][j];
+ b[2][j] = mul(25,a[2][j]) ^ mul(1,a[3][j]) ^ a[0][j] ^ a[1][j];
+ b[3][j] = mul(25,a[3][j]) ^ mul(1,a[0][j]) ^ a[1][j] ^ a[2][j];
+ }
+ for(i = 0; i < 4; i++)
+ /*for(j = 0; j < BC; j++) a[i][j] = b[i][j];*/
+      ((MV_U32*)(&(a[i][0])))[0] = ((MV_U32*)(&(b[i][0])))[0] ^ ((MV_U32*)(&(rk[i][0])))[0];
+}
+
+void InvMixColumn(MV_U8 a[4][MAXBC]) {
+ /* Mix the four bytes of every column in a linear way
+ * This is the opposite operation of Mixcolumn
+ */
+ MV_U8 b[4][MAXBC];
+ int i, j;
+
+ for(j = 0; j < 4; j++){
+ b[0][j] = mul(223,a[0][j]) ^ mul(104,a[1][j]) ^ mul(238,a[2][j]) ^ mul(199,a[3][j]);
+ b[1][j] = mul(223,a[1][j]) ^ mul(104,a[2][j]) ^ mul(238,a[3][j]) ^ mul(199,a[0][j]);
+ b[2][j] = mul(223,a[2][j]) ^ mul(104,a[3][j]) ^ mul(238,a[0][j]) ^ mul(199,a[1][j]);
+ b[3][j] = mul(223,a[3][j]) ^ mul(104,a[0][j]) ^ mul(238,a[1][j]) ^ mul(199,a[2][j]);
+ }
+ for(i = 0; i < 4; i++)
+ /*for(j = 0; j < BC; j++) a[i][j] = b[i][j];*/
+ ((MV_U32*)(&(a[i][0])))[0] = ((MV_U32*)(&(b[i][0])))[0];
+}
+
+int rijndaelKeySched (MV_U8 k[4][MAXKC], int keyBits, int blockBits, MV_U8 W[MAXROUNDS+1][4][MAXBC])
+{
+ /* Calculate the necessary round keys
+ * The number of calculations depends on keyBits and blockBits
+ */
+ int KC, BC, ROUNDS;
+ int i, j, t, rconpointer = 0;
+ MV_U8 tk[4][MAXKC];
+
+ switch (keyBits) {
+ case 128: KC = 4; break;
+ case 192: KC = 6; break;
+ case 256: KC = 8; break;
+ default : return (-1);
+ }
+
+ switch (blockBits) {
+ case 128: BC = 4; break;
+ case 192: BC = 6; break;
+ case 256: BC = 8; break;
+ default : return (-2);
+ }
+
+ switch (keyBits >= blockBits ? keyBits : blockBits) {
+ case 128: ROUNDS = 10; break;
+ case 192: ROUNDS = 12; break;
+ case 256: ROUNDS = 14; break;
+ default : return (-3); /* this cannot happen */
+ }
+
+
+ for(j = 0; j < KC; j++)
+ for(i = 0; i < 4; i++)
+ tk[i][j] = k[i][j];
+ t = 0;
+ /* copy values into round key array */
+ for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
+ for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
+
+ while (t < (ROUNDS+1)*BC) { /* while not enough round key material calculated */
+ /* calculate new values */
+ for(i = 0; i < 4; i++)
+ tk[i][0] ^= S[tk[(i+1)%4][KC-1]];
+ tk[0][0] ^= rcon[rconpointer++];
+
+ if (KC != 8)
+ for(j = 1; j < KC; j++)
+ for(i = 0; i < 4; i++) tk[i][j] ^= tk[i][j-1];
+ else {
+ for(j = 1; j < KC/2; j++)
+ for(i = 0; i < 4; i++) tk[i][j] ^= tk[i][j-1];
+ for(i = 0; i < 4; i++) tk[i][KC/2] ^= S[tk[i][KC/2 - 1]];
+ for(j = KC/2 + 1; j < KC; j++)
+ for(i = 0; i < 4; i++) tk[i][j] ^= tk[i][j-1];
+ }
+ /* copy values into round key array */
+ for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
+ for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
+ }
+
+ return 0;
+}
+
+
+
+int rijndaelEncrypt128(MV_U8 a[4][MAXBC], MV_U8 rk[MAXROUNDS+1][4][MAXBC], int rounds)
+{
+ /* Encryption of one block.
+ */
+ int r, BC, ROUNDS;
+
+ BC = 4;
+ ROUNDS = rounds;
+
+ /* begin with a key addition
+ */
+
+ KeyAddition(a,rk[0],BC);
+
+ /* ROUNDS-1 ordinary rounds
+ */
+ for(r = 1; r < ROUNDS; r++) {
+ Substitution(a,S);
+ ShiftRow128Enc(a);
+ MixColumn(a, rk[r]);
+ /*KeyAddition(a,rk[r],BC);*/
+ }
+
+ /* Last round is special: there is no MixColumn
+ */
+ Substitution(a,S);
+ ShiftRow128Enc(a);
+ KeyAddition(a,rk[ROUNDS],BC);
+
+ return 0;
+}
+
+
+int rijndaelDecrypt128(MV_U8 a[4][MAXBC], MV_U8 rk[MAXROUNDS+1][4][MAXBC], int rounds)
+{
+ int r, BC, ROUNDS;
+
+ BC = 4;
+ ROUNDS = rounds;
+
+ /* To decrypt: apply the inverse operations of the encrypt routine,
+ * in opposite order
+ *
+ * (KeyAddition is an involution: it 's equal to its inverse)
+ * (the inverse of Substitution with table S is Substitution with the inverse table of S)
+ * (the inverse of Shiftrow is Shiftrow over a suitable distance)
+ */
+
+ /* First the special round:
+ * without InvMixColumn
+ * with extra KeyAddition
+ */
+ KeyAddition(a,rk[ROUNDS],BC);
+ ShiftRow128Dec(a);
+ Substitution(a,Si);
+
+ /* ROUNDS-1 ordinary rounds
+ */
+ for(r = ROUNDS-1; r > 0; r--) {
+ KeyAddition(a,rk[r],BC);
+ InvMixColumn(a);
+ ShiftRow128Dec(a);
+ Substitution(a,Si);
+
+ }
+
+ /* End with the extra key addition
+ */
+
+ KeyAddition(a,rk[0],BC);
+
+ return 0;
+}
+
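A hedged reading aid (not part of the patch): for a single AES-128 block, the reference routines above compose as follows. The column-major packing of k[][] and block[][] mirrors what aesMakeKey() and aesBlockEncrypt128() do in mvAesApi.c below; 10 rounds corresponds to a 128-bit key, as selected in rijndaelKeySched().

    /* Illustrative sketch only: one AES-128 block through the reference code. */
    MV_U8 k[4][MAXKC];               /* key bytes, column-major: k[i % 4][i / 4] */
    MV_U8 W[MAXROUNDS+1][4][MAXBC];  /* full round-key schedule                  */
    MV_U8 block[4][MAXBC];           /* state, column-major: block[t][j]         */

    /* ... fill k[][] from 16 key bytes and block[][] from 16 plaintext bytes ... */

    rijndaelKeySched(k, 128, 128, W);    /* keyBits = blockBits = 128   */
    rijndaelEncrypt128(block, W, 10);    /* 10 rounds for a 128-bit key */
    /* rijndaelDecrypt128(block, W, 10) reverses the operation */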
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesAlg.h b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesAlg.h
new file mode 100644
index 000000000..ec81e403f
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesAlg.h
@@ -0,0 +1,19 @@
+/* rijndael-alg-ref.h v2.0 August '99
+ * Reference ANSI C code
+ * authors: Paulo Barreto
+ * Vincent Rijmen, K.U.Leuven
+ */
+#ifndef __RIJNDAEL_ALG_H
+#define __RIJNDAEL_ALG_H
+
+#define MAXBC (128/32)
+#define MAXKC (256/32)
+#define MAXROUNDS 14
+
+
+int rijndaelKeySched (MV_U8 k[4][MAXKC], int keyBits, int blockBits, MV_U8 rk[MAXROUNDS+1][4][MAXBC]);
+
+int rijndaelEncrypt128(MV_U8 a[4][MAXBC], MV_U8 rk[MAXROUNDS+1][4][MAXBC], int rounds);
+int rijndaelDecrypt128(MV_U8 a[4][MAXBC], MV_U8 rk[MAXROUNDS+1][4][MAXBC], int rounds);
+
+#endif /* __RIJNDAEL_ALG_H */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesApi.c b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesApi.c
new file mode 100644
index 000000000..b432dc6e6
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesApi.c
@@ -0,0 +1,312 @@
+/* rijndael-api-ref.c v2.1 April 2000
+ * Reference ANSI C code
+ * authors: v2.0 Paulo Barreto
+ * Vincent Rijmen, K.U.Leuven
+ * v2.1 Vincent Rijmen, K.U.Leuven
+ *
+ * This code is placed in the public domain.
+ */
+#include "mvOs.h"
+
+#include "mvAes.h"
+#include "mvAesAlg.h"
+
+
+/* Defines:
+ Add any additional defines you need
+*/
+
+#define MODE_ECB 1 /* Are we ciphering in ECB mode? */
+#define MODE_CBC 2 /* Are we ciphering in CBC mode? */
+#define MODE_CFB1 3 /* Are we ciphering in 1-bit CFB mode? */
+
+
+int aesMakeKey(MV_U8 *expandedKey, MV_U8 *keyMaterial, int keyLen, int blockLen)
+{
+ MV_U8 W[MAXROUNDS+1][4][MAXBC];
+ MV_U8 k[4][MAXKC];
+ MV_U8 j;
+ int i, rounds, KC;
+
+ if (expandedKey == NULL)
+ {
+ return AES_BAD_KEY_INSTANCE;
+ }
+
+ if (!((keyLen == 128) || (keyLen == 192) || (keyLen == 256)))
+ {
+ return AES_BAD_KEY_MAT;
+ }
+
+ if (keyMaterial == NULL)
+ {
+ return AES_BAD_KEY_MAT;
+ }
+
+ /* initialize key schedule: */
+ for(i=0; i<keyLen/8; i++)
+ {
+ j = keyMaterial[i];
+ k[i % 4][i / 4] = j;
+ }
+
+ rijndaelKeySched (k, keyLen, blockLen, W);
+#ifdef MV_AES_DEBUG
+ {
+ MV_U8* pW = &W[0][0][0];
+ int x;
+
+        mvOsPrintf("Expanded Key: size = %d\n", sizeof(W));
+ for(i=0; i<sizeof(W); i++)
+ {
+ mvOsPrintf("%02x ", pW[i]);
+ }
+ for(i=0; i<MAXROUNDS+1; i++)
+ {
+ mvOsPrintf("\n Round #%02d: ", i);
+ for(x=0; x<MAXBC; x++)
+ {
+ mvOsPrintf("%02x%02x%02x%02x ",
+ W[i][0][x], W[i][1][x], W[i][2][x], W[i][3][x]);
+ }
+ mvOsPrintf("\n");
+ }
+ }
+#endif /* MV_AES_DEBUG */
+ switch (keyLen)
+ {
+ case 128:
+ rounds = 10;
+ KC = 4;
+ break;
+ case 192:
+ rounds = 12;
+ KC = 6;
+ break;
+ case 256:
+ rounds = 14;
+ KC = 8;
+ break;
+ default :
+ return (-1);
+ }
+
+ for(i=0; i<MAXBC; i++)
+ {
+ for(j=0; j<4; j++)
+ {
+ expandedKey[i*4+j] = W[rounds][j][i];
+ }
+ }
+ for(; i<KC; i++)
+ {
+ for(j=0; j<4; j++)
+ {
+ expandedKey[i*4+j] = W[rounds-1][j][i+MAXBC-KC];
+ }
+ }
+
+
+ return 0;
+}
+
+int aesBlockEncrypt128(MV_U8 mode, MV_U8 *IV, MV_U8 *expandedKey, int keyLen,
+ MV_U32 *plain, int numBlocks, MV_U32 *cipher)
+{
+ int i, j, t;
+ MV_U8 block[4][MAXBC];
+ int rounds;
+ char *input, *outBuffer;
+
+ input = (char*)plain;
+ outBuffer = (char*)cipher;
+
+ /* check parameter consistency: */
+ if( (expandedKey == NULL) || ((keyLen != 128) && (keyLen != 192) && (keyLen != 256)))
+ {
+ return AES_BAD_KEY_MAT;
+ }
+ if ((mode != MODE_ECB && mode != MODE_CBC))
+ {
+ return AES_BAD_CIPHER_STATE;
+ }
+
+ switch (keyLen)
+ {
+ case 128: rounds = 10; break;
+ case 192: rounds = 12; break;
+ case 256: rounds = 14; break;
+ default : return (-3); /* this cannot happen */
+ }
+
+
+ switch (mode)
+ {
+ case MODE_ECB:
+ for (i = 0; i < numBlocks; i++)
+ {
+ for (j = 0; j < 4; j++)
+ {
+ for(t = 0; t < 4; t++)
+ /* parse input stream into rectangular array */
+ block[t][j] = input[16*i+4*j+t] & 0xFF;
+ }
+ rijndaelEncrypt128(block, (MV_U8 (*)[4][MAXBC])expandedKey, rounds);
+ for (j = 0; j < 4; j++)
+ {
+ /* parse rectangular array into output ciphertext bytes */
+ for(t = 0; t < 4; t++)
+ outBuffer[16*i+4*j+t] = (MV_U8) block[t][j];
+
+ }
+ }
+ break;
+
+ case MODE_CBC:
+ for (j = 0; j < 4; j++)
+ {
+ for(t = 0; t < 4; t++)
+ /* parse initial value into rectangular array */
+ block[t][j] = IV[t+4*j] & 0xFF;
+ }
+ for (i = 0; i < numBlocks; i++)
+ {
+ for (j = 0; j < 4; j++)
+ {
+ for(t = 0; t < 4; t++)
+ /* parse input stream into rectangular array and exor with
+ IV or the previous ciphertext */
+ block[t][j] ^= input[16*i+4*j+t] & 0xFF;
+ }
+ rijndaelEncrypt128(block, (MV_U8 (*)[4][MAXBC])expandedKey, rounds);
+ for (j = 0; j < 4; j++)
+ {
+ /* parse rectangular array into output ciphertext bytes */
+ for(t = 0; t < 4; t++)
+ outBuffer[16*i+4*j+t] = (MV_U8) block[t][j];
+ }
+ }
+ break;
+
+ default: return AES_BAD_CIPHER_STATE;
+ }
+
+ return 0;
+}
+
+int aesBlockDecrypt128(MV_U8 mode, MV_U8 *IV, MV_U8 *expandedKey, int keyLen,
+ MV_U32 *srcData, int numBlocks, MV_U32 *dstData)
+{
+ int i, j, t;
+ MV_U8 block[4][MAXBC];
+ MV_U8 iv[4][MAXBC];
+ int rounds;
+ char *input, *outBuffer;
+
+ input = (char*)srcData;
+ outBuffer = (char*)dstData;
+
+ if (expandedKey == NULL)
+ {
+ return AES_BAD_KEY_MAT;
+ }
+
+ /* check parameter consistency: */
+ if (keyLen != 128 && keyLen != 192 && keyLen != 256)
+ {
+ return AES_BAD_KEY_MAT;
+ }
+ if ((mode != MODE_ECB && mode != MODE_CBC))
+ {
+ return AES_BAD_CIPHER_STATE;
+ }
+
+ switch (keyLen)
+ {
+ case 128: rounds = 10; break;
+ case 192: rounds = 12; break;
+ case 256: rounds = 14; break;
+ default : return (-3); /* this cannot happen */
+ }
+
+
+ switch (mode)
+ {
+ case MODE_ECB:
+ for (i = 0; i < numBlocks; i++)
+ {
+ for (j = 0; j < 4; j++)
+ {
+ for(t = 0; t < 4; t++)
+ {
+ /* parse input stream into rectangular array */
+ block[t][j] = input[16*i+4*j+t] & 0xFF;
+ }
+ }
+ rijndaelDecrypt128(block, (MV_U8 (*)[4][MAXBC])expandedKey, rounds);
+ for (j = 0; j < 4; j++)
+ {
+               /* parse rectangular array into output plaintext bytes */
+ for(t = 0; t < 4; t++)
+ outBuffer[16*i+4*j+t] = (MV_U8) block[t][j];
+ }
+ }
+ break;
+
+ case MODE_CBC:
+ /* first block */
+ for (j = 0; j < 4; j++)
+ {
+ for(t = 0; t < 4; t++)
+ {
+ /* parse input stream into rectangular array */
+ block[t][j] = input[4*j+t] & 0xFF;
+ iv[t][j] = block[t][j];
+ }
+ }
+ rijndaelDecrypt128(block, (MV_U8 (*)[4][MAXBC])expandedKey, rounds);
+
+ for (j = 0; j < 4; j++)
+ {
+               /* exor the IV and parse rectangular array into output plaintext bytes */
+ for(t = 0; t < 4; t++)
+ {
+ outBuffer[4*j+t] = (MV_U8) (block[t][j] ^ IV[t+4*j]);
+ IV[t+4*j] = iv[t][j];
+ }
+ }
+
+ /* next blocks */
+ for (i = 1; i < numBlocks; i++)
+ {
+ for (j = 0; j < 4; j++)
+ {
+ for(t = 0; t < 4; t++)
+ {
+ /* parse input stream into rectangular array */
+ iv[t][j] = input[16*i+4*j+t] & 0xFF;
+ block[t][j] = iv[t][j];
+ }
+ }
+ rijndaelDecrypt128(block, (MV_U8 (*)[4][MAXBC])expandedKey, rounds);
+
+ for (j = 0; j < 4; j++)
+ {
+                       /* exor previous ciphertext block and parse rectangular array
+                          into output plaintext bytes */
+ for(t = 0; t < 4; t++)
+ {
+ outBuffer[16*i+4*j+t] = (MV_U8) (block[t][j] ^ IV[t+4*j]);
+ IV[t+4*j] = iv[t][j];
+ }
+ }
+ }
+ break;
+
+ default: return AES_BAD_CIPHER_STATE;
+ }
+
+ return 0;
+}
+
+
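Editorial note (not part of the patch): the two CBC paths above implement the standard chaining relations shown below, which is why the decrypt path keeps a copy of each incoming ciphertext block in iv[] before IV is overwritten with it.

    /*
     * Encrypt: C_1 = E_K(P_1 ^ IV),   C_i = E_K(P_i ^ C_{i-1})
     * Decrypt: P_1 = D_K(C_1) ^ IV,   P_i = D_K(C_i) ^ C_{i-1}
     */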
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesBoxes.dat b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesBoxes.dat
new file mode 100644
index 000000000..4011b187b
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/AES/mvAesBoxes.dat
@@ -0,0 +1,123 @@
+static MV_U8 mask[256] = {
+0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+};
+
+static MV_U8 Logtable[256] = {
+ 0, 0, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3,
+100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105, 28, 193,
+125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114, 154, 201, 9, 120,
+101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218, 142,
+150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56,
+102, 221, 253, 48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16,
+126, 110, 72, 195, 163, 182, 30, 66, 58, 107, 40, 84, 250, 133, 61, 186,
+ 43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87,
+175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232,
+ 44, 215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160,
+127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183,
+204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161, 108, 170, 85, 41, 157,
+151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63, 91, 209,
+ 83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171,
+ 68, 17, 146, 217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165,
+103, 74, 237, 222, 197, 49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7,
+};
+
+static MV_U8 Alogtable[512] = {
+ 1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53,
+ 95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170,
+229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49,
+ 83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
+ 76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136,
+131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154,
+181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163,
+254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160,
+251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65,
+195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117,
+159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
+155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84,
+252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202,
+ 69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14,
+ 18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23,
+ 57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1,
+
+ 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53,
+ 95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170,
+229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49,
+ 83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
+ 76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136,
+131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154,
+181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163,
+254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160,
+251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65,
+195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117,
+159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
+155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84,
+252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202,
+ 69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14,
+ 18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23,
+ 57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1,
+
+};
+
+static MV_U8 S[256] = {
+ 99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215, 171, 118,
+202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175, 156, 164, 114, 192,
+183, 253, 147, 38, 54, 63, 247, 204, 52, 165, 229, 241, 113, 216, 49, 21,
+ 4, 199, 35, 195, 24, 150, 5, 154, 7, 18, 128, 226, 235, 39, 178, 117,
+ 9, 131, 44, 26, 27, 110, 90, 160, 82, 59, 214, 179, 41, 227, 47, 132,
+ 83, 209, 0, 237, 32, 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207,
+208, 239, 170, 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168,
+ 81, 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243, 210,
+205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100, 93, 25, 115,
+ 96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184, 20, 222, 94, 11, 219,
+224, 50, 58, 10, 73, 6, 36, 92, 194, 211, 172, 98, 145, 149, 228, 121,
+231, 200, 55, 109, 141, 213, 78, 169, 108, 86, 244, 234, 101, 122, 174, 8,
+186, 120, 37, 46, 28, 166, 180, 198, 232, 221, 116, 31, 75, 189, 139, 138,
+112, 62, 181, 102, 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158,
+225, 248, 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223,
+140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84, 187, 22,
+};
+
+static MV_U8 Si[256] = {
+ 82, 9, 106, 213, 48, 54, 165, 56, 191, 64, 163, 158, 129, 243, 215, 251,
+124, 227, 57, 130, 155, 47, 255, 135, 52, 142, 67, 68, 196, 222, 233, 203,
+ 84, 123, 148, 50, 166, 194, 35, 61, 238, 76, 149, 11, 66, 250, 195, 78,
+ 8, 46, 161, 102, 40, 217, 36, 178, 118, 91, 162, 73, 109, 139, 209, 37,
+114, 248, 246, 100, 134, 104, 152, 22, 212, 164, 92, 204, 93, 101, 182, 146,
+108, 112, 72, 80, 253, 237, 185, 218, 94, 21, 70, 87, 167, 141, 157, 132,
+144, 216, 171, 0, 140, 188, 211, 10, 247, 228, 88, 5, 184, 179, 69, 6,
+208, 44, 30, 143, 202, 63, 15, 2, 193, 175, 189, 3, 1, 19, 138, 107,
+ 58, 145, 17, 65, 79, 103, 220, 234, 151, 242, 207, 206, 240, 180, 230, 115,
+150, 172, 116, 34, 231, 173, 53, 133, 226, 249, 55, 232, 28, 117, 223, 110,
+ 71, 241, 26, 113, 29, 41, 197, 137, 111, 183, 98, 14, 170, 24, 190, 27,
+252, 86, 62, 75, 198, 210, 121, 32, 154, 219, 192, 254, 120, 205, 90, 244,
+ 31, 221, 168, 51, 136, 7, 199, 49, 177, 18, 16, 89, 39, 128, 236, 95,
+ 96, 81, 127, 169, 25, 181, 74, 13, 45, 229, 122, 159, 147, 201, 156, 239,
+160, 224, 59, 77, 174, 42, 245, 176, 200, 235, 187, 60, 131, 83, 153, 97,
+ 23, 43, 4, 126, 186, 119, 214, 38, 225, 105, 20, 99, 85, 33, 12, 125,
+};
+
+/*
+static MV_U8 iG[4][4] = {
+{0x0e, 0x09, 0x0d, 0x0b},
+{0x0b, 0x0e, 0x09, 0x0d},
+{0x0d, 0x0b, 0x0e, 0x09},
+{0x09, 0x0d, 0x0b, 0x0e},
+};
+*/
+static MV_U32 rcon[30] = {
+ 0x01,0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, };
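Editorial note on the tables above (not part of the patch): the mul() macro in mvAesAlg.c combines them as summarized below.

    /*
     * mul(aa, bb) == mask[bb] & Alogtable[aa + Logtable[bb]]
     *   - 'aa' is already a logarithm: Logtable[2] == 25 and Logtable[3] == 1,
     *     so MixColumn's mul(25, x) is x*{02} and mul(1, x) is x*{03} in GF(2^8),
     *     matching the AES MixColumns coefficients {02, 03, 01, 01}; likewise
     *     mul(223/104/238/199, x) in InvMixColumn are x*{0e, 0b, 0d, 09}.
     *   - mask[bb] is 0x00 only for bb == 0, forcing the product to 0 there.
     *   - Alogtable[] holds two concatenated copies of the 256-entry antilog
     *     table, so the index aa + Logtable[bb] (at most 508) never needs a
     *     reduction modulo 255.
     */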
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesa.c b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesa.c
new file mode 100644
index 000000000..17ab086f0
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesa.c
@@ -0,0 +1,3126 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "cesa/mvCesa.h"
+
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#undef CESA_DEBUG
+
+
+/********** Global variables **********/
+
+/* If the request size is more than MV_CESA_MAX_BUF_SIZE, the
+ * request is processed as a fragmented request.
+ */
+
+MV_CESA_STATS cesaStats;
+
+MV_BUF_INFO cesaSramSaBuf;
+short cesaLastSid = -1;
+MV_CESA_SA* pCesaSAD = NULL;
+MV_U16 cesaMaxSA = 0;
+
+MV_CESA_REQ* pCesaReqFirst = NULL;
+MV_CESA_REQ* pCesaReqLast = NULL;
+MV_CESA_REQ* pCesaReqEmpty = NULL;
+MV_CESA_REQ* pCesaReqProcess = NULL;
+int cesaQueueDepth = 0;
+int cesaReqResources = 0;
+
+MV_CESA_SRAM_MAP* cesaSramVirtPtr = NULL;
+MV_U32 cesaCryptEngBase = 0;
+void *cesaOsHandle = NULL;
+#if (MV_CESA_VERSION >= 3)
+MV_U32 cesaChainLength = 0;
+int chainReqNum = 0;
+MV_U32 chainIndex = 0;
+MV_CESA_REQ* pNextActiveChain = 0;
+MV_CESA_REQ* pEndCurrChain = 0;
+MV_BOOL isFirstReq = MV_TRUE;
+#endif
+
+static INLINE MV_U8* mvCesaSramAddrGet(void)
+{
+#ifdef MV_CESA_NO_SRAM
+ return (MV_U8*)cesaSramVirtPtr;
+#else
+ return (MV_U8*)cesaCryptEngBase;
+#endif /* MV_CESA_NO_SRAM */
+}
+
+static INLINE MV_ULONG mvCesaSramVirtToPhys(void* pDev, MV_U8* pSramVirt)
+{
+#ifdef MV_CESA_NO_SRAM
+ return (MV_ULONG)mvOsIoVirtToPhy(NULL, pSramVirt);
+#else
+ return (MV_ULONG)pSramVirt;
+#endif /* MV_CESA_NO_SRAM */
+}
+
+/* Internal Function prototypes */
+
+static INLINE void mvCesaSramDescrBuild(MV_U32 config, int frag,
+ int cryptoOffset, int ivOffset, int cryptoLength,
+ int macOffset, int digestOffset, int macLength, int macTotalLen,
+ MV_CESA_REQ *pCesaReq, MV_DMA_DESC* pDmaDesc);
+
+static INLINE void mvCesaSramSaUpdate(short sid, MV_DMA_DESC *pDmaDesc);
+
+static INLINE int mvCesaDmaCopyPrepare(MV_CESA_MBUF* pMbuf, MV_U8* pSramBuf,
+ MV_DMA_DESC* pDmaDesc, MV_BOOL isToMbuf,
+ int offset, int copySize, MV_BOOL skipFlush);
+
+static void mvCesaHmacIvGet(MV_CESA_MAC_MODE macMode, unsigned char key[], int keyLength,
+ unsigned char innerIV[], unsigned char outerIV[]);
+
+static MV_STATUS mvCesaFragAuthComplete(MV_CESA_REQ* pReq, MV_CESA_SA* pSA,
+ int macDataSize);
+
+static MV_CESA_COMMAND* mvCesaCtrModeInit(void);
+
+static MV_STATUS mvCesaCtrModePrepare(MV_CESA_COMMAND *pCtrModeCmd, MV_CESA_COMMAND *pCmd);
+static MV_STATUS mvCesaCtrModeComplete(MV_CESA_COMMAND *pOrgCmd, MV_CESA_COMMAND *pCmd);
+static void mvCesaCtrModeFinish(MV_CESA_COMMAND *pCmd);
+
+static INLINE MV_STATUS mvCesaReqProcess(MV_CESA_REQ* pReq);
+static MV_STATUS mvCesaFragReqProcess(MV_CESA_REQ* pReq, MV_U8 frag);
+
+static INLINE MV_STATUS mvCesaParamCheck(MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd, MV_U8* pFixOffset);
+static INLINE MV_STATUS mvCesaFragParamCheck(MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd);
+
+static INLINE void mvCesaFragSizeFind(MV_CESA_SA* pSA, MV_CESA_REQ* pReq,
+ int cryptoOffset, int macOffset,
+ int* pCopySize, int* pCryptoDataSize, int* pMacDataSize);
+static MV_STATUS mvCesaMbufCacheUnmap(MV_CESA_MBUF* pMbuf, int offset, int size);
+
+
+/* Go to the next request in the request queue */
+static INLINE MV_CESA_REQ* MV_CESA_REQ_NEXT_PTR(MV_CESA_REQ* pReq)
+{
+ if(pReq == pCesaReqLast)
+ return pCesaReqFirst;
+
+ return pReq+1;
+}
+
+#if (MV_CESA_VERSION >= 3)
+/* Go to the previous request in the request queue */
+static INLINE MV_CESA_REQ* MV_CESA_REQ_PREV_PTR(MV_CESA_REQ* pReq)
+{
+ if(pReq == pCesaReqFirst)
+ return pCesaReqLast;
+
+ return pReq-1;
+}
+
+#endif
+
+
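Editorial note (not part of the patch): the request queue is a contiguous array allocated in mvCesaHalInit() below and treated as a ring by the two helpers above; roughly:

    /*
     * pCesaReqFirst/pCesaReqLast - bounds of the array (wrap-around points)
     * pCesaReqEmpty              - next free slot, advanced by mvCesaAction()
     * pCesaReqProcess            - oldest request not yet completed, advanced
     *                              by mvCesaReadyGet()
     */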
+static INLINE void mvCesaReqProcessStart(MV_CESA_REQ* pReq)
+{
+ int frag;
+
+#if (MV_CESA_VERSION >= 3)
+ pReq->state = MV_CESA_CHAIN;
+#else
+ pReq->state = MV_CESA_PROCESS;
+#endif
+ cesaStats.startCount++;
+
+ if(pReq->fragMode == MV_CESA_FRAG_NONE)
+ {
+ frag = 0;
+ }
+ else
+ {
+ frag = pReq->frags.nextFrag;
+ pReq->frags.nextFrag++;
+ }
+#if (MV_CESA_VERSION >= 2)
+ /* Enable TDMA engine */
+ MV_REG_WRITE(MV_CESA_TDMA_CURR_DESC_PTR_REG, 0);
+ MV_REG_WRITE(MV_CESA_TDMA_NEXT_DESC_PTR_REG,
+ (MV_U32)mvCesaVirtToPhys(&pReq->dmaDescBuf, pReq->dma[frag].pDmaFirst));
+#else
+ /* Enable IDMA engine */
+ MV_REG_WRITE(IDMA_CURR_DESC_PTR_REG(0), 0);
+ MV_REG_WRITE(IDMA_NEXT_DESC_PTR_REG(0),
+ (MV_U32)mvCesaVirtToPhys(&pReq->dmaDescBuf, pReq->dma[frag].pDmaFirst));
+#endif /* MV_CESA_VERSION >= 2 */
+
+#if defined(MV_BRIDGE_SYNC_REORDER)
+ mvOsBridgeReorderWA();
+#endif
+
+ /* Start Accelerator */
+ MV_REG_WRITE(MV_CESA_CMD_REG, MV_CESA_CMD_CHAN_ENABLE_MASK);
+}
+
+
+/*******************************************************************************
+* mvCesaHalInit - Initialize the CESA driver
+*
+* DESCRIPTION:
+*       This function initializes the CESA driver:
+*       1) Session database
+*       2) Request queue
+*       3) DMA descriptor lists - one list per request. Each list
+*          has MV_CESA_MAX_DMA_DESC descriptors.
+*
+* INPUT:
+* numOfSession - maximum number of supported sessions
+* queueDepth - number of elements in the request queue.
+* pSramBase - virtual address of Sram
+* osHandle - A handle used by the OS to allocate memory for the
+* module (Passed to the OS Services layer)
+*
+* RETURN:
+* MV_OK - Success
+* MV_NO_RESOURCE - Fail, can't allocate resources:
+* Session database, request queue,
+* DMA descriptors list, LRU cache database.
+* MV_NOT_ALIGNED - Sram base address is not 8 byte aligned.
+*
+*******************************************************************************/
+MV_STATUS mvCesaHalInit (int numOfSession, int queueDepth, char* pSramBase, MV_U32 cryptEngBase,
+ void *osHandle)
+{
+ int i, req;
+ MV_U32 descOffsetReg, configReg;
+ MV_CESA_SRAM_SA *pSramSA;
+
+
+ mvOsPrintf("mvCesaInit: sessions=%d, queue=%d, pSram=%p\n",
+ numOfSession, queueDepth, pSramBase);
+
+ cesaOsHandle = osHandle;
+ /* Create Session database */
+ pCesaSAD = mvOsMalloc(sizeof(MV_CESA_SA)*numOfSession);
+ if(pCesaSAD == NULL)
+ {
+ mvOsPrintf("mvCesaInit: Can't allocate %u bytes for %d SAs\n",
+ sizeof(MV_CESA_SA)*numOfSession, numOfSession);
+ mvCesaFinish();
+ return MV_NO_RESOURCE;
+ }
+ memset(pCesaSAD, 0, sizeof(MV_CESA_SA)*numOfSession);
+ cesaMaxSA = numOfSession;
+
+    /* Allocate image of sramSA in the DRAM */
+ cesaSramSaBuf.bufSize = sizeof(MV_CESA_SRAM_SA)*numOfSession +
+ CPU_D_CACHE_LINE_SIZE;
+
+ cesaSramSaBuf.bufVirtPtr = mvOsIoCachedMalloc(osHandle,cesaSramSaBuf.bufSize,
+ &cesaSramSaBuf.bufPhysAddr,
+ &cesaSramSaBuf.memHandle);
+
+ if(cesaSramSaBuf.bufVirtPtr == NULL)
+ {
+ mvOsPrintf("mvCesaInit: Can't allocate %d bytes for sramSA structures\n",
+ cesaSramSaBuf.bufSize);
+ mvCesaFinish();
+ return MV_NO_RESOURCE;
+ }
+ memset(cesaSramSaBuf.bufVirtPtr, 0, cesaSramSaBuf.bufSize);
+ pSramSA = (MV_CESA_SRAM_SA*)MV_ALIGN_UP((MV_ULONG)cesaSramSaBuf.bufVirtPtr,
+ CPU_D_CACHE_LINE_SIZE);
+ for(i=0; i<numOfSession; i++)
+ {
+ pCesaSAD[i].pSramSA = &pSramSA[i];
+ }
+
+ /* Create request queue */
+ pCesaReqFirst = mvOsMalloc(sizeof(MV_CESA_REQ)*queueDepth);
+ if(pCesaReqFirst == NULL)
+ {
+ mvOsPrintf("mvCesaInit: Can't allocate %u bytes for %d requests\n",
+ sizeof(MV_CESA_REQ)*queueDepth, queueDepth);
+ mvCesaFinish();
+ return MV_NO_RESOURCE;
+ }
+ memset(pCesaReqFirst, 0, sizeof(MV_CESA_REQ)*queueDepth);
+ pCesaReqEmpty = pCesaReqFirst;
+ pCesaReqLast = pCesaReqFirst + (queueDepth-1);
+ pCesaReqProcess = pCesaReqEmpty;
+ cesaQueueDepth = queueDepth;
+ cesaReqResources = queueDepth;
+#if (MV_CESA_VERSION >= 3)
+ cesaChainLength = MAX_CESA_CHAIN_LENGTH;
+#endif
+ /* pSramBase must be 8 byte aligned */
+ if( MV_IS_NOT_ALIGN((MV_ULONG)pSramBase, 8) )
+ {
+ mvOsPrintf("mvCesaInit: pSramBase (%p) must be 8 byte aligned\n",
+ pSramBase);
+ mvCesaFinish();
+ return MV_NOT_ALIGNED;
+ }
+ cesaSramVirtPtr = (MV_CESA_SRAM_MAP*)pSramBase;
+
+ cesaCryptEngBase = cryptEngBase;
+
+ /*memset(cesaSramVirtPtr, 0, sizeof(MV_CESA_SRAM_MAP));*/
+
+ /* Clear registers */
+ MV_REG_WRITE( MV_CESA_CFG_REG, 0);
+ MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG, 0);
+ MV_REG_WRITE( MV_CESA_ISR_MASK_REG, 0);
+
+ /* Initialize DMA descriptor lists for all requests in Request queue */
+ descOffsetReg = configReg = 0;
+ for(req=0; req<queueDepth; req++)
+ {
+ int frag;
+ MV_CESA_REQ* pReq;
+ MV_DMA_DESC* pDmaDesc;
+
+ pReq = &pCesaReqFirst[req];
+
+ pReq->cesaDescBuf.bufSize = sizeof(MV_CESA_DESC)*MV_CESA_MAX_REQ_FRAGS +
+ CPU_D_CACHE_LINE_SIZE;
+
+ pReq->cesaDescBuf.bufVirtPtr =
+ mvOsIoCachedMalloc(osHandle,pReq->cesaDescBuf.bufSize,
+ &pReq->cesaDescBuf.bufPhysAddr,
+ &pReq->cesaDescBuf.memHandle);
+
+ if(pReq->cesaDescBuf.bufVirtPtr == NULL)
+ {
+ mvOsPrintf("mvCesaInit: req=%d, Can't allocate %d bytes for CESA descriptors\n",
+ req, pReq->cesaDescBuf.bufSize);
+ mvCesaFinish();
+ return MV_NO_RESOURCE;
+ }
+ memset(pReq->cesaDescBuf.bufVirtPtr, 0, pReq->cesaDescBuf.bufSize);
+ pReq->pCesaDesc = (MV_CESA_DESC*)MV_ALIGN_UP((MV_ULONG)pReq->cesaDescBuf.bufVirtPtr,
+ CPU_D_CACHE_LINE_SIZE);
+
+ pReq->dmaDescBuf.bufSize = sizeof(MV_DMA_DESC)*MV_CESA_MAX_DMA_DESC*MV_CESA_MAX_REQ_FRAGS +
+ CPU_D_CACHE_LINE_SIZE;
+
+ pReq->dmaDescBuf.bufVirtPtr =
+ mvOsIoCachedMalloc(osHandle,pReq->dmaDescBuf.bufSize,
+ &pReq->dmaDescBuf.bufPhysAddr,
+ &pReq->dmaDescBuf.memHandle);
+
+ if(pReq->dmaDescBuf.bufVirtPtr == NULL)
+ {
+ mvOsPrintf("mvCesaInit: req=%d, Can't allocate %d bytes for DMA descriptor list\n",
+ req, pReq->dmaDescBuf.bufSize);
+ mvCesaFinish();
+ return MV_NO_RESOURCE;
+ }
+ memset(pReq->dmaDescBuf.bufVirtPtr, 0, pReq->dmaDescBuf.bufSize);
+ pDmaDesc = (MV_DMA_DESC*)MV_ALIGN_UP((MV_ULONG)pReq->dmaDescBuf.bufVirtPtr,
+ CPU_D_CACHE_LINE_SIZE);
+
+ for(frag=0; frag<MV_CESA_MAX_REQ_FRAGS; frag++)
+ {
+ MV_CESA_DMA* pDma = &pReq->dma[frag];
+
+ pDma->pDmaFirst = pDmaDesc;
+ pDma->pDmaLast = NULL;
+
+ for(i=0; i<MV_CESA_MAX_DMA_DESC-1; i++)
+ {
+ /* link all DMA descriptors together */
+ pDma->pDmaFirst[i].phyNextDescPtr =
+ MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf, &pDmaDesc[i+1]));
+ }
+ pDma->pDmaFirst[i].phyNextDescPtr = 0;
+ mvOsCacheFlush(NULL, &pDma->pDmaFirst[0], MV_CESA_MAX_DMA_DESC*sizeof(MV_DMA_DESC));
+
+ pDmaDesc += MV_CESA_MAX_DMA_DESC;
+ }
+ }
+ /*mvCesaCryptoIvSet(NULL, MV_CESA_MAX_IV_LENGTH);*/
+ descOffsetReg = (MV_U16)((MV_U8*)&cesaSramVirtPtr->desc - mvCesaSramAddrGet());
+ MV_REG_WRITE(MV_CESA_CHAN_DESC_OFFSET_REG, descOffsetReg);
+
+ configReg |= (MV_CESA_CFG_WAIT_DMA_MASK | MV_CESA_CFG_ACT_DMA_MASK);
+#if (MV_CESA_VERSION >= 3)
+ configReg |= MV_CESA_CFG_CHAIN_MODE_MASK;
+#endif
+
+#if (MV_CESA_VERSION >= 2)
+ /* Initialize TDMA engine */
+ MV_REG_WRITE(MV_CESA_TDMA_CTRL_REG, MV_CESA_TDMA_CTRL_VALUE);
+ MV_REG_WRITE(MV_CESA_TDMA_BYTE_COUNT_REG, 0);
+ MV_REG_WRITE(MV_CESA_TDMA_CURR_DESC_PTR_REG, 0);
+#else
+ /* Initialize IDMA #0 engine */
+ MV_REG_WRITE(IDMA_CTRL_LOW_REG(0), 0);
+ MV_REG_WRITE(IDMA_BYTE_COUNT_REG(0), 0);
+ MV_REG_WRITE(IDMA_CURR_DESC_PTR_REG(0), 0);
+ MV_REG_WRITE(IDMA_CTRL_HIGH_REG(0), ICCHR_ENDIAN_LITTLE
+#ifdef MV_CPU_LE
+ | ICCHR_DESC_BYTE_SWAP_EN
+#endif
+ );
+ /* Clear Cause Byte of IDMA channel to be used */
+ MV_REG_WRITE( IDMA_CAUSE_REG, ~ICICR_CAUSE_MASK_ALL(0));
+ MV_REG_WRITE(IDMA_CTRL_LOW_REG(0), MV_CESA_IDMA_CTRL_LOW_VALUE);
+#endif /* (MV_CESA_VERSION >= 2) */
+
+ /* Set CESA configuration registers */
+ MV_REG_WRITE( MV_CESA_CFG_REG, configReg);
+ mvCesaDebugStatsClear();
+
+ return MV_OK;
+}
+
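A hedged usage sketch (not part of the patch) of the driver lifecycle defined by mvCesaHalInit() above and mvCesaFinish() below; the SRAM and engine addresses are placeholders normally supplied by board-support code.

    /* Hypothetical bring-up sequence -- placeholder values, illustration only. */
    MV_STATUS status;

    status = mvCesaHalInit(32 /* sessions */, 16 /* queue depth */,
                           sramVirtBase /* 8-byte aligned SRAM virtual address */,
                           cryptEngBase /* crypto engine base */,
                           osHandle);
    if (status != MV_OK) {
        /* MV_NO_RESOURCE or MV_NOT_ALIGNED, as documented above */
    }
    /* ... open sessions and submit requests ... */
    mvCesaFinish();   /* releases the SA database, request queue and DMA lists */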
+/*******************************************************************************
+* mvCesaFinish - Shutdown the CESA driver
+*
+* DESCRIPTION:
+*       This function shuts down the CESA driver and frees all allocated resources.
+*
+* INPUT: None
+*
+* RETURN:
+* MV_OK - Success
+* Other - Fail
+*
+*******************************************************************************/
+MV_STATUS mvCesaFinish (void)
+{
+ int req;
+ MV_CESA_REQ* pReq;
+
+ mvOsPrintf("mvCesaFinish: \n");
+
+ cesaSramVirtPtr = NULL;
+
+ /* Free all resources: DMA list, etc. */
+ for(req=0; req<cesaQueueDepth; req++)
+ {
+ pReq = &pCesaReqFirst[req];
+ if(pReq->dmaDescBuf.bufVirtPtr != NULL)
+ {
+ mvOsIoCachedFree(cesaOsHandle,pReq->dmaDescBuf.bufSize,
+ pReq->dmaDescBuf.bufPhysAddr,
+ pReq->dmaDescBuf.bufVirtPtr,
+ pReq->dmaDescBuf.memHandle);
+ }
+ if(pReq->cesaDescBuf.bufVirtPtr != NULL)
+ {
+ mvOsIoCachedFree(cesaOsHandle,pReq->cesaDescBuf.bufSize,
+ pReq->cesaDescBuf.bufPhysAddr,
+ pReq->cesaDescBuf.bufVirtPtr,
+ pReq->cesaDescBuf.memHandle);
+ }
+ }
+#if (MV_CESA_VERSION < 2)
+ MV_REG_WRITE(IDMA_CTRL_LOW_REG(0), 0);
+#endif /* (MV_CESA_VERSION < 2) */
+
+ /* Free request queue */
+ if(pCesaReqFirst != NULL)
+ {
+ mvOsFree(pCesaReqFirst);
+ pCesaReqFirst = pCesaReqLast = NULL;
+ pCesaReqEmpty = pCesaReqProcess = NULL;
+ cesaQueueDepth = cesaReqResources = 0;
+ }
+ /* Free SA database */
+ if(pCesaSAD != NULL)
+ {
+ mvOsFree(pCesaSAD);
+ pCesaSAD = NULL;
+ cesaMaxSA = 0;
+ }
+ MV_REG_WRITE( MV_CESA_CFG_REG, 0);
+ MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG, 0);
+ MV_REG_WRITE( MV_CESA_ISR_MASK_REG, 0);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaCryptoIvSet - Set IV value for Crypto algorithm working in CBC mode
+*
+* DESCRIPTION:
+*       This function sets the IV value used by crypto algorithms in CBC mode.
+*       Each channel has its own IV value.
+*       The IV value is taken from the caller. If no IV is passed, or only part
+*       of it, the function initializes the remaining part of the IV (or the
+*       whole IV) with random data.
+*
+* INPUT:
+* MV_U8* pIV - Pointer to IV value supplied by user. If pIV==NULL
+* the function will generate random IV value.
+* int ivSize - size (in bytes) of IV provided by user. If ivSize is
+* smaller than maximum IV size, the function will complete
+* IV by random value.
+*
+* RETURN:
+* MV_OK - Success
+* Other - Fail
+*
+*******************************************************************************/
+MV_STATUS mvCesaCryptoIvSet(MV_U8* pIV, int ivSize)
+{
+ MV_U8* pSramIV;
+#if defined(MV646xx)
+ mvOsPrintf("mvCesaCryptoIvSet: ERR. shouldn't use this call on MV64660\n");
+#endif
+ pSramIV = cesaSramVirtPtr->cryptoIV;
+ if(ivSize > MV_CESA_MAX_IV_LENGTH)
+ {
+ mvOsPrintf("mvCesaCryptoIvSet: ivSize (%d) is too large\n", ivSize);
+ ivSize = MV_CESA_MAX_IV_LENGTH;
+ }
+ if(pIV != NULL)
+ {
+        memcpy(pSramIV, pIV, ivSize);
+        /* advance past the caller-supplied bytes, then fill only the remainder randomly */
+        pSramIV += ivSize;
+        ivSize = MV_CESA_MAX_IV_LENGTH - ivSize;
+ }
+
+ while(ivSize > 0)
+ {
+ int size, mv_random = mvOsRand();
+
+ size = MV_MIN(ivSize, sizeof(mv_random));
+ memcpy(pSramIV, (void*)&mv_random, size);
+
+ pSramIV += size;
+ ivSize -= size;
+ }
+/*
+ mvOsCacheFlush(NULL, cesaSramVirtPtr->cryptoIV,
+ MV_CESA_MAX_IV_LENGTH);
+ mvOsCacheInvalidate(NULL, cesaSramVirtPtr->cryptoIV,
+ MV_CESA_MAX_IV_LENGTH);
+*/
+ return MV_OK;
+}
+
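A small hedged example (not part of the patch) of the two ways mvCesaCryptoIvSet() above can be called:

    /* Illustration only. */
    MV_U8 myIv[MV_CESA_MAX_IV_LENGTH] = { 0 };   /* filled by the caller */

    /* caller-supplied IV; any missing tail bytes are filled with random data */
    mvCesaCryptoIvSet(myIv, MV_CESA_MAX_IV_LENGTH);

    /* fully random IV generated by the driver */
    mvCesaCryptoIvSet(NULL, MV_CESA_MAX_IV_LENGTH);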
+/*******************************************************************************
+* mvCesaSessionOpen - Open new uni-directional crypto session
+*
+* DESCRIPTION:
+*       This function opens a new session.
+*
+* INPUT:
+* MV_CESA_OPEN_SESSION *pSession - pointer to new session input parameters
+*
+* OUTPUT:
+* short *pSid - session ID, should be used for all future
+* requests over this session.
+*
+* RETURN:
+*       MV_OK           - Session opened successfully.
+* MV_FULL - All sessions are in use, no free place in
+* SA database.
+* MV_BAD_PARAM - One of session input parameters is invalid.
+*
+*******************************************************************************/
+MV_STATUS mvCesaSessionOpen(MV_CESA_OPEN_SESSION *pSession, short* pSid)
+{
+ short sid;
+ MV_U32 config = 0;
+ int digestSize;
+
+ cesaStats.openedCount++;
+
+ /* Find free entry in SAD */
+ for(sid=0; sid<cesaMaxSA; sid++)
+ {
+ if(pCesaSAD[sid].valid == 0)
+ {
+ break;
+ }
+ }
+ if(sid == cesaMaxSA)
+ {
+ mvOsPrintf("mvCesaSessionOpen: SA Database is FULL\n");
+ return MV_FULL;
+ }
+
+ /* Check Input parameters for Open session */
+ if (pSession->operation >= MV_CESA_MAX_OPERATION)
+ {
+ mvOsPrintf("mvCesaSessionOpen: Unexpected operation %d\n",
+ pSession->operation);
+ return MV_BAD_PARAM;
+ }
+ config |= (pSession->operation << MV_CESA_OPERATION_OFFSET);
+
+ if( (pSession->direction != MV_CESA_DIR_ENCODE) &&
+ (pSession->direction != MV_CESA_DIR_DECODE) )
+ {
+ mvOsPrintf("mvCesaSessionOpen: Unexpected direction %d\n",
+ pSession->direction);
+ return MV_BAD_PARAM;
+ }
+ config |= (pSession->direction << MV_CESA_DIRECTION_BIT);
+ /* Clear SA entry */
+ /* memset(&pCesaSAD[sid], 0, sizeof(pCesaSAD[sid])); */
+
+ /* Check AUTH parameters and update SA entry */
+ if(pSession->operation != MV_CESA_CRYPTO_ONLY)
+ {
+ /* For HMAC (MD5 and SHA1) - Maximum Key size is 64 bytes */
+ if( (pSession->macMode == MV_CESA_MAC_HMAC_MD5) ||
+ (pSession->macMode == MV_CESA_MAC_HMAC_SHA1) )
+ {
+ if(pSession->macKeyLength > MV_CESA_MAX_MAC_KEY_LENGTH)
+ {
+ mvOsPrintf("mvCesaSessionOpen: macKeyLength %d is too large\n",
+ pSession->macKeyLength);
+ return MV_BAD_PARAM;
+ }
+ mvCesaHmacIvGet(pSession->macMode, pSession->macKey, pSession->macKeyLength,
+ pCesaSAD[sid].pSramSA->macInnerIV,
+ pCesaSAD[sid].pSramSA->macOuterIV);
+ pCesaSAD[sid].macKeyLength = pSession->macKeyLength;
+ }
+ switch(pSession->macMode)
+ {
+ case MV_CESA_MAC_MD5:
+ case MV_CESA_MAC_HMAC_MD5:
+ digestSize = MV_CESA_MD5_DIGEST_SIZE;
+ break;
+
+ case MV_CESA_MAC_SHA1:
+ case MV_CESA_MAC_HMAC_SHA1:
+ digestSize = MV_CESA_SHA1_DIGEST_SIZE;
+ break;
+
+ default:
+ mvOsPrintf("mvCesaSessionOpen: Unexpected macMode %d\n",
+ pSession->macMode);
+ return MV_BAD_PARAM;
+ }
+ config |= (pSession->macMode << MV_CESA_MAC_MODE_OFFSET);
+
+ /* Supported digest sizes: MD5 - 16 bytes (128 bits), */
+        /* SHA1 - 20 bytes (160 bits); 12 bytes (96 bits) is allowed for both  */
+ if( (pSession->digestSize != digestSize) && (pSession->digestSize != 12))
+ {
+ mvOsPrintf("mvCesaSessionOpen: Unexpected digest size %d\n",
+ pSession->digestSize);
+ mvOsPrintf("\t Valid values [bytes]: MD5-16, SHA1-20, Both-12\n");
+ return MV_BAD_PARAM;
+ }
+ pCesaSAD[sid].digestSize = pSession->digestSize;
+
+ if(pCesaSAD[sid].digestSize == 12)
+ {
+ /* Set MV_CESA_MAC_DIGEST_SIZE_BIT if digest size is 96 bits */
+ config |= (MV_CESA_MAC_DIGEST_96B << MV_CESA_MAC_DIGEST_SIZE_BIT);
+ }
+ }
+
+ /* Check CRYPTO parameters and update SA entry */
+ if(pSession->operation != MV_CESA_MAC_ONLY)
+ {
+ switch(pSession->cryptoAlgorithm)
+ {
+ case MV_CESA_CRYPTO_DES:
+ pCesaSAD[sid].cryptoKeyLength = MV_CESA_DES_KEY_LENGTH;
+ pCesaSAD[sid].cryptoBlockSize = MV_CESA_DES_BLOCK_SIZE;
+ break;
+
+ case MV_CESA_CRYPTO_3DES:
+ pCesaSAD[sid].cryptoKeyLength = MV_CESA_3DES_KEY_LENGTH;
+ pCesaSAD[sid].cryptoBlockSize = MV_CESA_DES_BLOCK_SIZE;
+ /* Only EDE mode is supported */
+ config |= (MV_CESA_CRYPTO_3DES_EDE <<
+ MV_CESA_CRYPTO_3DES_MODE_BIT);
+ break;
+
+ case MV_CESA_CRYPTO_AES:
+ switch(pSession->cryptoKeyLength)
+ {
+ case 16:
+ pCesaSAD[sid].cryptoKeyLength = MV_CESA_AES_128_KEY_LENGTH;
+ config |= (MV_CESA_CRYPTO_AES_KEY_128 <<
+ MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET);
+ break;
+
+ case 24:
+ pCesaSAD[sid].cryptoKeyLength = MV_CESA_AES_192_KEY_LENGTH;
+ config |= (MV_CESA_CRYPTO_AES_KEY_192 <<
+ MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET);
+ break;
+
+ case 32:
+ default:
+ pCesaSAD[sid].cryptoKeyLength = MV_CESA_AES_256_KEY_LENGTH;
+ config |= (MV_CESA_CRYPTO_AES_KEY_256 <<
+ MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET);
+ break;
+ }
+ pCesaSAD[sid].cryptoBlockSize = MV_CESA_AES_BLOCK_SIZE;
+ break;
+
+ default:
+ mvOsPrintf("mvCesaSessionOpen: Unexpected cryptoAlgorithm %d\n",
+ pSession->cryptoAlgorithm);
+ return MV_BAD_PARAM;
+ }
+ config |= (pSession->cryptoAlgorithm << MV_CESA_CRYPTO_ALG_OFFSET);
+
+ if(pSession->cryptoKeyLength != pCesaSAD[sid].cryptoKeyLength)
+ {
+ mvOsPrintf("cesaSessionOpen: Wrong CryptoKeySize %d != %d\n",
+ pSession->cryptoKeyLength, pCesaSAD[sid].cryptoKeyLength);
+ return MV_BAD_PARAM;
+ }
+
+ /* Copy Crypto key */
+ if( (pSession->cryptoAlgorithm == MV_CESA_CRYPTO_AES) &&
+ (pSession->direction == MV_CESA_DIR_DECODE))
+ {
+ /* Crypto Key for AES decode is computed from original key material */
+ /* and depend on cryptoKeyLength (128/192/256 bits) */
+ aesMakeKey(pCesaSAD[sid].pSramSA->cryptoKey, pSession->cryptoKey,
+ pSession->cryptoKeyLength*8, MV_CESA_AES_BLOCK_SIZE*8);
+ }
+ else
+ {
+ /*panic("mvCesaSessionOpen2");*/
+ memcpy(pCesaSAD[sid].pSramSA->cryptoKey, pSession->cryptoKey,
+ pCesaSAD[sid].cryptoKeyLength);
+
+ }
+
+ switch(pSession->cryptoMode)
+ {
+ case MV_CESA_CRYPTO_ECB:
+ pCesaSAD[sid].cryptoIvSize = 0;
+ break;
+
+ case MV_CESA_CRYPTO_CBC:
+ pCesaSAD[sid].cryptoIvSize = pCesaSAD[sid].cryptoBlockSize;
+ break;
+
+ case MV_CESA_CRYPTO_CTR:
+ /* Supported only for AES algorithm */
+ if(pSession->cryptoAlgorithm != MV_CESA_CRYPTO_AES)
+ {
+ mvOsPrintf("mvCesaSessionOpen: CRYPTO CTR mode supported for AES only\n");
+ return MV_BAD_PARAM;
+ }
+ pCesaSAD[sid].cryptoIvSize = 0;
+ pCesaSAD[sid].ctrMode = 1;
+ /* Replace to ECB mode for HW */
+ pSession->cryptoMode = MV_CESA_CRYPTO_ECB;
+ break;
+
+ default:
+ mvOsPrintf("mvCesaSessionOpen: Unexpected cryptoMode %d\n",
+ pSession->cryptoMode);
+ return MV_BAD_PARAM;
+ }
+
+ config |= (pSession->cryptoMode << MV_CESA_CRYPTO_MODE_BIT);
+ }
+ pCesaSAD[sid].config = config;
+
+ mvOsCacheFlush(NULL, pCesaSAD[sid].pSramSA, sizeof(MV_CESA_SRAM_SA));
+ if(pSid != NULL)
+ *pSid = sid;
+
+ pCesaSAD[sid].valid = 1;
+ return MV_OK;
+}
+
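A hedged sketch (not part of the patch) of opening and later closing a session with the function above. The field and constant names are the ones checked inside mvCesaSessionOpen() itself; the key buffer is a placeholder, and the exact MV_CESA_OPEN_SESSION layout (e.g. whether cryptoKey is an in-struct buffer) lives in the driver header, which is outside this excerpt.

    /* Hypothetical session setup -- illustration only. */
    MV_CESA_OPEN_SESSION session;
    short sid;

    memset(&session, 0, sizeof(session));
    session.operation       = MV_CESA_CRYPTO_ONLY;
    session.direction       = MV_CESA_DIR_ENCODE;
    session.cryptoAlgorithm = MV_CESA_CRYPTO_AES;
    session.cryptoMode      = MV_CESA_CRYPTO_CBC;
    session.cryptoKeyLength = 16;                  /* AES-128 */
    memcpy(session.cryptoKey, aesKey, 16);         /* aesKey: caller's key (placeholder) */

    if (mvCesaSessionOpen(&session, &sid) != MV_OK) {
        /* MV_FULL or MV_BAD_PARAM, as documented above */
    }
    /* ... submit requests carrying sessionId == sid ... */
    mvCesaSessionClose(sid);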
+/*******************************************************************************
+* mvCesaSessionClose - Close active crypto session
+*
+* DESCRIPTION:
+*       This function closes an existing session.
+*
+* INPUT:
+* short sid - Unique identifier of the session to be closed
+*
+* RETURN:
+* MV_OK - Session closed successfully.
+* MV_BAD_PARAM - Session identifier is out of valid range.
+* MV_NOT_FOUND - There is no active session with such ID.
+*
+*******************************************************************************/
+MV_STATUS mvCesaSessionClose(short sid)
+{
+ cesaStats.closedCount++;
+
+ if(sid >= cesaMaxSA)
+ {
+ mvOsPrintf("CESA Error: sid (%d) is too big\n", sid);
+ return MV_BAD_PARAM;
+ }
+ if(pCesaSAD[sid].valid == 0)
+ {
+ mvOsPrintf("CESA Warning: Session (sid=%d) is invalid\n", sid);
+ return MV_NOT_FOUND;
+ }
+ if(cesaLastSid == sid)
+ cesaLastSid = -1;
+
+ pCesaSAD[sid].valid = 0;
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaAction - Perform crypto operation
+*
+* DESCRIPTION:
+*       This function places a new request on the CESA request FIFO queue for
+*       further HW processing. The function checks the request parameters before
+*       adding the request to the queue. If one of the CESA channels is ready for
+*       processing, the request is passed to the HW. When request processing is
+*       finished, the CESA interrupt is generated by the HW. The caller should
+*       then call the mvCesaReadyGet() function to complete request processing
+*       and get the result.
+*
+* INPUT:
+* MV_CESA_COMMAND *pCmd - pointer to new CESA request.
+* It includes pointers to Source and Destination
+*                               buffers, the session identifier obtained from
+* mvCesaSessionOpen() function, pointer to caller
+* private data and all needed crypto parameters.
+*
+* RETURN:
+* MV_OK - request successfully added to request queue
+* and will be processed.
+*       MV_NO_MORE      - request successfully added to the request queue and
+*                         will be processed, but the request queue became full
+*                         and the next request will not be accepted.
+*       MV_NO_RESOURCE  - request queue is FULL and the request can not
+*                         be processed.
+*       MV_OUT_OF_CPU_MEM - memory allocation needed for request processing
+*                         failed. The request can not be processed.
+*       MV_NOT_ALLOWED  - This mixed request (CRYPTO+MAC) can not be processed
+*                         as one request and should be split into two requests:
+*                         CRYPTO_ONLY and MAC_ONLY.
+* MV_BAD_PARAM - One of the request parameters is out of valid range.
+* The request can not be processed.
+*
+*******************************************************************************/
+MV_STATUS mvCesaAction (MV_CESA_COMMAND *pCmd)
+{
+ MV_STATUS status;
+ MV_CESA_REQ* pReq = pCesaReqEmpty;
+ int sid = pCmd->sessionId;
+ MV_CESA_SA* pSA = &pCesaSAD[sid];
+#if (MV_CESA_VERSION >= 3)
+ MV_CESA_REQ* pFromReq;
+ MV_CESA_REQ* pToReq;
+#endif
+ cesaStats.reqCount++;
+
+ /* Check that the request queue is not FULL */
+ if(cesaReqResources == 0)
+ return MV_NO_RESOURCE;
+
+ if( (sid >= cesaMaxSA) || (!pSA->valid) )
+ {
+ mvOsPrintf("CESA Action Error: Session sid=%d is INVALID\n", sid);
+ return MV_BAD_PARAM;
+ }
+ pSA->count++;
+
+ if(pSA->ctrMode)
+ {
+ /* AES in CTR mode can't be mixed with Authentication */
+ if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+ (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
+ {
+ mvOsPrintf("mvCesaAction : CRYPTO CTR mode can't be mixed with AUTH\n");
+ return MV_NOT_ALLOWED;
+ }
+ /* All other request parameters should not be checked because key stream */
+        /* (not user data) is processed by the AES HW engine */
+ pReq->pOrgCmd = pCmd;
+ /* Allocate temporary pCmd structure for Key stream */
+ pCmd = mvCesaCtrModeInit();
+ if(pCmd == NULL)
+ return MV_OUT_OF_CPU_MEM;
+
+ /* Prepare Key stream */
+ mvCesaCtrModePrepare(pCmd, pReq->pOrgCmd);
+ pReq->fixOffset = 0;
+ }
+ else
+ {
+        /* Check request parameters and calculate fixOffset */
+ status = mvCesaParamCheck(pSA, pCmd, &pReq->fixOffset);
+ if(status != MV_OK)
+ {
+ return status;
+ }
+ }
+ pReq->pCmd = pCmd;
+
+ /* Check if the packet need fragmentation */
+ if(pCmd->pSrc->mbufSize <= sizeof(cesaSramVirtPtr->buf) )
+ {
+ /* request size is smaller than single buffer size */
+ pReq->fragMode = MV_CESA_FRAG_NONE;
+
+ /* Prepare NOT fragmented packets */
+ status = mvCesaReqProcess(pReq);
+ if(status != MV_OK)
+ {
+ mvOsPrintf("CesaReady: ReqProcess error: pReq=%p, status=0x%x\n",
+ pReq, status);
+ }
+#if (MV_CESA_VERSION >= 3)
+ pReq->frags.numFrag = 1;
+#endif
+ }
+ else
+ {
+ MV_U8 frag = 0;
+
+ /* request size is larger than buffer size - needs fragmentation */
+
+ /* Check restrictions for processing fragmented packets */
+ status = mvCesaFragParamCheck(pSA, pCmd);
+ if(status != MV_OK)
+ return status;
+
+ pReq->fragMode = MV_CESA_FRAG_FIRST;
+ pReq->frags.nextFrag = 0;
+
+ /* Prepare Process Fragmented packets */
+ while(pReq->fragMode != MV_CESA_FRAG_LAST)
+ {
+ if(frag >= MV_CESA_MAX_REQ_FRAGS)
+ {
+ mvOsPrintf("mvCesaAction Error: Too large request frag=%d\n", frag);
+ return MV_OUT_OF_CPU_MEM;
+ }
+ status = mvCesaFragReqProcess(pReq, frag);
+ if(status == MV_OK) {
+#if (MV_CESA_VERSION >= 3)
+ if(frag) {
+ pReq->dma[frag-1].pDmaLast->phyNextDescPtr =
+ MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf, pReq->dma[frag].pDmaFirst));
+ mvOsCacheFlush(NULL, pReq->dma[frag-1].pDmaLast, sizeof(MV_DMA_DESC));
+ }
+#endif
+ frag++;
+ }
+ }
+ pReq->frags.numFrag = frag;
+#if (MV_CESA_VERSION >= 3)
+ if(chainReqNum) {
+ chainReqNum += pReq->frags.numFrag;
+ if(chainReqNum >= MAX_CESA_CHAIN_LENGTH)
+ chainReqNum = MAX_CESA_CHAIN_LENGTH;
+ }
+#endif
+ }
+
+ pReq->state = MV_CESA_PENDING;
+
+ pCesaReqEmpty = MV_CESA_REQ_NEXT_PTR(pReq);
+ cesaReqResources -= 1;
+
+/* #ifdef CESA_DEBUG */
+ if( (cesaQueueDepth - cesaReqResources) > cesaStats.maxReqCount)
+ cesaStats.maxReqCount = (cesaQueueDepth - cesaReqResources);
+/* #endif CESA_DEBUG */
+
+ cesaLastSid = sid;
+
+#if (MV_CESA_VERSION >= 3)
+    /* Are we within chain boundaries and following the first request? */
+ if((chainReqNum > 0) && (chainReqNum < MAX_CESA_CHAIN_LENGTH)) {
+ if(chainIndex) {
+ pFromReq = MV_CESA_REQ_PREV_PTR(pReq);
+ pToReq = pReq;
+ pReq->state = MV_CESA_CHAIN;
+ /* assume concatenating is possible */
+ pFromReq->dma[pFromReq->frags.numFrag-1].pDmaLast->phyNextDescPtr =
+ MV_32BIT_LE(mvCesaVirtToPhys(&pToReq->dmaDescBuf, pToReq->dma[0].pDmaFirst));
+ mvOsCacheFlush(NULL, pFromReq->dma[pFromReq->frags.numFrag-1].pDmaLast, sizeof(MV_DMA_DESC));
+
+ /* align active & next pointers */
+ if(pNextActiveChain->state != MV_CESA_PENDING)
+ pEndCurrChain = pNextActiveChain = MV_CESA_REQ_NEXT_PTR(pReq);
+ }
+ else { /* we have only one chain, start new one */
+ chainReqNum = 0;
+ chainIndex++;
+ /* align active & next pointers */
+ if(pNextActiveChain->state != MV_CESA_PENDING)
+ pEndCurrChain = pNextActiveChain = pReq;
+ }
+ }
+ else {
+ /* In case we concatenate full chain */
+ if(chainReqNum == MAX_CESA_CHAIN_LENGTH) {
+ chainIndex++;
+ if(pNextActiveChain->state != MV_CESA_PENDING)
+ pEndCurrChain = pNextActiveChain = pReq;
+ chainReqNum = 0;
+ }
+
+ pReq = pCesaReqProcess;
+ if(pReq->state == MV_CESA_PENDING) {
+ pNextActiveChain = pReq;
+ pEndCurrChain = MV_CESA_REQ_NEXT_PTR(pReq);
+ /* Start Process new request */
+ mvCesaReqProcessStart(pReq);
+ }
+ }
+
+ chainReqNum++;
+
+ if((chainIndex < MAX_CESA_CHAIN_LENGTH) && (chainReqNum > cesaStats.maxChainUsage))
+ cesaStats.maxChainUsage = chainReqNum;
+
+#else
+
+ /* Check status of CESA channels and process requests if possible */
+ pReq = pCesaReqProcess;
+ if(pReq->state == MV_CESA_PENDING)
+ {
+ /* Start Process new request */
+ mvCesaReqProcessStart(pReq);
+ }
+#endif
+ /* If request queue became FULL - return MV_NO_MORE */
+ if(cesaReqResources == 0)
+ return MV_NO_MORE;
+
+ return MV_OK;
+
+}
+
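A hedged sketch (not part of the patch) of submitting a request through mvCesaAction() above. Only fields visible in this file are shown; buffer (MV_CESA_MBUF) preparation and the remaining offsets/lengths follow the MV_CESA_COMMAND definition in the driver header, which is outside this excerpt. The command must stay valid until mvCesaReadyGet() reports it, since the driver keeps a pointer to it.

    /* Hypothetical submission path -- illustration only. */
    static MV_CESA_COMMAND cmd;      /* must outlive the request */
    MV_STATUS status;

    memset(&cmd, 0, sizeof(cmd));
    cmd.sessionId = sid;             /* from mvCesaSessionOpen()              */
    cmd.pSrc      = pSrcMbuf;        /* placeholder source MV_CESA_MBUF chain */
    cmd.pDst      = pDstMbuf;        /* placeholder destination chain         */
    cmd.pReqPrv   = myContext;       /* returned untouched in MV_CESA_RESULT  */

    status = mvCesaAction(&cmd);
    if (status == MV_NO_MORE) {
        /* accepted, but the queue is now full -- hold further submissions */
    } else if (status != MV_OK) {
        /* MV_NO_RESOURCE, MV_BAD_PARAM, MV_NOT_ALLOWED or MV_OUT_OF_CPU_MEM */
    }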
+/*******************************************************************************
+* mvCesaReadyGet - Get crypto request that processing is finished
+*
+* DESCRIPTION:
+*       This function completes request processing and returns a ready request
+*       to the caller. To avoid missing interrupts, the caller must keep calling
+*       this function as long as MV_OK or MV_TERMINATE is returned.
+*
+* INPUT:
+*       MV_U32 chanMap          - map of CESA channels that finished their job,
+*                                 according to the CESA Cause register.
+*       MV_CESA_RESULT* pResult - pointer to a structure containing information
+*                                 about the ready request. It includes a pointer
+*                                 to the user private structure "pReqPrv", the
+*                                 session identifier for this request "sessionId"
+*                                 and a return code. The return code is set to
+*                                 MV_FAIL if the digest calculated in the decode
+*                                 direction differs from the digest value in the
+*                                 packet.
+*
+* RETURN:
+* MV_OK - Success, ready request is returned.
+*       MV_NOT_READY  - Next request is not ready yet. A new interrupt will
+*                       be generated for further request processing.
+*       MV_EMPTY      - There are no more requests for processing.
+*       MV_BUSY       - Fragmented request is not ready yet.
+*       MV_TERMINATE  - Call this function once more to complete processing
+*                       of a fragmented request.
+*
+*******************************************************************************/
+MV_STATUS mvCesaReadyGet(MV_CESA_RESULT* pResult)
+{
+ MV_STATUS status, readyStatus = MV_NOT_READY;
+ MV_U32 statusReg;
+ MV_CESA_REQ* pReq;
+ MV_CESA_SA* pSA;
+
+#if (MV_CESA_VERSION >= 3)
+ if(isFirstReq == MV_TRUE) {
+ if(chainIndex == 0)
+ chainReqNum = 0;
+
+ isFirstReq = MV_FALSE;
+
+ if(pNextActiveChain->state == MV_CESA_PENDING) {
+ /* Start request Process */
+ mvCesaReqProcessStart(pNextActiveChain);
+ pEndCurrChain = pNextActiveChain;
+ if(chainIndex > 0)
+ chainIndex--;
+ /* Update pNextActiveChain to next chain head */
+ while(pNextActiveChain->state == MV_CESA_CHAIN)
+ pNextActiveChain = MV_CESA_REQ_NEXT_PTR(pNextActiveChain);
+ }
+ }
+
+ /* Check if there are more processed requests - can we remove pEndCurrChain ??? */
+ if(pCesaReqProcess == pEndCurrChain) {
+ isFirstReq = MV_TRUE;
+ pEndCurrChain = pNextActiveChain;
+#else
+ if(pCesaReqProcess->state != MV_CESA_PROCESS) {
+#endif
+ return MV_EMPTY;
+ }
+
+#ifdef CESA_DEBUG
+ statusReg = MV_REG_READ(MV_CESA_STATUS_REG);
+ if( statusReg & MV_CESA_STATUS_ACTIVE_MASK )
+ {
+ mvOsPrintf("mvCesaReadyGet: Not Ready, Status = 0x%x\n", statusReg);
+ cesaStats.notReadyCount++;
+ return MV_NOT_READY;
+ }
+#endif /* CESA_DEBUG */
+
+ cesaStats.readyCount++;
+
+ pReq = pCesaReqProcess;
+ pSA = &pCesaSAD[pReq->pCmd->sessionId];
+
+ pResult->retCode = MV_OK;
+ if(pReq->fragMode != MV_CESA_FRAG_NONE)
+ {
+ MV_U8* pNewDigest;
+ int frag;
+#if (MV_CESA_VERSION >= 3)
+ pReq->frags.nextFrag = 1;
+ while(pReq->frags.nextFrag <= pReq->frags.numFrag) {
+#endif
+ frag = (pReq->frags.nextFrag - 1);
+
+ /* Restore DMA descriptor list */
+ pReq->dma[frag].pDmaLast->phyNextDescPtr =
+ MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf, &pReq->dma[frag].pDmaLast[1]));
+ pReq->dma[frag].pDmaLast = NULL;
+
+ /* Special processing for finished fragmented request */
+ if(pReq->frags.nextFrag >= pReq->frags.numFrag)
+ {
+ mvCesaMbufCacheUnmap(pReq->pCmd->pDst, 0, pReq->pCmd->pDst->mbufSize);
+
+ /* Fragmented packet is ready */
+ if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+ (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
+ {
+ int macDataSize = pReq->pCmd->macLength - pReq->frags.macSize;
+
+ if(macDataSize != 0)
+ {
+ /* Calculate all other blocks by SW */
+ mvCesaFragAuthComplete(pReq, pSA, macDataSize);
+ }
+
+ /* Copy new digest from SRAM to the Destination buffer */
+ pNewDigest = cesaSramVirtPtr->buf + pReq->frags.newDigestOffset;
+ status = mvCesaCopyToMbuf(pNewDigest, pReq->pCmd->pDst,
+ pReq->pCmd->digestOffset, pSA->digestSize);
+
+ /* For decryption: Compare new digest value with original one */
+ if((pSA->config & MV_CESA_DIRECTION_MASK) ==
+ (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT))
+ {
+ if( memcmp(pNewDigest, pReq->frags.orgDigest, pSA->digestSize) != 0)
+ {
+/*
+ mvOsPrintf("Digest error: chan=%d, newDigest=%p, orgDigest=%p, status = 0x%x\n",
+ chan, pNewDigest, pReq->frags.orgDigest, MV_REG_READ(MV_CESA_STATUS_REG));
+*/
+ /* Signature verification failed */
+ pResult->retCode = MV_FAIL;
+ }
+ }
+ }
+ readyStatus = MV_OK;
+ }
+#if (MV_CESA_VERSION >= 3)
+ pReq->frags.nextFrag++;
+ }
+#endif
+ }
+ else
+ {
+ mvCesaMbufCacheUnmap(pReq->pCmd->pDst, 0, pReq->pCmd->pDst->mbufSize);
+
+ /* Restore DMA descriptor list */
+ pReq->dma[0].pDmaLast->phyNextDescPtr =
+ MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf, &pReq->dma[0].pDmaLast[1]));
+ pReq->dma[0].pDmaLast = NULL;
+ if( ((pSA->config & MV_CESA_OPERATION_MASK) !=
+ (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) ) &&
+ ((pSA->config & MV_CESA_DIRECTION_MASK) ==
+ (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT)) )
+ {
+ /* For AUTH on decode : Check Digest result in Status register */
+ statusReg = MV_REG_READ(MV_CESA_STATUS_REG);
+ if(statusReg & MV_CESA_STATUS_DIGEST_ERR_MASK)
+ {
+/*
+ mvOsPrintf("Digest error: chan=%d, status = 0x%x\n",
+ chan, statusReg);
+*/
+ /* Signature verification failed */
+ pResult->retCode = MV_FAIL;
+ }
+ }
+ readyStatus = MV_OK;
+ }
+
+ if(readyStatus == MV_OK)
+ {
+ /* If Request is ready - Prepare pResult structure */
+ pResult->pReqPrv = pReq->pCmd->pReqPrv;
+ pResult->sessionId = pReq->pCmd->sessionId;
+
+ pReq->state = MV_CESA_IDLE;
+ pCesaReqProcess = MV_CESA_REQ_NEXT_PTR(pReq);
+ cesaReqResources++;
+
+ if(pSA->ctrMode)
+ {
+ /* For AES CTR mode - complete processing and free allocated resources */
+ mvCesaCtrModeComplete(pReq->pOrgCmd, pReq->pCmd);
+ mvCesaCtrModeFinish(pReq->pCmd);
+ pReq->pOrgCmd = NULL;
+ }
+ }
+
+#if (MV_CESA_VERSION < 3)
+ if(pCesaReqProcess->state == MV_CESA_PROCESS)
+ {
+ /* Start processing the request */
+ mvCesaReqProcessStart(pCesaReqProcess);
+ if(readyStatus == MV_NOT_READY)
+ readyStatus = MV_BUSY;
+ }
+ else if(pCesaReqProcess != pCesaReqEmpty)
+ {
+ /* Start process new request from the queue */
+ mvCesaReqProcessStart(pCesaReqProcess);
+ }
+#endif
+ return readyStatus;
+}
+
+/***************** Functions to work with CESA_MBUF structure ******************/
+
+/*******************************************************************************
+* mvCesaMbufOffset - Locate offset in the Mbuf structure
+*
+* DESCRIPTION:
+* This function locates an offset inside the Multi-Buffer structure.
+* It returns the fragment number and the place within that fragment
+* where the offset is located.
+*
+*
+* INPUT:
+* MV_CESA_MBUF* pMbuf - Pointer to multi-buffer structure
+* int offset - Offset from the beginning of the data presented by
+* the Mbuf structure.
+*
+* OUTPUT:
+* int* pBufOffset - Offset from the beginning of the fragment where
+* the offset is located.
+*
+* RETURN:
+* int - Number of the fragment where the offset is located.
+*
+*******************************************************************************/
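+/* Worked example (illustrative): for an Mbuf with two fragments of 100 and
+ * 200 bytes, mvCesaMbufOffset(pMbuf, 150, &bufOffset) returns fragment 1 and
+ * sets bufOffset to 50, i.e. global offset 150 is byte 50 of the second
+ * fragment. An offset beyond the Mbuf data returns MV_INVALID.
+ */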
+int mvCesaMbufOffset(MV_CESA_MBUF* pMbuf, int offset, int* pBufOffset)
+{
+ int frag = 0;
+
+ while(offset > 0)
+ {
+ if(frag >= pMbuf->numFrags)
+ {
+ mvOsPrintf("mvCesaMbufOffset: Error: frag (%d) > numFrags (%d)\n",
+ frag, pMbuf->numFrags);
+ return MV_INVALID;
+ }
+ if(offset < pMbuf->pFrags[frag].bufSize)
+ {
+ break;
+ }
+ offset -= pMbuf->pFrags[frag].bufSize;
+ frag++;
+ }
+ if(pBufOffset != NULL)
+ *pBufOffset = offset;
+
+ return frag;
+}
+
+/*******************************************************************************
+* mvCesaCopyFromMbuf - Copy data from the Mbuf structure to continuous buffer
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+* MV_U8* pDstBuf - Pointer to continuous buffer, where data is
+* copied to.
+* MV_CESA_MBUF* pSrcMbuf - Pointer to multi-buffer structure where data is
+* copied from.
+* int offset - Offset in the Mbuf structure where the first
+* byte of data to be copied is located.
+* int size - Size of data to be copied
+*
+* RETURN:
+* MV_OK - Success, all data is copied successfully.
+* MV_OUT_OF_RANGE - Failed, offset is out of Multi-buffer data range.
+* No data is copied.
+* MV_EMPTY - Multi-buffer structure does not contain enough data.
+* Data from the offset to the end of the Mbuf is copied.
+*
+*******************************************************************************/
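+/* Usage sketch (illustrative): gather "len" bytes starting at Mbuf offset
+ * "off" into a flat buffer. "tmp", "off", "len" and "handle_short_copy" are
+ * hypothetical caller-side names.
+ *
+ *	MV_U8 tmp[64];
+ *
+ *	if (mvCesaCopyFromMbuf(tmp, pSrcMbuf, off, len) != MV_OK)
+ *		handle_short_copy();
+ */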
+MV_STATUS mvCesaCopyFromMbuf(MV_U8* pDstBuf, MV_CESA_MBUF* pSrcMbuf,
+ int offset, int size)
+{
+ int frag, fragOffset, bufSize;
+ MV_U8* pBuf;
+
+ if(size == 0)
+ return MV_OK;
+
+ frag = mvCesaMbufOffset(pSrcMbuf, offset, &fragOffset);
+ if(frag == MV_INVALID)
+ {
+ mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+ return MV_OUT_OF_RANGE;
+ }
+
+ bufSize = pSrcMbuf->pFrags[frag].bufSize - fragOffset;
+ pBuf = pSrcMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+ while(MV_TRUE)
+ {
+ if(size <= bufSize)
+ {
+ memcpy(pDstBuf, pBuf, size);
+ return MV_OK;
+ }
+ memcpy(pDstBuf, pBuf, bufSize);
+ size -= bufSize;
+ frag++;
+ pDstBuf += bufSize;
+ if(frag >= pSrcMbuf->numFrags)
+ break;
+
+ bufSize = pSrcMbuf->pFrags[frag].bufSize;
+ pBuf = pSrcMbuf->pFrags[frag].bufVirtPtr;
+ }
+ mvOsPrintf("mvCesaCopyFromMbuf: Mbuf is EMPTY - %d bytes isn't copied\n",
+ size);
+ return MV_EMPTY;
+}
+
+/*******************************************************************************
+* mvCesaCopyToMbuf - Copy data from continuous buffer to the Mbuf structure
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+* MV_U8* pSrcBuf - Pointer to continuous buffer, where data is
+* copied from.
+* MV_CESA_MBUF* pDstMbuf - Pointer to multi-buffer structure where data is
+* copied to.
+* int offset - Offset in the Mbuf structure where the first
+* byte of data should be copied to.
+* int size - Size of data to be copied
+*
+* RETURN:
+* MV_OK - Success, all data is copied successfully.
+* MV_OUT_OF_RANGE - Failed, offset is out of Multi-buffer data range.
+* No data is copied.
+* MV_FULL - Multi-buffer structure does not have enough space for
+* all data. Data from the offset to the end of the Mbuf
+* is copied.
+*
+*******************************************************************************/
+MV_STATUS mvCesaCopyToMbuf(MV_U8* pSrcBuf, MV_CESA_MBUF* pDstMbuf,
+ int offset, int size)
+{
+ int frag, fragOffset, bufSize;
+ MV_U8* pBuf;
+
+ if(size == 0)
+ return MV_OK;
+
+ frag = mvCesaMbufOffset(pDstMbuf, offset, &fragOffset);
+ if(frag == MV_INVALID)
+ {
+ mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+ return MV_OUT_OF_RANGE;
+ }
+
+ bufSize = pDstMbuf->pFrags[frag].bufSize - fragOffset;
+ pBuf = pDstMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+ while(MV_TRUE)
+ {
+ if(size <= bufSize)
+ {
+ memcpy(pBuf, pSrcBuf, size);
+ return MV_OK;
+ }
+ memcpy(pBuf, pSrcBuf, bufSize);
+ size -= bufSize;
+ frag++;
+ pSrcBuf += bufSize;
+ if(frag >= pDstMbuf->numFrags)
+ break;
+
+ bufSize = pDstMbuf->pFrags[frag].bufSize;
+ pBuf = pDstMbuf->pFrags[frag].bufVirtPtr;
+ }
+ mvOsPrintf("mvCesaCopyToMbuf: Mbuf is FULL - %d bytes isn't copied\n",
+ size);
+ return MV_FULL;
+}
+
+/*******************************************************************************
+* mvCesaMbufCopy - Copy data from one Mbuf structure to the other Mbuf structure
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*
+* MV_CESA_MBUF* pDstMbuf - Pointer to multi-buffer structure where data is
+* copied to.
+* int dstMbufOffset - Offset in the dstMbuf structure where the first
+* byte of data should be copied to.
+* MV_CESA_MBUF* pSrcMbuf - Pointer to multi-buffer structure where data is
+* copied from.
+* int srcMbufOffset - Offset in the srcMbuf structure where the first
+* byte of data should be copied from.
+* int size - Size of data to be copied
+*
+* RETURN:
+* MV_OK - Success, all data is copied successfully.
+* MV_OUT_OF_RANGE - Failed, srcMbufOffset or dstMbufOffset is out of
+* the srcMbuf or dstMbuf structure, respectively.
+* No data is copied.
+* MV_BAD_SIZE - srcMbuf or dstMbuf structure is too small to copy
+* all data. Partial data is copied
+*
+*******************************************************************************/
+MV_STATUS mvCesaMbufCopy(MV_CESA_MBUF* pMbufDst, int dstMbufOffset,
+ MV_CESA_MBUF* pMbufSrc, int srcMbufOffset, int size)
+{
+ int srcFrag, dstFrag, srcSize, dstSize, srcOffset, dstOffset;
+ int copySize;
+ MV_U8 *pSrc, *pDst;
+
+ if(size == 0)
+ return MV_OK;
+
+ srcFrag = mvCesaMbufOffset(pMbufSrc, srcMbufOffset, &srcOffset);
+ if(srcFrag == MV_INVALID)
+ {
+ mvOsPrintf("CESA srcMbuf Error: offset (%d) out of range\n", srcMbufOffset);
+ return MV_OUT_OF_RANGE;
+ }
+ pSrc = pMbufSrc->pFrags[srcFrag].bufVirtPtr + srcOffset;
+ srcSize = pMbufSrc->pFrags[srcFrag].bufSize - srcOffset;
+
+ dstFrag = mvCesaMbufOffset(pMbufDst, dstMbufOffset, &dstOffset);
+ if(dstFrag == MV_INVALID)
+ {
+ mvOsPrintf("CESA dstMbuf Error: offset (%d) out of range\n", dstMbufOffset);
+ return MV_OUT_OF_RANGE;
+ }
+ pDst = pMbufDst->pFrags[dstFrag].bufVirtPtr + dstOffset;
+ dstSize = pMbufDst->pFrags[dstFrag].bufSize - dstOffset;
+
+ while(size > 0)
+ {
+ copySize = MV_MIN(srcSize, dstSize);
+ if(size <= copySize)
+ {
+ memcpy(pDst, pSrc, size);
+ return MV_OK;
+ }
+ memcpy(pDst, pSrc, copySize);
+ size -= copySize;
+ srcSize -= copySize;
+ dstSize -= copySize;
+
+ if(srcSize == 0)
+ {
+ srcFrag++;
+ if(srcFrag >= pMbufSrc->numFrags)
+ break;
+
+ pSrc = pMbufSrc->pFrags[srcFrag].bufVirtPtr;
+ srcSize = pMbufSrc->pFrags[srcFrag].bufSize;
+ }
+
+ if(dstSize == 0)
+ {
+ dstFrag++;
+ if(dstFrag >= pMbufDst->numFrags)
+ break;
+
+ pDst = pMbufDst->pFrags[dstFrag].bufVirtPtr;
+ dstSize = pMbufDst->pFrags[dstFrag].bufSize;
+ }
+ }
+ mvOsPrintf("mvCesaMbufCopy: BAD size - %d bytes isn't copied\n",
+ size);
+
+ return MV_BAD_SIZE;
+}
+
+static MV_STATUS mvCesaMbufCacheUnmap(MV_CESA_MBUF* pMbuf, int offset, int size)
+{
+ int frag, fragOffset, bufSize;
+ MV_U8* pBuf;
+
+ if(size == 0)
+ return MV_OK;
+
+ frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
+ if(frag == MV_INVALID)
+ {
+ mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+ return MV_OUT_OF_RANGE;
+ }
+
+ bufSize = pMbuf->pFrags[frag].bufSize - fragOffset;
+ pBuf = pMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+ while(MV_TRUE)
+ {
+ if(size <= bufSize)
+ {
+ mvOsCacheUnmap(NULL, mvOsIoVirtToPhy(NULL, pBuf), size);
+ return MV_OK;
+ }
+
+ mvOsCacheUnmap(NULL, mvOsIoVirtToPhy(NULL, pBuf), bufSize);
+ size -= bufSize;
+ frag++;
+ if(frag >= pMbuf->numFrags)
+ break;
+
+ bufSize = pMbuf->pFrags[frag].bufSize;
+ pBuf = pMbuf->pFrags[frag].bufVirtPtr;
+ }
+ mvOsPrintf("%s: Mbuf is FULL - %d bytes isn't Unmapped\n",
+ __FUNCTION__, size);
+ return MV_FULL;
+}
+
+
+/*************************************** Local Functions ******************************/
+
+/*******************************************************************************
+* mvCesaFragReqProcess - Process fragmented request
+*
+* DESCRIPTION:
+* This function processes a fragment of a fragmented request (First, Middle or Last).
+*
+*
+* INPUT:
+* MV_CESA_REQ* pReq - Pointer to the request in the request queue.
+*
+* RETURN:
+* MV_OK - The fragment is successfully passed to HW for processing.
+* MV_TERMINATE - Means that the HW has finished its work on this packet and no more
+* interrupts will be generated for this request.
+* Function mvCesaReadyGet() must be called to complete request
+* processing and get request result.
+*
+*******************************************************************************/
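+/* Processing outline (summary of the code below, for orientation):
+ *  - FIRST fragment:  fixOffset/cryptoOffset/macOffset are taken from pCmd,
+ *    the crypto IV and the original digest get special handling, the SA is
+ *    copied to SRAM if needed, and fragMode moves to MIDDLE.
+ *  - MIDDLE fragment: offsets are zero and a full SRAM buffer is processed.
+ *  - LAST fragment:   the remaining crypto/MAC sizes are used, the digest
+ *    location is resolved, and some chip revisions fall back to SW MAC
+ *    completion by returning MV_TERMINATE.
+ */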
+static MV_STATUS mvCesaFragReqProcess(MV_CESA_REQ* pReq, MV_U8 frag)
+{
+ int i, copySize, cryptoDataSize, macDataSize, sid;
+ int cryptoIvOffset, digestOffset;
+ MV_U32 config;
+ MV_CESA_COMMAND* pCmd = pReq->pCmd;
+ MV_CESA_SA* pSA;
+ MV_CESA_MBUF* pMbuf;
+ MV_DMA_DESC* pDmaDesc = pReq->dma[frag].pDmaFirst;
+ MV_U8* pSramBuf = cesaSramVirtPtr->buf;
+ int macTotalLen = 0;
+ int fixOffset, cryptoOffset, macOffset;
+
+ cesaStats.fragCount++;
+
+ sid = pReq->pCmd->sessionId;
+
+ pSA = &pCesaSAD[sid];
+
+ cryptoIvOffset = digestOffset = 0;
+ i = macDataSize = 0;
+ cryptoDataSize = 0;
+
+ /* First fragment processing */
+ if(pReq->fragMode == MV_CESA_FRAG_FIRST)
+ {
+ /* pReq->frags monitors processing of fragmented request between fragments */
+ pReq->frags.bufOffset = 0;
+ pReq->frags.cryptoSize = 0;
+ pReq->frags.macSize = 0;
+
+ config = pSA->config | (MV_CESA_FRAG_FIRST << MV_CESA_FRAG_MODE_OFFSET);
+
+ /* fixOffset can be non-zero only for the FIRST fragment */
+ fixOffset = pReq->fixOffset;
+ /* For FIRST fragment crypto and mac offsets are taken from pCmd */
+ cryptoOffset = pCmd->cryptoOffset;
+ macOffset = pCmd->macOffset;
+
+ copySize = sizeof(cesaSramVirtPtr->buf) - pReq->fixOffset;
+
+ /* Find fragment size: Must meet all requirements for CRYPTO and MAC
+ * cryptoDataSize - size of data will be encrypted/decrypted in this fragment
+ * macDataSize - size of data will be signed/verified in this fragment
+ * copySize - size of data will be copied from srcMbuf to SRAM and
+ * back to dstMbuf for this fragment
+ */
+ mvCesaFragSizeFind(pSA, pReq, cryptoOffset, macOffset,
+ &copySize, &cryptoDataSize, &macDataSize);
+
+ if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+ (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET))
+ {
+ /* CryptoIV special processing */
+ if( (pSA->config & MV_CESA_CRYPTO_MODE_MASK) ==
+ (MV_CESA_CRYPTO_CBC << MV_CESA_CRYPTO_MODE_BIT) )
+ {
+ /* In CBC mode for encode direction when IV from user */
+ if( (pCmd->ivFromUser) &&
+ ((pSA->config & MV_CESA_DIRECTION_MASK) ==
+ (MV_CESA_DIR_ENCODE << MV_CESA_DIRECTION_BIT)) )
+ {
+
+ /* For Crypto Encode in CBC mode HW always takes IV from SRAM IVPointer,
+ * (not from IVBufPointer). So when ivFromUser==1, we should copy IV from user place
+ * in the buffer to SRAM IVPointer
+ */
+ i += mvCesaDmaCopyPrepare(pCmd->pSrc, cesaSramVirtPtr->cryptoIV, &pDmaDesc[i],
+ MV_FALSE, pCmd->ivOffset, pSA->cryptoIvSize, pCmd->skipFlush);
+ }
+
+ /* Special processing when IV is not located in the first fragment */
+ if(pCmd->ivOffset > (copySize - pSA->cryptoIvSize))
+ {
+ /* Prepare dummy place for cryptoIV in SRAM */
+ cryptoIvOffset = cesaSramVirtPtr->tempCryptoIV - mvCesaSramAddrGet();
+
+ /* For Decryption: Copy IV value from pCmd->ivOffset to Special SRAM place */
+ if((pSA->config & MV_CESA_DIRECTION_MASK) ==
+ (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT))
+ {
+ i += mvCesaDmaCopyPrepare(pCmd->pSrc, cesaSramVirtPtr->tempCryptoIV, &pDmaDesc[i],
+ MV_FALSE, pCmd->ivOffset, pSA->cryptoIvSize, pCmd->skipFlush);
+ }
+ else
+ {
+ /* For Encryption when IV is NOT from User: */
+ /* Copy IV from SRAM to buffer (pCmd->ivOffset) */
+ if(pCmd->ivFromUser == 0)
+ {
+ /* copy IV value from cryptoIV to Buffer (pCmd->ivOffset) */
+ i += mvCesaDmaCopyPrepare(pCmd->pSrc, cesaSramVirtPtr->cryptoIV, &pDmaDesc[i],
+ MV_TRUE, pCmd->ivOffset, pSA->cryptoIvSize, pCmd->skipFlush);
+ }
+ }
+ }
+ else
+ {
+ cryptoIvOffset = pCmd->ivOffset;
+ }
+ }
+ }
+
+ if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+ (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
+ {
+ /* MAC digest special processing on Decode direction */
+ if((pSA->config & MV_CESA_DIRECTION_MASK) ==
+ (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT))
+ {
+ /* Save digest from pCmd->digestOffset */
+ mvCesaCopyFromMbuf(pReq->frags.orgDigest,
+ pCmd->pSrc, pCmd->digestOffset, pSA->digestSize);
+
+ /* If pCmd->digestOffset is not located in the first fragment */
+ if(pCmd->digestOffset > (copySize - pSA->digestSize))
+ {
+ MV_U8 digestZero[MV_CESA_MAX_DIGEST_SIZE];
+
+ /* Set zeros to pCmd->digestOffset (DRAM) */
+ memset(digestZero, 0, MV_CESA_MAX_DIGEST_SIZE);
+ mvCesaCopyToMbuf(digestZero, pCmd->pSrc, pCmd->digestOffset, pSA->digestSize);
+
+ /* Prepare dummy place for digest in SRAM */
+ digestOffset = cesaSramVirtPtr->tempDigest - mvCesaSramAddrGet();
+ }
+ else
+ {
+ digestOffset = pCmd->digestOffset;
+ }
+ }
+ }
+ /* Update SA in SRAM */
+ if(cesaLastSid != sid)
+ {
+ mvCesaSramSaUpdate(sid, &pDmaDesc[i]);
+ i++;
+ }
+
+ pReq->fragMode = MV_CESA_FRAG_MIDDLE;
+ }
+ else
+ {
+ /* Continue fragment */
+ fixOffset = 0;
+ cryptoOffset = 0;
+ macOffset = 0;
+ if( (pCmd->pSrc->mbufSize - pReq->frags.bufOffset) <= sizeof(cesaSramVirtPtr->buf))
+ {
+ /* Last fragment */
+ config = pSA->config | (MV_CESA_FRAG_LAST << MV_CESA_FRAG_MODE_OFFSET);
+ pReq->fragMode = MV_CESA_FRAG_LAST;
+ copySize = pCmd->pSrc->mbufSize - pReq->frags.bufOffset;
+
+ if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+ (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
+ {
+ macDataSize = pCmd->macLength - pReq->frags.macSize;
+
+ /* If pCmd->digestOffset is not located on last fragment */
+ if(pCmd->digestOffset < pReq->frags.bufOffset)
+ {
+ /* Prepare dummy place for digest in SRAM */
+ digestOffset = cesaSramVirtPtr->tempDigest - mvCesaSramAddrGet();
+ }
+ else
+ {
+ digestOffset = pCmd->digestOffset - pReq->frags.bufOffset;
+ }
+ pReq->frags.newDigestOffset = digestOffset;
+ macTotalLen = pCmd->macLength;
+
+ /* HW can't calculate the Digest correctly for fragmented packets
+ * in the following cases:
+ * - MV88F5182 ||
+ * - MV88F5181L when total macLength is more than 16 Kbytes ||
+ * - total macLength is more than 64 Kbytes
+ */
+ if( (mvCtrlModelGet() == MV_5182_DEV_ID) ||
+ ( (mvCtrlModelGet() == MV_5181_DEV_ID) &&
+ (mvCtrlRevGet() >= MV_5181L_A0_REV) &&
+ (pCmd->macLength >= (1 << 14)) ) )
+ {
+ return MV_TERMINATE;
+ }
+ }
+ if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+ (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET) )
+ {
+ cryptoDataSize = pCmd->cryptoLength - pReq->frags.cryptoSize;
+ }
+
+ /* cryptoIvOffset - don't care */
+ }
+ else
+ {
+ /* WA for MV88F5182 SHA1 and MD5 fragmentation mode */
+ if( (mvCtrlModelGet() == MV_5182_DEV_ID) &&
+ (((pSA->config & MV_CESA_MAC_MODE_MASK) ==
+ (MV_CESA_MAC_MD5 << MV_CESA_MAC_MODE_OFFSET)) ||
+ ((pSA->config & MV_CESA_MAC_MODE_MASK) ==
+ (MV_CESA_MAC_SHA1 << MV_CESA_MAC_MODE_OFFSET))) )
+ {
+ pReq->frags.newDigestOffset = cesaSramVirtPtr->tempDigest - mvCesaSramAddrGet();
+ pReq->fragMode = MV_CESA_FRAG_LAST;
+
+ return MV_TERMINATE;
+ }
+ /* Middle fragment */
+ config = pSA->config | (MV_CESA_FRAG_MIDDLE << MV_CESA_FRAG_MODE_OFFSET);
+ copySize = sizeof(cesaSramVirtPtr->buf);
+ /* digestOffset and cryptoIvOffset - don't care */
+
+ /* Find fragment size */
+ mvCesaFragSizeFind(pSA, pReq, cryptoOffset, macOffset,
+ &copySize, &cryptoDataSize, &macDataSize);
+ }
+ }
+ /********* Prepare DMA descriptors to copy from pSrc to SRAM *********/
+ pMbuf = pCmd->pSrc;
+ i += mvCesaDmaCopyPrepare(pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
+ MV_FALSE, pReq->frags.bufOffset, copySize, pCmd->skipFlush);
+
+ /* Prepare CESA descriptor to copy from DRAM to SRAM by DMA */
+ mvCesaSramDescrBuild(config, frag,
+ cryptoOffset + fixOffset, cryptoIvOffset + fixOffset,
+ cryptoDataSize, macOffset + fixOffset,
+ digestOffset + fixOffset, macDataSize, macTotalLen,
+ pReq, &pDmaDesc[i]);
+ i++;
+
+ /* Add special descriptor Ownership for CPU */
+ pDmaDesc[i].byteCnt = 0;
+ pDmaDesc[i].phySrcAdd = 0;
+ pDmaDesc[i].phyDestAdd = 0;
+ i++;
+
+ /********* Prepare DMA descriptors to copy from SRAM to pDst *********/
+ pMbuf = pCmd->pDst;
+ i += mvCesaDmaCopyPrepare(pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
+ MV_TRUE, pReq->frags.bufOffset, copySize, pCmd->skipFlush);
+
+ /* Next field of Last DMA descriptor must be NULL */
+ pDmaDesc[i-1].phyNextDescPtr = 0;
+ pReq->dma[frag].pDmaLast = &pDmaDesc[i-1];
+ mvOsCacheFlush(NULL, pReq->dma[frag].pDmaFirst,
+ i*sizeof(MV_DMA_DESC));
+
+ /*mvCesaDebugDescriptor(&cesaSramVirtPtr->desc[frag]);*/
+
+ pReq->frags.bufOffset += copySize;
+ pReq->frags.cryptoSize += cryptoDataSize;
+ pReq->frags.macSize += macDataSize;
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvCesaReqProcess - Process regular (Non-fragmented) request
+*
+* DESCRIPTION:
+* This function processes the whole (not fragmented) request
+*
+* INPUT:
+* MV_CESA_REQ* pReq - Pointer to the request in the request queue.
+*
+* RETURN:
+* MV_OK - The request is successfully passed to HW for processing.
+* Other - Failure. The request will not be processed
+*
+*******************************************************************************/
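+/* The DMA descriptor chain built below runs in the following order (derived
+ * from the code, shown here for orientation):
+ *
+ *	[optional IV copy to SRAM] -> [optional SA update in SRAM] ->
+ *	[copy pSrc Mbuf to SRAM buffer] -> [copy CESA descriptor to SRAM] ->
+ *	[ownership descriptor for CPU] -> [copy SRAM buffer back to pDst Mbuf]
+ */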
+static MV_STATUS mvCesaReqProcess(MV_CESA_REQ* pReq)
+{
+ MV_CESA_MBUF *pMbuf;
+ MV_DMA_DESC *pDmaDesc;
+ MV_U8 *pSramBuf;
+ int sid, i, fixOffset;
+ MV_CESA_SA *pSA;
+ MV_CESA_COMMAND *pCmd = pReq->pCmd;
+
+ cesaStats.procCount++;
+
+ sid = pCmd->sessionId;
+ pSA = &pCesaSAD[sid];
+ pDmaDesc = pReq->dma[0].pDmaFirst;
+ pSramBuf = cesaSramVirtPtr->buf;
+ fixOffset = pReq->fixOffset;
+
+/*
+ mvOsPrintf("mvCesaReqProcess: sid=%d, pSA=%p, pDmaDesc=%p, pSramBuf=%p\n",
+ sid, pSA, pDmaDesc, pSramBuf);
+*/
+ i = 0;
+
+ /* Crypto IV Special processing in CBC mode for Encryption direction */
+ if( ((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET)) &&
+ ((pSA->config & MV_CESA_CRYPTO_MODE_MASK) == (MV_CESA_CRYPTO_CBC << MV_CESA_CRYPTO_MODE_BIT)) &&
+ ((pSA->config & MV_CESA_DIRECTION_MASK) == (MV_CESA_DIR_ENCODE << MV_CESA_DIRECTION_BIT)) &&
+ (pCmd->ivFromUser) )
+ {
+ /* For Crypto Encode in CBC mode HW always takes IV from SRAM IVPointer,
+ * (not from IVBufPointer). So when ivFromUser==1, we should copy IV from user place
+ * in the buffer to SRAM IVPointer
+ */
+ i += mvCesaDmaCopyPrepare(pCmd->pSrc, cesaSramVirtPtr->cryptoIV, &pDmaDesc[i],
+ MV_FALSE, pCmd->ivOffset, pSA->cryptoIvSize, pCmd->skipFlush);
+ }
+
+ /* Update SA in SRAM */
+ if(cesaLastSid != sid)
+ {
+ mvCesaSramSaUpdate(sid, &pDmaDesc[i]);
+ i++;
+ }
+
+ /********* Prepare DMA descriptors to copy from pSrc to SRAM *********/
+ pMbuf = pCmd->pSrc;
+ i += mvCesaDmaCopyPrepare(pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
+ MV_FALSE, 0, pMbuf->mbufSize, pCmd->skipFlush);
+
+ /* Prepare Security Accelerator descriptor to SRAM words 0 - 7 */
+ mvCesaSramDescrBuild(pSA->config, 0, pCmd->cryptoOffset + fixOffset,
+ pCmd->ivOffset + fixOffset, pCmd->cryptoLength,
+ pCmd->macOffset + fixOffset, pCmd->digestOffset + fixOffset,
+ pCmd->macLength, pCmd->macLength, pReq, &pDmaDesc[i]);
+ i++;
+
+ /* Add special descriptor Ownership for CPU */
+ pDmaDesc[i].byteCnt = 0;
+ pDmaDesc[i].phySrcAdd = 0;
+ pDmaDesc[i].phyDestAdd = 0;
+ i++;
+
+ /********* Prepare DMA descriptors to copy from SRAM to pDst *********/
+ pMbuf = pCmd->pDst;
+ i += mvCesaDmaCopyPrepare(pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
+ MV_TRUE, 0, pMbuf->mbufSize, pCmd->skipFlush);
+
+ /* Next field of Last DMA descriptor must be NULL */
+ pDmaDesc[i-1].phyNextDescPtr = 0;
+ pReq->dma[0].pDmaLast = &pDmaDesc[i-1];
+ mvOsCacheFlush(NULL, pReq->dma[0].pDmaFirst, i*sizeof(MV_DMA_DESC));
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvCesaSramDescrBuild - Set CESA descriptor in SRAM
+*
+* DESCRIPTION:
+* This function builds CESA descriptor in SRAM from all Command parameters
+*
+*
+* INPUT:
+* MV_U32 config - 32 bits of WORD_0 in CESA descriptor structure
+* int cryptoOffset - Offset from the beginning of SRAM buffer where
+* data for encryption/decryption starts.
+* int ivOffset - Offset of crypto IV from the SRAM base. Valid only
+* for first fragment.
+* int cryptoLength - Size (in bytes) of data for the encryption/decryption
+* operation on this fragment.
+* int macOffset - Offset from the beginning of SRAM buffer where
+* data for Authentication starts.
+* int digestOffset - Offset from the beginning of SRAM buffer where
+* digest is located. Valid for first and last fragments.
+* int macLength - Size (in bytes) of data for Authentication
+* operation on this fragment.
+* int macTotalLen - Total size (in bytes) of data for Authentication
+* operation on the whole request (packet). Valid for
+* last fragment only.
+*
+* RETURN: None
+*
+*******************************************************************************/
+static void mvCesaSramDescrBuild(MV_U32 config, int frag,
+ int cryptoOffset, int ivOffset, int cryptoLength,
+ int macOffset, int digestOffset, int macLength,
+ int macTotalLen, MV_CESA_REQ* pReq, MV_DMA_DESC* pDmaDesc)
+{
+ MV_CESA_DESC* pCesaDesc = &pReq->pCesaDesc[frag];
+ MV_CESA_DESC* pSramDesc = &cesaSramVirtPtr->desc;
+ MV_U16 sramBufOffset = (MV_U16)((MV_U8*)cesaSramVirtPtr->buf - mvCesaSramAddrGet());
+
+ pCesaDesc->config = MV_32BIT_LE(config);
+
+ if( (config & MV_CESA_OPERATION_MASK) !=
+ (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET) )
+ {
+ /* word 1 */
+ pCesaDesc->cryptoSrcOffset = MV_16BIT_LE(sramBufOffset + cryptoOffset);
+ pCesaDesc->cryptoDstOffset = MV_16BIT_LE(sramBufOffset + cryptoOffset);
+ /* word 2 */
+ pCesaDesc->cryptoDataLen = MV_16BIT_LE(cryptoLength);
+ /* word 3 */
+ pCesaDesc->cryptoKeyOffset = MV_16BIT_LE((MV_U16)(cesaSramVirtPtr->sramSA.cryptoKey -
+ mvCesaSramAddrGet()));
+ /* word 4 */
+ pCesaDesc->cryptoIvOffset = MV_16BIT_LE((MV_U16)(cesaSramVirtPtr->cryptoIV -
+ mvCesaSramAddrGet()));
+ pCesaDesc->cryptoIvBufOffset = MV_16BIT_LE(sramBufOffset + ivOffset);
+ }
+
+ if( (config & MV_CESA_OPERATION_MASK) !=
+ (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
+ {
+ /* word 5 */
+ pCesaDesc->macSrcOffset = MV_16BIT_LE(sramBufOffset + macOffset);
+ pCesaDesc->macTotalLen = MV_16BIT_LE(macTotalLen);
+
+ /* word 6 */
+ pCesaDesc->macDigestOffset = MV_16BIT_LE(sramBufOffset + digestOffset);
+ pCesaDesc->macDataLen = MV_16BIT_LE(macLength);
+
+ /* word 7 */
+ pCesaDesc->macInnerIvOffset = MV_16BIT_LE((MV_U16)(cesaSramVirtPtr->sramSA.macInnerIV -
+ mvCesaSramAddrGet()));
+ pCesaDesc->macOuterIvOffset = MV_16BIT_LE((MV_U16)(cesaSramVirtPtr->sramSA.macOuterIV -
+ mvCesaSramAddrGet()));
+ }
+ /* Prepare DMA descriptor to CESA descriptor from DRAM to SRAM */
+ pDmaDesc->phySrcAdd = MV_32BIT_LE(mvCesaVirtToPhys(&pReq->cesaDescBuf, pCesaDesc));
+ pDmaDesc->phyDestAdd = MV_32BIT_LE(mvCesaSramVirtToPhys(NULL, (MV_U8*)pSramDesc));
+ pDmaDesc->byteCnt = MV_32BIT_LE(sizeof(MV_CESA_DESC) | BIT31);
+
+ /* flush Source buffer */
+ mvOsCacheFlush(NULL, pCesaDesc, sizeof(MV_CESA_DESC));
+}
+
+/*******************************************************************************
+* mvCesaSramSaUpdate - Move required SA information to SRAM if needed.
+*
+* DESCRIPTION:
+* Copy to SRAM values of the required SA.
+*
+*
+* INPUT:
+* short sid - Session ID that needs an SRAM cache update
+* MV_DMA_DESC *pDmaDesc - Pointer to DMA descriptor used to
+* copy SA values from DRAM to SRAM.
+*
+* RETURN:
+* None. The function only prepares the DMA descriptor that copies the
+* SA from DRAM to SRAM; the caller decides whether an update is needed.
+*
+*******************************************************************************/
+static INLINE void mvCesaSramSaUpdate(short sid, MV_DMA_DESC *pDmaDesc)
+{
+ MV_CESA_SA *pSA = &pCesaSAD[sid];
+
+ /* Prepare DMA descriptor to Copy CACHE_SA from SA database in DRAM to SRAM */
+ pDmaDesc->byteCnt = MV_32BIT_LE(sizeof(MV_CESA_SRAM_SA) | BIT31);
+ pDmaDesc->phySrcAdd = MV_32BIT_LE(mvCesaVirtToPhys(&cesaSramSaBuf, pSA->pSramSA));
+ pDmaDesc->phyDestAdd =
+ MV_32BIT_LE(mvCesaSramVirtToPhys(NULL, (MV_U8*)&cesaSramVirtPtr->sramSA));
+
+ /* Source buffer is already flushed during OpenSession*/
+ /*mvOsCacheFlush(NULL, &pSA->sramSA, sizeof(MV_CESA_SRAM_SA));*/
+}
+
+/*******************************************************************************
+* mvCesaDmaCopyPrepare - prepare DMA descriptor list to copy data presented by
+* Mbuf structure from DRAM to SRAM
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+* MV_CESA_MBUF* pMbuf - pointer to Mbuf structure contains request
+* data in DRAM
+* MV_U8* pSramBuf - pointer to buffer in SRAM where data should
+* be copied to.
+* MV_DMA_DESC* pDmaDesc - pointer to first DMA descriptor for this copy.
+* The function sets the number of DMA descriptors needed
+* to copy the copySize bytes from Mbuf.
+* MV_BOOL isToMbuf - Copy direction.
+* MV_TRUE means copy from SRAM buffer to Mbuf in DRAM.
+* MV_FALSE means copy from Mbuf in DRAM to SRAM buffer.
+* int offset - Offset in the Mbuf structure from which the copy
+* should start.
+* int copySize - Size of data to be copied.
+*
+* RETURN:
+* int - number of DMA descriptors used for the copy.
+*
+*******************************************************************************/
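+/* Example (illustrative): copying 300 bytes that span two Mbuf fragments
+ * produces two DMA descriptors in the generic variant below; the NetBSD
+ * variant may emit more descriptors, because it additionally splits a
+ * fragment at MMU page boundaries.
+ */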
+#ifndef MV_NETBSD
+static INLINE int mvCesaDmaCopyPrepare(MV_CESA_MBUF* pMbuf, MV_U8* pSramBuf,
+ MV_DMA_DESC* pDmaDesc, MV_BOOL isToMbuf,
+ int offset, int copySize, MV_BOOL skipFlush)
+{
+ int bufOffset, bufSize, size, frag, i;
+ MV_U8* pBuf;
+
+ i = 0;
+
+ /* Calculate start place for copy: fragment number and offset in the fragment */
+ frag = mvCesaMbufOffset(pMbuf, offset, &bufOffset);
+ bufSize = pMbuf->pFrags[frag].bufSize - bufOffset;
+ pBuf = pMbuf->pFrags[frag].bufVirtPtr + bufOffset;
+
+ /* Size accumulate total copy size */
+ size = 0;
+
+ /* Create DMA lists to copy mBuf from pSrc to SRAM */
+ while(size < copySize)
+ {
+ /* Find copy size for each DMA descriptor */
+ bufSize = MV_MIN(bufSize, (copySize - size));
+ pDmaDesc[i].byteCnt = MV_32BIT_LE(bufSize | BIT31);
+ if(isToMbuf)
+ {
+ pDmaDesc[i].phyDestAdd = MV_32BIT_LE(mvOsIoVirtToPhy(NULL, pBuf));
+ pDmaDesc[i].phySrcAdd =
+ MV_32BIT_LE(mvCesaSramVirtToPhys(NULL, (pSramBuf + size)));
+ /* invalidate the buffer */
+ if(skipFlush == MV_FALSE)
+ mvOsCacheInvalidate(NULL, pBuf, bufSize);
+ }
+ else
+ {
+ pDmaDesc[i].phySrcAdd = MV_32BIT_LE(mvOsIoVirtToPhy(NULL, pBuf));
+ pDmaDesc[i].phyDestAdd =
+ MV_32BIT_LE(mvCesaSramVirtToPhys(NULL, (pSramBuf + size)));
+ /* flush the buffer */
+ if(skipFlush == MV_FALSE)
+ mvOsCacheFlush(NULL, pBuf, bufSize);
+ }
+
+ /* Count number of used DMA descriptors */
+ i++;
+ size += bufSize;
+
+ /* go to next fragment in the Mbuf */
+ frag++;
+ pBuf = pMbuf->pFrags[frag].bufVirtPtr;
+ bufSize = pMbuf->pFrags[frag].bufSize;
+ }
+ return i;
+}
+#else /* MV_NETBSD */
+static int mvCesaDmaCopyPrepare(MV_CESA_MBUF* pMbuf, MV_U8* pSramBuf,
+ MV_DMA_DESC* pDmaDesc, MV_BOOL isToMbuf,
+ int offset, int copySize, MV_BOOL skipFlush)
+{
+ int bufOffset, bufSize, thisSize, size, frag, i;
+ MV_ULONG bufPhys, sramPhys;
+ MV_U8* pBuf;
+
+ /*
+ * Calculate start place for copy: fragment number and offset in
+ * the fragment
+ */
+ frag = mvCesaMbufOffset(pMbuf, offset, &bufOffset);
+
+ /*
+ * Get SRAM physical address only once. We can update it in-place
+ * as we build the descriptor chain.
+ */
+ sramPhys = mvCesaSramVirtToPhys(NULL, pSramBuf);
+
+ /*
+ * 'size' accumulates total copy size, 'i' counts descriptors.
+ */
+ size = i = 0;
+
+ /* Create DMA lists to copy mBuf from pSrc to SRAM */
+ while (size < copySize) {
+ /*
+ * Calculate # of bytes to copy from the current fragment,
+ * and the pointer to the start of data
+ */
+ bufSize = pMbuf->pFrags[frag].bufSize - bufOffset;
+ pBuf = pMbuf->pFrags[frag].bufVirtPtr + bufOffset;
+ bufOffset = 0; /* First frag may be non-zero */
+ frag++;
+
+ /*
+ * As long as there is data in the current fragment...
+ */
+ while (bufSize > 0) {
+ /*
+ * Ensure we don't cross an MMU page boundary.
+ * XXX: This is NetBSD-specific, but it is a
+ * quick and dirty way to fix the problem.
+ * A true HAL would rely on the OS-specific
+ * driver to do this...
+ */
+ thisSize = PAGE_SIZE -
+ (((MV_ULONG)pBuf) & (PAGE_SIZE - 1));
+ thisSize = MV_MIN(bufSize, thisSize);
+ /*
+ * Make sure we don't copy more than requested
+ */
+ if (thisSize > (copySize - size)) {
+ thisSize = copySize - size;
+ bufSize = 0;
+ }
+
+ /*
+ * Physical address of this fragment
+ */
+ bufPhys = MV_32BIT_LE(mvOsIoVirtToPhy(NULL, pBuf));
+
+ /*
+ * Set up the descriptor
+ */
+ pDmaDesc[i].byteCnt = MV_32BIT_LE(thisSize | BIT31);
+ if(isToMbuf) {
+ pDmaDesc[i].phyDestAdd = bufPhys;
+ pDmaDesc[i].phySrcAdd = MV_32BIT_LE(sramPhys);
+ /* invalidate the buffer */
+ if(skipFlush == MV_FALSE)
+ mvOsCacheInvalidate(NULL, pBuf, thisSize);
+ } else {
+ pDmaDesc[i].phySrcAdd = bufPhys;
+ pDmaDesc[i].phyDestAdd = MV_32BIT_LE(sramPhys);
+ /* flush the buffer */
+ if(skipFlush == MV_FALSE)
+ mvOsCacheFlush(NULL, pBuf, thisSize);
+ }
+
+ pDmaDesc[i].phyNextDescPtr =
+ MV_32BIT_LE(mvOsIoVirtToPhy(NULL,(&pDmaDesc[i+1])));
+
+ /* flush the DMA desc */
+ mvOsCacheFlush(NULL, &pDmaDesc[i], sizeof(MV_DMA_DESC));
+
+ /* Update state */
+ bufSize -= thisSize;
+ sramPhys += thisSize;
+ pBuf += thisSize;
+ size += thisSize;
+ i++;
+ }
+ }
+
+ return i;
+}
+#endif /* MV_NETBSD */
+/*******************************************************************************
+* mvCesaHmacIvGet - Calculate Inner and Outer values from HMAC key
+*
+* DESCRIPTION:
+* This function calculates the Inner and Outer values used for the HMAC algorithm.
+* Precomputing them improves performance of the whole HMAC processing.
+*
+* INPUT:
+* MV_CESA_MAC_MODE macMode - Authentication mode: HMAC_MD5 or HMAC_SHA1.
+* unsigned char key[] - Pointer to HMAC key.
+* int keyLength - Size of HMAC key (maximum 64 bytes)
+*
+* OUTPUT:
+* unsigned char innerIV[] - HASH(key^inner)
+* unsigned char outerIV[] - HASH(key^outer)
+*
+* RETURN: None
+*
+*******************************************************************************/
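+/* Background (standard HMAC precomputation, stated here for reference):
+ *
+ *	innerIV = hash state after one block of (key XOR ipad), ipad = 0x36..36
+ *	outerIV = hash state after one block of (key XOR opad), opad = 0x5c..5c
+ *
+ * so that HMAC(key, msg) = H((key XOR opad) || H((key XOR ipad) || msg)) can
+ * be resumed from these two chaining values without re-hashing the padded
+ * key for every request.
+ */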
+static void mvCesaHmacIvGet(MV_CESA_MAC_MODE macMode, unsigned char key[], int keyLength,
+ unsigned char innerIV[], unsigned char outerIV[])
+{
+ unsigned char inner[MV_CESA_MAX_MAC_KEY_LENGTH];
+ unsigned char outer[MV_CESA_MAX_MAC_KEY_LENGTH];
+ int i, digestSize = 0;
+#if defined(MV_CPU_LE) || defined(MV_PPC)
+ MV_U32 swapped32, val32, *pVal32;
+#endif
+ for(i=0; i<keyLength; i++)
+ {
+ inner[i] = 0x36 ^ key[i];
+ outer[i] = 0x5c ^ key[i];
+ }
+
+ for(i=keyLength; i<MV_CESA_MAX_MAC_KEY_LENGTH; i++)
+ {
+ inner[i] = 0x36;
+ outer[i] = 0x5c;
+ }
+ if(macMode == MV_CESA_MAC_HMAC_MD5)
+ {
+ MV_MD5_CONTEXT ctx;
+
+ mvMD5Init(&ctx);
+ mvMD5Update(&ctx, inner, MV_CESA_MAX_MAC_KEY_LENGTH);
+
+ memcpy(innerIV, ctx.buf, MV_CESA_MD5_DIGEST_SIZE);
+ memset(&ctx, 0, sizeof(ctx));
+
+ mvMD5Init(&ctx);
+ mvMD5Update(&ctx, outer, MV_CESA_MAX_MAC_KEY_LENGTH);
+ memcpy(outerIV, ctx.buf, MV_CESA_MD5_DIGEST_SIZE);
+ memset(&ctx, 0, sizeof(ctx));
+ digestSize = MV_CESA_MD5_DIGEST_SIZE;
+ }
+ else if(macMode == MV_CESA_MAC_HMAC_SHA1)
+ {
+ MV_SHA1_CTX ctx;
+
+ mvSHA1Init(&ctx);
+ mvSHA1Update(&ctx, inner, MV_CESA_MAX_MAC_KEY_LENGTH);
+ memcpy(innerIV, ctx.state, MV_CESA_SHA1_DIGEST_SIZE);
+ memset(&ctx, 0, sizeof(ctx));
+
+ mvSHA1Init(&ctx);
+ mvSHA1Update(&ctx, outer, MV_CESA_MAX_MAC_KEY_LENGTH);
+ memcpy(outerIV, ctx.state, MV_CESA_SHA1_DIGEST_SIZE);
+ memset(&ctx, 0, sizeof(ctx));
+ digestSize = MV_CESA_SHA1_DIGEST_SIZE;
+ }
+ else
+ {
+ mvOsPrintf("hmacGetIV: Unexpected macMode %d\n", macMode);
+ }
+#if defined(MV_CPU_LE) || defined(MV_PPC)
+ /* 32 bits Swap of Inner and Outer values */
+ pVal32 = (MV_U32*)innerIV;
+ for(i=0; i<digestSize/4; i++)
+ {
+ val32 = *pVal32;
+ swapped32 = MV_BYTE_SWAP_32BIT(val32);
+ *pVal32 = swapped32;
+ pVal32++;
+ }
+ pVal32 = (MV_U32*)outerIV;
+ for(i=0; i<digestSize/4; i++)
+ {
+ val32 = *pVal32;
+ swapped32 = MV_BYTE_SWAP_32BIT(val32);
+ *pVal32 = swapped32;
+ pVal32++;
+ }
+#endif /* defined(MV_CPU_LE) || defined(MV_PPC) */
+}
+
+
+/*******************************************************************************
+* mvCesaFragSha1Complete - Complete SHA1 authentication started by HW using SW
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+* MV_CESA_MBUF* pMbuf - Pointer to Mbuf structure where data
+* for SHA1 is placed.
+* int offset - Offset in the Mbuf structure where
+* unprocessed data for SHA1 is started.
+* MV_U8* pOuterIV - Pointer to OUTER for this session.
+* If pOuterIV==NULL - MAC mode is HASH_SHA1
+* If pOuterIV!=NULL - MAC mode is HMAC_SHA1
+* int macLeftSize - Size of unprocessed data for SHA1.
+* int macTotalSize - Total size of data for SHA1 in the
+* request (processed + unprocessed)
+*
+* OUTPUT:
+* MV_U8* pDigest - Pointer to place where calculated Digest will
+* be stored.
+*
+* RETURN: None
+*
+*******************************************************************************/
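+/* How the SW completion works (summary of the code below): the partial SHA1
+ * state left by the HW is read back from MV_CESA_AUTH_INIT_VAL_DIGEST_REG
+ * into ctx.state[], the bit count is set to 8 * (bytes already hashed),
+ * plus 512 bits for the HMAC inner block, the unprocessed data is fed
+ * through mvSHA1Update()/mvSHA1Final(), and for HMAC a second (outer) pass
+ * is run over the inner digest.
+ */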
+static void mvCesaFragSha1Complete(MV_CESA_MBUF* pMbuf, int offset,
+ MV_U8* pOuterIV, int macLeftSize,
+ int macTotalSize, MV_U8* pDigest)
+{
+ MV_SHA1_CTX ctx;
+ MV_U8 *pData;
+ int i, frag, fragOffset, size;
+
+ /* Read temporary Digest from HW */
+ for(i=0; i<MV_CESA_SHA1_DIGEST_SIZE/4; i++)
+ {
+ ctx.state[i] = MV_REG_READ(MV_CESA_AUTH_INIT_VAL_DIGEST_REG(i));
+ }
+ /* Initialize MV_SHA1_CTX structure */
+ memset(ctx.buffer, 0, 64);
+ /* Set count[0] in bits. 32 bits is enough for 512 MBytes */
+ /* so count[1] is always 0 */
+ ctx.count[0] = ((macTotalSize - macLeftSize) * 8);
+ ctx.count[1] = 0;
+
+ /* If HMAC - add size of Inner block (64 bytes) to count[0] */
+ if(pOuterIV != NULL)
+ ctx.count[0] += (64 * 8);
+
+ /* Get place of unprocessed data in the Mbuf structure */
+ frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
+ if(frag == MV_INVALID)
+ {
+ mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+ return;
+ }
+
+ pData = pMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+ size = pMbuf->pFrags[frag].bufSize - fragOffset;
+
+ /* Complete Inner part */
+ while(macLeftSize > 0)
+ {
+ if(macLeftSize <= size)
+ {
+ mvSHA1Update(&ctx, pData, macLeftSize);
+ break;
+ }
+ mvSHA1Update(&ctx, pData, size);
+ macLeftSize -= size;
+ frag++;
+ pData = pMbuf->pFrags[frag].bufVirtPtr;
+ size = pMbuf->pFrags[frag].bufSize;
+ }
+ mvSHA1Final(pDigest, &ctx);
+/*
+ mvOsPrintf("mvCesaFragSha1Complete: pOuterIV=%p, macLeftSize=%d, macTotalSize=%d\n",
+ pOuterIV, macLeftSize, macTotalSize);
+ mvDebugMemDump(pDigest, MV_CESA_SHA1_DIGEST_SIZE, 1);
+*/
+
+ if(pOuterIV != NULL)
+ {
+ /* If HMAC - Complete Outer part */
+ for(i=0; i<MV_CESA_SHA1_DIGEST_SIZE/4; i++)
+ {
+#if defined(MV_CPU_LE) || defined(MV_ARM)
+ ctx.state[i] = MV_BYTE_SWAP_32BIT(((MV_U32*)pOuterIV)[i]);
+#else
+ ctx.state[i] = ((MV_U32*)pOuterIV)[i];
+#endif
+ }
+ memset(ctx.buffer, 0, 64);
+
+ ctx.count[0] = 64*8;
+ ctx.count[1] = 0;
+ mvSHA1Update(&ctx, pDigest, MV_CESA_SHA1_DIGEST_SIZE);
+ mvSHA1Final(pDigest, &ctx);
+ }
+}
+
+/*******************************************************************************
+* mvCesaFragMd5Complete - Complete MD5 authentication started by HW using SW
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+* MV_CESA_MBUF* pMbuf - Pointer to Mbuf structure where data
+* for MD5 is placed.
+* int offset - Offset in the Mbuf structure where
+* unprocessed data for MD5 is started.
+* MV_U8* pOuterIV - Pointer to OUTER for this session.
+* If pOuterIV==NULL - MAC mode is HASH_MD5
+* If pOuterIV!=NULL - MAC mode is HMAC_MD5
+* int macLeftSize - Size of unprocessed data for MD5.
+* int macTotalSize - Total size of data for MD5 in the
+* request (processed + unprocessed)
+*
+* OUTPUT:
+* MV_U8* pDigest - Pointer to place where calculated Digest will
+* be stored.
+*
+* RETURN: None
+*
+*******************************************************************************/
+static void mvCesaFragMd5Complete(MV_CESA_MBUF* pMbuf, int offset,
+ MV_U8* pOuterIV, int macLeftSize,
+ int macTotalSize, MV_U8* pDigest)
+{
+ MV_MD5_CONTEXT ctx;
+ MV_U8 *pData;
+ int i, frag, fragOffset, size;
+
+ /* Read temporary Digest from HW */
+ for(i=0; i<MV_CESA_MD5_DIGEST_SIZE/4; i++)
+ {
+ ctx.buf[i] = MV_REG_READ(MV_CESA_AUTH_INIT_VAL_DIGEST_REG(i));
+ }
+ memset(ctx.in, 0, 64);
+
+ /* Set bits[0] in bits. 32 bits is enough for 512 MBytes */
+ /* so bits[1] is always 0 */
+ ctx.bits[0] = ((macTotalSize - macLeftSize) * 8);
+ ctx.bits[1] = 0;
+
+ /* If HMAC - add size of Inner block (64 bytes) to bits[0] */
+ if(pOuterIV != NULL)
+ ctx.bits[0] += (64 * 8);
+
+ frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
+ if(frag == MV_INVALID)
+ {
+ mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+ return;
+ }
+
+ pData = pMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+ size = pMbuf->pFrags[frag].bufSize - fragOffset;
+
+ /* Complete Inner part */
+ while(macLeftSize > 0)
+ {
+ if(macLeftSize <= size)
+ {
+ mvMD5Update(&ctx, pData, macLeftSize);
+ break;
+ }
+ mvMD5Update(&ctx, pData, size);
+ macLeftSize -= size;
+ frag++;
+ pData = pMbuf->pFrags[frag].bufVirtPtr;
+ size = pMbuf->pFrags[frag].bufSize;
+ }
+ mvMD5Final(pDigest, &ctx);
+
+/*
+ mvOsPrintf("mvCesaFragMd5Complete: pOuterIV=%p, macLeftSize=%d, macTotalSize=%d\n",
+ pOuterIV, macLeftSize, macTotalSize);
+ mvDebugMemDump(pDigest, MV_CESA_MD5_DIGEST_SIZE, 1);
+*/
+ if(pOuterIV != NULL)
+ {
+ /* Complete Outer part */
+ for(i=0; i<MV_CESA_MD5_DIGEST_SIZE/4; i++)
+ {
+#if defined(MV_CPU_LE) || defined(MV_ARM)
+ ctx.buf[i] = MV_BYTE_SWAP_32BIT(((MV_U32*)pOuterIV)[i]);
+#else
+ ctx.buf[i] = ((MV_U32*)pOuterIV)[i];
+#endif
+ }
+ memset(ctx.in, 0, 64);
+
+ ctx.bits[0] = 64*8;
+ ctx.bits[1] = 0;
+ mvMD5Update(&ctx, pDigest, MV_CESA_MD5_DIGEST_SIZE);
+ mvMD5Final(pDigest, &ctx);
+ }
+}
+
+/*******************************************************************************
+* mvCesaFragAuthComplete -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+* MV_CESA_REQ* pReq,
+* MV_CESA_SA* pSA,
+* int macDataSize
+*
+* RETURN:
+* MV_STATUS
+*
+*******************************************************************************/
+static MV_STATUS mvCesaFragAuthComplete(MV_CESA_REQ* pReq, MV_CESA_SA* pSA,
+ int macDataSize)
+{
+ MV_CESA_COMMAND* pCmd = pReq->pCmd;
+ MV_U8* pDigest;
+ MV_CESA_MAC_MODE macMode;
+ MV_U8* pOuterIV = NULL;
+
+ /* Copy data from Source fragment to Destination */
+ if(pCmd->pSrc != pCmd->pDst)
+ {
+ mvCesaMbufCopy(pCmd->pDst, pReq->frags.bufOffset,
+ pCmd->pSrc, pReq->frags.bufOffset, macDataSize);
+ }
+
+/*
+ mvCesaCopyFromMbuf(cesaSramVirtPtr->buf[0], pCmd->pSrc, pReq->frags.bufOffset, macDataSize);
+ mvCesaCopyToMbuf(cesaSramVirtPtr->buf[0], pCmd->pDst, pReq->frags.bufOffset, macDataSize);
+*/
+ pDigest = (mvCesaSramAddrGet() + pReq->frags.newDigestOffset);
+
+ macMode = (pSA->config & MV_CESA_MAC_MODE_MASK) >> MV_CESA_MAC_MODE_OFFSET;
+/*
+ mvOsPrintf("macDataSize=%d, macLength=%d, digestOffset=%d, macMode=%d\n",
+ macDataSize, pCmd->macLength, pCmd->digestOffset, macMode);
+*/
+ switch(macMode)
+ {
+ case MV_CESA_MAC_HMAC_MD5:
+ pOuterIV = pSA->pSramSA->macOuterIV;
+
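+ /* fall through - HMAC shares the MD5 completion path, with pOuterIV set */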
+ case MV_CESA_MAC_MD5:
+ mvCesaFragMd5Complete(pCmd->pDst, pReq->frags.bufOffset, pOuterIV,
+ macDataSize, pCmd->macLength, pDigest);
+ break;
+
+ case MV_CESA_MAC_HMAC_SHA1:
+ pOuterIV = pSA->pSramSA->macOuterIV;
+
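+ /* fall through - HMAC shares the SHA1 completion path, with pOuterIV set */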
+ case MV_CESA_MAC_SHA1:
+ mvCesaFragSha1Complete(pCmd->pDst, pReq->frags.bufOffset, pOuterIV,
+ macDataSize, pCmd->macLength, pDigest);
+ break;
+
+ default:
+ mvOsPrintf("mvCesaFragAuthComplete: Unexpected macMode %d\n", macMode);
+ return MV_BAD_PARAM;
+ }
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaCtrModeInit -
+*
+* DESCRIPTION:
+*
+*
+* INPUT: NONE
+*
+*
+* RETURN:
+* MV_CESA_COMMAND*
+*
+*******************************************************************************/
+static MV_CESA_COMMAND* mvCesaCtrModeInit(void)
+{
+ MV_CESA_MBUF *pMbuf;
+ MV_U8 *pBuf;
+ MV_CESA_COMMAND *pCmd;
+
+ pBuf = mvOsMalloc(sizeof(MV_CESA_COMMAND) +
+ sizeof(MV_CESA_MBUF) + sizeof(MV_BUF_INFO) + 100);
+ if(pBuf == NULL)
+ {
+ mvOsPrintf("mvCesaSessionOpen: Can't allocate %u bytes for CTR Mode\n",
+ sizeof(MV_CESA_COMMAND) + sizeof(MV_CESA_MBUF) + sizeof(MV_BUF_INFO) );
+ return NULL;
+ }
+ pCmd = (MV_CESA_COMMAND*)pBuf;
+ pBuf += sizeof(MV_CESA_COMMAND);
+
+ pMbuf = (MV_CESA_MBUF*)pBuf;
+ pBuf += sizeof(MV_CESA_MBUF);
+
+ pMbuf->pFrags = (MV_BUF_INFO*)pBuf;
+
+ pMbuf->numFrags = 1;
+ pCmd->pSrc = pMbuf;
+ pCmd->pDst = pMbuf;
+/*
+ mvOsPrintf("CtrModeInit: pCmd=%p, pSrc=%p, pDst=%p, pFrags=%p\n",
+ pCmd, pCmd->pSrc, pCmd->pDst,
+ pMbuf->pFrags);
+*/
+ return pCmd;
+}
+
+/*******************************************************************************
+* mvCesaCtrModePrepare -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+* MV_CESA_COMMAND *pCtrModeCmd, MV_CESA_COMMAND *pCmd
+*
+* RETURN:
+* MV_STATUS
+*
+*******************************************************************************/
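+/* What this prepares (summary of the code below): AES-CTR is implemented on
+ * top of the block engine by building a temporary key-stream buffer of
+ * counter blocks. Each 16-byte block keeps the first 12 bytes of the IV and
+ * replaces the last 32-bit word with a big-endian counter that increments
+ * per block:
+ *
+ *	block[0] = IV (counter value taken from the packet)
+ *	block[n] = IV[0..11] || BE32(counter + n)
+ *
+ * The engine then encrypts this buffer and mvCesaCtrModeComplete() XORs the
+ * result with the payload.
+ */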
+static MV_STATUS mvCesaCtrModePrepare(MV_CESA_COMMAND *pCtrModeCmd, MV_CESA_COMMAND *pCmd)
+{
+ MV_CESA_MBUF *pMbuf;
+ MV_U8 *pBuf, *pIV;
+ MV_U32 counter, *pCounter;
+ int cryptoSize = MV_ALIGN_UP(pCmd->cryptoLength, MV_CESA_AES_BLOCK_SIZE);
+/*
+ mvOsPrintf("CtrModePrepare: pCmd=%p, pCtrSrc=%p, pCtrDst=%p, pOrgCmd=%p, pOrgSrc=%p, pOrgDst=%p\n",
+ pCmd, pCmd->pSrc, pCmd->pDst,
+ pCtrModeCmd, pCtrModeCmd->pSrc, pCtrModeCmd->pDst);
+*/
+ pMbuf = pCtrModeCmd->pSrc;
+
+ /* Allocate buffer for Key stream */
+ pBuf = mvOsIoCachedMalloc(cesaOsHandle,cryptoSize,
+ &pMbuf->pFrags[0].bufPhysAddr,
+ &pMbuf->pFrags[0].memHandle);
+ if(pBuf == NULL)
+ {
+ mvOsPrintf("mvCesaCtrModePrepare: Can't allocate %d bytes\n", cryptoSize);
+ return MV_OUT_OF_CPU_MEM;
+ }
+ memset(pBuf, 0, cryptoSize);
+ mvOsCacheFlush(NULL, pBuf, cryptoSize);
+
+ pMbuf->pFrags[0].bufVirtPtr = pBuf;
+ pMbuf->mbufSize = cryptoSize;
+ pMbuf->pFrags[0].bufSize = cryptoSize;
+
+ pCtrModeCmd->pReqPrv = pCmd->pReqPrv;
+ pCtrModeCmd->sessionId = pCmd->sessionId;
+
+ /* ivFromUser and ivOffset are don't care */
+ pCtrModeCmd->cryptoOffset = 0;
+ pCtrModeCmd->cryptoLength = cryptoSize;
+
+ /* digestOffset, macOffset and macLength are don't care */
+
+ mvCesaCopyFromMbuf(pBuf, pCmd->pSrc, pCmd->ivOffset, MV_CESA_AES_BLOCK_SIZE);
+ pCounter = (MV_U32*)(pBuf + (MV_CESA_AES_BLOCK_SIZE - sizeof(counter)));
+ counter = *pCounter;
+ counter = MV_32BIT_BE(counter);
+ pIV = pBuf;
+ cryptoSize -= MV_CESA_AES_BLOCK_SIZE;
+
+ /* fill key stream */
+ while(cryptoSize > 0)
+ {
+ pBuf += MV_CESA_AES_BLOCK_SIZE;
+ memcpy(pBuf, pIV, MV_CESA_AES_BLOCK_SIZE - sizeof(counter));
+ pCounter = (MV_U32*)(pBuf + (MV_CESA_AES_BLOCK_SIZE - sizeof(counter)));
+ counter++;
+ *pCounter = MV_32BIT_BE(counter);
+ cryptoSize -= MV_CESA_AES_BLOCK_SIZE;
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaCtrModeComplete -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+* MV_CESA_COMMAND *pOrgCmd, MV_CESA_COMMAND *pCmd
+*
+* RETURN:
+* MV_STATUS
+*
+*******************************************************************************/
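+/* Completion step (summary of the code below): once the engine has encrypted
+ * the counter blocks prepared above, the CTR result is produced byte by byte
+ * as
+ *
+ *	dst[i] = src[i] ^ keystream[i]
+ *
+ * walking the source and destination Mbuf fragments in parallel; when source
+ * and destination buffers differ, the data before cryptoOffset and after
+ * cryptoOffset + cryptoLength is copied across unmodified.
+ */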
+static MV_STATUS mvCesaCtrModeComplete(MV_CESA_COMMAND *pOrgCmd, MV_CESA_COMMAND *pCmd)
+{
+ int srcFrag, dstFrag, srcOffset, dstOffset, keyOffset, srcSize, dstSize;
+ int cryptoSize = pCmd->cryptoLength;
+ MV_U8 *pSrc, *pDst, *pKey;
+ MV_STATUS status = MV_OK;
+/*
+ mvOsPrintf("CtrModeComplete: pCmd=%p, pCtrSrc=%p, pCtrDst=%p, pOrgCmd=%p, pOrgSrc=%p, pOrgDst=%p\n",
+ pCmd, pCmd->pSrc, pCmd->pDst,
+ pOrgCmd, pOrgCmd->pSrc, pOrgCmd->pDst);
+*/
+ /* XOR source data with key stream to destination data */
+ pKey = pCmd->pDst->pFrags[0].bufVirtPtr;
+ keyOffset = 0;
+
+ if( (pOrgCmd->pSrc != pOrgCmd->pDst) &&
+ (pOrgCmd->cryptoOffset > 0) )
+ {
+ /* Copy Prefix from source buffer to destination buffer */
+
+ status = mvCesaMbufCopy(pOrgCmd->pDst, 0,
+ pOrgCmd->pSrc, 0, pOrgCmd->cryptoOffset);
+/*
+ status = mvCesaCopyFromMbuf(tempBuf, pOrgCmd->pSrc,
+ 0, pOrgCmd->cryptoOffset);
+ status = mvCesaCopyToMbuf(tempBuf, pOrgCmd->pDst,
+ 0, pOrgCmd->cryptoOffset);
+*/
+ }
+
+ srcFrag = mvCesaMbufOffset(pOrgCmd->pSrc, pOrgCmd->cryptoOffset, &srcOffset);
+ pSrc = pOrgCmd->pSrc->pFrags[srcFrag].bufVirtPtr;
+ srcSize = pOrgCmd->pSrc->pFrags[srcFrag].bufSize;
+
+ dstFrag = mvCesaMbufOffset(pOrgCmd->pDst, pOrgCmd->cryptoOffset, &dstOffset);
+ pDst = pOrgCmd->pDst->pFrags[dstFrag].bufVirtPtr;
+ dstSize = pOrgCmd->pDst->pFrags[dstFrag].bufSize;
+
+ while(cryptoSize > 0)
+ {
+ pDst[dstOffset] = (pSrc[srcOffset] ^ pKey[keyOffset]);
+
+ cryptoSize--;
+ dstOffset++;
+ srcOffset++;
+ keyOffset++;
+
+ if(srcOffset >= srcSize)
+ {
+ srcFrag++;
+ srcOffset = 0;
+ pSrc = pOrgCmd->pSrc->pFrags[srcFrag].bufVirtPtr;
+ srcSize = pOrgCmd->pSrc->pFrags[srcFrag].bufSize;
+ }
+
+ if(dstOffset >= dstSize)
+ {
+ dstFrag++;
+ dstOffset = 0;
+ pDst = pOrgCmd->pDst->pFrags[dstFrag].bufVirtPtr;
+ dstSize = pOrgCmd->pDst->pFrags[dstFrag].bufSize;
+ }
+ }
+
+ if(pOrgCmd->pSrc != pOrgCmd->pDst)
+ {
+ /* Copy Suffix from source buffer to destination buffer */
+ srcOffset = pOrgCmd->cryptoOffset + pOrgCmd->cryptoLength;
+
+ if( (pOrgCmd->pDst->mbufSize - srcOffset) > 0)
+ {
+ status = mvCesaMbufCopy(pOrgCmd->pDst, srcOffset,
+ pOrgCmd->pSrc, srcOffset,
+ pOrgCmd->pDst->mbufSize - srcOffset);
+ }
+
+/*
+ status = mvCesaCopyFromMbuf(tempBuf, pOrgCmd->pSrc,
+ srcOffset, pOrgCmd->pSrc->mbufSize - srcOffset);
+ status = mvCesaCopyToMbuf(tempBuf, pOrgCmd->pDst,
+ srcOffset, pOrgCmd->pDst->mbufSize - srcOffset);
+*/
+ }
+
+ /* Free buffer used for Key stream */
+ mvOsIoCachedFree(cesaOsHandle,pCmd->pDst->pFrags[0].bufSize,
+ pCmd->pDst->pFrags[0].bufPhysAddr,
+ pCmd->pDst->pFrags[0].bufVirtPtr,
+ pCmd->pDst->pFrags[0].memHandle);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaCtrModeFinish -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+* MV_CESA_COMMAND* pCmd
+*
+* RETURN:
+* MV_STATUS
+*
+*******************************************************************************/
+static void mvCesaCtrModeFinish(MV_CESA_COMMAND* pCmd)
+{
+ mvOsFree(pCmd);
+}
+
+/*******************************************************************************
+* mvCesaParamCheck -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+* MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd, MV_U8* pFixOffset
+*
+* RETURN:
+* MV_STATUS
+*
+*******************************************************************************/
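+/* Summary of the checks below (for orientation): macOffset, digestOffset,
+ * cryptoOffset and ivOffset must each be 4-byte aligned, all of them must
+ * share the same remainder modulo 8, cryptoLength must be a whole number of
+ * crypto blocks, and in CBC mode the IV must not overlap the
+ * [cryptoOffset, cryptoOffset + cryptoLength) range.
+ */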
+static MV_STATUS mvCesaParamCheck(MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd,
+ MV_U8* pFixOffset)
+{
+ MV_U8 fixOffset = 0xFF;
+
+ /* Check AUTH operation parameters */
+ if( ((pSA->config & MV_CESA_OPERATION_MASK) !=
+ (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET)) )
+ {
+ /* MAC offset should be at least 4 byte aligned */
+ if( MV_IS_NOT_ALIGN(pCmd->macOffset, 4) )
+ {
+ mvOsPrintf("mvCesaAction: macOffset %d must be 4 byte aligned\n",
+ pCmd->macOffset);
+ return MV_BAD_PARAM;
+ }
+ /* Digest offset must be 4 byte aligned */
+ if( MV_IS_NOT_ALIGN(pCmd->digestOffset, 4) )
+ {
+ mvOsPrintf("mvCesaAction: digestOffset %d must be 4 byte aligned\n",
+ pCmd->digestOffset);
+ return MV_BAD_PARAM;
+ }
+ /* In addition all offsets should be the same alignment: 8 or 4 */
+ if(fixOffset == 0xFF)
+ {
+ fixOffset = (pCmd->macOffset % 8);
+ }
+ else
+ {
+ if( (pCmd->macOffset % 8) != fixOffset)
+ {
+ mvOsPrintf("mvCesaAction: macOffset %d mod 8 must be equal %d\n",
+ pCmd->macOffset, fixOffset);
+ return MV_BAD_PARAM;
+ }
+ }
+ if( (pCmd->digestOffset % 8) != fixOffset)
+ {
+ mvOsPrintf("mvCesaAction: digestOffset %d mod 8 must be equal %d\n",
+ pCmd->digestOffset, fixOffset);
+ return MV_BAD_PARAM;
+ }
+ }
+ /* Check CRYPTO operation parameters */
+ if( ((pSA->config & MV_CESA_OPERATION_MASK) !=
+ (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET)) )
+ {
+ /* CryptoOffset should be at least 4 byte aligned */
+ if( MV_IS_NOT_ALIGN(pCmd->cryptoOffset, 4) )
+ {
+ mvOsPrintf("CesaAction: cryptoOffset=%d must be 4 byte aligned\n",
+ pCmd->cryptoOffset);
+ return MV_BAD_PARAM;
+ }
+ /* cryptoLength should be a whole number of blocks */
+ if( MV_IS_NOT_ALIGN(pCmd->cryptoLength, pSA->cryptoBlockSize) )
+ {
+ mvOsPrintf("mvCesaAction: cryptoLength=%d must be %d byte aligned\n",
+ pCmd->cryptoLength, pSA->cryptoBlockSize);
+ return MV_BAD_PARAM;
+ }
+ if(fixOffset == 0xFF)
+ {
+ fixOffset = (pCmd->cryptoOffset % 8);
+ }
+ else
+ {
+ /* In addition all offsets should be the same alignment: 8 or 4 */
+ if( (pCmd->cryptoOffset % 8) != fixOffset)
+ {
+ mvOsPrintf("mvCesaAction: cryptoOffset %d mod 8 must be equal %d \n",
+ pCmd->cryptoOffset, fixOffset);
+ return MV_BAD_PARAM;
+ }
+ }
+
+ /* check for CBC mode */
+ if(pSA->cryptoIvSize > 0)
+ {
+ /* cryptoIV must not be part of CryptoLength */
+ if( ((pCmd->ivOffset + pSA->cryptoIvSize) > pCmd->cryptoOffset) &&
+ (pCmd->ivOffset < (pCmd->cryptoOffset + pCmd->cryptoLength)) )
+ {
+ mvOsPrintf("mvCesaFragParamCheck: cryptoIvOffset (%d) is part of cryptoLength (%d+%d)\n",
+ pCmd->ivOffset, pCmd->macOffset, pCmd->macLength);
+ return MV_BAD_PARAM;
+ }
+
+ /* ivOffset must be 4 byte aligned */
+ if( MV_IS_NOT_ALIGN(pCmd->ivOffset, 4) )
+ {
+ mvOsPrintf("CesaAction: ivOffset=%d must be 4 byte aligned\n",
+ pCmd->ivOffset);
+ return MV_BAD_PARAM;
+ }
+ /* In addition all offsets should be the same alignment: 8 or 4 */
+ if( (pCmd->ivOffset % 8) != fixOffset)
+ {
+ mvOsPrintf("mvCesaAction: ivOffset %d mod 8 must be %d\n",
+ pCmd->ivOffset, fixOffset);
+ return MV_BAD_PARAM;
+ }
+ }
+ }
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaFragParamCheck -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+* MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd
+*
+* RETURN:
+* MV_STATUS
+*
+*******************************************************************************/
+static MV_STATUS mvCesaFragParamCheck(MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd)
+{
+ int offset;
+
+ if( ((pSA->config & MV_CESA_OPERATION_MASK) !=
+ (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET)) )
+ {
+ /* macOffset must be less than the SRAM buffer size */
+ if(pCmd->macOffset > (sizeof(cesaSramVirtPtr->buf) - MV_CESA_AUTH_BLOCK_SIZE))
+ {
+ mvOsPrintf("mvCesaFragParamCheck: macOffset is too large (%d)\n",
+ pCmd->macOffset);
+ return MV_BAD_PARAM;
+ }
+ /* macOffset+macSize must be more than mbufSize - SRAM buffer size */
+ if( ((pCmd->macOffset + pCmd->macLength) > pCmd->pSrc->mbufSize) ||
+ ((pCmd->pSrc->mbufSize - (pCmd->macOffset + pCmd->macLength)) >=
+ sizeof(cesaSramVirtPtr->buf)) )
+ {
+ mvOsPrintf("mvCesaFragParamCheck: macLength is too large (%d), mbufSize=%d\n",
+ pCmd->macLength, pCmd->pSrc->mbufSize);
+ return MV_BAD_PARAM;
+ }
+ }
+
+ if( ((pSA->config & MV_CESA_OPERATION_MASK) !=
+ (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET)) )
+ {
+ /* cryptoOffset must be less than the SRAM buffer size */
+ /* 4 for possible fixOffset */
+ if( (pCmd->cryptoOffset + 4) > (sizeof(cesaSramVirtPtr->buf) - pSA->cryptoBlockSize))
+ {
+ mvOsPrintf("mvCesaFragParamCheck: cryptoOffset is too large (%d)\n",
+ pCmd->cryptoOffset);
+ return MV_BAD_PARAM;
+ }
+
+ /* cryptoOffset+cryptoSize must be more than mbufSize - SRAM buffer size */
+ if( ((pCmd->cryptoOffset + pCmd->cryptoLength) > pCmd->pSrc->mbufSize) ||
+ ((pCmd->pSrc->mbufSize - (pCmd->cryptoOffset + pCmd->cryptoLength)) >=
+ (sizeof(cesaSramVirtPtr->buf) - pSA->cryptoBlockSize)) )
+ {
+ mvOsPrintf("mvCesaFragParamCheck: cryptoLength is too large (%d), mbufSize=%d\n",
+ pCmd->cryptoLength, pCmd->pSrc->mbufSize);
+ return MV_BAD_PARAM;
+ }
+ }
+
+ /* When MAC_THEN_CRYPTO or CRYPTO_THEN_MAC */
+ if( ((pSA->config & MV_CESA_OPERATION_MASK) ==
+ (MV_CESA_MAC_THEN_CRYPTO << MV_CESA_OPERATION_OFFSET)) ||
+ ((pSA->config & MV_CESA_OPERATION_MASK) ==
+ (MV_CESA_CRYPTO_THEN_MAC << MV_CESA_OPERATION_OFFSET)) )
+ {
+ if( (mvCtrlModelGet() == MV_5182_DEV_ID) ||
+ ( (mvCtrlModelGet() == MV_5181_DEV_ID) &&
+ (mvCtrlRevGet() >= MV_5181L_A0_REV) &&
+ (pCmd->macLength >= (1 << 14)) ) )
+ {
+ return MV_NOT_ALLOWED;
+ }
+
+ /* abs(cryptoOffset - macOffset) must be aligned to cryptoBlockSize */
+ if(pCmd->cryptoOffset > pCmd->macOffset)
+ {
+ offset = pCmd->cryptoOffset - pCmd->macOffset;
+ }
+ else
+ {
+ offset = pCmd->macOffset - pCmd->cryptoOffset;
+ }
+
+ if( MV_IS_NOT_ALIGN(offset, pSA->cryptoBlockSize) )
+ {
+/*
+ mvOsPrintf("mvCesaFragParamCheck: (cryptoOffset - macOffset) must be %d byte aligned\n",
+ pSA->cryptoBlockSize);
+*/
+ return MV_NOT_ALLOWED;
+ }
+ /* Digest must not be part of CryptoLength */
+ if( ((pCmd->digestOffset + pSA->digestSize) > pCmd->cryptoOffset) &&
+ (pCmd->digestOffset < (pCmd->cryptoOffset + pCmd->cryptoLength)) )
+ {
+/*
+ mvOsPrintf("mvCesaFragParamCheck: digestOffset (%d) is part of cryptoLength (%d+%d)\n",
+ pCmd->digestOffset, pCmd->cryptoOffset, pCmd->cryptoLength);
+*/
+ return MV_NOT_ALLOWED;
+ }
+ }
+ return MV_OK;
+}
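
The block-alignment rule enforced above (abs(cryptoOffset - macOffset) must be a multiple of cryptoBlockSize) is what decides whether a combined MAC+crypto request can be fragmented at all. A minimal standalone sketch of that rule follows; the macro and function names are hypothetical stand-ins for MV_IS_NOT_ALIGN and the check inside mvCesaFragParamCheck():

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the driver's MV_IS_NOT_ALIGN macro. */
    #define IS_NOT_ALIGN(x, a)   (((x) % (a)) != 0)

    /* Returns 0 when a fragmented MAC+crypto request would be allowed,
     * -1 otherwise: the distance between the crypto and MAC regions must
     * be a whole number of crypto blocks. */
    static int frag_offsets_ok(int cryptoOffset, int macOffset, int cryptoBlockSize)
    {
        int offset = abs(cryptoOffset - macOffset);
        return IS_NOT_ALIGN(offset, cryptoBlockSize) ? -1 : 0;
    }

    int main(void)
    {
        printf("%d\n", frag_offsets_ok(16, 0, 16));  /* 0  - allowed  */
        printf("%d\n", frag_offsets_ok(20, 0, 16));  /* -1 - rejected */
        return 0;
    }

Keeping the two regions block-aligned relative to each other presumably lets every fragment carry only whole cipher blocks, so no partial-block state has to be carried between fragments.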
+
+/*******************************************************************************
+* mvCesaFragSizeFind -
+*
+* DESCRIPTION:
+*       Compute how many bytes of the current fragment can be copied into
+*       the SRAM buffer, aligning the crypto and MAC portions down to whole
+*       crypto / authentication blocks.
+*
+* INPUT:
+*       MV_CESA_SA* pSA, MV_CESA_REQ* pReq,
+*       int cryptoOffset, int macOffset,
+*       int* pCopySize - maximum copy size (updated on return)
+*
+* OUTPUT:
+*       int* pCopySize, int* pCryptoDataSize, int* pMacDataSize
+*
+* RETURN:
+*       None (void)
+*
+*******************************************************************************/
+static void mvCesaFragSizeFind(MV_CESA_SA* pSA, MV_CESA_REQ* pReq,
+ int cryptoOffset, int macOffset,
+ int* pCopySize, int* pCryptoDataSize, int* pMacDataSize)
+{
+ MV_CESA_COMMAND *pCmd = pReq->pCmd;
+ int cryptoDataSize, macDataSize, copySize;
+
+ cryptoDataSize = macDataSize = 0;
+ copySize = *pCopySize;
+
+ if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+ (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET) )
+ {
+ cryptoDataSize = MV_MIN( (copySize - cryptoOffset),
+ (pCmd->cryptoLength - (pReq->frags.cryptoSize + 1)) );
+
+        /* cryptoSize of each fragment must be a whole multiple of cryptoBlockSize */
+ if( MV_IS_NOT_ALIGN(cryptoDataSize, pSA->cryptoBlockSize) )
+ {
+ cryptoDataSize = MV_ALIGN_DOWN(cryptoDataSize, pSA->cryptoBlockSize);
+ copySize = cryptoOffset + cryptoDataSize;
+ }
+ }
+ if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+ (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
+ {
+ macDataSize = MV_MIN( (copySize - macOffset),
+ (pCmd->macLength - (pReq->frags.macSize + 1)));
+
+        /* macSize of each fragment (except the last) must be a whole multiple of MV_CESA_AUTH_BLOCK_SIZE */
+ if( MV_IS_NOT_ALIGN(macDataSize, MV_CESA_AUTH_BLOCK_SIZE) )
+ {
+ macDataSize = MV_ALIGN_DOWN(macDataSize, MV_CESA_AUTH_BLOCK_SIZE);
+ copySize = macOffset + macDataSize;
+ }
+ cryptoDataSize = copySize - cryptoOffset;
+ }
+ *pCopySize = copySize;
+
+ if(pCryptoDataSize != NULL)
+ *pCryptoDataSize = cryptoDataSize;
+
+ if(pMacDataSize != NULL)
+ *pMacDataSize = macDataSize;
+}
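
mvCesaFragSizeFind() above trims the crypto and MAC portions of a fragment down to whole block boundaries and shrinks copySize to match. The arithmetic is easier to see in isolation; the sketch below reproduces only that align-down step (hypothetical names, 64-byte auth block as per MV_CESA_AUTH_BLOCK_SIZE) and ignores the clamp against the remaining cryptoLength/macLength and the MAC-only/crypto-only cases:

    #include <stdio.h>

    #define ALIGN_DOWN(x, a)  ((x) - ((x) % (a)))   /* mirrors MV_ALIGN_DOWN */

    /* Given the bytes available for this fragment (copySize) and the data
     * offsets, compute how much crypto and MAC data the fragment carries,
     * aligning each down to its block size as mvCesaFragSizeFind() does. */
    static void frag_size_find(int copySize, int cryptoOffset, int macOffset,
                               int cryptoBlockSize, int *pCrypto, int *pMac)
    {
        int crypto = ALIGN_DOWN(copySize - cryptoOffset, cryptoBlockSize);
        copySize = cryptoOffset + crypto;

        int mac = ALIGN_DOWN(copySize - macOffset, 64 /* auth block */);
        copySize = macOffset + mac;

        *pCrypto = copySize - cryptoOffset;
        *pMac = mac;
    }

    int main(void)
    {
        int c, m;
        frag_size_find(1500, 16, 0, 16, &c, &m);
        printf("crypto=%d mac=%d\n", c, m);   /* crypto=1456 mac=1472 */
        return 0;
    }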
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesa.h b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesa.h
new file mode 100644
index 000000000..c0abc9b7e
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesa.h
@@ -0,0 +1,412 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvCesa.h - Header File for Cryptographic Engines and Security Accelerator
+*
+* DESCRIPTION:
+* This header file contains macros typedefs and function declaration for
+* the Marvell Cryptographic Engines and Security Accelerator.
+*
+*******************************************************************************/
+
+#ifndef __mvCesa_h__
+#define __mvCesa_h__
+
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "mvDebug.h"
+
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+
+#include "cesa/mvMD5.h"
+#include "cesa/mvSHA1.h"
+
+#include "cesa/mvCesa.h"
+#include "cesa/AES/mvAes.h"
+#include "mvSysHwConfig.h"
+
+#ifdef MV_INCLUDE_IDMA
+#include "idma/mvIdma.h"
+#include "idma/mvIdmaRegs.h"
+#else
+/* Redefine MV_DMA_DESC structure */
+typedef struct _mvDmaDesc
+{
+ MV_U32 byteCnt; /* The total number of bytes to transfer */
+ MV_U32 phySrcAdd; /* The physical source address */
+ MV_U32 phyDestAdd; /* The physical destination address */
+ MV_U32 phyNextDescPtr; /* If we are using chain mode DMA transfer, */
+ /* then this pointer should point to the */
+ /* physical address of the next descriptor, */
+ /* otherwise it should be NULL. */
+}MV_DMA_DESC;
+#endif /* MV_INCLUDE_IDMA */
+
+#include "cesa/mvCesaRegs.h"
+
+#define MV_CESA_AUTH_BLOCK_SIZE 64 /* bytes */
+
+#define MV_CESA_MD5_DIGEST_SIZE 16 /* bytes */
+#define MV_CESA_SHA1_DIGEST_SIZE 20 /* bytes */
+
+#define MV_CESA_MAX_DIGEST_SIZE MV_CESA_SHA1_DIGEST_SIZE
+
+#define MV_CESA_DES_KEY_LENGTH 8 /* bytes = 64 bits */
+#define MV_CESA_3DES_KEY_LENGTH 24 /* bytes = 192 bits */
+#define MV_CESA_AES_128_KEY_LENGTH 16 /* bytes = 128 bits */
+#define MV_CESA_AES_192_KEY_LENGTH 24 /* bytes = 192 bits */
+#define MV_CESA_AES_256_KEY_LENGTH 32 /* bytes = 256 bits */
+
+#define MV_CESA_MAX_CRYPTO_KEY_LENGTH MV_CESA_AES_256_KEY_LENGTH
+
+#define MV_CESA_DES_BLOCK_SIZE 8 /* bytes = 64 bits */
+#define MV_CESA_3DES_BLOCK_SIZE 8 /* bytes = 64 bits */
+
+#define MV_CESA_AES_BLOCK_SIZE 16 /* bytes = 128 bits */
+
+#define MV_CESA_MAX_IV_LENGTH MV_CESA_AES_BLOCK_SIZE
+
+#define MV_CESA_MAX_MAC_KEY_LENGTH 64 /* bytes */
+
+typedef struct
+{
+ MV_U8 cryptoKey[MV_CESA_MAX_CRYPTO_KEY_LENGTH];
+ MV_U8 macKey[MV_CESA_MAX_MAC_KEY_LENGTH];
+ MV_CESA_OPERATION operation;
+ MV_CESA_DIRECTION direction;
+ MV_CESA_CRYPTO_ALG cryptoAlgorithm;
+ MV_CESA_CRYPTO_MODE cryptoMode;
+ MV_U8 cryptoKeyLength;
+ MV_CESA_MAC_MODE macMode;
+ MV_U8 macKeyLength;
+ MV_U8 digestSize;
+
+} MV_CESA_OPEN_SESSION;
+
+typedef struct
+{
+ MV_BUF_INFO *pFrags;
+ MV_U16 numFrags;
+ MV_U16 mbufSize;
+
+} MV_CESA_MBUF;
+
+typedef struct
+{
+ void* pReqPrv; /* instead of reqId */
+ MV_U32 retCode;
+ MV_16 sessionId;
+
+} MV_CESA_RESULT;
+
+typedef void (*MV_CESA_CALLBACK) (MV_CESA_RESULT* pResult);
+
+
+typedef struct
+{
+ void* pReqPrv; /* instead of reqId */
+ MV_CESA_MBUF* pSrc;
+ MV_CESA_MBUF* pDst;
+ MV_CESA_CALLBACK* pFuncCB;
+ MV_16 sessionId;
+ MV_U16 ivFromUser;
+ MV_U16 ivOffset;
+ MV_U16 cryptoOffset;
+ MV_U16 cryptoLength;
+ MV_U16 digestOffset;
+ MV_U16 macOffset;
+ MV_U16 macLength;
+ MV_BOOL skipFlush;
+} MV_CESA_COMMAND;
+
+
+
+MV_STATUS mvCesaHalInit (int numOfSession, int queueDepth, char* pSramBase, MV_U32 cryptEngBase, void *osHandle);
+MV_STATUS mvCesaFinish (void);
+MV_STATUS mvCesaSessionOpen(MV_CESA_OPEN_SESSION *pSession, short* pSid);
+MV_STATUS mvCesaSessionClose(short sid);
+MV_STATUS mvCesaCryptoIvSet(MV_U8* pIV, int ivSize);
+
+MV_STATUS mvCesaAction (MV_CESA_COMMAND* pCmd);
+
+MV_U32 mvCesaInProcessGet(void);
+MV_STATUS mvCesaReadyDispatch(void);
+MV_STATUS mvCesaReadyGet(MV_CESA_RESULT* pResult);
+MV_BOOL mvCesaIsReady(void);
+
+int mvCesaMbufOffset(MV_CESA_MBUF* pMbuf, int offset, int* pBufOffset);
+MV_STATUS mvCesaCopyFromMbuf(MV_U8* pDst, MV_CESA_MBUF* pSrcMbuf,
+ int offset, int size);
+MV_STATUS mvCesaCopyToMbuf(MV_U8* pSrc, MV_CESA_MBUF* pDstMbuf,
+ int offset, int size);
+MV_STATUS mvCesaMbufCopy(MV_CESA_MBUF* pMbufDst, int dstMbufOffset,
+ MV_CESA_MBUF* pMbufSrc, int srcMbufOffset, int size);
+
+/********** Debug functions ********/
+
+void mvCesaDebugMbuf(const char* str, MV_CESA_MBUF *pMbuf, int offset, int size);
+void mvCesaDebugSA(short sid, int mode);
+void mvCesaDebugStats(void);
+void mvCesaDebugStatsClear(void);
+void mvCesaDebugRegs(void);
+void mvCesaDebugStatus(void);
+void mvCesaDebugQueue(int mode);
+void mvCesaDebugSram(int mode);
+void mvCesaDebugSAD(int mode);
+
+
+/******** CESA Private definitions ********/
+#if (MV_CESA_VERSION >= 2)
+#if (MV_CACHE_COHERENCY == MV_CACHE_COHER_SW)
+#define MV_CESA_TDMA_CTRL_VALUE MV_CESA_TDMA_DST_BURST_MASK(MV_CESA_TDMA_BURST_128B) \
+ | MV_CESA_TDMA_SRC_BURST_MASK(MV_CESA_TDMA_BURST_128B) \
+ | MV_CESA_TDMA_OUTSTAND_READ_EN_MASK \
+ | MV_CESA_TDMA_NO_BYTE_SWAP_MASK \
+ | MV_CESA_TDMA_ENABLE_MASK
+#else
+#define MV_CESA_TDMA_CTRL_VALUE MV_CESA_TDMA_DST_BURST_MASK(MV_CESA_TDMA_BURST_32B) \
+ | MV_CESA_TDMA_SRC_BURST_MASK(MV_CESA_TDMA_BURST_128B) \
+ /*| MV_CESA_TDMA_OUTSTAND_READ_EN_MASK */\
+ | MV_CESA_TDMA_ENABLE_MASK
+
+#endif
+#else
+#define MV_CESA_IDMA_CTRL_LOW_VALUE ICCLR_DST_BURST_LIM_128BYTE \
+ | ICCLR_SRC_BURST_LIM_128BYTE \
+ | ICCLR_INT_MODE_MASK \
+ | ICCLR_BLOCK_MODE \
+ | ICCLR_CHAN_ENABLE \
+ | ICCLR_DESC_MODE_16M
+#endif /* MV_CESA_VERSION >= 2 */
+
+#define MV_CESA_MAX_PKT_SIZE (64 * 1024)
+#define MV_CESA_MAX_MBUF_FRAGS 20
+
+#define MV_CESA_MAX_REQ_FRAGS ( (MV_CESA_MAX_PKT_SIZE / MV_CESA_MAX_BUF_SIZE) + 1)
+
+#define MV_CESA_MAX_DMA_DESC (MV_CESA_MAX_MBUF_FRAGS*2 + 5)
+
+#define MAX_CESA_CHAIN_LENGTH 20
+
+typedef enum
+{
+ MV_CESA_IDLE = 0,
+ MV_CESA_PENDING,
+ MV_CESA_PROCESS,
+ MV_CESA_READY,
+#if (MV_CESA_VERSION >= 3)
+ MV_CESA_CHAIN,
+#endif
+} MV_CESA_STATE;
+
+
+/* Session database */
+
+/* Map of Key materials of the session in SRAM.
+ * Each field must be 8 byte aligned
+ * Total size: 32 + 24 + 24 = 80 bytes
+ */
+typedef struct
+{
+ MV_U8 cryptoKey[MV_CESA_MAX_CRYPTO_KEY_LENGTH];
+ MV_U8 macInnerIV[MV_CESA_MAX_DIGEST_SIZE];
+ MV_U8 reservedInner[4];
+ MV_U8 macOuterIV[MV_CESA_MAX_DIGEST_SIZE];
+ MV_U8 reservedOuter[4];
+
+} MV_CESA_SRAM_SA;
+
+typedef struct
+{
+ MV_CESA_SRAM_SA* pSramSA;
+ MV_U32 config;
+ MV_U8 cryptoKeyLength;
+ MV_U8 cryptoIvSize;
+ MV_U8 cryptoBlockSize;
+ MV_U8 digestSize;
+ MV_U8 macKeyLength;
+ MV_U8 valid;
+ MV_U8 ctrMode;
+ MV_U32 count;
+
+} MV_CESA_SA;
+
+/* DMA list management */
+typedef struct
+{
+ MV_DMA_DESC* pDmaFirst;
+ MV_DMA_DESC* pDmaLast;
+
+} MV_CESA_DMA;
+
+
+typedef struct
+{
+ MV_U8 numFrag;
+ MV_U8 nextFrag;
+ int bufOffset;
+ int cryptoSize;
+ int macSize;
+ int newDigestOffset;
+ MV_U8 orgDigest[MV_CESA_MAX_DIGEST_SIZE];
+
+} MV_CESA_FRAGS;
+
+/* Request queue */
+typedef struct
+{
+ MV_U8 state;
+ MV_U8 fragMode;
+ MV_U8 fixOffset;
+ MV_CESA_COMMAND* pCmd;
+ MV_CESA_COMMAND* pOrgCmd;
+ MV_BUF_INFO dmaDescBuf;
+ MV_CESA_DMA dma[MV_CESA_MAX_REQ_FRAGS];
+ MV_BUF_INFO cesaDescBuf;
+ MV_CESA_DESC* pCesaDesc;
+ MV_CESA_FRAGS frags;
+
+
+} MV_CESA_REQ;
+
+
+/* SRAM map */
+/* Total SRAM size calculation */
+/* SRAM size =
+ * MV_CESA_MAX_BUF_SIZE +
+ * sizeof(MV_CESA_DESC) +
+ * MV_CESA_MAX_IV_LENGTH +
+ * MV_CESA_MAX_IV_LENGTH +
+ * MV_CESA_MAX_DIGEST_SIZE +
+ * sizeof(MV_CESA_SRAM_SA)
+ * = 1600 + 32 + 16 + 16 + 24 + 80 + 280 (reserved) = 2048 bytes
+ * = 3200 + 32 + 16 + 16 + 24 + 80 + 728 (reserved) = 4096 bytes
+ */
+typedef struct
+{
+ MV_U8 buf[MV_CESA_MAX_BUF_SIZE];
+ MV_CESA_DESC desc;
+ MV_U8 cryptoIV[MV_CESA_MAX_IV_LENGTH];
+ MV_U8 tempCryptoIV[MV_CESA_MAX_IV_LENGTH];
+ MV_U8 tempDigest[MV_CESA_MAX_DIGEST_SIZE+4];
+ MV_CESA_SRAM_SA sramSA;
+
+} MV_CESA_SRAM_MAP;
+
+
+typedef struct
+{
+ MV_U32 openedCount;
+ MV_U32 closedCount;
+ MV_U32 fragCount;
+ MV_U32 reqCount;
+ MV_U32 maxReqCount;
+ MV_U32 procCount;
+ MV_U32 readyCount;
+ MV_U32 notReadyCount;
+ MV_U32 startCount;
+#if (MV_CESA_VERSION >= 3)
+ MV_U32 maxChainUsage;
+#endif
+
+} MV_CESA_STATS;
+
+
+/* External variables */
+
+extern MV_CESA_STATS cesaStats;
+extern MV_CESA_FRAGS cesaFrags;
+
+extern MV_BUF_INFO cesaSramSaBuf;
+
+extern MV_CESA_SA* pCesaSAD;
+extern MV_U16 cesaMaxSA;
+
+extern MV_CESA_REQ* pCesaReqFirst;
+extern MV_CESA_REQ* pCesaReqLast;
+extern MV_CESA_REQ* pCesaReqEmpty;
+extern MV_CESA_REQ* pCesaReqProcess;
+extern int cesaQueueDepth;
+extern int cesaReqResources;
+#if (MV_CESA_VERSION>= 3)
+extern MV_U32 cesaChainLength;
+#endif
+
+extern MV_CESA_SRAM_MAP* cesaSramVirtPtr;
+extern MV_U32 cesaSramPhysAddr;
+
+static INLINE MV_ULONG mvCesaVirtToPhys(MV_BUF_INFO* pBufInfo, void* pVirt)
+{
+ return (pBufInfo->bufPhysAddr + ((MV_U8*)pVirt - pBufInfo->bufVirtPtr));
+}
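
mvCesaVirtToPhys() above converts a virtual pointer inside a driver buffer to its physical address by adding the pointer's offset within the buffer to the buffer's physical base. A self-contained illustration of the same arithmetic, using a hypothetical buffer descriptor rather than the driver's MV_BUF_INFO:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical buffer descriptor: a virtual base and a physical base. */
    struct buf_info {
        uint8_t  *virt_base;
        uintptr_t phys_base;
    };

    /* Same arithmetic as mvCesaVirtToPhys(): phys = phys_base + (virt - virt_base). */
    static uintptr_t virt_to_phys(const struct buf_info *b, const void *virt)
    {
        return b->phys_base + (uintptr_t)((const uint8_t *)virt - b->virt_base);
    }

    int main(void)
    {
        uint8_t sram[256];
        struct buf_info b = { sram, 0xF0000000u };

        printf("0x%lx\n", (unsigned long)virt_to_phys(&b, &sram[0x40])); /* 0xf0000040 */
        return 0;
    }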
+
+/* Additional DEBUG functions */
+void mvCesaDebugSramSA(MV_CESA_SRAM_SA* pSramSA, int mode);
+void mvCesaDebugCmd(MV_CESA_COMMAND* pCmd, int mode);
+void mvCesaDebugDescriptor(MV_CESA_DESC* pDesc);
+
+
+
+#endif /* __mvCesa_h__ */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesaDebug.c b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesaDebug.c
new file mode 100644
index 000000000..31b78a805
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesaDebug.c
@@ -0,0 +1,484 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mvDebug.h"
+
+#include "cesa/mvMD5.h"
+#include "cesa/mvSHA1.h"
+
+#include "cesa/mvCesa.h"
+#include "cesa/mvCesaRegs.h"
+#include "cesa/AES/mvAes.h"
+
+static const char* mvCesaDebugStateStr(MV_CESA_STATE state)
+{
+ switch(state)
+ {
+ case MV_CESA_IDLE:
+ return "Idle";
+
+ case MV_CESA_PENDING:
+ return "Pend";
+
+ case MV_CESA_PROCESS:
+ return "Proc";
+
+ case MV_CESA_READY:
+ return "Ready";
+
+ default:
+ break;
+ }
+ return "Unknown";
+}
+
+static const char* mvCesaDebugOperStr(MV_CESA_OPERATION oper)
+{
+ switch(oper)
+ {
+ case MV_CESA_MAC_ONLY:
+ return "MacOnly";
+
+ case MV_CESA_CRYPTO_ONLY:
+ return "CryptoOnly";
+
+ case MV_CESA_MAC_THEN_CRYPTO:
+ return "MacCrypto";
+
+ case MV_CESA_CRYPTO_THEN_MAC:
+ return "CryptoMac";
+
+ default:
+ break;
+ }
+ return "Null";
+}
+
+static const char* mvCesaDebugCryptoAlgStr(MV_CESA_CRYPTO_ALG cryptoAlg)
+{
+ switch(cryptoAlg)
+ {
+ case MV_CESA_CRYPTO_DES:
+ return "DES";
+
+ case MV_CESA_CRYPTO_3DES:
+ return "3DES";
+
+ case MV_CESA_CRYPTO_AES:
+ return "AES";
+
+ default:
+ break;
+ }
+ return "Null";
+}
+
+static const char* mvCesaDebugMacModeStr(MV_CESA_MAC_MODE macMode)
+{
+ switch(macMode)
+ {
+ case MV_CESA_MAC_MD5:
+ return "MD5";
+
+ case MV_CESA_MAC_SHA1:
+ return "SHA1";
+
+ case MV_CESA_MAC_HMAC_MD5:
+ return "HMAC-MD5";
+
+ case MV_CESA_MAC_HMAC_SHA1:
+            return "HMAC-SHA1";
+
+ default:
+ break;
+ }
+ return "Null";
+}
+
+void mvCesaDebugCmd(MV_CESA_COMMAND* pCmd, int mode)
+{
+ mvOsPrintf("pCmd=%p, pReqPrv=%p, pSrc=%p, pDst=%p, pCB=%p, sid=%d\n",
+ pCmd, pCmd->pReqPrv, pCmd->pSrc, pCmd->pDst,
+ pCmd->pFuncCB, pCmd->sessionId);
+ mvOsPrintf("isUser=%d, ivOffs=%d, crOffs=%d, crLen=%d, digest=%d, macOffs=%d, macLen=%d\n",
+ pCmd->ivFromUser, pCmd->ivOffset, pCmd->cryptoOffset, pCmd->cryptoLength,
+ pCmd->digestOffset, pCmd->macOffset, pCmd->macLength);
+}
+
+/* no need to use in tool */
+void mvCesaDebugMbuf(const char* str, MV_CESA_MBUF *pMbuf, int offset, int size)
+{
+ int frag, len, fragOffset;
+
+ if(str != NULL)
+ mvOsPrintf("%s: pMbuf=%p, numFrags=%d, mbufSize=%d\n",
+ str, pMbuf, pMbuf->numFrags, pMbuf->mbufSize);
+
+ frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
+ if(frag == MV_INVALID)
+ {
+ mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+ return;
+ }
+
+ for(; frag<pMbuf->numFrags; frag++)
+ {
+ mvOsPrintf("#%2d. bufVirt=%p, bufSize=%d\n",
+ frag, pMbuf->pFrags[frag].bufVirtPtr,
+ pMbuf->pFrags[frag].bufSize);
+ if(size > 0)
+ {
+ len = MV_MIN(pMbuf->pFrags[frag].bufSize, size);
+ mvDebugMemDump(pMbuf->pFrags[frag].bufVirtPtr+fragOffset, len, 1);
+ size -= len;
+ fragOffset = 0;
+ }
+ }
+}
+
+void mvCesaDebugRegs(void)
+{
+ mvOsPrintf("\t CESA Registers:\n");
+
+ mvOsPrintf("MV_CESA_CMD_REG : 0x%X = 0x%08x\n",
+ MV_CESA_CMD_REG,
+ MV_REG_READ( MV_CESA_CMD_REG ) );
+
+ mvOsPrintf("MV_CESA_CHAN_DESC_OFFSET_REG : 0x%X = 0x%08x\n",
+ MV_CESA_CHAN_DESC_OFFSET_REG,
+ MV_REG_READ(MV_CESA_CHAN_DESC_OFFSET_REG) );
+
+ mvOsPrintf("MV_CESA_CFG_REG : 0x%X = 0x%08x\n",
+ MV_CESA_CFG_REG,
+ MV_REG_READ( MV_CESA_CFG_REG ) );
+
+ mvOsPrintf("MV_CESA_STATUS_REG : 0x%X = 0x%08x\n",
+ MV_CESA_STATUS_REG,
+ MV_REG_READ( MV_CESA_STATUS_REG ) );
+
+ mvOsPrintf("MV_CESA_ISR_CAUSE_REG : 0x%X = 0x%08x\n",
+ MV_CESA_ISR_CAUSE_REG,
+ MV_REG_READ( MV_CESA_ISR_CAUSE_REG ) );
+
+ mvOsPrintf("MV_CESA_ISR_MASK_REG : 0x%X = 0x%08x\n",
+ MV_CESA_ISR_MASK_REG,
+ MV_REG_READ( MV_CESA_ISR_MASK_REG ) );
+#if (MV_CESA_VERSION >= 2)
+ mvOsPrintf("MV_CESA_TDMA_CTRL_REG : 0x%X = 0x%08x\n",
+ MV_CESA_TDMA_CTRL_REG,
+ MV_REG_READ( MV_CESA_TDMA_CTRL_REG ) );
+
+ mvOsPrintf("MV_CESA_TDMA_BYTE_COUNT_REG : 0x%X = 0x%08x\n",
+ MV_CESA_TDMA_BYTE_COUNT_REG,
+ MV_REG_READ( MV_CESA_TDMA_BYTE_COUNT_REG ) );
+
+ mvOsPrintf("MV_CESA_TDMA_SRC_ADDR_REG : 0x%X = 0x%08x\n",
+ MV_CESA_TDMA_SRC_ADDR_REG,
+ MV_REG_READ( MV_CESA_TDMA_SRC_ADDR_REG ) );
+
+ mvOsPrintf("MV_CESA_TDMA_DST_ADDR_REG : 0x%X = 0x%08x\n",
+ MV_CESA_TDMA_DST_ADDR_REG,
+ MV_REG_READ( MV_CESA_TDMA_DST_ADDR_REG ) );
+
+ mvOsPrintf("MV_CESA_TDMA_NEXT_DESC_PTR_REG : 0x%X = 0x%08x\n",
+ MV_CESA_TDMA_NEXT_DESC_PTR_REG,
+ MV_REG_READ( MV_CESA_TDMA_NEXT_DESC_PTR_REG ) );
+
+ mvOsPrintf("MV_CESA_TDMA_CURR_DESC_PTR_REG : 0x%X = 0x%08x\n",
+ MV_CESA_TDMA_CURR_DESC_PTR_REG,
+ MV_REG_READ( MV_CESA_TDMA_CURR_DESC_PTR_REG ) );
+
+ mvOsPrintf("MV_CESA_TDMA_ERROR_CAUSE_REG : 0x%X = 0x%08x\n",
+ MV_CESA_TDMA_ERROR_CAUSE_REG,
+ MV_REG_READ( MV_CESA_TDMA_ERROR_CAUSE_REG ) );
+
+ mvOsPrintf("MV_CESA_TDMA_ERROR_MASK_REG : 0x%X = 0x%08x\n",
+ MV_CESA_TDMA_ERROR_MASK_REG,
+                MV_REG_READ( MV_CESA_TDMA_ERROR_MASK_REG ) );
+
+#endif
+}
+
+void mvCesaDebugStatus(void)
+{
+ mvOsPrintf("\n\t CESA Status\n\n");
+
+ mvOsPrintf("pReqQ=%p, qDepth=%d, reqSize=%ld bytes, qRes=%d, ",
+ pCesaReqFirst, cesaQueueDepth, sizeof(MV_CESA_REQ),
+ cesaReqResources);
+#if (MV_CESA_VERSION >= 3)
+ mvOsPrintf("chainLength=%u\n",cesaChainLength);
+#else
+ mvOsPrintf("\n");
+#endif
+
+ mvOsPrintf("pSAD=%p, maxSA=%d, sizeSA=%ld bytes\n",
+ pCesaSAD, cesaMaxSA, sizeof(MV_CESA_SA));
+
+ mvOsPrintf("\n");
+
+ mvCesaDebugRegs();
+ mvCesaDebugStats();
+ mvCesaDebugStatsClear();
+}
+
+void mvCesaDebugDescriptor(MV_CESA_DESC* pDesc)
+{
+ mvOsPrintf("config=0x%08x, crSrcOffs=0x%04x, crDstOffs=0x%04x\n",
+ pDesc->config, pDesc->cryptoSrcOffset, pDesc->cryptoDstOffset);
+
+ mvOsPrintf("crLen=0x%04x, crKeyOffs=0x%04x, ivOffs=0x%04x, ivBufOffs=0x%04x\n",
+ pDesc->cryptoDataLen, pDesc->cryptoKeyOffset,
+ pDesc->cryptoIvOffset, pDesc->cryptoIvBufOffset);
+
+ mvOsPrintf("macSrc=0x%04x, digest=0x%04x, macLen=0x%04x, inIv=0x%04x, outIv=0x%04x\n",
+ pDesc->macSrcOffset, pDesc->macDigestOffset, pDesc->macDataLen,
+ pDesc->macInnerIvOffset, pDesc->macOuterIvOffset);
+}
+
+void mvCesaDebugQueue(int mode)
+{
+ mvOsPrintf("\n\t CESA Request Queue:\n\n");
+
+ mvOsPrintf("pFirstReq=%p, pLastReq=%p, qDepth=%d, reqSize=%ld bytes\n",
+ pCesaReqFirst, pCesaReqLast, cesaQueueDepth, sizeof(MV_CESA_REQ));
+
+ mvOsPrintf("pEmpty=%p, pProcess=%p, qResources=%d\n",
+ pCesaReqEmpty, pCesaReqProcess,
+ cesaReqResources);
+
+ if(mode != 0)
+ {
+ int count = 0;
+ MV_CESA_REQ* pReq = pCesaReqFirst;
+
+ for(count=0; count<cesaQueueDepth; count++)
+ {
+            /* Print out requests */
+ mvOsPrintf("%02d. pReq=%p, state=%s, frag=0x%x, pCmd=%p, pDma=%p, pDesc=%p\n",
+ count, pReq, mvCesaDebugStateStr(pReq->state),
+ pReq->fragMode, pReq->pCmd, pReq->dma[0].pDmaFirst, &pReq->pCesaDesc[0]);
+ if(pReq->fragMode != MV_CESA_FRAG_NONE)
+ {
+ int frag;
+
+ mvOsPrintf("pFrags=%p, num=%d, next=%d, bufOffset=%d, cryptoSize=%d, macSize=%d\n",
+ &pReq->frags, pReq->frags.numFrag, pReq->frags.nextFrag,
+ pReq->frags.bufOffset, pReq->frags.cryptoSize, pReq->frags.macSize);
+ for(frag=0; frag<pReq->frags.numFrag; frag++)
+ {
+ mvOsPrintf("#%d: pDmaFirst=%p, pDesc=%p\n", frag,
+ pReq->dma[frag].pDmaFirst, &pReq->pCesaDesc[frag]);
+ }
+ }
+ if(mode > 1)
+ {
+ /* Print out Command */
+ mvCesaDebugCmd(pReq->pCmd, mode);
+
+ /* Print out Descriptor */
+ mvCesaDebugDescriptor(&pReq->pCesaDesc[0]);
+ }
+ pReq++;
+ }
+ }
+}
+
+
+void mvCesaDebugSramSA(MV_CESA_SRAM_SA* pSramSA, int mode)
+{
+ if(pSramSA == NULL)
+ {
+ mvOsPrintf("cesaSramSA: Unexpected pSramSA=%p\n", pSramSA);
+ return;
+ }
+ mvOsPrintf("pSramSA=%p, sizeSramSA=%ld bytes\n",
+ pSramSA, sizeof(MV_CESA_SRAM_SA));
+
+ if(mode != 0)
+ {
+ mvOsPrintf("cryptoKey=%p, maxCryptoKey=%d bytes\n",
+ pSramSA->cryptoKey, MV_CESA_MAX_CRYPTO_KEY_LENGTH);
+ mvDebugMemDump(pSramSA->cryptoKey, MV_CESA_MAX_CRYPTO_KEY_LENGTH, 1);
+
+ mvOsPrintf("macInnerIV=%p, maxInnerIV=%d bytes\n",
+ pSramSA->macInnerIV, MV_CESA_MAX_DIGEST_SIZE);
+ mvDebugMemDump(pSramSA->macInnerIV, MV_CESA_MAX_DIGEST_SIZE, 1);
+
+ mvOsPrintf("macOuterIV=%p, maxOuterIV=%d bytes\n",
+ pSramSA->macOuterIV, MV_CESA_MAX_DIGEST_SIZE);
+ mvDebugMemDump(pSramSA->macOuterIV, MV_CESA_MAX_DIGEST_SIZE, 1);
+ }
+}
+
+void mvCesaDebugSA(short sid, int mode)
+{
+ MV_CESA_OPERATION oper;
+ MV_CESA_DIRECTION dir;
+ MV_CESA_CRYPTO_ALG cryptoAlg;
+ MV_CESA_CRYPTO_MODE cryptoMode;
+ MV_CESA_MAC_MODE macMode;
+ MV_CESA_SA* pSA = &pCesaSAD[sid];
+
+ if( (pSA->valid) || ((pSA->count != 0) && (mode > 0)) || (mode >= 2) )
+ {
+ mvOsPrintf("\n\nCESA SA Entry #%d (%p) - %s (count=%d)\n",
+ sid, pSA,
+ pSA->valid ? "Valid" : "Invalid", pSA->count);
+
+ oper = (pSA->config & MV_CESA_OPERATION_MASK) >> MV_CESA_OPERATION_OFFSET;
+ dir = (pSA->config & MV_CESA_DIRECTION_MASK) >> MV_CESA_DIRECTION_BIT;
+ mvOsPrintf("%s - %s ", mvCesaDebugOperStr(oper),
+ (dir == MV_CESA_DIR_ENCODE) ? "Encode" : "Decode");
+ if(oper != MV_CESA_MAC_ONLY)
+ {
+ cryptoAlg = (pSA->config & MV_CESA_CRYPTO_ALG_MASK) >> MV_CESA_CRYPTO_ALG_OFFSET;
+ cryptoMode = (pSA->config & MV_CESA_CRYPTO_MODE_MASK) >> MV_CESA_CRYPTO_MODE_BIT;
+ mvOsPrintf("- %s - %s ", mvCesaDebugCryptoAlgStr(cryptoAlg),
+ (cryptoMode == MV_CESA_CRYPTO_ECB) ? "ECB" : "CBC");
+ }
+ if(oper != MV_CESA_CRYPTO_ONLY)
+ {
+ macMode = (pSA->config & MV_CESA_MAC_MODE_MASK) >> MV_CESA_MAC_MODE_OFFSET;
+ mvOsPrintf("- %s ", mvCesaDebugMacModeStr(macMode));
+ }
+ mvOsPrintf("\n");
+
+ if(mode > 0)
+ {
+ mvOsPrintf("config=0x%08x, cryptoKeySize=%d, digestSize=%d\n",
+ pCesaSAD[sid].config, pCesaSAD[sid].cryptoKeyLength,
+ pCesaSAD[sid].digestSize);
+
+ mvCesaDebugSramSA(pCesaSAD[sid].pSramSA, mode);
+ }
+ }
+}
+
+
+/**/
+void mvCesaDebugSram(int mode)
+{
+ mvOsPrintf("\n\t SRAM contents: size=%ld, pVirt=%p\n\n",
+ sizeof(MV_CESA_SRAM_MAP), cesaSramVirtPtr);
+
+ mvOsPrintf("\n\t Sram buffer: size=%d, pVirt=%p\n",
+ MV_CESA_MAX_BUF_SIZE, cesaSramVirtPtr->buf);
+ if(mode != 0)
+ mvDebugMemDump(cesaSramVirtPtr->buf, 64, 1);
+
+ mvOsPrintf("\n");
+ mvOsPrintf("\n\t Sram descriptor: size=%ld, pVirt=%p\n",
+ sizeof(MV_CESA_DESC), &cesaSramVirtPtr->desc);
+ if(mode != 0)
+ {
+ mvOsPrintf("\n");
+ mvCesaDebugDescriptor(&cesaSramVirtPtr->desc);
+ }
+ mvOsPrintf("\n\t Sram IV: size=%d, pVirt=%p\n",
+ MV_CESA_MAX_IV_LENGTH, &cesaSramVirtPtr->cryptoIV);
+ if(mode != 0)
+ {
+ mvOsPrintf("\n");
+ mvDebugMemDump(cesaSramVirtPtr->cryptoIV, MV_CESA_MAX_IV_LENGTH, 1);
+ }
+ mvOsPrintf("\n");
+ mvCesaDebugSramSA(&cesaSramVirtPtr->sramSA, 0);
+}
+
+void mvCesaDebugSAD(int mode)
+{
+ int sid;
+
+ mvOsPrintf("\n\t Cesa SAD status: pSAD=%p, maxSA=%d\n",
+ pCesaSAD, cesaMaxSA);
+
+ for(sid=0; sid<cesaMaxSA; sid++)
+ {
+ mvCesaDebugSA(sid, mode);
+ }
+}
+
+void mvCesaDebugStats(void)
+{
+ mvOsPrintf("\n\t Cesa Statistics\n");
+
+ mvOsPrintf("Opened=%u, Closed=%u\n",
+ cesaStats.openedCount, cesaStats.closedCount);
+ mvOsPrintf("Req=%u, maxReq=%u, frags=%u, start=%u\n",
+ cesaStats.reqCount, cesaStats.maxReqCount,
+ cesaStats.fragCount, cesaStats.startCount);
+#if (MV_CESA_VERSION >= 3)
+ mvOsPrintf("maxChainUsage=%u\n",cesaStats.maxChainUsage);
+#endif
+ mvOsPrintf("\n");
+ mvOsPrintf("proc=%u, ready=%u, notReady=%u\n",
+ cesaStats.procCount, cesaStats.readyCount, cesaStats.notReadyCount);
+}
+
+void mvCesaDebugStatsClear(void)
+{
+ memset(&cesaStats, 0, sizeof(cesaStats));
+}
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesaRegs.h b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesaRegs.h
new file mode 100644
index 000000000..6b7ce1239
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesaRegs.h
@@ -0,0 +1,357 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvCesaRegs_h__
+#define __mvCesaRegs_h__
+
+#include "mvTypes.h"
+
+typedef struct
+{
+ /* word 0 */
+ MV_U32 config;
+ /* word 1 */
+ MV_U16 cryptoSrcOffset;
+ MV_U16 cryptoDstOffset;
+ /* word 2 */
+ MV_U16 cryptoDataLen;
+ MV_U16 reserved1;
+ /* word 3 */
+ MV_U16 cryptoKeyOffset;
+ MV_U16 reserved2;
+ /* word 4 */
+ MV_U16 cryptoIvOffset;
+ MV_U16 cryptoIvBufOffset;
+ /* word 5 */
+ MV_U16 macSrcOffset;
+ MV_U16 macTotalLen;
+ /* word 6 */
+ MV_U16 macDigestOffset;
+ MV_U16 macDataLen;
+ /* word 7 */
+ MV_U16 macInnerIvOffset;
+ MV_U16 macOuterIvOffset;
+
+} MV_CESA_DESC;
+
+/* operation */
+typedef enum
+{
+ MV_CESA_MAC_ONLY = 0,
+ MV_CESA_CRYPTO_ONLY = 1,
+ MV_CESA_MAC_THEN_CRYPTO = 2,
+ MV_CESA_CRYPTO_THEN_MAC = 3,
+
+ MV_CESA_MAX_OPERATION
+
+} MV_CESA_OPERATION;
+
+#define MV_CESA_OPERATION_OFFSET 0
+#define MV_CESA_OPERATION_MASK (0x3 << MV_CESA_OPERATION_OFFSET)
+
+/* mac algorithm */
+typedef enum
+{
+ MV_CESA_MAC_NULL = 0,
+ MV_CESA_MAC_MD5 = 4,
+ MV_CESA_MAC_SHA1 = 5,
+ MV_CESA_MAC_HMAC_MD5 = 6,
+ MV_CESA_MAC_HMAC_SHA1 = 7,
+
+} MV_CESA_MAC_MODE;
+
+#define MV_CESA_MAC_MODE_OFFSET 4
+#define MV_CESA_MAC_MODE_MASK (0x7 << MV_CESA_MAC_MODE_OFFSET)
+
+typedef enum
+{
+ MV_CESA_MAC_DIGEST_FULL = 0,
+ MV_CESA_MAC_DIGEST_96B = 1,
+
+} MV_CESA_MAC_DIGEST_SIZE;
+
+#define MV_CESA_MAC_DIGEST_SIZE_BIT 7
+#define MV_CESA_MAC_DIGEST_SIZE_MASK (1 << MV_CESA_MAC_DIGEST_SIZE_BIT)
+
+
+typedef enum
+{
+ MV_CESA_CRYPTO_NULL = 0,
+ MV_CESA_CRYPTO_DES = 1,
+ MV_CESA_CRYPTO_3DES = 2,
+ MV_CESA_CRYPTO_AES = 3,
+
+} MV_CESA_CRYPTO_ALG;
+
+#define MV_CESA_CRYPTO_ALG_OFFSET 8
+#define MV_CESA_CRYPTO_ALG_MASK (0x3 << MV_CESA_CRYPTO_ALG_OFFSET)
+
+
+/* direction */
+typedef enum
+{
+ MV_CESA_DIR_ENCODE = 0,
+ MV_CESA_DIR_DECODE = 1,
+
+} MV_CESA_DIRECTION;
+
+#define MV_CESA_DIRECTION_BIT 12
+#define MV_CESA_DIRECTION_MASK (1 << MV_CESA_DIRECTION_BIT)
+
+/* crypto IV mode */
+typedef enum
+{
+ MV_CESA_CRYPTO_ECB = 0,
+ MV_CESA_CRYPTO_CBC = 1,
+
+ /* NO HW Support */
+ MV_CESA_CRYPTO_CTR = 10,
+
+} MV_CESA_CRYPTO_MODE;
+
+#define MV_CESA_CRYPTO_MODE_BIT 16
+#define MV_CESA_CRYPTO_MODE_MASK (1 << MV_CESA_CRYPTO_MODE_BIT)
+
+/* 3DES mode */
+typedef enum
+{
+ MV_CESA_CRYPTO_3DES_EEE = 0,
+ MV_CESA_CRYPTO_3DES_EDE = 1,
+
+} MV_CESA_CRYPTO_3DES_MODE;
+
+#define MV_CESA_CRYPTO_3DES_MODE_BIT 20
+#define MV_CESA_CRYPTO_3DES_MODE_MASK (1 << MV_CESA_CRYPTO_3DES_MODE_BIT)
+
+
+/* AES Key Length */
+typedef enum
+{
+ MV_CESA_CRYPTO_AES_KEY_128 = 0,
+ MV_CESA_CRYPTO_AES_KEY_192 = 1,
+ MV_CESA_CRYPTO_AES_KEY_256 = 2,
+
+} MV_CESA_CRYPTO_AES_KEY_LEN;
+
+#define MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET 24
+#define MV_CESA_CRYPTO_AES_KEY_LEN_MASK (0x3 << MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET)
+
+/* Fragmentation mode */
+typedef enum
+{
+ MV_CESA_FRAG_NONE = 0,
+ MV_CESA_FRAG_FIRST = 1,
+ MV_CESA_FRAG_LAST = 2,
+ MV_CESA_FRAG_MIDDLE = 3,
+
+} MV_CESA_FRAG_MODE;
+
+#define MV_CESA_FRAG_MODE_OFFSET 30
+#define MV_CESA_FRAG_MODE_MASK (0x3 << MV_CESA_FRAG_MODE_OFFSET)
+/*---------------------------------------------------------------------------*/
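
The field offsets and masks above all land in the single 32-bit config word used by MV_CESA_DESC and MV_CESA_SA. How mvCesaSessionOpen() actually packs that word lives in mvCesa.c, which is not part of this hunk; the fragment below (assuming cesa/mvCesaRegs.h is on the include path) only sketches how the fields compose and decompose with these masks:

    /* Sketch only: build a config word for "crypto-then-MAC, AES-CBC encode,
     * HMAC-SHA1" from the field offsets above, then pull one field back out. */
    #include "cesa/mvCesaRegs.h"

    static MV_U32 example_config(void)
    {
        return (MV_CESA_CRYPTO_THEN_MAC << MV_CESA_OPERATION_OFFSET)
             | (MV_CESA_MAC_HMAC_SHA1   << MV_CESA_MAC_MODE_OFFSET)
             | (MV_CESA_CRYPTO_AES      << MV_CESA_CRYPTO_ALG_OFFSET)
             | (MV_CESA_DIR_ENCODE      << MV_CESA_DIRECTION_BIT)
             | (MV_CESA_CRYPTO_CBC      << MV_CESA_CRYPTO_MODE_BIT);
    }

    static MV_CESA_OPERATION example_decode(MV_U32 config)
    {
        return (config & MV_CESA_OPERATION_MASK) >> MV_CESA_OPERATION_OFFSET;
    }

mvCesaDebugSA() in mvCesaDebug.c above extracts fields in exactly this way, e.g. (config & MV_CESA_OPERATION_MASK) >> MV_CESA_OPERATION_OFFSET.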
+
+/********** Security Accelerator Command Register **************/
+#define MV_CESA_CMD_REG (MV_CESA_REG_BASE + 0xE00)
+
+#define MV_CESA_CMD_CHAN_ENABLE_BIT 0
+#define MV_CESA_CMD_CHAN_ENABLE_MASK (1 << MV_CESA_CMD_CHAN_ENABLE_BIT)
+
+#define MV_CESA_CMD_CHAN_DISABLE_BIT 2
+#define MV_CESA_CMD_CHAN_DISABLE_MASK (1 << MV_CESA_CMD_CHAN_DISABLE_BIT)
+
+/********** Security Accelerator Descriptor Pointers Register **********/
+#define MV_CESA_CHAN_DESC_OFFSET_REG (MV_CESA_REG_BASE + 0xE04)
+
+/********** Security Accelerator Configuration Register **********/
+#define MV_CESA_CFG_REG (MV_CESA_REG_BASE + 0xE08)
+
+#define MV_CESA_CFG_STOP_DIGEST_ERR_BIT 0
+#define MV_CESA_CFG_STOP_DIGEST_ERR_MASK (1 << MV_CESA_CFG_STOP_DIGEST_ERR_BIT)
+
+#define MV_CESA_CFG_WAIT_DMA_BIT 7
+#define MV_CESA_CFG_WAIT_DMA_MASK (1 << MV_CESA_CFG_WAIT_DMA_BIT)
+
+#define MV_CESA_CFG_ACT_DMA_BIT 9
+#define MV_CESA_CFG_ACT_DMA_MASK (1 << MV_CESA_CFG_ACT_DMA_BIT)
+
+#define MV_CESA_CFG_CHAIN_MODE_BIT 11
+#define MV_CESA_CFG_CHAIN_MODE_MASK (1 << MV_CESA_CFG_CHAIN_MODE_BIT)
+
+/********** Security Accelerator Status Register ***********/
+#define MV_CESA_STATUS_REG (MV_CESA_REG_BASE + 0xE0C)
+
+#define MV_CESA_STATUS_ACTIVE_BIT 0
+#define MV_CESA_STATUS_ACTIVE_MASK (1 << MV_CESA_STATUS_ACTIVE_BIT)
+
+#define MV_CESA_STATUS_DIGEST_ERR_BIT 8
+#define MV_CESA_STATUS_DIGEST_ERR_MASK (1 << MV_CESA_STATUS_DIGEST_ERR_BIT)
+
+
+/* Cryptographic Engines and Security Accelerator Interrupt Cause Register */
+#define MV_CESA_ISR_CAUSE_REG (MV_CESA_REG_BASE + 0xE20)
+
+/* Cryptographic Engines and Security Accelerator Interrupt Mask Register */
+#define MV_CESA_ISR_MASK_REG (MV_CESA_REG_BASE + 0xE24)
+
+#define MV_CESA_CAUSE_AUTH_MASK (1 << 0)
+#define MV_CESA_CAUSE_DES_MASK (1 << 1)
+#define MV_CESA_CAUSE_AES_ENCR_MASK (1 << 2)
+#define MV_CESA_CAUSE_AES_DECR_MASK (1 << 3)
+#define MV_CESA_CAUSE_DES_ALL_MASK (1 << 4)
+
+#define MV_CESA_CAUSE_ACC_BIT 5
+#define MV_CESA_CAUSE_ACC_MASK (1 << MV_CESA_CAUSE_ACC_BIT)
+
+#define MV_CESA_CAUSE_ACC_DMA_BIT 7
+#define MV_CESA_CAUSE_ACC_DMA_MASK (1 << MV_CESA_CAUSE_ACC_DMA_BIT)
+#define MV_CESA_CAUSE_ACC_DMA_ALL_MASK (3 << MV_CESA_CAUSE_ACC_DMA_BIT)
+
+#define MV_CESA_CAUSE_DMA_COMPL_BIT 9
+#define MV_CESA_CAUSE_DMA_COMPL_MASK (1 << MV_CESA_CAUSE_DMA_COMPL_BIT)
+
+#define MV_CESA_CAUSE_DMA_OWN_ERR_BIT 10
+#define MV_CESA_CAUSE_DMA_OWN_ERR_MASK      (1 << MV_CESA_CAUSE_DMA_OWN_ERR_BIT)
+
+#define MV_CESA_CAUSE_DMA_CHAIN_PKT_BIT 11
+#define MV_CESA_CAUSE_DMA_CHAIN_PKT_MASK    (1 << MV_CESA_CAUSE_DMA_CHAIN_PKT_BIT)
+
+
+#define MV_CESA_AUTH_DATA_IN_REG (MV_CESA_REG_BASE + 0xd38)
+#define MV_CESA_AUTH_BIT_COUNT_LOW_REG (MV_CESA_REG_BASE + 0xd20)
+#define MV_CESA_AUTH_BIT_COUNT_HIGH_REG (MV_CESA_REG_BASE + 0xd24)
+
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_REG(i) (MV_CESA_REG_BASE + 0xd00 + (i<<2))
+
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_A_REG (MV_CESA_REG_BASE + 0xd00)
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_B_REG (MV_CESA_REG_BASE + 0xd04)
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_C_REG (MV_CESA_REG_BASE + 0xd08)
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_D_REG (MV_CESA_REG_BASE + 0xd0c)
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_E_REG (MV_CESA_REG_BASE + 0xd10)
+#define MV_CESA_AUTH_COMMAND_REG (MV_CESA_REG_BASE + 0xd18)
+
+#define MV_CESA_AUTH_ALGORITHM_BIT 0
+#define MV_CESA_AUTH_ALGORITHM_MD5          (0<<MV_CESA_AUTH_ALGORITHM_BIT)
+#define MV_CESA_AUTH_ALGORITHM_SHA1         (1<<MV_CESA_AUTH_ALGORITHM_BIT)
+
+#define MV_CESA_AUTH_IV_MODE_BIT            1
+#define MV_CESA_AUTH_IV_MODE_INIT           (0<<MV_CESA_AUTH_IV_MODE_BIT)
+#define MV_CESA_AUTH_IV_MODE_CONTINUE       (1<<MV_CESA_AUTH_IV_MODE_BIT)
+
+#define MV_CESA_AUTH_DATA_BYTE_SWAP_BIT     2
+#define MV_CESA_AUTH_DATA_BYTE_SWAP_MASK    (1<<MV_CESA_AUTH_DATA_BYTE_SWAP_BIT)
+
+
+#define MV_CESA_AUTH_IV_BYTE_SWAP_BIT       4
+#define MV_CESA_AUTH_IV_BYTE_SWAP_MASK      (1<<MV_CESA_AUTH_IV_BYTE_SWAP_BIT)
+
+#define MV_CESA_AUTH_TERMINATION_BIT        31
+#define MV_CESA_AUTH_TERMINATION_MASK       (1<<MV_CESA_AUTH_TERMINATION_BIT)
+
+
+/*************** TDMA Control Register ************************************************/
+#define MV_CESA_TDMA_CTRL_REG (MV_CESA_TDMA_REG_BASE + 0x840)
+
+#define MV_CESA_TDMA_BURST_32B 3
+#define MV_CESA_TDMA_BURST_128B 4
+
+#define MV_CESA_TDMA_DST_BURST_OFFSET 0
+#define MV_CESA_TDMA_DST_BURST_ALL_MASK (0x7<<MV_CESA_TDMA_DST_BURST_OFFSET)
+#define MV_CESA_TDMA_DST_BURST_MASK(burst) ((burst)<<MV_CESA_TDMA_DST_BURST_OFFSET)
+
+#define MV_CESA_TDMA_OUTSTAND_READ_EN_BIT 4
+#define MV_CESA_TDMA_OUTSTAND_READ_EN_MASK (1<<MV_CESA_TDMA_OUTSTAND_READ_EN_BIT)
+
+#define MV_CESA_TDMA_SRC_BURST_OFFSET 6
+#define MV_CESA_TDMA_SRC_BURST_ALL_MASK (0x7<<MV_CESA_TDMA_SRC_BURST_OFFSET)
+#define MV_CESA_TDMA_SRC_BURST_MASK(burst) ((burst)<<MV_CESA_TDMA_SRC_BURST_OFFSET)
+
+#define MV_CESA_TDMA_CHAIN_MODE_BIT 9
+#define MV_CESA_TDMA_NON_CHAIN_MODE_MASK (1<<MV_CESA_TDMA_CHAIN_MODE_BIT)
+
+#define MV_CESA_TDMA_BYTE_SWAP_BIT 11
+#define MV_CESA_TDMA_BYTE_SWAP_MASK (0 << MV_CESA_TDMA_BYTE_SWAP_BIT)
+#define MV_CESA_TDMA_NO_BYTE_SWAP_MASK (1 << MV_CESA_TDMA_BYTE_SWAP_BIT)
+
+#define MV_CESA_TDMA_ENABLE_BIT 12
+#define MV_CESA_TDMA_ENABLE_MASK (1<<MV_CESA_TDMA_ENABLE_BIT)
+
+#define MV_CESA_TDMA_FETCH_NEXT_DESC_BIT 13
+#define MV_CESA_TDMA_FETCH_NEXT_DESC_MASK (1<<MV_CESA_TDMA_FETCH_NEXT_DESC_BIT)
+
+#define MV_CESA_TDMA_CHAN_ACTIVE_BIT 14
+#define MV_CESA_TDMA_CHAN_ACTIVE_MASK (1<<MV_CESA_TDMA_CHAN_ACTIVE_BIT)
+/*------------------------------------------------------------------------------------*/
+
+#define MV_CESA_TDMA_BYTE_COUNT_REG (MV_CESA_TDMA_REG_BASE + 0x800)
+#define MV_CESA_TDMA_SRC_ADDR_REG (MV_CESA_TDMA_REG_BASE + 0x810)
+#define MV_CESA_TDMA_DST_ADDR_REG (MV_CESA_TDMA_REG_BASE + 0x820)
+#define MV_CESA_TDMA_NEXT_DESC_PTR_REG (MV_CESA_TDMA_REG_BASE + 0x830)
+#define MV_CESA_TDMA_CURR_DESC_PTR_REG (MV_CESA_TDMA_REG_BASE + 0x870)
+
+#define MV_CESA_TDMA_ERROR_CAUSE_REG (MV_CESA_TDMA_REG_BASE + 0x8C0)
+#define MV_CESA_TDMA_ERROR_MASK_REG (MV_CESA_TDMA_REG_BASE + 0x8C4)
+
+
+#endif /* __mvCesaRegs_h__ */
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesaTest.c b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesaTest.c
new file mode 100644
index 000000000..74632938a
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCesaTest.c
@@ -0,0 +1,3096 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+
+#if defined(MV_VXWORKS)
+
+#include "sysLib.h"
+#include "logLib.h"
+#include "tickLib.h"
+#include "intLib.h"
+#include "config.h"
+
+
+SEM_ID cesaSemId = NULL;
+SEM_ID cesaWaitSemId = NULL;
+
+#define CESA_TEST_LOCK(flags) flags = intLock()
+#define CESA_TEST_UNLOCK(flags) intUnlock(flags)
+
+#define CESA_TEST_WAIT_INIT() cesaWaitSemId = semBCreate(SEM_Q_PRIORITY, SEM_EMPTY)
+#define CESA_TEST_WAKE_UP() semGive(cesaWaitSemId)
+#define CESA_TEST_WAIT(cond, ms) semTake(cesaWaitSemId, (sysClkRateGet()*ms)/1000)
+
+#define CESA_TEST_TICK_GET() tickGet()
+#define CESA_TEST_TICK_TO_MS(tick) (((tick)*1000)/sysClkRateGet())
+
+#elif defined(MV_LINUX)
+
+#include <linux/wait.h>
+wait_queue_head_t cesaTest_waitq;
+spinlock_t cesaLock;
+
+#define CESA_TEST_LOCK(flags) spin_lock_irqsave( &cesaLock, flags)
+#define CESA_TEST_UNLOCK(flags) spin_unlock_irqrestore( &cesaLock, flags);
+
+#define CESA_TEST_WAIT_INIT() init_waitqueue_head(&cesaTest_waitq)
+#define CESA_TEST_WAKE_UP() wake_up(&cesaTest_waitq)
+#define CESA_TEST_WAIT(cond, ms) wait_event_timeout(cesaTest_waitq, (cond), msecs_to_jiffies(ms))
+
+#define CESA_TEST_TICK_GET() jiffies
+#define CESA_TEST_TICK_TO_MS(tick) jiffies_to_msecs(tick)
+
+#elif defined(MV_NETBSD)
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+static int cesaLock;
+
+#define CESA_TEST_LOCK(flags) flags = splnet()
+#define CESA_TEST_UNLOCK(flags) splx(flags)
+
+#define CESA_TEST_WAIT_INIT() /* nothing */
+#define CESA_TEST_WAKE_UP() wakeup(&cesaLock)
+#define CESA_TEST_WAIT(cond, ms) \
+do { \
+ while (!(cond)) \
+ tsleep(&cesaLock, PWAIT, "cesatest",mstohz(ms)); \
+} while (/*CONSTCOND*/0)
+
+#define CESA_TEST_TICK_GET() hardclock_ticks
+#define CESA_TEST_TICK_TO_MS(tick) ((1000/hz)*(tick))
+
+#define request_irq(i,h,t,n,a) \
+ !mv_intr_establish((i),IPL_NET,(int(*)(void *))(h),(a))
+
+#else
+#error "Only Linux, VxWorks, or NetBSD OS are supported"
+#endif
+
+#include "mvDebug.h"
+
+#include "mvSysHwConfig.h"
+#include "boardEnv/mvBoardEnvLib.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "cntmr/mvCntmr.h"
+#include "cesa/mvCesa.h"
+#include "cesa/mvCesaRegs.h"
+#include "cesa/mvMD5.h"
+#include "cesa/mvSHA1.h"
+
+#if defined(CONFIG_MV646xx)
+#include "marvell_pic.h"
+#endif
+
+#define MV_CESA_USE_TIMER_ID 0
+#define CESA_DEF_BUF_SIZE 1500
+#define CESA_DEF_BUF_NUM 1
+#define CESA_DEF_SESSION_NUM 32
+
+#define CESA_DEF_ITER_NUM 100
+
+#define CESA_DEF_REQ_SIZE 256
+
+
+/* CESA Tests Debug */
+#undef CESA_TEST_DEBUG
+
+#ifdef CESA_TEST_DEBUG
+
+# define CESA_TEST_DEBUG_PRINT(msg) mvOsPrintf msg
+# define CESA_TEST_DEBUG_CODE(code) code
+
+typedef struct
+{
+ int type; /* 0 - isrEmpty, 1 - cesaReadyGet, 2 - cesaAction */
+ MV_U32 timeStamp;
+ MV_U32 cause;
+ MV_U32 realCause;
+ MV_U32 dmaCause;
+ int resources;
+ MV_CESA_REQ* pReqReady;
+ MV_CESA_REQ* pReqEmpty;
+ MV_CESA_REQ* pReqProcess;
+} MV_CESA_TEST_TRACE;
+
+#define MV_CESA_TEST_TRACE_SIZE 25
+
+static int cesaTestTraceIdx = 0;
+static MV_CESA_TEST_TRACE cesaTestTrace[MV_CESA_TEST_TRACE_SIZE];
+
+static void cesaTestTraceAdd(int type, MV_U32 cause)
+{
+ cesaTestTrace[cesaTestTraceIdx].type = type;
+ cesaTestTrace[cesaTestTraceIdx].cause = cause;
+ cesaTestTrace[cesaTestTraceIdx].realCause = MV_REG_READ(MV_CESA_ISR_CAUSE_REG);
+ cesaTestTrace[cesaTestTraceIdx].dmaCause = MV_REG_READ(IDMA_CAUSE_REG);
+ cesaTestTrace[cesaTestTraceIdx].resources = cesaReqResources;
+ cesaTestTrace[cesaTestTraceIdx].pReqReady = pCesaReqReady;
+ cesaTestTrace[cesaTestTraceIdx].pReqEmpty = pCesaReqEmpty;
+ cesaTestTrace[cesaTestTraceIdx].pReqProcess = pCesaReqProcess;
+ cesaTestTrace[cesaTestTraceIdx].timeStamp = mvCntmrRead(MV_CESA_USE_TIMER_ID);
+ cesaTestTraceIdx++;
+ if(cesaTestTraceIdx == MV_CESA_TEST_TRACE_SIZE)
+ cesaTestTraceIdx = 0;
+}
+
+#else
+
+# define CESA_TEST_DEBUG_PRINT(msg)
+# define CESA_TEST_DEBUG_CODE(code)
+
+#endif /* CESA_TEST_DEBUG */
+
+int cesaExpReqId=0;
+int cesaCbIter=0;
+
+int cesaIdx;
+int cesaIteration;
+int cesaRateSize;
+int cesaReqSize;
+unsigned long cesaTaskId;
+int cesaBufNum;
+int cesaBufSize;
+int cesaCheckOffset;
+int cesaCheckSize;
+int cesaCheckMode;
+int cesaTestIdx;
+int cesaCaseIdx;
+
+
+MV_U32 cesaTestIsrCount = 0;
+MV_U32 cesaTestIsrMissCount = 0;
+
+MV_U32 cesaCryptoError = 0;
+MV_U32 cesaReqIdError = 0;
+MV_U32 cesaError = 0;
+
+char* cesaHexBuffer = NULL;
+
+char* cesaBinBuffer = NULL;
+char* cesaExpBinBuffer = NULL;
+
+char* cesaInputHexStr = NULL;
+char* cesaOutputHexStr = NULL;
+
+MV_BUF_INFO cesaReqBufs[CESA_DEF_REQ_SIZE];
+
+MV_CESA_COMMAND* cesaCmdRing;
+MV_CESA_RESULT cesaResult;
+
+int cesaTestFull = 0;
+
+MV_BOOL cesaIsReady = MV_FALSE;
+MV_U32 cesaCycles = 0;
+MV_U32 cesaBeginTicks = 0;
+MV_U32 cesaEndTicks = 0;
+MV_U32 cesaRate = 0;
+MV_U32 cesaRateAfterDot = 0;
+
+void *cesaTestOSHandle = NULL;
+
+enum
+{
+ CESA_FAST_CHECK_MODE = 0,
+ CESA_FULL_CHECK_MODE,
+ CESA_NULL_CHECK_MODE,
+ CESA_SHOW_CHECK_MODE,
+ CESA_SW_SHOW_CHECK_MODE,
+ CESA_SW_NULL_CHECK_MODE,
+
+ CESA_MAX_CHECK_MODE
+};
+
+enum
+{
+ DES_TEST_TYPE = 0,
+ TRIPLE_DES_TEST_TYPE = 1,
+ AES_TEST_TYPE = 2,
+ MD5_TEST_TYPE = 3,
+ SHA_TEST_TYPE = 4,
+ COMBINED_TEST_TYPE = 5,
+
+ MAX_TEST_TYPE
+};
+
+/* Tests data base */
+typedef struct
+{
+ short sid;
+ char cryptoAlgorithm; /* DES/3DES/AES */
+ char cryptoMode; /* ECB or CBC */
+ char macAlgorithm; /* MD5 / SHA1 */
+ char operation; /* CRYPTO/HMAC/CRYPTO+HMAC/HMAC+CRYPTO */
+ char direction; /* ENCODE(SIGN)/DECODE(VERIFY) */
+ unsigned char* pCryptoKey;
+ int cryptoKeySize;
+ unsigned char* pMacKey;
+ int macKeySize;
+ const char* name;
+
+} MV_CESA_TEST_SESSION;
+
+typedef struct
+{
+ MV_CESA_TEST_SESSION* pSessions;
+ int numSessions;
+
+} MV_CESA_TEST_DB_ENTRY;
+
+typedef struct
+{
+ char* plainHexStr;
+ char* cipherHexStr;
+ unsigned char* pCryptoIV;
+ int cryptoLength;
+ int macLength;
+ int digestOffset;
+
+} MV_CESA_TEST_CASE;
+
+typedef struct
+{
+ int size;
+ const char* outputHexStr;
+
+} MV_CESA_SIZE_TEST;
+
+static unsigned char cryptoKey1[] = {0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef};
+
+static unsigned char cryptoKey7[] = {0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef};
+static unsigned char iv1[] = {0x12, 0x34, 0x56, 0x78, 0x90, 0xab, 0xcd, 0xef};
+
+
+static unsigned char cryptoKey2[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F};
+
+static unsigned char cryptoKey3[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17};
+
+static unsigned char cryptoKey4[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f};
+
+static unsigned char cryptoKey5[] = {0x56, 0xe4, 0x7a, 0x38, 0xc5, 0x59, 0x89, 0x74,
+ 0xbc, 0x46, 0x90, 0x3d, 0xba, 0x29, 0x03, 0x49};
+
+
+static unsigned char key3des1[] = {0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF,
+ 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x01,
+ 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x01, 0x23};
+
+/* Input ASCII string: The quick brown fox jump */
+static char plain3des1[] = "54686520717566636B2062726F776E20666F78206A756D70";
+static char cipher3des1[] = "A826FD8CE53B855FCCE21C8112256FE668D5C05DD9B6B900";
+
+static unsigned char key3des2[] = {0x62, 0x7f, 0x46, 0x0e, 0x08, 0x10, 0x4a, 0x10,
+ 0x43, 0xcd, 0x26, 0x5d, 0x58, 0x40, 0xea, 0xf1,
+ 0x31, 0x3e, 0xdf, 0x97, 0xdf, 0x2a, 0x8a, 0x8c};
+
+static unsigned char iv3des2[] = {0x8e, 0x29, 0xf7, 0x5e, 0xa7, 0x7e, 0x54, 0x75};
+
+static char plain3des2[] = "326a494cd33fe756";
+
+static char cipher3desCbc2[] = "8e29f75ea77e5475"
+ "b22b8d66de970692";
+
+static unsigned char key3des3[] = {0x37, 0xae, 0x5e, 0xbf, 0x46, 0xdf, 0xf2, 0xdc,
+ 0x07, 0x54, 0xb9, 0x4f, 0x31, 0xcb, 0xb3, 0x85,
+ 0x5e, 0x7f, 0xd3, 0x6d, 0xc8, 0x70, 0xbf, 0xae};
+
+static unsigned char iv3des3[] = {0x3d, 0x1d, 0xe3, 0xcc, 0x13, 0x2e, 0x3b, 0x65};
+
+static char plain3des3[] = "84401f78fe6c10876d8ea23094ea5309";
+
+static char cipher3desCbc3[] = "3d1de3cc132e3b65"
+ "7b1f7c7e3b1c948ebd04a75ffba7d2f5";
+
+static unsigned char iv5[] = {0x8c, 0xe8, 0x2e, 0xef, 0xbe, 0xa0, 0xda, 0x3c,
+ 0x44, 0x69, 0x9e, 0xd7, 0xdb, 0x51, 0xb7, 0xd9};
+
+static unsigned char aesCtrKey[] = {0x76, 0x91, 0xBE, 0x03, 0x5E, 0x50, 0x20, 0xA8,
+ 0xAC, 0x6E, 0x61, 0x85, 0x29, 0xF9, 0xA0, 0xDC};
+
+static unsigned char mdKey1[] = {0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b};
+
+static unsigned char mdKey2[] = {0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa};
+
+static unsigned char shaKey1[] = {0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b};
+
+static unsigned char shaKey2[] = {0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa};
+
+static unsigned char mdKey4[] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10};
+
+static unsigned char shaKey4[] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
+ 0x11, 0x12, 0x13, 0x14};
+
+
+static MV_CESA_TEST_SESSION desTestSessions[] =
+{
+/*000*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey7, sizeof(cryptoKey7)/sizeof(cryptoKey7[0]),
+ NULL, 0,
+ "DES ECB encode",
+ },
+/*001*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_DECODE,
+ cryptoKey7, sizeof(cryptoKey7)/sizeof(cryptoKey7[0]),
+ NULL, 0,
+ "DES ECB decode",
+ },
+/*002*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_CBC,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey7, sizeof(cryptoKey7)/sizeof(cryptoKey7[0]),
+ NULL, 0,
+ "DES CBC encode"
+ },
+/*003*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_CBC,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_DECODE,
+ cryptoKey7, sizeof(cryptoKey7)/sizeof(cryptoKey7[0]),
+ NULL, 0,
+ "DES CBC decode"
+ },
+/*004*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_ENCODE,
+ NULL, 0, NULL, 0,
+ "NULL Crypto Algorithm encode"
+ },
+};
+
+
+static MV_CESA_TEST_SESSION tripleDesTestSessions[] =
+{
+/*100*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+ NULL, 0,
+ "3DES ECB encode",
+ },
+/*101*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_DECODE,
+ cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+ NULL, 0,
+ "3DES ECB decode",
+ },
+/*102*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+ NULL, 0,
+ "3DES CBC encode"
+ },
+/*103*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_DECODE,
+ cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+ NULL, 0,
+ "3DES CBC decode"
+ },
+/*104*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_ENCODE,
+ key3des1, sizeof(key3des1),
+ NULL, 0,
+ "3DES ECB encode"
+ },
+/*105*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_ENCODE,
+ key3des2, sizeof(key3des2),
+ NULL, 0,
+		 "3DES CBC encode"
+ },
+/*106*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_ENCODE,
+ key3des3, sizeof(key3des3),
+ NULL, 0,
+		 "3DES CBC encode"
+ },
+};
+
+
+static MV_CESA_TEST_SESSION aesTestSessions[] =
+{
+/*200*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey2, sizeof(cryptoKey2)/sizeof(cryptoKey2[0]),
+ NULL, 0,
+ "AES-128 ECB encode"
+ },
+/*201*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_DECODE,
+ cryptoKey2, sizeof(cryptoKey2)/sizeof(cryptoKey2[0]),
+ NULL, 0,
+ "AES-128 ECB decode"
+ },
+/*202*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_CBC,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey5, sizeof(cryptoKey5)/sizeof(cryptoKey5[0]),
+ NULL, 0,
+ "AES-128 CBC encode"
+ },
+/*203*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_CBC,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_DECODE,
+ cryptoKey5, sizeof(cryptoKey5)/sizeof(cryptoKey5[0]),
+ NULL, 0,
+ "AES-128 CBC decode"
+ },
+/*204*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey3, sizeof(cryptoKey3)/sizeof(cryptoKey3[0]),
+ NULL, 0,
+ "AES-192 ECB encode"
+ },
+/*205*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_DECODE,
+ cryptoKey3, sizeof(cryptoKey3)/sizeof(cryptoKey3[0]),
+ NULL, 0,
+ "AES-192 ECB decode"
+ },
+/*206*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey4, sizeof(cryptoKey4)/sizeof(cryptoKey4[0]),
+ NULL, 0,
+ "AES-256 ECB encode"
+ },
+/*207*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_DECODE,
+ cryptoKey4, sizeof(cryptoKey4)/sizeof(cryptoKey4[0]),
+ NULL, 0,
+ "AES-256 ECB decode"
+ },
+/*208*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_CTR,
+ MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+ MV_CESA_DIR_ENCODE,
+ aesCtrKey, sizeof(aesCtrKey)/sizeof(aesCtrKey[0]),
+ NULL, 0,
+ "AES-128 CTR encode"
+ },
+};
+
+
+static MV_CESA_TEST_SESSION md5TestSessions[] =
+{
+/*300*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_ONLY,
+ MV_CESA_DIR_ENCODE,
+ NULL, 0,
+ mdKey1, sizeof(mdKey1),
+ "HMAC-MD5 Generate Signature"
+ },
+/*301*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_ONLY,
+ MV_CESA_DIR_DECODE,
+ NULL, 0,
+ mdKey1, sizeof(mdKey1),
+ "HMAC-MD5 Verify Signature"
+ },
+/*302*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_ONLY,
+ MV_CESA_DIR_ENCODE,
+ NULL, 0,
+ mdKey2, sizeof(mdKey2),
+ "HMAC-MD5 Generate Signature"
+ },
+/*303*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_ONLY,
+ MV_CESA_DIR_DECODE,
+ NULL, 0,
+ mdKey2, sizeof(mdKey2),
+ "HMAC-MD5 Verify Signature"
+ },
+/*304*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_ONLY,
+ MV_CESA_DIR_ENCODE,
+ NULL, 0,
+ mdKey4, sizeof(mdKey4),
+ "HMAC-MD5 Generate Signature"
+ },
+/*305*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_MD5, MV_CESA_MAC_ONLY,
+ MV_CESA_DIR_ENCODE,
+ NULL, 0,
+ NULL, 0,
+ "HASH-MD5 Generate Signature"
+ },
+};
+
+
+static MV_CESA_TEST_SESSION shaTestSessions[] =
+{
+/*400*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_HMAC_SHA1, MV_CESA_MAC_ONLY,
+ MV_CESA_DIR_ENCODE,
+ NULL, 0,
+ shaKey1, sizeof(shaKey1),
+ "HMAC-SHA1 Generate Signature"
+ },
+/*401*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_HMAC_SHA1, MV_CESA_MAC_ONLY,
+ MV_CESA_DIR_DECODE,
+ NULL, 0,
+ shaKey1, sizeof(shaKey1),
+ "HMAC-SHA1 Verify Signature"
+ },
+/*402*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_HMAC_SHA1, MV_CESA_MAC_ONLY,
+ MV_CESA_DIR_ENCODE,
+ NULL, 0,
+ shaKey2, sizeof(shaKey2),
+ "HMAC-SHA1 Generate Signature"
+ },
+/*403*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_HMAC_SHA1, MV_CESA_MAC_ONLY,
+ MV_CESA_DIR_DECODE,
+ NULL, 0,
+ shaKey2, sizeof(shaKey2),
+ "HMAC-SHA1 Verify Signature"
+ },
+/*404*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_HMAC_SHA1, MV_CESA_MAC_ONLY,
+ MV_CESA_DIR_ENCODE,
+ NULL, 0,
+ shaKey4, sizeof(shaKey4),
+ "HMAC-SHA1 Generate Signature"
+ },
+/*405*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_SHA1, MV_CESA_MAC_ONLY,
+ MV_CESA_DIR_ENCODE,
+ NULL, 0,
+ NULL, 0,
+ "HASH-SHA1 Generate Signature"
+ },
+};
+
+static MV_CESA_TEST_SESSION combinedTestSessions[] =
+{
+/*500*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_HMAC_MD5, MV_CESA_CRYPTO_THEN_MAC,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey1, MV_CESA_DES_KEY_LENGTH,
+ mdKey4, sizeof(mdKey4),
+ "DES + MD5 encode"
+ },
+/*501*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_HMAC_SHA1, MV_CESA_CRYPTO_THEN_MAC,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey1, MV_CESA_DES_KEY_LENGTH,
+ shaKey4, sizeof(shaKey4),
+ "DES + SHA1 encode"
+ },
+/*502*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_HMAC_MD5, MV_CESA_CRYPTO_THEN_MAC,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+ mdKey4, sizeof(mdKey4),
+ "3DES + MD5 encode"
+ },
+/*503*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_HMAC_SHA1, MV_CESA_CRYPTO_THEN_MAC,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+ shaKey4, sizeof(shaKey4),
+ "3DES + SHA1 encode"
+ },
+/*504*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+ MV_CESA_MAC_HMAC_MD5, MV_CESA_CRYPTO_THEN_MAC,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+ mdKey4, sizeof(mdKey4),
+ "3DES CBC + MD5 encode"
+ },
+/*505*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+ MV_CESA_MAC_HMAC_SHA1, MV_CESA_CRYPTO_THEN_MAC,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+ shaKey4, sizeof(shaKey4),
+ "3DES CBC + SHA1 encode"
+ },
+/*506*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_CBC,
+ MV_CESA_MAC_HMAC_MD5, MV_CESA_CRYPTO_THEN_MAC,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey5, sizeof(cryptoKey5)/sizeof(cryptoKey5[0]),
+ mdKey4, sizeof(mdKey4),
+ "AES-128 CBC + MD5 encode"
+ },
+/*507*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_CBC,
+ MV_CESA_MAC_HMAC_SHA1, MV_CESA_CRYPTO_THEN_MAC,
+ MV_CESA_DIR_ENCODE,
+ cryptoKey5, sizeof(cryptoKey5)/sizeof(cryptoKey5[0]),
+ shaKey4, sizeof(shaKey4),
+ "AES-128 CBC + SHA1 encode"
+ },
+/*508*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+ MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_THEN_CRYPTO,
+ MV_CESA_DIR_DECODE,
+ cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+ mdKey4, sizeof(mdKey4),
+ "HMAC-MD5 + 3DES decode"
+ },
+};
+
+
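+/*
+ * Test index convention (see getTestSessionDb()): idx/100 selects the table
+ * below (0=DES, 1=3DES, 2=AES, 3=MD5, 4=SHA1, 5=combined) and idx%100 selects
+ * the session inside it, matching the /*0xx*/ .. /*5xx*/ numbers above.
+ */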
+static MV_CESA_TEST_DB_ENTRY cesaTestsDB[MAX_TEST_TYPE+1] =
+{
+ { desTestSessions, sizeof(desTestSessions)/sizeof(desTestSessions[0]) },
+ { tripleDesTestSessions, sizeof(tripleDesTestSessions)/sizeof(tripleDesTestSessions[0]) },
+ { aesTestSessions, sizeof(aesTestSessions)/sizeof(aesTestSessions[0]) },
+ { md5TestSessions, sizeof(md5TestSessions)/sizeof(md5TestSessions[0]) },
+ { shaTestSessions, sizeof(shaTestSessions)/sizeof(shaTestSessions[0]) },
+ { combinedTestSessions, sizeof(combinedTestSessions)/sizeof(combinedTestSessions[0]) },
+ { NULL, 0 }
+};
+
+
+char cesaNullPlainHexText[] = "000000000000000000000000000000000000000000000000";
+
+char cesaPlainAsciiText[] = "Now is the time for all ";
+char cesaPlainHexEbc[] = "4e6f77206973207468652074696d6520666f7220616c6c20";
+char cesaCipherHexEcb[] = "3fa40e8a984d48156a271787ab8883f9893d51ec4b563b53";
+char cesaPlainHexCbc[] = "1234567890abcdef4e6f77206973207468652074696d6520666f7220616c6c20";
+char cesaCipherHexCbc[] = "1234567890abcdefe5c7cdde872bf27c43e934008c389c0f683788499a7c05f6";
+
+char cesaAesPlainHexEcb[] = "000102030405060708090a0b0c0d0e0f";
+char cesaAes128cipherHexEcb[] = "0a940bb5416ef045f1c39458c653ea5a";
+char cesaAes192cipherHexEcb[] = "0060bffe46834bb8da5cf9a61ff220ae";
+char cesaAes256cipherHexEcb[] = "5a6e045708fb7196f02e553d02c3a692";
+
+char cesaAsciiStr1[] = "Hi There";
+char cesaDataHexStr1[] = "4869205468657265";
+char cesaHmacMd5digestHex1[] = "9294727a3638bb1c13f48ef8158bfc9d";
+char cesaHmacSha1digestHex1[] = "b617318655057264e28bc0b6fb378c8ef146be00";
+char cesaDataAndMd5digest1[] = "48692054686572659294727a3638bb1c13f48ef8158bfc9d";
+char cesaDataAndSha1digest1[] = "4869205468657265b617318655057264e28bc0b6fb378c8ef146be00";
+
+char cesaAesPlainText[] = "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf"
+ "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf"
+ "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf"
+ "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf";
+
+char cesaAes128CipherCbc[] = "c30e32ffedc0774e6aff6af0869f71aa"
+ "0f3af07a9a31a9c684db207eb0ef8e4e"
+ "35907aa632c3ffdf868bb7b29d3d46ad"
+ "83ce9f9a102ee99d49a53e87f4c3da55";
+
+char cesaAesIvPlainText[] = "8ce82eefbea0da3c44699ed7db51b7d9"
+ "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf"
+ "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf"
+ "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf"
+ "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf";
+
+char cesaAes128IvCipherCbc[] = "8ce82eefbea0da3c44699ed7db51b7d9"
+ "c30e32ffedc0774e6aff6af0869f71aa"
+ "0f3af07a9a31a9c684db207eb0ef8e4e"
+ "35907aa632c3ffdf868bb7b29d3d46ad"
+ "83ce9f9a102ee99d49a53e87f4c3da55";
+
+char cesaAesCtrPlain[] = "00E0017B27777F3F4A1786F000000001"
+ "000102030405060708090A0B0C0D0E0F"
+ "101112131415161718191A1B1C1D1E1F"
+ "20212223";
+
+char cesaAesCtrCipher[] = "00E0017B27777F3F4A1786F000000001"
+ "C1CF48A89F2FFDD9CF4652E9EFDB72D7"
+ "4540A42BDE6D7836D59A5CEAAEF31053"
+ "25B2072F";
+
+
+
+/* Input cesaDataHexStr3 is 0xdd repeated 50 times (filled in by cesaTestStart()) */
+char cesaHmacMd5digestHex3[] = "56be34521d144c88dbb8c733f0e8b3f6";
+char cesaHmacSha1digestHex3[] = "125d7342b9ac11cd91a39af48aa17b4f63f175d3";
+char cesaDataHexStr3[50*2+1] = "";
+char cesaDataAndMd5digest3[sizeof(cesaDataHexStr3)+sizeof(cesaHmacMd5digestHex3)+8*2+1] = "";
+char cesaDataAndSha1digest3[sizeof(cesaDataHexStr3)+sizeof(cesaHmacSha1digestHex3)+8*2+1] = "";
+
+/* Ascii string is "abc" */
+char hashHexStr3[] = "616263";
+char hashMd5digest3[] = "900150983cd24fb0d6963f7d28e17f72";
+char hashSha1digest3[] = "a9993e364706816aba3e25717850c26c9cd0d89d";
+
+char hashHexStr80[] = "31323334353637383930"
+ "31323334353637383930"
+ "31323334353637383930"
+ "31323334353637383930"
+ "31323334353637383930"
+ "31323334353637383930"
+ "31323334353637383930"
+ "31323334353637383930";
+
+char hashMd5digest80[] = "57edf4a22be3c955ac49da2e2107b67a";
+
+char tripleDesThenMd5digest80[] = "b7726a03aad490bd6c5a452a89a1b271";
+char tripleDesThenSha1digest80[] = "b2ddeaca91030eab5b95a234ef2c0f6e738ff883";
+
+char cbc3desThenMd5digest80[] = "6f463057e1a90e0e91ae505b527bcec0";
+char cbc3desThenSha1digest80[] = "1b002ed050be743aa98860cf35659646bb8efcc0";
+
+char cbcAes128ThenMd5digest80[] = "6b6e863ac5a71d15e3e9b1c86c9ba05f";
+char cbcAes128ThenSha1digest80[] = "13558472d1fc1c90dffec6e5136c7203452d509b";
+
+
+static MV_CESA_TEST_CASE cesaTestCases[] =
+{
+ /* plainHexStr cipherHexStr IV crypto mac digest */
+ /* Length Length Offset */
+ /*0*/ { NULL, NULL, NULL, 0, 0, -1 },
+ /*1*/ { cesaPlainHexEbc, cesaCipherHexEcb, NULL, 24, 0, -1 },
+ /*2*/ { cesaPlainHexCbc, cesaCipherHexCbc, NULL, 24, 0, -1 },
+ /*3*/ { cesaAesPlainHexEcb, cesaAes128cipherHexEcb, NULL, 16, 0, -1 },
+ /*4*/ { cesaAesPlainHexEcb, cesaAes192cipherHexEcb, NULL, 16, 0, -1 },
+ /*5*/ { cesaAesPlainHexEcb, cesaAes256cipherHexEcb, NULL, 16, 0, -1 },
+ /*6*/ { cesaDataHexStr1, cesaHmacMd5digestHex1, NULL, 0, 8, -1 },
+ /*7*/ { NULL, cesaDataAndMd5digest1, NULL, 0, 8, -1 },
+ /*8*/ { cesaDataHexStr3, cesaHmacMd5digestHex3, NULL, 0, 50, -1 },
+ /*9*/ { NULL, cesaDataAndMd5digest3, NULL, 0, 50, -1 },
+/*10*/ { cesaAesPlainText, cesaAes128IvCipherCbc, iv5, 64, 0, -1 },
+/*11*/ { cesaDataHexStr1, cesaHmacSha1digestHex1, NULL, 0, 8, -1 },
+/*12*/ { NULL, cesaDataAndSha1digest1, NULL, 0, 8, -1 },
+/*13*/ { cesaDataHexStr3, cesaHmacSha1digestHex3, NULL, 0, 50, -1 },
+/*14*/ { NULL, cesaDataAndSha1digest3, NULL, 0, 50, -1 },
+/*15*/ { hashHexStr3, hashMd5digest3, NULL, 0, 3, -1 },
+/*16*/ { hashHexStr3, hashSha1digest3, NULL, 0, 3, -1 },
+/*17*/ { hashHexStr80, tripleDesThenMd5digest80, NULL, 80, 80, -1 },
+/*18*/ { hashHexStr80, tripleDesThenSha1digest80, NULL, 80, 80, -1 },
+/*19*/ { hashHexStr80, cbc3desThenMd5digest80, iv1, 80, 80, -1 },
+/*20*/ { hashHexStr80, cbc3desThenSha1digest80, iv1, 80, 80, -1 },
+/*21*/ { hashHexStr80, cbcAes128ThenMd5digest80, iv5, 80, 80, -1 },
+/*22*/ { hashHexStr80, cbcAes128ThenSha1digest80, iv5, 80, 80, -1 },
+/*23*/ { cesaAesCtrPlain, cesaAesCtrCipher, NULL, 36, 0, -1 },
+/*24*/ { cesaAesIvPlainText, cesaAes128IvCipherCbc, NULL, 64, 0, -1 },
+/*25*/ { plain3des1, cipher3des1, NULL, 0, 0, -1 },
+/*26*/ { plain3des2, cipher3desCbc2, iv3des2,0, 0, -1 },
+/*27*/ { plain3des3, cipher3desCbc3, iv3des3,0, 0, -1 },
+};
+
+
+/* Key = 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ * 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa
+ * Input 0xdd repeated "size" times
+ */
+static MV_CESA_SIZE_TEST mdMultiSizeTest302[] =
+{
+ { 80, "7a031a640c14a4872814930b1ef3a5b2" },
+ { 512, "5488e6c5a14dc72a79f28312ca5b939b" },
+ { 1000, "d00814f586a8b78a05724239d2531821" },
+ { 1001, "bf07df7b7f49d3f5b5ecacd4e9e63281" },
+ { 1002, "1ed4a1a802e87817a819d4e37bb4d0f7" },
+ { 1003, "5972ab64a4f265ee371dac2f2f137f90" },
+ { 1004, "71f95e7ec3aa7df2548e90898abdb28e" },
+ { 1005, "e082790b4857fcfc266e92e59e608814" },
+ { 1006, "9500f02fd8ac7fde8b10e4fece9a920d" },
+ { 1336, "e42edcce57d0b75b01aa09d71427948b" },
+ { 1344, "bb5454ada0deb49ba0a97ffd60f57071" },
+ { 1399, "0f44d793e744b24d53f44f295082ee8c" },
+ { 1400, "359de8a03a9b707928c6c60e0e8d79f1" },
+ { 1401, "e913858b484cbe2b384099ea88d8855b" },
+ { 1402, "d9848a164af53620e0540c1d7d87629e" },
+ { 1403, "0c9ee1c2c9ef45e9b625c26cbaf3e822" },
+ { 1404, "12edd4f609416e3c936170360561b064" },
+ { 1405, "7fc912718a05446395345009132bf562" },
+ { 1406, "882f17425e579ff0d85a91a59f308aa0" },
+ { 1407, "005cae408630a2fb5db82ad9db7e59da" },
+ { 1408, "64655f8b404b3fea7a3e3e609bc5088f" },
+ { 1409, "4a145284a7f74e01b6bb1a0ec6a0dd80" },
+ { 2048, "67caf64475650732def374ebb8bde3fd" },
+ { 2049, "6c84f11f472825f7e6cd125c2981884b" },
+ { 2050, "8999586754a73a99efbe4dbad2816d41" },
+ { 2051, "ba6946b610e098d286bc81091659dfff" },
+ { 2052, "d0afa01c92d4d13def2b024f36faed83" },
+ { 3072, "61d8beac61806afa2585d74a9a0e6974" },
+ { 3074, "f6501a28dcc24d1e4770505c51a87ed3" },
+ { 3075, "ea4a6929be67e33e61ff475369248b73" },
+ { 4048, "aa8c4d68f282a07e7385acdfa69f4bed" },
+ { 4052, "afb5ed2c0e1d430ea59e59ed5ed6b18a" },
+ { 4058, "9e8553f9bdd43aebe0bd729f0e600c99" },
+ { 6144, "f628f3e5d183fe5cdd3a5abee39cf872" },
+ { 6150, "89a3efcea9a2f25f919168ad4a1fd292" },
+ { 6400, "cdd176b7fb747873efa4da5e32bdf88f" },
+ { 6528, "b1d707b027354aca152c45ee559ccd3f" },
+ { 8192, "c600ea4429ac47f9941f09182166e51a" },
+ {16384, "16e8754bfbeb4c649218422792267a37" },
+ {18432, "0fd0607521b0aa8b52219cfbe215f63e" },
+ { 0, NULL },
+};
+
+/* Key = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ * 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ */
+static MV_CESA_SIZE_TEST mdMultiSizeTest304[] =
+{
+ { 80, "a456c4723fee6068530af5a2afa71627" },
+ { 512, "f85c2a2344f5de68b432208ad13e5794" },
+ { 1000, "35464d6821fd4a293a41eb84e274c8c5" },
+ { 1001, "c08eedbdce60cceb54bc2d732bb32c8b" },
+ { 1002, "5664f71800c011cc311cb6943339c1b8" },
+ { 1003, "779c723b044c585dc7802b13e8501bdc" },
+ { 1004, "55e500766a2c307bc5c5fdd15e4cacd4" },
+ { 1005, "d5f978954f5c38529d1679d2b714f068" },
+ { 1006, "cd3efc827ce628b7281b72172693abf9" },
+ { 1336, "6f04479910785878ae6335b8d1e87edf" },
+ { 1344, "b6d27b50c2bce1ba2a8e1b5cc4324368" },
+ { 1399, "65f70a1d4c86e5eaeb0704c8a7816795" },
+ { 1400, "3394b5adc4cb3ff98843ca260a44a88a" },
+ { 1401, "3a06f3582033a66a4e57e0603ce94e74" },
+ { 1402, "e4d97f5ed51edc48abfa46eeb5c31752" },
+ { 1403, "3d05e40b080ee3bedf293cb87b7140e7" },
+ { 1404, "8cf294fc3cd153ab18dccb2a52cbf244" },
+ { 1405, "d1487bd42f6edd9b4dab316631159221" },
+ { 1406, "0527123b6bf6936cf5d369dc18c6c70f" },
+ { 1407, "3224a06639db70212a0cd1ae1fcc570a" },
+ { 1408, "a9e13335612c0356f5e2c27086e86c43" },
+ { 1409, "a86d1f37d1ed8a3552e9a4f04dceea98" },
+ { 2048, "396905c9b961cd0f6152abfb69c4449c" },
+ { 2049, "49f39bff85d9dcf059fadb89efc4a70f" },
+ { 2050, "3a2b4823bc4d0415656550226a63e34a" },
+ { 2051, "dec60580d406c782540f398ad0bcc7e0" },
+ { 2052, "32f76610a14310309eb748fe025081bf" },
+ { 3072, "45edc1a42bf9d708a621076b63b774da" },
+ { 3074, "9be1b333fe7c0c9f835fb369dc45f778" },
+ { 3075, "8c06fcac7bd0e7b7a17fd6508c09a549" },
+ { 4048, "0ddaef848184bf0ad98507a10f1e90e4" },
+ { 4052, "81976bcaeb274223983996c137875cb8" },
+ { 4058, "0b0a7a1c82bc7cbc64d8b7cd2dc2bb22" },
+ { 6144, "1c24056f52725ede2dff0d7f9fc9855f" },
+ { 6150, "b7f4b65681c4e43ee68ca466ca9ca4ec" },
+ { 6400, "443bbaab9f7331ddd4bf11b659cd43c8" },
+ { 6528, "216f44f23047cfee03a7a64f88f9a995" },
+ { 8192, "ac7a993b2cad54879dba1bde63e39097" },
+ { 8320, "55ed7be9682d6c0025b3221a62088d08" },
+ {16384, "c6c722087653b62007aea668277175e5" },
+ {18432, "f1faca8e907872c809e14ffbd85792d6" },
+ { 0, NULL },
+};
+
+/* HASH-MD5
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * repeated "size" times
+ */
+static MV_CESA_SIZE_TEST mdMultiSizeTest305[] =
+{
+ { 80, "57edf4a22be3c955ac49da2e2107b67a" },
+ { 512, "c729ae8f0736cc377a9767a660eaa04e" },
+ { 1000, "f1257a8659eb92d36fe14c6bf3852a6a" },
+ { 1001, "f8a46fe8ea04fdc8c7de0e84042d3878" },
+ { 1002, "da188dd67bff87d58aa3c02af2d0cc0f" },
+ { 1003, "961753017feee04c9b93a8e51658a829" },
+ { 1004, "dd68c4338608dcc87807a711636bf2af" },
+ { 1005, "e338d567d3ce66bf69ada29658a8759b" },
+ { 1006, "443c9811e8b92599b0b149e8d7ec700a" },
+ { 1336, "89a98511706008ba4cbd0b4a24fa5646" },
+ { 1344, "335a919805f370b9e402a62c6fe01739" },
+ { 1399, "5d18d0eddcd84212fe28d812b5e80e3b" },
+ { 1400, "6b695c240d2dffd0dffc99459ca76db6" },
+ { 1401, "49590f61298a76719bc93a57a30136f5" },
+ { 1402, "94c2999fa3ef1910a683d69b2b8476f2" },
+ { 1403, "37073a02ab00ecba2645c57c228860db" },
+ { 1404, "1bcd06994fce28b624f0c5fdc2dcdd2b" },
+ { 1405, "11b93671a64c95079e8cf9e7cddc8b3d" },
+ { 1406, "4b6695772a4c66313fa4871017d05f36" },
+ { 1407, "d1539b97fbfda1c075624e958de19c5b" },
+ { 1408, "b801b9b69920907cd018e8063092ede9" },
+ { 1409, "b765f1406cfe78e238273ed01bbcaf7e" },
+ { 2048, "1d7e2c64ac29e2b3fb4c272844ed31f5" },
+ { 2049, "71d38fac49c6b1f4478d8d88447bcdd0" },
+ { 2050, "141c34a5592b1bebfa731e0b23d0cdba" },
+ { 2051, "c5e1853f21c59f5d6039bd13d4b380d8" },
+ { 2052, "dd44a0d128b63d4b5cccd967906472d7" },
+ { 3072, "37d158e33b21390822739d13db7b87fe" },
+ { 3074, "aef3b209d01d39d0597fe03634bbf441" },
+ { 3075, "335ffb428eabf210bada96d74d5a4012" },
+ { 4048, "2434c2b43d798d2819487a886261fc64" },
+ { 4052, "ac2fa84a8a33065b2e92e36432e861f8" },
+ { 4058, "856781f85616c341c3533d090c1e1e84" },
+ { 6144, "e5d134c652c18bf19833e115f7a82e9b" },
+ { 6150, "a09a353be7795fac2401dac5601872e6" },
+ { 6400, "08b9033ac6a1821398f50af75a2dbc83" },
+ { 6528, "3d47aa193a8540c091e7e02f779e6751" },
+ { 8192, "d3164e710c0626f6f395b38f20141cb7" },
+ { 8320, "b727589d9183ff4e8491dd24466974a3" },
+ {16384, "3f54d970793d2274d5b20d10a69938ac" },
+ {18432, "f558511dcf81985b7a1bb57fad970531" },
+ { 0, NULL },
+};
+
+
+/* Key = 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ * 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa
+ * 0xaa, 0xaa, 0xaa, 0xaa
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ */
+static MV_CESA_SIZE_TEST shaMultiSizeTest402[] =
+{
+ { 80, "e812f370e659705a1649940d1f78cd7af18affd3" },
+ { 512, "e547f886b2c15d995ed76a8a924cb408c8080f66" },
+ { 1000, "239443194409f1a5342ecde1a092c8f3a3ed790a" },
+ { 1001, "f278ab9a102850a9f48dc4e9e6822afe2d0c52b5" },
+ { 1002, "8bcc667df5ab6ece988b3af361d09747c77f4e72" },
+ { 1003, "0fae6046c7dc1d3e356b25af836f6077a363f338" },
+ { 1004, "0ea48401cc92ae6bc92ae76685269cb0167fbe1a" },
+ { 1005, "ecbcd7c879b295bafcd8766cbeac58cc371e31d1" },
+ { 1006, "eb4a4a3d07d1e9a15e6f1ab8a9c47f243e27324c" },
+ { 1336, "f5950ee1d77c10e9011d2149699c9366fe52529c" },
+ { 1344, "b04263604a63c351b0b3b9cf1785b4bdba6c8838" },
+ { 1399, "8cb1cff61d5b784045974a2fc69386e3b8d24218" },
+ { 1400, "9bb2f3fcbeddb2b90f0be797cd647334a2816d51" },
+ { 1401, "23ae462a7a0cb440f7445791079a5d75a535dd33" },
+ { 1402, "832974b524a4d3f9cc2f45a3cabf5ccef65cd2aa" },
+ { 1403, "d1c683742fe404c3c20d5704a5430e7832a7ec95" },
+ { 1404, "867c79042e64f310628e219d8b85594cd0c7adc3" },
+ { 1405, "c9d81d49d13d94358f56ccfd61af02b36c69f7c3" },
+ { 1406, "0df43daab2786172f9b8d07d61f14a070cf1287a" },
+ { 1407, "0fd8f3ad7f169534b274d4c66bbddd89f759e391" },
+ { 1408, "3987511182b18473a564436003139b808fa46343" },
+ { 1409, "ef667e063c9e9f539a8987a8d0bd3066ee85d901" },
+ { 2048, "921109c99f3fedaca21727156d5f2b4460175327" },
+ { 2049, "47188600dd165eb45f27c27196d3c46f4f042c1b" },
+ { 2050, "8831939904009338de10e7fa670847041387807d" },
+ { 2051, "2f8ebb5db2997d614e767be1050366f3641e7520" },
+ { 2052, "669e51cd730dae158d3bef8adba075bd95a0d011" },
+ { 3072, "cfee66cfd83abc8451af3c96c6b35a41cc6c55f5" },
+ { 3074, "216ea26f02976a261b7d21a4dd3085157bedfabd" },
+ { 3075, "bd612ebba021fd8e012b14c3bd60c8c5161fabc0" },
+ { 4048, "c2564c1fdf2d5e9d7dde7aace2643428e90662e8" },
+ { 4052, "91ce61fe924b445dfe7b5a1dcd10a27caec16df6" },
+ { 4058, "db2a9be5ee8124f091c7ebd699266c5de223c164" },
+ { 6144, "855109903feae2ba3a7a05a326b8a171116eb368" },
+ { 6150, "37520bb3a668294d9c7b073e7e3daf8fee248a78" },
+ { 6400, "60a353c841b6d2b1a05890349dad2fa33c7536b7" },
+ { 6528, "9e53a43a69bb42d7c8522ca8bd632e421d5edb36" },
+ { 8192, "a918cb0da862eaea0a33ee0efea50243e6b4927c" },
+ { 8320, "29a5dcf55d1db29cd113fcf0572ae414f1c71329" },
+ {16384, "6fb27966138e0c8d5a0d65ace817ebd53633cee1" },
+ {18432, "ca09900d891c7c9ae2a559b10f63a217003341c1" },
+ { 0, NULL },
+};
+
+/* Key = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ * 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * 0x11, 0x12, 0x13, 0x14
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ */
+static MV_CESA_SIZE_TEST shaMultiSizeTest404[] =
+{
+ { 80, "beaf20a34b06a87558d156c0949bc3957d40222e" },
+ { 512, "3353955358d886bc2940a3c7f337ff7dafb59c7b" },
+ { 1000, "8737a542c5e9b2b6244b757ebb69d5bd602a829f" },
+ { 1001, "fd9e7582d8a5d3c9fe3b923e4e6a41b07a1eb4d4" },
+ { 1002, "a146d14a6fc3c274ff600568f4d75b977989e00d" },
+ { 1003, "be22601bbc027ddef2dec97d30b3dc424fd803c5" },
+ { 1004, "3e71fe99b2fe2b7bfdf4dbf0c7f3da25d7ea35e7" },
+ { 1005, "2c422735d7295408fddd76f5e8a83a2a8da13df3" },
+ { 1006, "6d875319049314b61855101a647b9ba3313428e6" },
+ { 1336, "c1631ea80bad9dc43a180712461b65a0598c711c" },
+ { 1344, "816069bf91d34581005746e2e0283d0f9c7b7605" },
+ { 1399, "4e139866dc61cfcb8b67ca2ebd637b3a538593af" },
+ { 1400, "ff2a0f8dd2b02c5417910f6f55d33a78e081a723" },
+ { 1401, "ab00c12be62336964cbce31ae97fe2a0002984d5" },
+ { 1402, "61349e7f999f3a1acc56c3e9a5060a9c4a7b05b6" },
+ { 1403, "3edbc0f61e435bc1317fa27d840076093fb79353" },
+ { 1404, "d052c6dfdbe63d45dab23ef9893e2aa4636aca1e" },
+ { 1405, "0cc16b7388d67bf0add15a31e6e6c753cfae4987" },
+ { 1406, "c96ba7eaad74253c38c22101b558d2850b1d1b90" },
+ { 1407, "3445428a40d2c6556e7c55797ad8d323b61a48d9" },
+ { 1408, "8d6444f937a09317c89834187b8ea9b8d3a8c56b" },
+ { 1409, "c700acd3ecd19014ea2bdb4d42510c467e088475" },
+ { 2048, "ee27d2a0cb77470c2f496212dfd68b5bb7b04e4b" },
+ { 2049, "683762d7a02983b26a6d046e6451d9cd82c25932" },
+ { 2050, "0fd20f1d55a9ee18363c2a6fd54aa13aee69992f" },
+ { 2051, "86c267d8cc4bc8d59090e4f8b303da960fd228b7" },
+ { 2052, "452395ae05b3ec503eea34f86fc0832485ad97c1" },
+ { 3072, "75198e3cfd0b9bcff2dabdf8e38e6fdaa33ca49a" },
+ { 3074, "4e24785ef080141ce4aab4675986d9acea624d7c" },
+ { 3075, "3a20c5978dd637ec0e809bf84f0d9ccf30bc65bf" },
+ { 4048, "3c32da256be7a7554922bf5fed51b0d2d09e59ad" },
+ { 4052, "fff898426ea16e54325ae391a32c6c9bce4c23c0" },
+ { 4058, "c800b9e562e1c91e1310116341a3c91d37f848ec" },
+ { 6144, "d91d509d0cc4376c2d05bf9a5097717a373530e6" },
+ { 6150, "d957030e0f13c5df07d9eec298542d8f94a07f12" },
+ { 6400, "bb745313c3d7dc17b3f955e5534ad500a1082613" },
+ { 6528, "77905f80d9ca82080bbb3e5654896dabfcfd1bdb" },
+ { 8192, "5237fd9a81830c974396f99f32047586612ff3c0" },
+ { 8320, "57668e28d5f2dba0839518a11db0f6af3d7e08bf" },
+ {16384, "62e093fde467f0748087beea32e9af97d5c61241" },
+ {18432, "845fb33130c7d6ea554fd5aacb9c50cf7ccb5929" },
+ { 0, NULL },
+};
+
+/* HASH-SHA1
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * repeated "size" times
+ */
+static MV_CESA_SIZE_TEST shaMultiSizeTest405[] =
+{
+ { 80, "50abf5706a150990a08b2c5ea40fa0e585554732" },
+ { 512, "f14516a08948fa27917a974d219741a697ba0087" },
+ { 1000, "0bd18c378d5788817eb4f1e5dc07d867efa5cbf4" },
+ { 1001, "ca29b85c35db1b8aef83c977893a11159d1b7aa2" },
+ { 1002, "d83bc973eaaedb8a31437994dabbb3304b0be086" },
+ { 1003, "2cf7bbef0acd6c00536b5c58ca470df9a3a90b6c" },
+ { 1004, "e4375d09b1223385a8a393066f8209acfd936a80" },
+ { 1005, "1029b38043e027745d019ce1d2d68e3d8b9d8f99" },
+ { 1006, "deea16dcebbd8ac137e2b984deb639b9fb5e9680" },
+ { 1336, "ea031b065fff63dcfb6a41956e4777520cdbc55d" },
+ { 1344, "b52096c6445e6c0a8355995c70dc36ae186c863c" },
+ { 1399, "cde2f6f8379870db4b32cf17471dc828a8dbff2b" },
+ { 1400, "e53ff664064bc09fe5054c650806bd42d8179518" },
+ { 1401, "d1156db5ddafcace64cdb510ff0d4af9b9a8ad64" },
+ { 1402, "34ede0e9a909dd84a2ae291539105c0507b958e1" },
+ { 1403, "a772ca3536da77e6ad3251e4f9e1234a4d7b87c0" },
+ { 1404, "29740fd2b04e7a8bfd32242db6233156ad699948" },
+ { 1405, "65b17397495b70ce4865dad93bf991b74c97cce1" },
+ { 1406, "a7ee89cd0754061fdb91af7ea6abad2c69d542e3" },
+ { 1407, "3eebf82f7420188e23d328b7ce93580b279a5715" },
+ { 1408, "e08d3363a8b9a490dfb3a4c453452b8f114deeec" },
+ { 1409, "95d74df739181a4ff30b8c39e28793a36598e924" },
+ { 2048, "aa40262509c2abf84aab0197f83187fc90056d91" },
+ { 2049, "7dec28ef105bc313bade8d9a7cdeac58b99de5ea" },
+ { 2050, "d2e30f77ec81197de20f56588a156094ecb88450" },
+ { 2051, "6b22ccc874833e96551a39da0c0edcaa0d969d92" },
+ { 2052, "f843141e57875cd669af58744bc60aa9ea59549c" },
+ { 3072, "09c5fedeaa62c132e673cc3c608a00142273d086" },
+ { 3074, "b09e95eea9c7b1b007a58accec488301901a7f3d" },
+ { 3075, "e6226b77b4ada287a8c9bbcf4ed71eec5ce632dc" },
+ { 4048, "e99394894f855821951ddddf5bfc628547435f5c" },
+ { 4052, "32d2f1af38be9cfba6cd03d55a254d0b3e1eb382" },
+ { 4058, "d906552a4f2aca3a22e1fecccbcd183d7289d0ef" },
+ { 6144, "2e7f62d35a860988e1224dc0543204af19316041" },
+ { 6150, "d6b89698ee133df46fec9d552fadc328aa5a1b51" },
+ { 6400, "dff50e90c46853988fa3a4b4ce5dda6945aae976" },
+ { 6528, "9e63ec0430b96db02d38bc78357a2f63de2ab7f8" },
+ { 8192, "971eb71ed60394d5ab5abb12e88420bdd41b5992" },
+ { 8320, "91606a31b46afeaac965cecf87297e791b211013" },
+ {16384, "547f830a5ec1f5f170ce818f156b1002cabc7569" },
+ {18432, "f16f272787f3b8d539652e4dc315af6ab4fda0ef" },
+ { 0, NULL },
+};
+
+/* CryptoKey = 0x01234567, 0x89abcdef,
+ * 0x01234567, 0x89abcdef,
+ * 0x01234567, 0x89abcdef;
+ * MacKey = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ * 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to 3DES block size (8 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST tripleDesMdMultiSizeTest502[] =
+{
+ { 64, "9586962a2aaaef28803dec2e17807a7f" },
+ { 80, "b7726a03aad490bd6c5a452a89a1b271" },
+ { 352, "f1ed9563aecc3c0d2766eb2bed3b4e4c" },
+ { 512, "0f9decb11ab40fe86f4d4d9397bc020e" },
+ { 1000, "3ba69deac12cab8ff9dff7dbd9669927" },
+ { 1336, "6cf47bf1e80e03e2c1d0945bc50d37d2" },
+ { 1344, "4be388dab21ceb3fa1b8d302e9b821f7" },
+ { 1400, "a58b79fb21dd9bfc6ec93e3b99fb0ef1" },
+ { 1408, "8bc97379fc2ac3237effcdd4f7a86528" },
+ { 2048, "1339f03ab3076f25a20bc4cba16eb5bf" },
+ { 3072, "731204d2d90c4b36ae41f5e1fb874288" },
+ { 4048, "c028d998cfda5642547b7e1ed5ea16e4" },
+ { 6144, "b1b19cd910cc51bd22992f1e59f1e068" },
+ { 6400, "44e4613496ba622deb0e7cb768135a2f" },
+ { 6528, "3b06b0a86f8db9cd67f9448dfcf10549" },
+ { 8192, "d581780b7163138a0f412be681457d82" },
+ {16384, "03b8ac05527faaf1bed03df149c65ccf" },
+ {18432, "677c8a86a41dab6c5d81b85b8fb10ff6" },
+ { 0, NULL },
+};
+
+
+/* CryptoKey = 0x01234567, 0x89abcdef,
+ * 0x01234567, 0x89abcdef,
+ * 0x01234567, 0x89abcdef;
+ * MacKey = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ * 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * 0x11, 0x12, 0x13, 0x14
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to 3DES block size (8 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST tripleDesShaMultiSizeTest503[] =
+{
+ { 64, "44a1e9bcbfc1429630d9ea68b7a48b0427a684f2" },
+ { 80, "b2ddeaca91030eab5b95a234ef2c0f6e738ff883" },
+ { 352, "4b91864c7ff629bdff75d9726421f76705452aaf" },
+ { 512, "6dd37faceeb2aa98ba74f4242ed6734a4d546af5" },
+ { 1000, "463661c30300be512a9df40904f0757cde5f1141" },
+ { 1336, "b931f831d9034fe59c65176400b039fe9c1f44a5" },
+ { 1344, "af8866b1cd4a4887d6185bfe72470ffdfb3648e1" },
+ { 1400, "49c6caf07296d5e31d2504d088bc5b20c3ee7cdb" },
+ { 1408, "fcae8deedbc6ebf0763575dc7e9de075b448a0f4" },
+ { 2048, "edece5012146c1faa0dd10f50b183ba5d2af58ac" },
+ { 3072, "5b83625adb43a488b8d64fecf39bb766818547b7" },
+ { 4048, "d2c533678d26c970293af60f14c8279dc708bfc9" },
+ { 6144, "b8f67af4f991b08b725f969b049ebf813bfacc5c" },
+ { 6400, "d9a6c7f746ac7a60ef2edbed2841cf851c25cfb0" },
+ { 6528, "376792b8c8d18161d15579fb7829e6e3a27e9946" },
+ { 8192, "d890eabdca195b34ef8724b28360cffa92ae5655" },
+ {16384, "a167ee52639ec7bf19aee9c6e8f76667c14134b9" },
+ {18432, "e4396ab56f67296b220985a12078f4a0e365d2cc" },
+ { 0, NULL },
+};
+
+/* CryptoKey = 0x01234567, 0x89abcdef,
+ * 0x01234567, 0x89abcdef,
+ * 0x01234567, 0x89abcdef
+ * IV = 0x12345678, 0x90abcdef
+ * MacKey = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ * 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to 3DES block size (8 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST cbc3desMdMultiSizeTest504[] =
+{
+ { 64, "8d10e00802460ede0058c139ba48bd2d" },
+ { 80, "6f463057e1a90e0e91ae505b527bcec0" },
+ { 352, "4938d48bdf86aece2c6851e7c6079788" },
+ { 512, "516705d59f3cf810ebf2a13a23a7d42e" },
+ { 1000, "a5a000ee5c830e67ddc6a2d2e5644b31" },
+ { 1336, "44af60087b74ed07950088efbe3b126a" },
+ { 1344, "1f5b39e0577920af731dabbfcf6dfc2a" },
+ { 1400, "6804ea640e29b9cd39e08bc37dbce734" },
+ { 1408, "4fb436624b02516fc9d1535466574bf9" },
+ { 2048, "c909b0985c423d8d86719f701e9e83db" },
+ { 3072, "cfe0bc34ef97213ee3d3f8b10122db21" },
+ { 4048, "03ea10b5ae4ddeb20aed6af373082ed1" },
+ { 6144, "b9a0ff4f87fc14b3c2dc6f0ed0998fdf" },
+ { 6400, "6995f85d9d4985dd99e974ec7dda9dd6" },
+ { 6528, "bbbb548ce2fa3d58467f6a6a5168a0e6" },
+ { 8192, "afe101fbe745bb449ae4f50d10801456" },
+ {16384, "9741706d0b1c923340c4660ff97cacdf" },
+ {18432, "b0217becb73cb8f61fd79c7ce9d023fb" },
+ { 0, NULL },
+};
+
+
+/* CryptoKey = 0x01234567, 0x89abcdef,
+ * 0x01234567, 0x89abcdef,
+ * 0x01234567, 0x89abcdef;
+ * IV = 0x12345678, 0x90abcdef
+ * MacKey = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ * 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * 0x11, 0x12, 0x13, 0x14
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to 3DES block size (8 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST cbc3desShaMultiSizeTest505[] =
+{
+ { 64, "409187e5bdb0be4a7754ca3747f7433dc4f01b98" },
+ { 80, "1b002ed050be743aa98860cf35659646bb8efcc0" },
+ { 352, "6cbf7ebe50fa4fa6eecc19eca23f9eae553ccfff" },
+ { 512, "cfb5253fb4bf72b743320c30c7e48c54965853b0" },
+ { 1000, "95e04e1ca2937e7c5a9aba9e42d2bcdb8a7af21f" },
+ { 1336, "3b5c1f5eee5837ebf67b83ae01405542d77a6627" },
+ { 1344, "2b3d42ab25615437f98a1ee310b81d07a02badc2" },
+ { 1400, "7f8687df7c1af44e4baf3c934b6cca5ab6bc993e" },
+ { 1408, "473a581c5f04f7527d50793c845471ac87e86430" },
+ { 2048, "e41d20cae7ebe34e6e828ed62b1e5734019037bb" },
+ { 3072, "275664afd7a561d804e6b0d204e53939cde653ae" },
+ { 4048, "0d220cc5b34aeeb46bbbd637dde6290b5a8285a3" },
+ { 6144, "cb393ddcc8b1c206060625b7d822ef9839e67bc5" },
+ { 6400, "dd3317e2a627fc04800f74a4b05bfda00fab0347" },
+ { 6528, "8a74c3b2441ab3f5a7e08895cc432566219a7c41" },
+ { 8192, "b8e6ef3a549ed0e005bd5b8b1a5fe6689e9711a7" },
+ {16384, "55f59404008276cdac0e2ba0d193af2d40eac5ce" },
+ {18432, "86ae6c4fc72369a54cce39938e2d0296cd9c6ec5" },
+ { 0, NULL },
+};
+
+
+/* CryptoKey = 0x01234567, 0x89abcdef,
+ * 0x01234567, 0x89abcdef,
+ * 0x01234567, 0x89abcdef
+ * IV = 0x12345678, 0x90abcdef
+ * MacKey = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ * 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to AES block size (16 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST cbcAes128md5multiSizeTest506[] =
+{
+ { 16, "7ca4c2ba866751598720c5c4aa0d6786" },
+ { 64, "7dba7fb988e80da609b1fea7254bced8" },
+ { 80, "6b6e863ac5a71d15e3e9b1c86c9ba05f" },
+ { 352, "a1ceb9c2e3021002400d525187a9f38c" },
+ { 512, "596c055c1c55db748379223164075641" },
+ { 1008, "f920989c02f3b3603f53c99d89492377" },
+ { 1344, "2e496b73759d77ed32ea222dbd2e7b41" },
+ { 1408, "7178c046b3a8d772efdb6a71c4991ea4" },
+ { 2048, "a917f0099c69eb94079a8421714b6aad" },
+ { 3072, "693cd5033d7f5391d3c958519fa9e934" },
+ { 4048, "139dca91bcff65b3c40771749052906b" },
+ { 6144, "428d9cef6df4fb70a6e9b6bbe4819e55" },
+ { 6400, "9c0b909e76daa811e12b1fc17000a0c4" },
+ { 6528, "ad876f6297186a7be1f1b907ed860eda" },
+ { 8192, "479cbbaca37dd3191ea1f3e8134a0ef4" },
+ {16384, "60fda559c74f91df538100c9842f2f15" },
+ {18432, "4a3eb1cba1fa45f3981270953f720c42" },
+ { 0, NULL },
+};
+
+
+/* CryptoKey = 0x01234567, 0x89abcdef,
+ * 0x01234567, 0x89abcdef,
+ * 0x01234567, 0x89abcdef;
+ * IV = 0x12345678, 0x90abcdef
+ * MacKey = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ * 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * 0x11, 0x12, 0x13, 0x14
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to AES block size (16 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST cbcAes128sha1multiSizeTest507[] =
+{
+ { 16, "9aa8dc1c45f0946daf78057fa978759c625c1fee" },
+ { 64, "9f588fc1ede851e5f8b20256abc9979465ae2189" },
+ { 80, "13558472d1fc1c90dffec6e5136c7203452d509b" },
+ { 352, "6b93518e006cfaa1f7adb24615e7291fb0a27e06" },
+ { 512, "096874951a77fbbf333e49d80c096ee2016e09bd" },
+ { 1008, "696fc203c2e4b5ae0ec5d1db3f623c490bc6dbac" },
+ { 1344, "79bf77509935ccd3528caaac6a5eb6481f74029b" },
+ { 1408, "627f9462b95fc188e8cfa7eec15119bdc5d4fcf1" },
+ { 2048, "3d50d0c005feba92fe41502d609fced9c882b4d1" },
+ { 3072, "758807e5b983e3a91c06fb218fe0f73f77111e94" },
+ { 4048, "ca90e85242e33f005da3504416a52098d0d31fb2" },
+ { 6144, "8044c1d4fd06642dfc46990b4f18b61ef1e972cf" },
+ { 6400, "166f1f4ea57409f04feba9fb1e39af0e00bd6f43" },
+ { 6528, "0389016a39485d6e330f8b4215ddf718b404f7e9" },
+ { 8192, "6df7ee2a8b61d6f7f860ce8dbf778f0c2a5b508b" },
+ {16384, "a70a6d8dfa1f91ded621c3dbaed34162bc48783f" },
+ {18432, "8dfad627922ce15df1eed10bdbed49244efa57db" },
+ { 0, NULL },
+};
+
+
+void cesaTestPrintStatus(void);
+
+
+/*------------------------- LOCAL FUNCTIONs ---------------------------------*/
+MV_STATUS testCmd(int sid, int iter, MV_CESA_COMMAND* pCmd,
+ MV_CESA_TEST_SESSION* pTestSession, MV_U8* pIV, int ivSize);
+MV_STATUS testClose(int idx);
+MV_STATUS testOpen(int idx);
+void close_session(int sid);
+void cesaTestCheckReady(const MV_CESA_RESULT *r);
+void cesaCheckReady(MV_CESA_RESULT* r);
+void printTestResults(int idx, MV_STATUS status, int checkMode);
+void cesaLastResult(void);
+void cesaTestPrintReq(int req, int offset, int size);
+
+void cesaTestPrintSession(int idx);
+void sizeTest(int testIdx, int iter, int checkMode);
+void multiTest(int iter, int reqSize, int checkMode);
+void oneTest(int testIdx, int caseIdx,int iter, int reqSize, int checkMode);
+void multiSizeTest(int idx, int iter, int checkMode, char* inputData);
+void cesaTest(int iter, int reqSize, int checkMode);
+void cesaOneTest(int testIdx, int caseIdx,int iter, int reqSize, int checkMode);
+void combiTest(int iter, int reqSize, int checkMode);
+void shaTest(int iter, int reqSize, int checkMode);
+void mdTest(int iter, int reqSize, int checkMode);
+void aesTest(int iter, int reqSize, int checkMode);
+void tripleDesTest(int iter, int reqSize, int checkMode);
+void desTest(int iter, int reqSize, int checkMode);
+void cesaTestStop(void);
+MV_STATUS testRun(int idx, int caseIdx, int iter,int reqSize, int checkMode);
+void cesaTestStart(int bufNum, int bufSize);
+
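+/*
+ * Rough usage sketch (from a debug console): call cesaTestStart(bufNum, bufSize)
+ * once to allocate the buffers and init the engine (0,0 uses the defaults),
+ * then testOpen(idx), testRun(idx, caseIdx, iter, reqSize, checkMode) and
+ * testClose(idx) for an individual session; e.g. idx=100 is the first 3DES
+ * session and caseIdx selects an entry of cesaTestCases[].
+ */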
+
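+/*
+ * Rate in Mbps: kBits = iterations * request size * 8 / 1000, elapsed time is
+ * in ms, and kbit/ms == Mbit/s; *remainder gets the first digit after the
+ * decimal point.  E.g. 10000 requests of 1500 bytes in 120 ms gives
+ * 120000 kbits / 120 ms = 1000 Mbps.
+ */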
+static MV_U32 getRate(MV_U32* remainder)
+{
+ MV_U32 kBits, milliSec, rate;
+
+ milliSec = 0;
+ if( (cesaEndTicks - cesaBeginTicks) > 0)
+ {
+ milliSec = CESA_TEST_TICK_TO_MS(cesaEndTicks - cesaBeginTicks);
+ }
+ if(milliSec == 0)
+ {
+ if(remainder != NULL)
+ *remainder = 0;
+ return 0;
+ }
+
+ kBits = (cesaIteration*cesaRateSize*8)/1000;
+ rate = kBits/milliSec;
+ if(remainder != NULL)
+ *remainder = ((kBits % milliSec)*10)/milliSec;
+
+ return rate;
+}
+
+static char* extractMbuf(MV_CESA_MBUF *pMbuf,
+ int offset, int size, char* hexStr)
+{
+ mvCesaCopyFromMbuf((MV_U8*)cesaBinBuffer, pMbuf, offset, size);
+ mvBinToHex((const MV_U8*)cesaBinBuffer, hexStr, size);
+
+ return hexStr;
+}
+
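+/*
+ * Compare the Mbuf contents at 'offset' against hexString, treating the
+ * string as a pattern that repeats up to checkSize bytes.  Returns MV_TRUE
+ * on any mismatch or copy error, MV_FALSE if the data matches.
+ */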
+static MV_BOOL cesaCheckMbuf(MV_CESA_MBUF *pMbuf,
+ const char* hexString, int offset,
+ int checkSize)
+{
+ MV_BOOL isFailed = MV_FALSE;
+ MV_STATUS status;
+ int size = strlen(hexString)/2;
+ int checkedSize = 0;
+/*
+ mvOsPrintf("cesaCheckMbuf: pMbuf=%p, offset=%d, checkSize=%d, mBufSize=%d\n",
+ pMbuf, offset, checkSize, pMbuf->mbufSize);
+*/
+ if(pMbuf->mbufSize < (checkSize + offset))
+ {
+ mvOsPrintf("checkSize (%d) is too large: offset=%d, mbufSize=%d\n",
+ checkSize, offset, pMbuf->mbufSize);
+ return MV_TRUE;
+ }
+ status = mvCesaCopyFromMbuf((MV_U8*)cesaBinBuffer, pMbuf, offset, checkSize);
+ if(status != MV_OK)
+ {
+ mvOsPrintf("CesaTest: Can't copy %d bytes from Mbuf=%p to checkBuf=%p\n",
+ checkSize, pMbuf, cesaBinBuffer);
+ return MV_TRUE;
+ }
+/*
+ mvDebugMemDump(cesaBinBuffer, size, 1);
+*/
+ mvHexToBin(hexString, (MV_U8*)cesaExpBinBuffer, size);
+
+ /* Compare buffers */
+ while(checkSize > checkedSize)
+ {
+ size = MV_MIN(size, (checkSize - checkedSize));
+ if(memcmp(cesaExpBinBuffer, &cesaBinBuffer[checkedSize], size) != 0)
+ {
+ mvOsPrintf("CheckMbuf failed: checkSize=%d, size=%d, checkedSize=%d\n",
+ checkSize, size, checkedSize);
+ mvDebugMemDump(&cesaBinBuffer[checkedSize], size, 1);
+ mvDebugMemDump(cesaExpBinBuffer, size, 1);
+
+ isFailed = MV_TRUE;
+ break;
+ }
+ checkedSize += size;
+ }
+
+ return isFailed;
+}
+
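+/*
+ * Fill the Mbuf from 'offset' by repeating the hexString pattern until
+ * reqSize bytes have been written, then set mbufSize accordingly.
+ */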
+static MV_STATUS cesaSetMbuf(MV_CESA_MBUF *pMbuf,
+ const char* hexString,
+ int offset, int reqSize)
+{
+ MV_STATUS status = MV_OK;
+ int copySize, size = strlen(hexString)/2;
+
+ mvHexToBin(hexString, (MV_U8*)cesaBinBuffer, size);
+
+ copySize = 0;
+ while(reqSize > copySize)
+ {
+ size = MV_MIN(size, (reqSize - copySize));
+
+ status = mvCesaCopyToMbuf((MV_U8*)cesaBinBuffer, pMbuf, offset+copySize, size);
+ if(status != MV_OK)
+ {
+ mvOsPrintf("cesaSetMbuf Error: Copy %d of %d bytes to MBuf\n",
+ copySize, reqSize);
+ break;
+ }
+ copySize += size;
+ }
+ pMbuf->mbufSize = offset+copySize;
+ return status;
+}
+
+static MV_CESA_TEST_SESSION* getTestSessionDb(int idx, int* pTestIdx)
+{
+ int testIdx, dbIdx = idx/100;
+
+ if(dbIdx > MAX_TEST_TYPE)
+ {
+ mvOsPrintf("Wrong index %d - No such test type\n", idx);
+ return NULL;
+ }
+ testIdx = idx % 100;
+
+ if(testIdx >= cesaTestsDB[dbIdx].numSessions)
+ {
+ mvOsPrintf("Wrong index %d - No such test\n", idx);
+ return NULL;
+ }
+ if(pTestIdx != NULL)
+ *pTestIdx = testIdx;
+
+ return cesaTestsDB[dbIdx].pSessions;
+}
+
+/* Debug */
+void cesaTestPrintReq(int req, int offset, int size)
+{
+ MV_CESA_MBUF* pMbuf;
+
+ mvOsPrintf("cesaTestPrintReq: req=%d, offset=%d, size=%d\n",
+ req, offset, size);
+ mvDebugMemDump(cesaCmdRing, 128, 4);
+
+ pMbuf = cesaCmdRing[req].pSrc;
+ mvCesaDebugMbuf("src", pMbuf, offset,size);
+ pMbuf = cesaCmdRing[req].pDst;
+ mvCesaDebugMbuf("dst", pMbuf, offset, size);
+
+ cesaTestPrintStatus();
+}
+
+void cesaLastResult(void)
+{
+ mvOsPrintf("Last Result: ReqId = %d, SessionId = %d, rc = (%d)\n",
+ (MV_U32)cesaResult.pReqPrv, cesaResult.sessionId,
+ cesaResult.retCode);
+}
+
+void printTestResults(int idx, MV_STATUS status, int checkMode)
+{
+ int testIdx;
+ MV_CESA_TEST_SESSION* pTestSessions = getTestSessionDb(idx, &testIdx);
+
+ if(pTestSessions == NULL)
+ return;
+
+ mvOsPrintf("%-35s %4dx%-4d : ", pTestSessions[testIdx].name,
+ cesaIteration, cesaReqSize);
+ if( (status == MV_OK) &&
+ (cesaCryptoError == 0) &&
+ (cesaError == 0) &&
+ (cesaReqIdError == 0) )
+ {
+ mvOsPrintf("Passed, Rate=%3u.%u Mbps (%5u cpp)\n",
+ cesaRate, cesaRateAfterDot, cesaEndTicks - cesaBeginTicks);
+ }
+ else
+ {
+ mvOsPrintf("Failed, Status = 0x%x\n", status);
+ if(cesaCryptoError > 0)
+ mvOsPrintf("cryptoError : %d\n", cesaCryptoError);
+ if(cesaReqIdError > 0)
+ mvOsPrintf("reqIdError : %d\n", cesaReqIdError);
+ if(cesaError > 0)
+ mvOsPrintf("cesaError : %d\n", cesaError);
+ }
+ if(cesaTestIsrMissCount > 0)
+ mvOsPrintf("cesaIsrMissed : %d\n", cesaTestIsrMissCount);
+}
+
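+/*
+ * Per-request completion handler: checks that requests complete in the
+ * expected order, checks the return code and (in FULL/FAST check modes)
+ * compares the destination Mbuf against cesaOutputHexStr; once all
+ * iterations are done it latches the end time and computes the rate.
+ */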
+void cesaCheckReady(MV_CESA_RESULT* r)
+{
+ int reqId;
+ MV_CESA_MBUF *pMbuf;
+ MV_BOOL isFailed;
+
+ cesaResult = *r;
+ reqId = (int)cesaResult.pReqPrv;
+ pMbuf = cesaCmdRing[reqId].pDst;
+
+/*
+ mvOsPrintf("cesaCheckReady: reqId=%d, checkOffset=%d, checkSize=%d\n",
+ reqId, cesaCheckOffset, cesaCheckSize);
+*/
+ /* Check expected reqId */
+ if(reqId != cesaExpReqId)
+ {
+ cesaReqIdError++;
+/*
+ mvOsPrintf("CESA reqId Error: cbIter=%d (%d), reqId=%d, expReqId=%d\n",
+ cesaCbIter, cesaIteration, reqId, cesaExpReqId);
+*/
+ }
+ else
+ {
+ if( (cesaCheckMode == CESA_FULL_CHECK_MODE) ||
+ (cesaCheckMode == CESA_FAST_CHECK_MODE) )
+ {
+ if(cesaResult.retCode != MV_OK)
+ {
+ cesaError++;
+
+ mvOsPrintf("CESA Error: cbIter=%d (%d), reqId=%d, rc=%d\n",
+ cesaCbIter, cesaIteration, reqId, cesaResult.retCode);
+ }
+ else
+ {
+ if( (cesaCheckSize > 0) && (cesaOutputHexStr != NULL) )
+ {
+ /* Check expected output */
+
+ isFailed = cesaCheckMbuf(pMbuf, cesaOutputHexStr, cesaCheckOffset, cesaCheckSize);
+ if(isFailed)
+ {
+ mvOsPrintf("CESA Crypto Error: cbIter=%d (%d), reqId=%d\n",
+ cesaCbIter, cesaIteration, reqId);
+
+ CESA_TEST_DEBUG_PRINT(("Error: reqId=%d, reqSize=%d, checkOffset=%d, checkSize=%d\n",
+ reqId, cesaReqSize, cesaCheckOffset, cesaCheckSize));
+
+ CESA_TEST_DEBUG_PRINT(("Output str: %s\n", cesaOutputHexStr));
+
+ CESA_TEST_DEBUG_CODE( mvCesaDebugMbuf("error", pMbuf, 0, cesaCheckOffset+cesaCheckSize) );
+
+ cesaCryptoError++;
+ }
+ }
+ }
+ }
+ }
+ if(cesaCheckMode == CESA_SHOW_CHECK_MODE)
+ {
+ extractMbuf(pMbuf, cesaCheckOffset, cesaCheckSize, cesaHexBuffer);
+ mvOsPrintf("%4d, %s\n", cesaCheckOffset, cesaHexBuffer);
+ }
+
+ cesaCbIter++;
+ if(cesaCbIter >= cesaIteration)
+ {
+ cesaCbIter = 0;
+ cesaExpReqId = 0;
+ cesaIsReady = MV_TRUE;
+
+ cesaEndTicks = CESA_TEST_TICK_GET();
+ cesaRate = getRate(&cesaRateAfterDot);
+ }
+ else
+ {
+ cesaExpReqId = reqId + 1;
+ if(cesaExpReqId == CESA_DEF_REQ_SIZE)
+ cesaExpReqId = 0;
+ }
+}
+
+
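+/*
+ * Test ISR: acknowledges the CESA cause register, fetches one ready result
+ * via mvCesaReadyGet() and hands it to cesaCheckReady(); the prototype and
+ * return type differ per OS (NetBSD / Linux kernel / VxWorks).
+ */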
+#ifdef MV_NETBSD
+static int cesaTestReadyIsr(void *arg)
+#else
+#ifdef __KERNEL__
+static irqreturn_t cesaTestReadyIsr( int irq , void *dev_id)
+#endif
+#ifdef MV_VXWORKS
+void cesaTestReadyIsr(void)
+#endif
+#endif
+{
+ MV_U32 cause;
+ MV_STATUS status;
+ MV_CESA_RESULT result;
+
+ cesaTestIsrCount++;
+ /* Clear cause register */
+ cause = MV_REG_READ(MV_CESA_ISR_CAUSE_REG);
+ if( (cause & MV_CESA_CAUSE_ACC_DMA_ALL_MASK) == 0)
+ {
+ mvOsPrintf("cesaTestReadyIsr: cause=0x%x\n", cause);
+#ifdef MV_NETBSD
+ return 0;
+#else
+#ifdef __KERNEL__
+ return 1;
+#else
+ return;
+#endif
+#endif
+ }
+
+ MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG, 0);
+
+ while(MV_TRUE)
+ {
+ /* Get Ready requests */
+ status = mvCesaReadyGet(&result);
+ if(status == MV_OK)
+ cesaCheckReady(&result);
+
+ break;
+ }
+ if( (cesaTestFull == 1) && (status != MV_BUSY) )
+ {
+ cesaTestFull = 0;
+ CESA_TEST_WAKE_UP();
+ }
+
+#ifdef __KERNEL__
+ return 1;
+#endif
+}
+
+void
+cesaTestCheckReady(const MV_CESA_RESULT *r)
+{
+ MV_CESA_RESULT result = *r;
+
+ cesaCheckReady(&result);
+
+ if (cesaTestFull == 1) {
+ cesaTestFull = 0;
+ CESA_TEST_WAKE_UP();
+ }
+}
+
+static INLINE int open_session(MV_CESA_OPEN_SESSION* pOs)
+{
+ MV_U16 sid;
+ MV_STATUS status;
+
+ status = mvCesaSessionOpen(pOs, (short*)&sid);
+ if(status != MV_OK)
+ {
+ mvOsPrintf("CesaTest: Can't open new session - status = 0x%x\n",
+ status);
+ return -1;
+ }
+
+ return (int)sid;
+}
+
+void close_session(int sid)
+{
+ MV_STATUS status;
+
+ status = mvCesaSessionClose(sid);
+ if(status != MV_OK)
+ {
+ mvOsPrintf("CesaTest: Can't close session %d - status = 0x%x\n",
+ sid, status);
+ }
+}
+
+MV_STATUS testOpen(int idx)
+{
+ MV_CESA_OPEN_SESSION os;
+ int sid, i, testIdx;
+ MV_CESA_TEST_SESSION* pTestSession;
+ MV_U16 digestSize = 0;
+
+ pTestSession = getTestSessionDb(idx, &testIdx);
+ if(pTestSession == NULL)
+ {
+		mvOsPrintf("Test %d does not exist\n", idx);
+ return MV_BAD_PARAM;
+ }
+ pTestSession = &pTestSession[testIdx];
+
+ if(pTestSession->sid != -1)
+ {
+ mvOsPrintf("Session for test %d already created: sid=%d\n",
+ idx, pTestSession->sid);
+ return MV_OK;
+ }
+
+ os.cryptoAlgorithm = pTestSession->cryptoAlgorithm;
+ os.macMode = pTestSession->macAlgorithm;
+ switch(os.macMode)
+ {
+ case MV_CESA_MAC_MD5:
+ case MV_CESA_MAC_HMAC_MD5:
+ digestSize = MV_CESA_MD5_DIGEST_SIZE;
+ break;
+
+ case MV_CESA_MAC_SHA1:
+ case MV_CESA_MAC_HMAC_SHA1:
+ digestSize = MV_CESA_SHA1_DIGEST_SIZE;
+ break;
+
+ case MV_CESA_MAC_NULL:
+ digestSize = 0;
+ }
+ os.cryptoMode = pTestSession->cryptoMode;
+ os.direction = pTestSession->direction;
+ os.operation = pTestSession->operation;
+
+ for(i=0; i<pTestSession->cryptoKeySize; i++)
+ os.cryptoKey[i] = pTestSession->pCryptoKey[i];
+
+ os.cryptoKeyLength = pTestSession->cryptoKeySize;
+
+ for(i=0; i<pTestSession->macKeySize; i++)
+ os.macKey[i] = pTestSession->pMacKey[i];
+
+ os.macKeyLength = pTestSession->macKeySize;
+ os.digestSize = digestSize;
+
+ sid = open_session(&os);
+ if(sid == -1)
+ {
+ mvOsPrintf("Can't open session for test %d: rc=0x%x\n",
+ idx, cesaResult.retCode);
+ return cesaResult.retCode;
+ }
+ CESA_TEST_DEBUG_PRINT(("Opened session: sid = %d\n", sid));
+ pTestSession->sid = sid;
+ return MV_OK;
+}
+
+MV_STATUS testClose(int idx)
+{
+ int testIdx;
+ MV_CESA_TEST_SESSION* pTestSession;
+
+ pTestSession = getTestSessionDb(idx, &testIdx);
+ if(pTestSession == NULL)
+ {
+		mvOsPrintf("Test %d does not exist\n", idx);
+ return MV_BAD_PARAM;
+ }
+ pTestSession = &pTestSession[testIdx];
+
+ if(pTestSession->sid == -1)
+ {
+		mvOsPrintf("Test session %d is not open\n", idx);
+ return MV_NO_SUCH;
+ }
+
+ close_session(pTestSession->sid);
+ pTestSession->sid = -1;
+
+ return MV_OK;
+}
+
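+/*
+ * Prepare all CESA_DEF_REQ_SIZE ring entries (source filled from
+ * cesaInputHexStr, destination cleared), then submit 'iter' requests through
+ * mvCesaAction(), sleeping on MV_NO_RESOURCE until the ISR frees a slot.
+ * In the SW check modes the digest is computed in software instead of being
+ * submitted to the engine.
+ */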
+MV_STATUS testCmd(int sid, int iter, MV_CESA_COMMAND* pCmd,
+ MV_CESA_TEST_SESSION* pTestSession, MV_U8* pIV, int ivSize)
+{
+ int cmdReqId = 0;
+ int i;
+ MV_STATUS rc = MV_OK;
+ char ivZeroHex[] = "0000";
+
+ if(iter == 0)
+ iter = CESA_DEF_ITER_NUM;
+
+ if(pCmd == NULL)
+ {
+ mvOsPrintf("testCmd failed: pCmd=NULL\n");
+ return MV_BAD_PARAM;
+ }
+ pCmd->sessionId = sid;
+
+ cesaCryptoError = 0;
+ cesaReqIdError = 0;
+ cesaError = 0;
+ cesaTestIsrMissCount = 0;
+ cesaIsReady = MV_FALSE;
+ cesaIteration = iter;
+
+ if(cesaInputHexStr == NULL)
+ cesaInputHexStr = cesaPlainHexEbc;
+
+ for(i=0; i<CESA_DEF_REQ_SIZE; i++)
+ {
+ pCmd->pSrc = (MV_CESA_MBUF*)(cesaCmdRing[i].pSrc);
+ if(pIV != NULL)
+ {
+ /* If IV from SA - set IV in Source buffer to zeros */
+ cesaSetMbuf(pCmd->pSrc, ivZeroHex, 0, pCmd->cryptoOffset);
+ cesaSetMbuf(pCmd->pSrc, cesaInputHexStr, pCmd->cryptoOffset,
+ (cesaReqSize - pCmd->cryptoOffset));
+ }
+ else
+ {
+ cesaSetMbuf(pCmd->pSrc, cesaInputHexStr, 0, cesaReqSize);
+ }
+ pCmd->pDst = (MV_CESA_MBUF*)(cesaCmdRing[i].pDst);
+ cesaSetMbuf(pCmd->pDst, cesaNullPlainHexText, 0, cesaReqSize);
+
+ memcpy(&cesaCmdRing[i], pCmd, sizeof(*pCmd));
+ }
+
+ if(cesaCheckMode == CESA_SW_SHOW_CHECK_MODE)
+ {
+ MV_U8 pDigest[MV_CESA_MAX_DIGEST_SIZE];
+
+ if(pTestSession->macAlgorithm == MV_CESA_MAC_MD5)
+ {
+ mvMD5(pCmd->pSrc->pFrags[0].bufVirtPtr, pCmd->macLength, pDigest);
+ mvOsPrintf("SW HASH_MD5: reqSize=%d, macLength=%d\n",
+ cesaReqSize, pCmd->macLength);
+ mvDebugMemDump(pDigest, MV_CESA_MD5_DIGEST_SIZE, 1);
+ return MV_OK;
+ }
+ if(pTestSession->macAlgorithm == MV_CESA_MAC_SHA1)
+ {
+ mvSHA1(pCmd->pSrc->pFrags[0].bufVirtPtr, pCmd->macLength, pDigest);
+ mvOsPrintf("SW HASH_SHA1: reqSize=%d, macLength=%d\n",
+ cesaReqSize, pCmd->macLength);
+ mvDebugMemDump(pDigest, MV_CESA_SHA1_DIGEST_SIZE, 1);
+ return MV_OK;
+ }
+ }
+
+ cesaBeginTicks = CESA_TEST_TICK_GET();
+ CESA_TEST_DEBUG_CODE( memset(cesaTestTrace, 0, sizeof(cesaTestTrace));
+ cesaTestTraceIdx = 0;
+ );
+
+ if(cesaCheckMode == CESA_SW_NULL_CHECK_MODE)
+ {
+ volatile MV_U8 pDigest[MV_CESA_MAX_DIGEST_SIZE];
+
+ for(i=0; i<iter; i++)
+ {
+ if(pTestSession->macAlgorithm == MV_CESA_MAC_MD5)
+ {
+ mvMD5(pCmd->pSrc->pFrags[0].bufVirtPtr, pCmd->macLength, (unsigned char*)pDigest);
+ }
+ if(pTestSession->macAlgorithm == MV_CESA_MAC_SHA1)
+ {
+ mvSHA1(pCmd->pSrc->pFrags[0].bufVirtPtr, pCmd->macLength, (MV_U8 *)pDigest);
+ }
+ }
+ cesaEndTicks = CESA_TEST_TICK_GET();
+ cesaRate = getRate(&cesaRateAfterDot);
+ cesaIsReady = MV_TRUE;
+
+ return MV_OK;
+ }
+
+ /*cesaTestIsrCount = 0;*/
+ /*mvCesaDebugStatsClear();*/
+
+#ifndef MV_NETBSD
+ MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG, 0);
+#endif
+
+ for(i=0; i<iter; i++)
+ {
+ unsigned long flags;
+
+ pCmd = &cesaCmdRing[cmdReqId];
+ pCmd->pReqPrv = (void*)cmdReqId;
+
+ CESA_TEST_LOCK(flags);
+
+ rc = mvCesaAction(pCmd);
+ if(rc == MV_NO_RESOURCE)
+ cesaTestFull = 1;
+
+ CESA_TEST_UNLOCK(flags);
+
+ if(rc == MV_NO_RESOURCE)
+ {
+ CESA_TEST_LOCK(flags);
+ CESA_TEST_WAIT( (cesaTestFull == 0), 100);
+ CESA_TEST_UNLOCK(flags);
+ if(cesaTestFull == 1)
+ {
+ mvOsPrintf("CESA Test timeout: i=%d, iter=%d, cesaTestFull=%d\n",
+ i, iter, cesaTestFull);
+ cesaTestFull = 0;
+ return MV_TIMEOUT;
+ }
+
+ CESA_TEST_LOCK(flags);
+
+ rc = mvCesaAction(pCmd);
+
+ CESA_TEST_UNLOCK(flags);
+ }
+ if( (rc != MV_OK) && (rc != MV_NO_MORE) )
+ {
+ mvOsPrintf("mvCesaAction failed: rc=%d\n", rc);
+ return rc;
+ }
+
+ cmdReqId++;
+ if(cmdReqId >= CESA_DEF_REQ_SIZE)
+ cmdReqId = 0;
+
+#ifdef MV_LINUX
+		/* Reschedule every 16 requests */
+ if( (i & 0xF) == 0)
+ schedule();
+#endif
+ }
+ return MV_OK;
+}
+
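+/*
+ * One-time test setup: for each of the CESA_DEF_REQ_SIZE requests a single
+ * cached allocation of bufSize*bufNum*2 bytes is split into bufNum source and
+ * bufNum destination fragments (interleaved src/dst); on non-NetBSD builds
+ * the CESA unit itself is also initialized (SRAM window + mvCesaInit) and the
+ * ready interrupt is hooked up.
+ */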
+void cesaTestStart(int bufNum, int bufSize)
+{
+ int i, j, idx;
+ MV_CESA_MBUF *pMbufSrc, *pMbufDst;
+ MV_BUF_INFO *pFragsSrc, *pFragsDst;
+ char *pBuf;
+#ifndef MV_NETBSD
+ int numOfSessions, queueDepth;
+ char *pSram;
+ MV_STATUS status;
+ MV_CPU_DEC_WIN addrDecWin;
+#endif
+
+ cesaCmdRing = mvOsMalloc(sizeof(MV_CESA_COMMAND) * CESA_DEF_REQ_SIZE);
+ if(cesaCmdRing == NULL)
+ {
+ mvOsPrintf("testStart: Can't allocate %ld bytes of memory\n",
+ sizeof(MV_CESA_COMMAND) * CESA_DEF_REQ_SIZE);
+ return;
+ }
+ memset(cesaCmdRing, 0, sizeof(MV_CESA_COMMAND) * CESA_DEF_REQ_SIZE);
+
+ if(bufNum == 0)
+ bufNum = CESA_DEF_BUF_NUM;
+
+ if(bufSize == 0)
+ bufSize = CESA_DEF_BUF_SIZE;
+
+ cesaBufNum = bufNum;
+ cesaBufSize = bufSize;
+ mvOsPrintf("CESA test started: bufNum = %d, bufSize = %d\n",
+ bufNum, bufSize);
+
+ cesaHexBuffer = mvOsMalloc(2*bufNum*bufSize);
+ if(cesaHexBuffer == NULL)
+ {
+ mvOsPrintf("testStart: Can't malloc %d bytes for cesaHexBuffer.\n",
+ 2*bufNum*bufSize);
+ return;
+ }
+ memset(cesaHexBuffer, 0, (2*bufNum*bufSize));
+
+ cesaBinBuffer = mvOsMalloc(bufNum*bufSize);
+ if(cesaBinBuffer == NULL)
+ {
+ mvOsPrintf("testStart: Can't malloc %d bytes for cesaBinBuffer\n",
+ bufNum*bufSize);
+ return;
+ }
+ memset(cesaBinBuffer, 0, (bufNum*bufSize));
+
+ cesaExpBinBuffer = mvOsMalloc(bufNum*bufSize);
+ if(cesaExpBinBuffer == NULL)
+ {
+ mvOsPrintf("testStart: Can't malloc %d bytes for cesaExpBinBuffer\n",
+ bufNum*bufSize);
+ return;
+ }
+ memset(cesaExpBinBuffer, 0, (bufNum*bufSize));
+
+ CESA_TEST_WAIT_INIT();
+
+ pMbufSrc = mvOsMalloc(sizeof(MV_CESA_MBUF) * CESA_DEF_REQ_SIZE);
+ pFragsSrc = mvOsMalloc(sizeof(MV_BUF_INFO) * bufNum * CESA_DEF_REQ_SIZE);
+
+ pMbufDst = mvOsMalloc(sizeof(MV_CESA_MBUF) * CESA_DEF_REQ_SIZE);
+ pFragsDst = mvOsMalloc(sizeof(MV_BUF_INFO) * bufNum * CESA_DEF_REQ_SIZE);
+
+ if( (pMbufSrc == NULL) || (pFragsSrc == NULL) ||
+ (pMbufDst == NULL) || (pFragsDst == NULL) )
+ {
+ mvOsPrintf("testStart: Can't malloc Src and Dst pMbuf and pFrags structures.\n");
+ /* !!!! Dima cesaTestCleanup();*/
+ return;
+ }
+
+ memset(pMbufSrc, 0, sizeof(MV_CESA_MBUF) * CESA_DEF_REQ_SIZE);
+ memset(pFragsSrc, 0, sizeof(MV_BUF_INFO) * bufNum * CESA_DEF_REQ_SIZE);
+
+ memset(pMbufDst, 0, sizeof(MV_CESA_MBUF) * CESA_DEF_REQ_SIZE);
+ memset(pFragsDst, 0, sizeof(MV_BUF_INFO) * bufNum * CESA_DEF_REQ_SIZE);
+
+ mvOsPrintf("Cesa Test Start: pMbufSrc=%p, pFragsSrc=%p, pMbufDst=%p, pFragsDst=%p\n",
+ pMbufSrc, pFragsSrc, pMbufDst, pFragsDst);
+
+ idx = 0;
+ for(i=0; i<CESA_DEF_REQ_SIZE; i++)
+ {
+ pBuf = mvOsIoCachedMalloc(cesaTestOSHandle,bufSize * bufNum * 2,
+ &cesaReqBufs[i].bufPhysAddr,
+ &cesaReqBufs[i].memHandle);
+ if(pBuf == NULL)
+ {
+ mvOsPrintf("testStart: Can't malloc %d bytes for pBuf\n",
+ bufSize * bufNum * 2);
+ return;
+ }
+
+ memset(pBuf, 0, bufSize * bufNum * 2);
+ mvOsCacheFlush(cesaTestOSHandle,pBuf, bufSize * bufNum * 2);
+
+ cesaReqBufs[i].bufVirtPtr = (MV_U8*)pBuf;
+ cesaReqBufs[i].bufSize = bufSize * bufNum * 2;
+
+ cesaCmdRing[i].pSrc = &pMbufSrc[i];
+ cesaCmdRing[i].pSrc->pFrags = &pFragsSrc[idx];
+ cesaCmdRing[i].pSrc->numFrags = bufNum;
+ cesaCmdRing[i].pSrc->mbufSize = 0;
+
+ cesaCmdRing[i].pDst = &pMbufDst[i];
+ cesaCmdRing[i].pDst->pFrags = &pFragsDst[idx];
+ cesaCmdRing[i].pDst->numFrags = bufNum;
+ cesaCmdRing[i].pDst->mbufSize = 0;
+
+ for(j=0; j<bufNum; j++)
+ {
+ cesaCmdRing[i].pSrc->pFrags[j].bufVirtPtr = (MV_U8*)pBuf;
+ cesaCmdRing[i].pSrc->pFrags[j].bufSize = bufSize;
+ pBuf += bufSize;
+ cesaCmdRing[i].pDst->pFrags[j].bufVirtPtr = (MV_U8*)pBuf;
+ cesaCmdRing[i].pDst->pFrags[j].bufSize = bufSize;
+ pBuf += bufSize;
+ }
+ idx += bufNum;
+ }
+
+#ifndef MV_NETBSD
+ if (mvCpuIfTargetWinGet(CRYPT_ENG, &addrDecWin) == MV_OK)
+ pSram = (char*)addrDecWin.addrWin.baseLow;
+ else
+ {
+		mvOsPrintf("cesaTestStart: ERR. mvCpuIfTargetWinGet failed\n");
+ return;
+ }
+
+#ifdef MV_CESA_NO_SRAM
+ pSram = mvOsMalloc(4*1024+8);
+ if(pSram == NULL)
+ {
+ mvOsPrintf("CesaTest: can't allocate %d bytes for SRAM simulation\n",
+ 4*1024+8);
+ /* !!!! Dima cesaTestCleanup();*/
+ return;
+ }
+ pSram = (MV_U8*)MV_ALIGN_UP((MV_U32)pSram, 8);
+#endif /* MV_CESA_NO_SRAM */
+
+ numOfSessions = CESA_DEF_SESSION_NUM;
+ queueDepth = CESA_DEF_REQ_SIZE - MV_CESA_MAX_CHAN;
+
+ status = mvCesaInit(numOfSessions, queueDepth, pSram, NULL);
+ if(status != MV_OK)
+ {
+		mvOsPrintf("mvCesaInit failed: status = 0x%x\n", status);
+ /* !!!! Dima cesaTestCleanup();*/
+ return;
+ }
+#endif /* !MV_NETBSD */
+
+ /* Prepare data for tests */
+ for(i=0; i<50; i++)
+ strcat((char*)cesaDataHexStr3, "dd");
+
+ strcpy((char*)cesaDataAndMd5digest3, cesaDataHexStr3);
+ strcpy((char*)cesaDataAndSha1digest3, cesaDataHexStr3);
+
+ /* Digest must be 8 byte aligned */
+ for(; i<56; i++)
+ {
+ strcat((char*)cesaDataAndMd5digest3, "00");
+ strcat((char*)cesaDataAndSha1digest3, "00");
+ }
+ strcat((char*)cesaDataAndMd5digest3, cesaHmacMd5digestHex3);
+ strcat((char*)cesaDataAndSha1digest3, cesaHmacSha1digestHex3);
+
+#ifndef MV_NETBSD
+ MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG, 0);
+ MV_REG_WRITE( MV_CESA_ISR_MASK_REG, MV_CESA_CAUSE_ACC_DMA_MASK);
+#endif
+
+#ifdef MV_VXWORKS
+ {
+ MV_STATUS status;
+
+ status = intConnect((VOIDFUNCPTR *)INT_LVL_CESA, cesaTestReadyIsr, (int)NULL);
+ if (status != OK)
+ {
+ mvOsPrintf("CESA: Can't connect CESA (%d) interrupt, status=0x%x \n",
+ INT_LVL_CESA, status);
+ /* !!!! Dima cesaTestCleanup();*/
+ return;
+ }
+ cesaSemId = semMCreate(SEM_Q_PRIORITY | SEM_INVERSION_SAFE | SEM_DELETE_SAFE);
+ if(cesaSemId == NULL)
+ {
+ mvOsPrintf("cesaTestStart: Can't create semaphore\n");
+ return;
+ }
+ intEnable(INT_LVL_CESA);
+ }
+#endif /* MV_VXWORKS */
+
+#if !defined(MV_NETBSD) && defined(__KERNEL__)
+    if( request_irq(CESA_IRQ, cesaTestReadyIsr, SA_INTERRUPT, "cesa_test", NULL) )
+ {
+ mvOsPrintf( "cannot assign irq\n" );
+ /* !!!! Dima cesaTestCleanup();*/
+ return;
+ }
+ spin_lock_init( &cesaLock );
+#endif
+}
+
+MV_STATUS testRun(int idx, int caseIdx, int iter,
+ int reqSize, int checkMode)
+{
+ int testIdx, count, sid, digestSize;
+ int blockSize;
+ MV_CESA_TEST_SESSION* pTestSession;
+ MV_CESA_COMMAND cmd;
+ MV_STATUS status;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ pTestSession = getTestSessionDb(idx, &testIdx);
+ if(pTestSession == NULL)
+ {
+        mvOsPrintf("Test %d does not exist\n", idx);
+ return MV_BAD_PARAM;
+ }
+ pTestSession = &pTestSession[testIdx];
+
+ sid = pTestSession->sid;
+ if(sid == -1)
+ {
+        mvOsPrintf("Test %d is not open\n", idx);
+ return MV_BAD_STATE;
+ }
+ switch(pTestSession->cryptoAlgorithm)
+ {
+ case MV_CESA_CRYPTO_DES:
+ case MV_CESA_CRYPTO_3DES:
+ blockSize = MV_CESA_DES_BLOCK_SIZE;
+ break;
+
+ case MV_CESA_CRYPTO_AES:
+ blockSize = MV_CESA_AES_BLOCK_SIZE;
+ break;
+
+ case MV_CESA_CRYPTO_NULL:
+ blockSize = 0;
+ break;
+
+ default:
+ mvOsPrintf("cesaTestRun: Bad CryptoAlgorithm=%d\n",
+ pTestSession->cryptoAlgorithm);
+ return MV_BAD_PARAM;
+ }
+ switch(pTestSession->macAlgorithm)
+ {
+ case MV_CESA_MAC_MD5:
+ case MV_CESA_MAC_HMAC_MD5:
+ digestSize = MV_CESA_MD5_DIGEST_SIZE;
+ break;
+
+ case MV_CESA_MAC_SHA1:
+ case MV_CESA_MAC_HMAC_SHA1:
+ digestSize = MV_CESA_SHA1_DIGEST_SIZE;
+ break;
+ default:
+ digestSize = 0;
+ }
+
+ if(iter == 0)
+ iter = CESA_DEF_ITER_NUM;
+
+ if(pTestSession->direction == MV_CESA_DIR_ENCODE)
+ {
+ cesaOutputHexStr = cesaTestCases[caseIdx].cipherHexStr;
+ cesaInputHexStr = cesaTestCases[caseIdx].plainHexStr;
+ }
+ else
+ {
+ cesaOutputHexStr = cesaTestCases[caseIdx].plainHexStr;
+ cesaInputHexStr = cesaTestCases[caseIdx].cipherHexStr;
+ }
+
+ cmd.sessionId = sid;
+ if(checkMode == CESA_FAST_CHECK_MODE)
+ {
+ cmd.cryptoLength = cesaTestCases[caseIdx].cryptoLength;
+ cmd.macLength = cesaTestCases[caseIdx].macLength;
+ }
+ else
+ {
+ cmd.cryptoLength = reqSize;
+ cmd.macLength = reqSize;
+ }
+ cesaRateSize = cmd.cryptoLength;
+ cesaReqSize = cmd.cryptoLength;
+ cmd.cryptoOffset = 0;
+ if(pTestSession->operation != MV_CESA_MAC_ONLY)
+ {
+ if( (pTestSession->cryptoMode == MV_CESA_CRYPTO_CBC) ||
+ (pTestSession->cryptoMode == MV_CESA_CRYPTO_CTR) )
+ {
+ cmd.ivOffset = 0;
+ cmd.cryptoOffset = blockSize;
+ if(cesaTestCases[caseIdx].pCryptoIV == NULL)
+ {
+ cmd.ivFromUser = 1;
+ }
+ else
+ {
+ cmd.ivFromUser = 0;
+ mvCesaCryptoIvSet(cesaTestCases[caseIdx].pCryptoIV, blockSize);
+ }
+ cesaReqSize = cmd.cryptoOffset + cmd.cryptoLength;
+ }
+ }
+
+/*
+ mvOsPrintf("ivFromUser=%d, cryptoLength=%d, cesaReqSize=%d, cryptoOffset=%d\n",
+ cmd.ivFromUser, cmd.cryptoLength, cesaReqSize, cmd.cryptoOffset);
+*/
+ if(pTestSession->operation != MV_CESA_CRYPTO_ONLY)
+ {
+ cmd.macOffset = cmd.cryptoOffset;
+
+ if(cesaTestCases[caseIdx].digestOffset == -1)
+ {
+ cmd.digestOffset = cmd.macOffset + cmd.macLength;
+ cmd.digestOffset = MV_ALIGN_UP(cmd.digestOffset, 8);
+ }
+ else
+ {
+ cmd.digestOffset = cesaTestCases[caseIdx].digestOffset;
+ }
+ if( (cmd.digestOffset + digestSize) > cesaReqSize)
+ cesaReqSize = cmd.digestOffset + digestSize;
+ }
+
+ cesaCheckMode = checkMode;
+
+ if(checkMode == CESA_NULL_CHECK_MODE)
+ {
+ cesaCheckSize = 0;
+ cesaCheckOffset = 0;
+ }
+ else
+ {
+ if(pTestSession->operation == MV_CESA_CRYPTO_ONLY)
+ {
+ cesaCheckOffset = 0;
+ cesaCheckSize = cmd.cryptoLength;
+ }
+ else
+ {
+ cesaCheckSize = digestSize;
+ cesaCheckOffset = cmd.digestOffset;
+ }
+ }
+/*
+ mvOsPrintf("reqSize=%d, checkSize=%d, checkOffset=%d, checkMode=%d\n",
+ cesaReqSize, cesaCheckSize, cesaCheckOffset, cesaCheckMode);
+
+ mvOsPrintf("blockSize=%d, ivOffset=%d, ivFromUser=%d, crOffset=%d, crLength=%d\n",
+ blockSize, cmd.ivOffset, cmd.ivFromUser,
+ cmd.cryptoOffset, cmd.cryptoLength);
+
+ mvOsPrintf("macOffset=%d, digestOffset=%d, macLength=%d\n",
+ cmd.macOffset, cmd.digestOffset, cmd.macLength);
+*/
+ status = testCmd(sid, iter, &cmd, pTestSession,
+ cesaTestCases[caseIdx].pCryptoIV, blockSize);
+
+ if(status != MV_OK)
+ return status;
+
+    /* Wait until all callbacks are received */
+ count = 0;
+ while(cesaIsReady == MV_FALSE)
+ {
+ mvOsSleep(10);
+ count++;
+ if(count > 100)
+ {
+            mvOsPrintf("testRun: Timeout occurred\n");
+ return MV_TIMEOUT;
+ }
+ }
+
+ return MV_OK;
+}
+
+
+void cesaTestStop(void)
+{
+ MV_CESA_MBUF *pMbufSrc, *pMbufDst;
+ MV_BUF_INFO *pFragsSrc, *pFragsDst;
+ int i;
+
+    /* Release all allocated memory */
+ pMbufSrc = (MV_CESA_MBUF*)(cesaCmdRing[0].pSrc);
+ pFragsSrc = cesaCmdRing[0].pSrc->pFrags;
+
+ pMbufDst = (MV_CESA_MBUF*)(cesaCmdRing[0].pDst);
+ pFragsDst = cesaCmdRing[0].pDst->pFrags;
+
+ mvOsFree(pMbufSrc);
+ mvOsFree(pMbufDst);
+ mvOsFree(pFragsSrc);
+ mvOsFree(pFragsDst);
+
+ for(i=0; i<CESA_DEF_REQ_SIZE; i++)
+ {
+ mvOsIoCachedFree(cesaTestOSHandle,cesaReqBufs[i].bufSize,
+ cesaReqBufs[i].bufPhysAddr,cesaReqBufs[i].bufVirtPtr,
+ cesaReqBufs[i].memHandle);
+ }
+ cesaDataHexStr3[0] = '\0';
+}
+
+void desTest(int iter, int reqSize, int checkMode)
+{
+ int mode, i;
+ MV_STATUS status;
+
+ mode = checkMode;
+ if(checkMode == CESA_FULL_CHECK_MODE)
+ mode = CESA_FAST_CHECK_MODE;
+ i = iter;
+ if(mode != CESA_NULL_CHECK_MODE)
+ i = 1;
+
+ testOpen(0);
+ testOpen(1);
+ testOpen(2);
+ testOpen(3);
+
+/* DES / ECB mode / Encrypt only */
+ status = testRun(0, 1, iter, reqSize, checkMode);
+ printTestResults(0, status, checkMode);
+
+/* DES / ECB mode / Decrypt only */
+ status = testRun(1, 1, iter, reqSize, checkMode);
+ printTestResults(1, status, checkMode);
+
+/* DES / CBC mode / Encrypt only */
+ status = testRun(2, 2, i, reqSize, mode);
+ printTestResults(2, status, mode);
+
+/* DES / CBC mode / Decrypt only */
+ status = testRun(3, 2, iter, reqSize, mode);
+ printTestResults(3, status, mode);
+
+ testClose(0);
+ testClose(1);
+ testClose(2);
+ testClose(3);
+}
+
+void tripleDesTest(int iter, int reqSize, int checkMode)
+{
+ int mode, i;
+ MV_STATUS status;
+
+ mode = checkMode;
+ if(checkMode == CESA_FULL_CHECK_MODE)
+ mode = CESA_FAST_CHECK_MODE;
+ i = iter;
+ if(mode != CESA_NULL_CHECK_MODE)
+ i = 1;
+
+ testOpen(100);
+ testOpen(101);
+ testOpen(102);
+ testOpen(103);
+
+/* 3DES / ECB mode / Encrypt only */
+ status = testRun(100, 1, iter, reqSize, checkMode);
+ printTestResults(100, status, checkMode);
+
+/* 3DES / ECB mode / Decrypt only */
+ status = testRun(101, 1, iter, reqSize, checkMode);
+ printTestResults(101, status, checkMode);
+
+/* 3DES / CBC mode / Encrypt only */
+ status = testRun(102, 2, i, reqSize, mode);
+ printTestResults(102, status, mode);
+
+/* 3DES / CBC mode / Decrypt only */
+ status = testRun(103, 2, iter, reqSize, mode);
+ printTestResults(103, status, mode);
+
+ testClose(100);
+ testClose(101);
+ testClose(102);
+ testClose(103);
+}
+
+void aesTest(int iter, int reqSize, int checkMode)
+{
+ MV_STATUS status;
+ int mode, i;
+
+ mode = checkMode;
+ if(checkMode == CESA_FULL_CHECK_MODE)
+ mode = CESA_FAST_CHECK_MODE;
+
+ i = iter;
+ if(mode != CESA_NULL_CHECK_MODE)
+ i = 1;
+
+ testOpen(200);
+ testOpen(201);
+ testOpen(202);
+ testOpen(203);
+ testOpen(204);
+ testOpen(205);
+ testOpen(206);
+ testOpen(207);
+ testOpen(208);
+
+/* AES-128 Encode ECB mode */
+ status = testRun(200, 3, iter, reqSize, checkMode);
+ printTestResults(200, status, checkMode);
+
+/* AES-128 Decode ECB mode */
+ status = testRun(201, 3, iter, reqSize, checkMode);
+ printTestResults(201, status, checkMode);
+
+/* AES-128 Encode CBC mode (IV from SA) */
+ status = testRun(202, 10, i, reqSize, mode);
+ printTestResults(202, status, mode);
+
+/* AES-128 Encode CBC mode (IV from User) */
+ status = testRun(202, 24, i, reqSize, mode);
+ printTestResults(202, status, mode);
+
+/* AES-128 Decode CBC mode */
+ status = testRun(203, 24, iter, reqSize, mode);
+ printTestResults(203, status, checkMode);
+
+/* AES-192 Encode ECB mode */
+ status = testRun(204, 4, iter, reqSize, checkMode);
+ printTestResults(204, status, checkMode);
+
+/* AES-192 Decode ECB mode */
+ status = testRun(205, 4, iter, reqSize, checkMode);
+ printTestResults(205, status, checkMode);
+
+/* AES-256 Encode ECB mode */
+ status = testRun(206, 5, iter, reqSize, checkMode);
+ printTestResults(206, status, checkMode);
+
+/* AES-256 Decode ECB mode */
+ status = testRun(207, 5, iter, reqSize, checkMode);
+ printTestResults(207, status, checkMode);
+
+#if defined(MV_LINUX)
+/* AES-128 Encode CTR mode */
+ status = testRun(208, 23, iter, reqSize, mode);
+ printTestResults(208, status, checkMode);
+#endif
+ testClose(200);
+ testClose(201);
+ testClose(202);
+ testClose(203);
+ testClose(204);
+ testClose(205);
+ testClose(206);
+ testClose(207);
+ testClose(208);
+}
+
+
+void mdTest(int iter, int reqSize, int checkMode)
+{
+ int mode;
+ MV_STATUS status;
+
+ if(iter == 0)
+ iter = CESA_DEF_ITER_NUM;
+
+ mode = checkMode;
+ if(checkMode == CESA_FULL_CHECK_MODE)
+ mode = CESA_FAST_CHECK_MODE;
+
+ testOpen(300);
+ testOpen(301);
+ testOpen(302);
+ testOpen(303);
+ testOpen(305);
+
+/* HMAC-MD5 Generate signature test */
+ status = testRun(300, 6, iter, reqSize, mode);
+ printTestResults(300, status, checkMode);
+
+/* HMAC-MD5 Verify Signature test */
+ status = testRun(301, 7, iter, reqSize, mode);
+ printTestResults(301, status, checkMode);
+
+/* HMAC-MD5 Generate signature test */
+ status = testRun(302, 8, iter, reqSize, mode);
+ printTestResults(302, status, checkMode);
+
+/* HMAC-MD5 Verify Signature test */
+ status = testRun(303, 9, iter, reqSize, mode);
+ printTestResults(303, status, checkMode);
+
+/* HASH-MD5 Generate signature test */
+ status = testRun(305, 15, iter, reqSize, mode);
+ printTestResults(305, status, checkMode);
+
+ testClose(300);
+ testClose(301);
+ testClose(302);
+ testClose(303);
+ testClose(305);
+}
+
+void shaTest(int iter, int reqSize, int checkMode)
+{
+ int mode;
+ MV_STATUS status;
+
+ if(iter == 0)
+ iter = CESA_DEF_ITER_NUM;
+
+ mode = checkMode;
+ if(checkMode == CESA_FULL_CHECK_MODE)
+ mode = CESA_FAST_CHECK_MODE;
+
+ testOpen(400);
+ testOpen(401);
+ testOpen(402);
+ testOpen(403);
+ testOpen(405);
+
+/* HMAC-SHA1 Generate signature test */
+ status = testRun(400, 11, iter, reqSize, mode);
+ printTestResults(400, status, checkMode);
+
+/* HMAC-SHA1 Verify Signature test */
+ status = testRun(401, 12, iter, reqSize, mode);
+ printTestResults(401, status, checkMode);
+
+/* HMAC-SHA1 Generate signature test */
+ status = testRun(402, 13, iter, reqSize, mode);
+ printTestResults(402, status, checkMode);
+
+/* HMAC-SHA1 Verify Signature test */
+ status = testRun(403, 14, iter, reqSize, mode);
+ printTestResults(403, status, checkMode);
+
+/* HMAC-SHA1 Generate signature test */
+ status = testRun(405, 16, iter, reqSize, mode);
+ printTestResults(405, status, checkMode);
+
+ testClose(400);
+ testClose(401);
+ testClose(402);
+ testClose(403);
+ testClose(405);
+}
+
+void combiTest(int iter, int reqSize, int checkMode)
+{
+ MV_STATUS status;
+ int mode, i;
+
+ mode = checkMode;
+ if(checkMode == CESA_FULL_CHECK_MODE)
+ mode = CESA_FAST_CHECK_MODE;
+
+ if(iter == 0)
+ iter = CESA_DEF_ITER_NUM;
+
+ i = iter;
+ if(mode != CESA_NULL_CHECK_MODE)
+ i = 1;
+
+ testOpen(500);
+ testOpen(501);
+ testOpen(502);
+ testOpen(503);
+ testOpen(504);
+ testOpen(505);
+ testOpen(506);
+ testOpen(507);
+
+/* DES ECB + MD5 encode test */
+ status = testRun(500, 17, iter, reqSize, mode);
+ printTestResults(500, status, mode);
+
+/* DES ECB + SHA1 encode test */
+ status = testRun(501, 18, iter, reqSize, mode);
+ printTestResults(501, status, mode);
+
+/* 3DES ECB + MD5 encode test */
+ status = testRun(502, 17, iter, reqSize, mode);
+ printTestResults(502, status, mode);
+
+/* 3DES ECB + SHA1 encode test */
+ status = testRun(503, 18, iter, reqSize, mode);
+ printTestResults(503, status, mode);
+
+/* 3DES CBC + MD5 encode test */
+ status = testRun(504, 19, i, reqSize, mode);
+ printTestResults(504, status, mode);
+
+/* 3DES CBC + SHA1 encode test */
+ status = testRun(505, 20, i, reqSize, mode);
+ printTestResults(505, status, mode);
+
+/* AES-128 CBC + MD5 encode test */
+ status = testRun(506, 21, i, reqSize, mode);
+ printTestResults(506, status, mode);
+
+/* AES-128 CBC + SHA1 encode test */
+ status = testRun(507, 22, i, reqSize, mode);
+ printTestResults(507, status, mode);
+
+ testClose(500);
+ testClose(501);
+ testClose(502);
+ testClose(503);
+ testClose(504);
+ testClose(505);
+ testClose(506);
+ testClose(507);
+}
+
+void cesaOneTest(int testIdx, int caseIdx,
+ int iter, int reqSize, int checkMode)
+{
+ MV_STATUS status;
+
+ if(iter == 0)
+ iter = CESA_DEF_ITER_NUM;
+
+ mvOsPrintf("test=%d, case=%d, size=%d, iter=%d\n",
+ testIdx, caseIdx, reqSize, iter);
+
+ status = testOpen(testIdx);
+
+ status = testRun(testIdx, caseIdx, iter, reqSize, checkMode);
+ printTestResults(testIdx, status, checkMode);
+ status = testClose(testIdx);
+
+}
+
+void cesaTest(int iter, int reqSize, int checkMode)
+{
+ if(iter == 0)
+ iter = CESA_DEF_ITER_NUM;
+
+    mvOsPrintf("%d iterations\n", iter);
+    mvOsPrintf("%d bytes request size\n\n", reqSize);
+
+/* DES tests */
+ desTest(iter, reqSize, checkMode);
+
+/* 3DES tests */
+ tripleDesTest(iter, reqSize, checkMode);
+
+/* AES tests */
+ aesTest(iter, reqSize, checkMode);
+
+/* MD5 tests */
+ mdTest(iter, reqSize, checkMode);
+
+/* SHA-1 tests */
+ shaTest(iter, reqSize, checkMode);
+}
+
+void multiSizeTest(int idx, int iter, int checkMode, char* inputData)
+{
+ MV_STATUS status;
+ int i;
+ MV_CESA_SIZE_TEST* pMultiTest;
+
+ if( testOpen(idx) != MV_OK)
+ return;
+
+ if(iter == 0)
+ iter = CESA_DEF_ITER_NUM;
+
+ if(checkMode == CESA_SHOW_CHECK_MODE)
+ {
+ iter = 1;
+ }
+ else
+ checkMode = CESA_FULL_CHECK_MODE;
+
+ cesaTestCases[0].plainHexStr = inputData;
+ cesaTestCases[0].pCryptoIV = NULL;
+
+ switch(idx)
+ {
+ case 302:
+ pMultiTest = mdMultiSizeTest302;
+ if(inputData == NULL)
+ cesaTestCases[0].plainHexStr = cesaDataHexStr3;
+ break;
+
+ case 304:
+ pMultiTest = mdMultiSizeTest304;
+ if(inputData == NULL)
+ cesaTestCases[0].plainHexStr = hashHexStr80;
+ break;
+
+ case 305:
+ pMultiTest = mdMultiSizeTest305;
+ if(inputData == NULL)
+ cesaTestCases[0].plainHexStr = hashHexStr80;
+ break;
+
+ case 402:
+ pMultiTest = shaMultiSizeTest402;
+ if(inputData == NULL)
+ cesaTestCases[0].plainHexStr = hashHexStr80;
+ break;
+
+ case 404:
+ pMultiTest = shaMultiSizeTest404;
+ if(inputData == NULL)
+ cesaTestCases[0].plainHexStr = hashHexStr80;
+ break;
+
+ case 405:
+ pMultiTest = shaMultiSizeTest405;
+ if(inputData == NULL)
+ cesaTestCases[0].plainHexStr = hashHexStr80;
+ break;
+
+ case 502:
+ pMultiTest = tripleDesMdMultiSizeTest502;
+ if(inputData == NULL)
+ cesaTestCases[0].plainHexStr = hashHexStr80;
+ break;
+
+ case 503:
+ pMultiTest = tripleDesShaMultiSizeTest503;
+ if(inputData == NULL)
+ cesaTestCases[0].plainHexStr = hashHexStr80;
+ break;
+
+ case 504:
+ iter = 1;
+ pMultiTest = cbc3desMdMultiSizeTest504;
+ cesaTestCases[0].pCryptoIV = iv1;
+ if(inputData == NULL)
+ cesaTestCases[0].plainHexStr = hashHexStr80;
+ break;
+
+ case 505:
+ iter = 1;
+ pMultiTest = cbc3desShaMultiSizeTest505;
+ cesaTestCases[0].pCryptoIV = iv1;
+ if(inputData == NULL)
+ cesaTestCases[0].plainHexStr = hashHexStr80;
+ break;
+
+ case 506:
+ iter = 1;
+ pMultiTest = cbcAes128md5multiSizeTest506;
+ cesaTestCases[0].pCryptoIV = iv5;
+ if(inputData == NULL)
+ cesaTestCases[0].plainHexStr = hashHexStr80;
+ break;
+
+ case 507:
+ iter = 1;
+ pMultiTest = cbcAes128sha1multiSizeTest507;
+ cesaTestCases[0].pCryptoIV = iv5;
+ if(inputData == NULL)
+ cesaTestCases[0].plainHexStr = hashHexStr80;
+ break;
+
+ default:
+ iter = 1;
+ checkMode = CESA_SHOW_CHECK_MODE;
+ pMultiTest = mdMultiSizeTest302;
+ if(inputData == NULL)
+ cesaTestCases[0].plainHexStr = hashHexStr80;
+ }
+ i = 0;
+ while(pMultiTest[i].outputHexStr != NULL)
+ {
+ cesaTestCases[0].cipherHexStr = (char *)pMultiTest[i].outputHexStr;
+ status = testRun(idx, 0, iter, pMultiTest[i].size,
+ checkMode);
+ if(checkMode != CESA_SHOW_CHECK_MODE)
+ {
+ cesaReqSize = pMultiTest[i].size;
+ printTestResults(idx, status, checkMode);
+ }
+ if(status != MV_OK)
+ break;
+ i++;
+ }
+ testClose(idx);
+/*
+ mvCesaDebugStatus();
+ cesaTestPrintStatus();
+*/
+}
+
+void open_session_test(int idx, int caseIdx, int iter)
+{
+ int reqIdError, cryptoError, openErrors, i;
+ int openErrDisp[100];
+ MV_STATUS status;
+
+ memset(openErrDisp, 0, sizeof(openErrDisp));
+ openErrors = 0;
+ reqIdError = 0;
+ cryptoError = 0;
+ for(i=0; i<iter; i++)
+ {
+ status = testOpen(idx);
+ if(status != MV_OK)
+ {
+ openErrors++;
+ openErrDisp[status]++;
+ }
+ else
+ {
+ testRun(idx, caseIdx, 1, 0, CESA_FAST_CHECK_MODE);
+ if(cesaCryptoError > 0)
+ cryptoError++;
+ if(cesaReqIdError > 0)
+ reqIdError++;
+
+ testClose(idx);
+ }
+ }
+ if(cryptoError > 0)
+ mvOsPrintf("cryptoError : %d\n", cryptoError);
+ if(reqIdError > 0)
+ mvOsPrintf("reqIdError : %d\n", reqIdError);
+
+ if(openErrors > 0)
+ {
+ mvOsPrintf("Open Errors = %d\n", openErrors);
+ for(i=0; i<100; i++)
+ {
+ if(openErrDisp[i] != 0)
+ mvOsPrintf("Error %d - occurs %d times\n", i, openErrDisp[i]);
+ }
+ }
+}
+
+
+void loopback_test(int idx, int iter, int size, char* pPlainData)
+{
+}
+
+
+#if defined(MV_VXWORKS)
+int testMode = 0;
+unsigned __TASKCONV cesaTask(void* args)
+{
+ int reqSize = cesaReqSize;
+
+ if(testMode == 0)
+ {
+ cesaOneTest(cesaTestIdx, cesaCaseIdx, cesaIteration,
+ reqSize, cesaCheckMode);
+ }
+ else
+ {
+ if(testMode == 1)
+ {
+ cesaTest(cesaIteration, reqSize, cesaCheckMode);
+ combiTest(cesaIteration, reqSize, cesaCheckMode);
+ }
+ else
+ {
+ multiSizeTest(cesaIdx, cesaIteration, cesaCheckMode, NULL);
+ }
+ }
+ return 0;
+}
+
+void oneTest(int testIdx, int caseIdx,
+ int iter, int reqSize, int checkMode)
+{
+ long rc;
+
+ cesaIteration = iter;
+ cesaReqSize = cesaRateSize = reqSize;
+ cesaCheckMode = checkMode;
+ testMode = 0;
+ cesaTestIdx = testIdx;
+ cesaCaseIdx = caseIdx;
+ rc = mvOsTaskCreate("CESA_T", 100, 4*1024, cesaTask, NULL, &cesaTaskId);
+ if (rc != MV_OK)
+ {
+ mvOsPrintf("hMW: Can't create CESA multiCmd test task, rc = %ld\n", rc);
+ }
+}
+
+void multiTest(int iter, int reqSize, int checkMode)
+{
+ long rc;
+
+ cesaIteration = iter;
+ cesaCheckMode = checkMode;
+ cesaReqSize = reqSize;
+ testMode = 1;
+ rc = mvOsTaskCreate("CESA_T", 100, 4*1024, cesaTask, NULL, &cesaTaskId);
+ if (rc != MV_OK)
+ {
+ mvOsPrintf("hMW: Can't create CESA multiCmd test task, rc = %ld\n", rc);
+ }
+}
+
+void sizeTest(int testIdx, int iter, int checkMode)
+{
+ long rc;
+
+ cesaIteration = iter;
+ cesaCheckMode = checkMode;
+ testMode = 2;
+ cesaIdx = testIdx;
+ rc = mvOsTaskCreate("CESA_T", 100, 4*1024, cesaTask, NULL, &cesaTaskId);
+ if (rc != MV_OK)
+ {
+ mvOsPrintf("hMW: Can't create CESA test task, rc = %ld\n", rc);
+ }
+}
+
+#endif /* MV_VXWORKS */
+
+extern void mvCesaDebugSA(short sid, int mode);
+void cesaTestPrintSession(int idx)
+{
+ int testIdx;
+ MV_CESA_TEST_SESSION* pTestSession;
+
+ pTestSession = getTestSessionDb(idx, &testIdx);
+ if(pTestSession == NULL)
+ {
+        mvOsPrintf("Test %d does not exist\n", idx);
+ return;
+ }
+ pTestSession = &pTestSession[testIdx];
+
+ if(pTestSession->sid == -1)
+ {
+        mvOsPrintf("Test session %d is not open\n", idx);
+ return;
+ }
+
+ mvCesaDebugSA(pTestSession->sid, 1);
+}
+
+void cesaTestPrintStatus(void)
+{
+ mvOsPrintf("\n\t Cesa Test Status\n\n");
+
+ mvOsPrintf("isrCount=%d\n",
+ cesaTestIsrCount);
+
+#ifdef CESA_TEST_DEBUG
+ {
+ int i, j;
+ j = cesaTestTraceIdx;
+ mvOsPrintf("No Type Cause rCause iCause Res Time pReady pProc pEmpty\n");
+ for(i=0; i<MV_CESA_TEST_TRACE_SIZE; i++)
+ {
+ mvOsPrintf("%02d. %d 0x%04x 0x%04x 0x%04x 0x%02x 0x%02x %02d 0x%06x %p %p %p\n",
+ j, cesaTestTrace[j].type, cesaTestTrace[j].cause, cesaTestTrace[j].realCause,
+ cesaTestTrace[j].dmaCause, cesaTestTrace[j].resources, cesaTestTrace[j].timeStamp,
+ cesaTestTrace[j].pReqReady, cesaTestTrace[j].pReqProcess, cesaTestTrace[j].pReqEmpty);
+ j++;
+ if(j == MV_CESA_TEST_TRACE_SIZE)
+ j = 0;
+ }
+ }
+#endif /* CESA_TEST_DEBUG */
+}
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCompVer.txt b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCompVer.txt
new file mode 100644
index 000000000..38a926440
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvCompVer.txt
@@ -0,0 +1,4 @@
+Global HAL Version: FEROCEON_HAL_3_1_7
+Unit HAL Version: 3.1.4
+Description: This component includes an implementation of the unit HAL drivers
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvLru.c b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvLru.c
new file mode 100644
index 000000000..9ab29a883
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvLru.c
@@ -0,0 +1,158 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mvLru.h"
+/* LRU Cache support */
+
+
+/* Init LRU cache database */
+MV_LRU_CACHE* mvLruCacheInit(int numOfEntries)
+{
+ int i;
+ MV_LRU_CACHE* pLruCache;
+
+ pLruCache = mvOsMalloc(sizeof(MV_LRU_CACHE));
+ if(pLruCache == NULL)
+ {
+ return NULL;
+ }
+ memset(pLruCache, 0, sizeof(MV_LRU_CACHE));
+
+ pLruCache->table = mvOsMalloc(numOfEntries*sizeof(MV_LRU_ENTRY));
+ if(pLruCache->table == NULL)
+ {
+ mvOsFree(pLruCache);
+ return NULL;
+ }
+ memset(pLruCache->table, 0, numOfEntries*sizeof(MV_LRU_ENTRY));
+ pLruCache->tableSize = numOfEntries;
+
+ for(i=0; i<numOfEntries; i++)
+ {
+ pLruCache->table[i].next = i+1;
+ pLruCache->table[i].prev = i-1;
+ }
+ pLruCache->least = 0;
+ pLruCache->most = numOfEntries-1;
+
+ return pLruCache;
+}
+
+void mvLruCacheFinish(MV_LRU_CACHE* pLruCache)
+{
+ mvOsFree(pLruCache->table);
+ mvOsFree(pLruCache);
+}
+
+/* Update LRU cache database after using cache Index */
+void mvLruCacheIdxUpdate(MV_LRU_CACHE* pLruHndl, int cacheIdx)
+{
+ int prev, next;
+
+ if(cacheIdx == pLruHndl->most)
+ return;
+
+ next = pLruHndl->table[cacheIdx].next;
+ if(cacheIdx == pLruHndl->least)
+ {
+ pLruHndl->least = next;
+ }
+ else
+ {
+ prev = pLruHndl->table[cacheIdx].prev;
+
+ pLruHndl->table[next].prev = prev;
+ pLruHndl->table[prev].next = next;
+ }
+
+ pLruHndl->table[pLruHndl->most].next = cacheIdx;
+ pLruHndl->table[cacheIdx].prev = pLruHndl->most;
+ pLruHndl->most = cacheIdx;
+}
+
+/* Delete LRU cache entry */
+void mvLruCacheIdxDelete(MV_LRU_CACHE* pLruHndl, int cacheIdx)
+{
+ int prev, next;
+
+ if(cacheIdx == pLruHndl->least)
+ return;
+
+ prev = pLruHndl->table[cacheIdx].prev;
+ if(cacheIdx == pLruHndl->most)
+ {
+ pLruHndl->most = prev;
+ }
+ else
+ {
+ next = pLruHndl->table[cacheIdx].next;
+
+ pLruHndl->table[next].prev = prev;
+ pLruHndl->table[prev].next = next;
+ }
+ pLruHndl->table[pLruHndl->least].prev = cacheIdx;
+ pLruHndl->table[cacheIdx].next = pLruHndl->least;
+ pLruHndl->least = cacheIdx;
+}
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvLru.h b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvLru.h
new file mode 100644
index 000000000..896e7f81c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvLru.h
@@ -0,0 +1,112 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+/*******************************************************************************
+* mvLru.h - Header File for Least Recently Used Cache algorithm
+*
+* DESCRIPTION:
+*		This header file contains macros, typedefs and function declarations for
+*		the Least Recently Used (LRU) cache algorithm.
+*
+*******************************************************************************/
+
+#ifndef __mvLru_h__
+#define __mvLru_h__
+
+
+typedef struct
+{
+ int next;
+ int prev;
+} MV_LRU_ENTRY;
+
+typedef struct
+{
+ int least;
+ int most;
+ MV_LRU_ENTRY* table;
+ int tableSize;
+
+}MV_LRU_CACHE;
+
+
+/* Find Cache index for replacement LRU */
+static INLINE int mvLruCacheIdxFind(MV_LRU_CACHE* pLruHndl)
+{
+ return pLruHndl->least;
+}
+
+/* Init LRU cache module */
+MV_LRU_CACHE* mvLruCacheInit(int numOfEntries);
+
+/* Finish LRU cache module */
+void mvLruCacheFinish(MV_LRU_CACHE* pLruHndl);
+
+/* Update LRU cache database after using cache Index */
+void mvLruCacheIdxUpdate(MV_LRU_CACHE* pLruHndl, int cacheIdx);
+
+/* Delete LRU cache entry */
+void mvLruCacheIdxDelete(MV_LRU_CACHE* pLruHndl, int cacheIdx);
+
+
+#endif /* __mvLru_h__ */
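
Editorial note (not part of the patch): the header above declares the whole LRU API; a minimal usage sketch follows, assuming only that mvOs.h supplies mvOsMalloc/mvOsPrintf and the INLINE macro. Entries are plain integer slot indices, mvLruCacheIdxFind() returns the current least recently used slot, and mvLruCacheIdxUpdate() promotes a slot to most recently used.

    #include "mvOs.h"
    #include "mvLru.h"

    /* Hypothetical helper, for illustration only: manage 4 cache slots
     * and pick a victim slot for replacement. */
    static void lruUsageSketch(void)
    {
        MV_LRU_CACHE *pLru = mvLruCacheInit(4);   /* slots 0..3, slot 0 starts as LRU */

        if (pLru == NULL)
            return;

        mvLruCacheIdxUpdate(pLru, 1);             /* slot 1 becomes most recently used */
        mvLruCacheIdxUpdate(pLru, 3);             /* slot 3 becomes most recently used */

        /* Slot 0 was never touched, so it is still the replacement candidate */
        mvOsPrintf("replace slot %d\n", mvLruCacheIdxFind(pLru));

        mvLruCacheIdxDelete(pLru, 2);             /* force slot 2 back to the LRU position */
        mvLruCacheFinish(pLru);
    }
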
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvMD5.c b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvMD5.c
new file mode 100644
index 000000000..189f6292e
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvMD5.c
@@ -0,0 +1,349 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mvMD5.h"
+
+static void mvMD5Transform(MV_U32 buf[4], MV_U32 const in[MV_MD5_MAC_LEN]);
+
+#ifdef MV_CPU_LE
+#define mvByteReverse(buf, len) /* Nothing */
+#else
+static void mvByteReverse(unsigned char *buf, unsigned longs);
+
+/*
+ * Note: this code is harmless on little-endian machines.
+ */
+static void mvByteReverse(unsigned char *buf, unsigned longs)
+{
+ MV_U32 t;
+
+ do
+ {
+ t = (MV_U32) ((unsigned) buf[3] << 8 | buf[2]) << 16 |
+ ((unsigned) buf[1] << 8 | buf[0]);
+ *(MV_U32 *) buf = t;
+ buf += 4;
+ } while (--longs);
+}
+#endif
+
+/*
+ * Start MD5 accumulation. Set bit count to 0 and buffer to mysterious
+ * initialization constants.
+ */
+void mvMD5Init(MV_MD5_CONTEXT *ctx)
+{
+ ctx->buf[0] = 0x67452301;
+ ctx->buf[1] = 0xefcdab89;
+ ctx->buf[2] = 0x98badcfe;
+ ctx->buf[3] = 0x10325476;
+
+ ctx->bits[0] = 0;
+ ctx->bits[1] = 0;
+}
+
+/*
+ * Update context to reflect the concatenation of another buffer full
+ * of bytes.
+ */
+void mvMD5Update(MV_MD5_CONTEXT *ctx, unsigned char const *buf, unsigned len)
+{
+ MV_U32 t;
+
+ /* Update bitcount */
+
+ t = ctx->bits[0];
+ if ((ctx->bits[0] = t + ((MV_U32) len << 3)) < t)
+ ctx->bits[1]++; /* Carry from low to high */
+ ctx->bits[1] += len >> 29;
+
+    t = (t >> 3) & 0x3f;	/* Bytes already in ctx->in */
+
+ /* Handle any leading odd-sized chunks */
+
+ if (t)
+ {
+ unsigned char *p = (unsigned char *) ctx->in + t;
+
+ t = 64 - t;
+ if (len < t)
+ {
+ memcpy(p, buf, len);
+ return;
+ }
+ memcpy(p, buf, t);
+ mvByteReverse(ctx->in, MV_MD5_MAC_LEN);
+ mvMD5Transform(ctx->buf, (MV_U32 *) ctx->in);
+ buf += t;
+ len -= t;
+ }
+ /* Process data in 64-byte chunks */
+
+ while (len >= 64)
+ {
+ memcpy(ctx->in, buf, 64);
+ mvByteReverse(ctx->in, MV_MD5_MAC_LEN);
+ mvMD5Transform(ctx->buf, (MV_U32 *) ctx->in);
+ buf += 64;
+ len -= 64;
+ }
+
+ /* Handle any remaining bytes of data. */
+
+ memcpy(ctx->in, buf, len);
+}
+
+/*
+ * Final wrapup - pad to 64-byte boundary with the bit pattern
+ * 1 0* (64-bit count of bits processed, MSB-first)
+ */
+void mvMD5Final(unsigned char digest[MV_MD5_MAC_LEN], MV_MD5_CONTEXT *ctx)
+{
+ unsigned count;
+ unsigned char *p;
+
+ /* Compute number of bytes mod 64 */
+ count = (ctx->bits[0] >> 3) & 0x3F;
+
+ /* Set the first char of padding to 0x80. This is safe since there is
+ always at least one byte free */
+ p = ctx->in + count;
+ *p++ = 0x80;
+
+ /* Bytes of padding needed to make 64 bytes */
+ count = 64 - 1 - count;
+
+ /* Pad out to 56 mod 64 */
+ if (count < 8)
+ {
+ /* Two lots of padding: Pad the first block to 64 bytes */
+ memset(p, 0, count);
+ mvByteReverse(ctx->in, MV_MD5_MAC_LEN);
+ mvMD5Transform(ctx->buf, (MV_U32 *) ctx->in);
+
+ /* Now fill the next block with 56 bytes */
+ memset(ctx->in, 0, 56);
+ }
+ else
+ {
+ /* Pad block to 56 bytes */
+ memset(p, 0, count - 8);
+ }
+ mvByteReverse(ctx->in, 14);
+
+ /* Append length in bits and transform */
+ ((MV_U32 *) ctx->in)[14] = ctx->bits[0];
+ ((MV_U32 *) ctx->in)[15] = ctx->bits[1];
+
+ mvMD5Transform(ctx->buf, (MV_U32 *) ctx->in);
+ mvByteReverse((unsigned char *) ctx->buf, 4);
+ memcpy(digest, ctx->buf, MV_MD5_MAC_LEN);
+    memset(ctx, 0, sizeof(*ctx));	/* Wipe the whole context in case it's sensitive */
+}
+
+/* The four core functions - F1 is optimized somewhat */
+
+/* #define F1(x, y, z) (x & y | ~x & z) */
+#define F1(x, y, z) (z ^ (x & (y ^ z)))
+#define F2(x, y, z) F1(z, x, y)
+#define F3(x, y, z) (x ^ y ^ z)
+#define F4(x, y, z) (y ^ (x | ~z))
+
+/* This is the central step in the MD5 algorithm. */
+#define MD5STEP(f, w, x, y, z, data, s) \
+ ( w += f(x, y, z) + data, w = w<<s | w>>(32-s), w += x )
+
+/*
+ * The core of the MD5 algorithm, this alters an existing MD5 hash to
+ * reflect the addition of 16 longwords of new data. MD5Update blocks
+ * the data and converts bytes into longwords for this routine.
+ */
+static void mvMD5Transform(MV_U32 buf[4], MV_U32 const in[MV_MD5_MAC_LEN])
+{
+ register MV_U32 a, b, c, d;
+
+ a = buf[0];
+ b = buf[1];
+ c = buf[2];
+ d = buf[3];
+
+ MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
+ MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
+ MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
+ MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
+ MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
+ MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
+ MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
+ MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
+ MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
+ MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
+ MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
+ MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
+ MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
+ MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
+ MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
+ MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
+
+ MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
+ MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
+ MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
+ MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
+ MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
+ MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
+ MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
+ MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
+ MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
+ MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
+ MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
+ MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
+ MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
+ MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
+ MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
+ MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
+
+ MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
+ MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
+ MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
+ MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
+ MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
+ MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
+ MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
+ MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
+ MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
+ MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
+ MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
+ MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
+ MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
+ MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
+ MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
+ MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
+
+ MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
+ MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
+ MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
+ MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
+ MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
+ MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
+ MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
+ MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
+ MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
+ MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
+ MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
+ MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
+ MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
+ MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
+ MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
+ MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
+
+ buf[0] += a;
+ buf[1] += b;
+ buf[2] += c;
+ buf[3] += d;
+}
+
+void mvMD5(unsigned char const *buf, unsigned len, unsigned char* digest)
+{
+ MV_MD5_CONTEXT ctx;
+
+ mvMD5Init(&ctx);
+ mvMD5Update(&ctx, buf, len);
+ mvMD5Final(digest, &ctx);
+}
+
+
+void mvHmacMd5(unsigned char const* text, int text_len,
+ unsigned char const* key, int key_len,
+ unsigned char* digest)
+{
+ int i;
+ MV_MD5_CONTEXT ctx;
+ unsigned char k_ipad[64+1]; /* inner padding - key XORd with ipad */
+ unsigned char k_opad[64+1]; /* outer padding - key XORd with opad */
+
+ /* start out by storing key in pads */
+ memset(k_ipad, 0, 64);
+ memcpy(k_ipad, key, key_len);
+ memset(k_opad, 0, 64);
+ memcpy(k_opad, key, key_len);
+
+ /* XOR key with ipad and opad values */
+ for (i=0; i<64; i++)
+ {
+ k_ipad[i] ^= 0x36;
+ k_opad[i] ^= 0x5c;
+ }
+
+ /* perform inner MD5 */
+ mvMD5Init(&ctx); /* init ctx for 1st pass */
+ mvMD5Update(&ctx, k_ipad, 64); /* start with inner pad */
+ mvMD5Update(&ctx, text, text_len); /* then text of datagram */
+ mvMD5Final(digest, &ctx); /* finish up 1st pass */
+
+ /* perform outer MD5 */
+ mvMD5Init(&ctx); /* init ctx for 2nd pass */
+ mvMD5Update(&ctx, k_opad, 64); /* start with outer pad */
+ mvMD5Update(&ctx, digest, 16); /* then results of 1st hash */
+ mvMD5Final(digest, &ctx); /* finish up 2nd pass */
+}
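
Editorial note (not part of the patch): the file above exposes both a streaming interface (mvMD5Init/Update/Final) and one-shot helpers. A short sketch, checked only against well-known vectors, MD5("abc") = 900150983cd24fb0d6963f7d28e17f72 and RFC 2202 HMAC-MD5 test case 2; the helper function name is hypothetical.

    #include "mvOs.h"
    #include "mvMD5.h"

    static void md5UsageSketch(void)
    {
        MV_MD5_CONTEXT ctx;
        unsigned char digest[MV_MD5_MAC_LEN];
        int i;

        /* Streaming use: MD5("abc") = 900150983cd24fb0d6963f7d28e17f72 */
        mvMD5Init(&ctx);
        mvMD5Update(&ctx, (unsigned char const *)"a", 1);
        mvMD5Update(&ctx, (unsigned char const *)"bc", 2);
        mvMD5Final(digest, &ctx);

        /* One-shot wrapper, same result */
        mvMD5((unsigned char const *)"abc", 3, digest);

        /* HMAC-MD5, RFC 2202 test case 2:
         * key "Jefe", text "what do ya want for nothing?"
         * expected digest 750c783e6ab0b503eaa86e310a5db738 */
        mvHmacMd5((unsigned char const *)"what do ya want for nothing?", 28,
                  (unsigned char const *)"Jefe", 4, digest);

        for (i = 0; i < MV_MD5_MAC_LEN; i++)
            mvOsPrintf("%02x", digest[i]);
        mvOsPrintf("\n");
    }

Note that mvMD5Final() clears the context after producing the digest, so a context must be re-initialized with mvMD5Init() before it is reused.
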
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvMD5.h b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvMD5.h
new file mode 100644
index 000000000..d05c6b66f
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvMD5.h
@@ -0,0 +1,93 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvMD5_h__
+#define __mvMD5_h__
+
+#include "mvOs.h"
+
+#define MV_MD5_MAC_LEN 16
+
+
+typedef struct
+{
+ MV_U32 buf[4];
+ MV_U32 bits[2];
+ MV_U8 in[64];
+
+} MV_MD5_CONTEXT;
+
+void mvMD5Init(MV_MD5_CONTEXT *context);
+void mvMD5Update(MV_MD5_CONTEXT *context, unsigned char const *buf,
+ unsigned len);
+void mvMD5Final(unsigned char digest[16], MV_MD5_CONTEXT *context);
+
+void mvMD5(unsigned char const *buf, unsigned len, unsigned char* digest);
+
+void mvHmacMd5(unsigned char const* text, int text_len,
+ unsigned char const* key, int key_len,
+ unsigned char* digest);
+
+
+#endif /* __mvMD5_h__ */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvSHA1.c b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvSHA1.c
new file mode 100644
index 000000000..0e0786b0c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvSHA1.c
@@ -0,0 +1,239 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mvSHA1.h"
+
+#define SHA1HANDSOFF
+
+typedef union
+{
+ MV_U8 c[64];
+ MV_U32 l[16];
+
+} CHAR64LONG16;
+
+static void mvSHA1Transform(MV_U32 state[5], const MV_U8 *buffer);
+
+#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
+
+
+#ifdef MV_CPU_LE
+#define blk0(i) (block->l[i] = (rol(block->l[i], 24) & 0xFF00FF00) | \
+ (rol(block->l[i], 8) & 0x00FF00FF))
+#else
+#define blk0(i) block->l[i]
+#endif
+#define blk(i) (block->l[i & 15] = rol(block->l[(i + 13) & 15] ^ \
+ block->l[(i + 8) & 15] ^ block->l[(i + 2) & 15] ^ block->l[i & 15], 1))
+
+/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
+#define R0(v,w,x,y,z,i) \
+ z += ((w & (x ^ y)) ^ y) + blk0(i) + 0x5A827999 + rol(v, 5); \
+ w = rol(w, 30);
+#define R1(v,w,x,y,z,i) \
+ z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \
+ w = rol(w, 30);
+#define R2(v,w,x,y,z,i) \
+ z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); w = rol(w, 30);
+#define R3(v,w,x,y,z,i) \
+ z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \
+ w = rol(w, 30);
+#define R4(v,w,x,y,z,i) \
+ z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \
+ w=rol(w, 30);
+
+/* Hash a single 512-bit block. This is the core of the algorithm. */
+static void mvSHA1Transform(MV_U32 state[5], const MV_U8 *buffer)
+{
+ MV_U32 a, b, c, d, e;
+ CHAR64LONG16* block;
+
+#ifdef SHA1HANDSOFF
+ static MV_U32 workspace[16];
+
+ block = (CHAR64LONG16 *) workspace;
+ memcpy(block, buffer, 64);
+#else
+ block = (CHAR64LONG16 *) buffer;
+#endif
+ /* Copy context->state[] to working vars */
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+ /* 4 rounds of 20 operations each. Loop unrolled. */
+ R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
+ R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
+ R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
+ R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);
+ R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
+ R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
+ R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
+ R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
+ R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
+ R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
+ R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
+ R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
+ R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
+ R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
+ R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
+ R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
+ R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
+ R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
+ R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
+ R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
+ /* Add the working vars back into context.state[] */
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ /* Wipe variables */
+ a = b = c = d = e = 0;
+}
+
+void mvSHA1Init(MV_SHA1_CTX* context)
+{
+ /* SHA1 initialization constants */
+ context->state[0] = 0x67452301;
+ context->state[1] = 0xEFCDAB89;
+ context->state[2] = 0x98BADCFE;
+ context->state[3] = 0x10325476;
+ context->state[4] = 0xC3D2E1F0;
+ context->count[0] = context->count[1] = 0;
+}
+
+
+/* Run your data through this. */
+void mvSHA1Update(MV_SHA1_CTX *context, MV_U8 const *data,
+ unsigned int len)
+{
+ MV_U32 i, j;
+
+ j = (context->count[0] >> 3) & 63;
+ if ((context->count[0] += len << 3) < (len << 3))
+ context->count[1]++;
+ context->count[1] += (len >> 29);
+ if ((j + len) > 63)
+ {
+ memcpy(&context->buffer[j], data, (i = 64-j));
+ mvSHA1Transform(context->state, context->buffer);
+ for ( ; i + 63 < len; i += 64)
+ {
+ mvSHA1Transform(context->state, &data[i]);
+ }
+ j = 0;
+ }
+ else
+ {
+ i = 0;
+ }
+ memcpy(&context->buffer[j], &data[i], len - i);
+}
+
+void mvSHA1Final(MV_U8* digest, MV_SHA1_CTX* context)
+{
+ MV_U32 i;
+ MV_U8 finalcount[8];
+
+ for (i = 0; i < 8; i++)
+ {
+ finalcount[i] = (unsigned char)((context->count[(i >= 4 ? 0 : 1)] >>
+ ((3-(i & 3)) * 8) ) & 255); /* Endian independent */
+ }
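+	/* Pad with a 0x80 byte followed by zeros until the length is 56 bytes mod 64,
+	   then append the 64-bit big-endian bit count. */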
+ mvSHA1Update(context, (const unsigned char *) "\200", 1);
+ while ((context->count[0] & 504) != 448)
+ {
+ mvSHA1Update(context, (const unsigned char *) "\0", 1);
+ }
+	mvSHA1Update(context, finalcount, 8);	/* Should cause a mvSHA1Transform() */
+ for (i = 0; i < 20; i++)
+ {
+ digest[i] = (unsigned char)
+ ((context->state[i >> 2] >> ((3 - (i & 3)) * 8)) & 255);
+ }
+ /* Wipe variables */
+ i = 0;
+ memset(context->buffer, 0, 64);
+ memset(context->state, 0, 20);
+ memset(context->count, 0, 8);
+ memset(finalcount, 0, 8);
+
+#ifdef SHA1HANDSOFF /* make SHA1Transform overwrite its own static vars */
+ mvSHA1Transform(context->state, context->buffer);
+#endif
+}
+
+
+void mvSHA1(MV_U8 const *buf, unsigned int len, MV_U8* digest)
+{
+ MV_SHA1_CTX ctx;
+
+ mvSHA1Init(&ctx);
+ mvSHA1Update(&ctx, buf, len);
+ mvSHA1Final(digest, &ctx);
+}
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvSHA1.h b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvSHA1.h
new file mode 100644
index 000000000..17df9fcdb
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa/mvSHA1.h
@@ -0,0 +1,88 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvSHA1_h__
+#define __mvSHA1_h__
+
+#include "mvSHA1.h"
+
+#define MV_SHA1_MAC_LEN 20
+
+
+typedef struct
+{
+ MV_U32 state[5];
+ MV_U32 count[2];
+ MV_U8 buffer[64];
+
+} MV_SHA1_CTX;
+
+void mvSHA1Init(MV_SHA1_CTX *context);
+void mvSHA1Update(MV_SHA1_CTX *context, MV_U8 const *buf, unsigned int len);
+void mvSHA1Final(MV_U8* digest, MV_SHA1_CTX *context);
+
+void mvSHA1(MV_U8 const *buf, unsigned int len, MV_U8* digest);
+
+
+#endif /* __mvSHA1_h__ */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/cesa_ocf_drv.c b/target/linux/generic/files/crypto/ocf/kirkwood/cesa_ocf_drv.c
new file mode 100644
index 000000000..e689f24ef
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/cesa_ocf_drv.c
@@ -0,0 +1,1302 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/crypto.h>
+#include <linux/mm.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/platform_device.h>
+#include <asm/scatterlist.h>
+#include <linux/spinlock.h>
+#include "ctrlEnv/sys/mvSysCesa.h"
+#include "cesa/mvCesa.h" /* moved here before cryptodev.h due to include dependencies */
+#include <cryptodev.h>
+#include <uio.h>
+#include <plat/mv_cesa.h>
+#include <linux/mbus.h>
+#include "mvDebug.h"
+
+#include "cesa/mvMD5.h"
+#include "cesa/mvSHA1.h"
+
+#include "cesa/mvCesaRegs.h"
+#include "cesa/AES/mvAes.h"
+#include "cesa/mvLru.h"
+
+#undef RT_DEBUG
+#ifdef RT_DEBUG
+static int debug = 1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Enable debug");
+#undef dprintk
+#define dprintk(a...) if (debug) { printk(a); } else
+#else
+static int debug = 0;
+#undef dprintk
+#define dprintk(a...)
+#endif
+
+
+/* TDMA Regs */
+#define WINDOW_BASE(i)	(0xA00 + ((i) << 3))
+#define WINDOW_CTRL(i)	(0xA04 + ((i) << 3))
+
+/* interrupt handling */
+#undef CESA_OCF_POLLING
+#undef CESA_OCF_TASKLET
+
+#if defined(CESA_OCF_POLLING) && defined(CESA_OCF_TASKLET)
+#error "don't use both tasklet and polling mode"
+#endif
+
+extern int cesaReqResources;
+/* support for splitting an action into 2 actions */
+#define CESA_OCF_SPLIT
+
+/* general defines */
+#define CESA_OCF_MAX_SES 128
+#define CESA_Q_SIZE 64
+
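+/* Since kernel 3.2, skb_frag_t wraps its page pointer in a struct; FRAG_PAGE() hides that difference. */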
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)
+#define FRAG_PAGE(f) (f).p
+#else
+#define FRAG_PAGE(f) (f)
+#endif
+
+/* data structures */
+struct cesa_ocf_data {
+ int cipher_alg;
+ int auth_alg;
+ int encrypt_tn_auth;
+#define auth_tn_decrypt encrypt_tn_auth
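+	/* one flag for both directions: encrypt-then-auth on encode corresponds to auth-then-decrypt on decode */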
+ int ivlen;
+ int digestlen;
+ short sid_encrypt;
+ short sid_decrypt;
+ /* fragment workaround sessions */
+ short frag_wa_encrypt;
+ short frag_wa_decrypt;
+ short frag_wa_auth;
+};
+
+/* CESA device data */
+struct cesa_dev {
+ void __iomem *sram;
+ void __iomem *reg;
+ struct mv_cesa_platform_data *plat_data;
+ int irq;
+};
+
+#define DIGEST_BUF_SIZE 32
+struct cesa_ocf_process {
+ MV_CESA_COMMAND cesa_cmd;
+ MV_CESA_MBUF cesa_mbuf;
+ MV_BUF_INFO cesa_bufs[MV_CESA_MAX_MBUF_FRAGS];
+ char digest[DIGEST_BUF_SIZE];
+ int digest_len;
+ struct cryptop *crp;
+ int need_cb;
+};
+
+/* global variables */
+static int32_t cesa_ocf_id = -1;
+static struct cesa_ocf_data *cesa_ocf_sessions[CESA_OCF_MAX_SES];
+static spinlock_t cesa_lock;
+static struct cesa_dev cesa_device;
+
+/* static APIs */
+static int cesa_ocf_process (device_t, struct cryptop *, int);
+static int cesa_ocf_newsession (device_t, u_int32_t *, struct cryptoini *);
+static int cesa_ocf_freesession (device_t, u_int64_t);
+static void cesa_callback (unsigned long);
+static irqreturn_t cesa_interrupt_handler (int, void *);
+#ifdef CESA_OCF_POLLING
+static void cesa_interrupt_polling(void);
+#endif
+#ifdef CESA_OCF_TASKLET
+static struct tasklet_struct cesa_ocf_tasklet;
+#endif
+
+static struct timeval tt_start;
+static struct timeval tt_end;
+
+/*
+ * dummy device structure
+ */
+
+static struct {
+ softc_device_decl sc_dev;
+} mv_cesa_dev;
+
+static device_method_t mv_cesa_methods = {
+ /* crypto device methods */
+ DEVMETHOD(cryptodev_newsession, cesa_ocf_newsession),
+ DEVMETHOD(cryptodev_freesession,cesa_ocf_freesession),
+ DEVMETHOD(cryptodev_process, cesa_ocf_process),
+ DEVMETHOD(cryptodev_kprocess, NULL),
+};
+
+
+
+/* Add debug Trace */
+#undef CESA_OCF_TRACE_DEBUG
+#ifdef CESA_OCF_TRACE_DEBUG
+
+#define MV_CESA_USE_TIMER_ID 0
+
+typedef struct
+{
+ int type; /* 0 - isrEmpty, 1 - cesaReadyGet, 2 - cesaAction */
+ MV_U32 timeStamp;
+ MV_U32 cause;
+ MV_U32 realCause;
+ MV_U32 dmaCause;
+ int resources;
+ MV_CESA_REQ* pReqReady;
+ MV_CESA_REQ* pReqEmpty;
+ MV_CESA_REQ* pReqProcess;
+} MV_CESA_TEST_TRACE;
+
+#define MV_CESA_TEST_TRACE_SIZE 50
+
+static int cesaTestTraceIdx = 0;
+static MV_CESA_TEST_TRACE cesaTestTrace[MV_CESA_TEST_TRACE_SIZE];
+
+static void cesaTestTraceAdd(int type)
+{
+ cesaTestTrace[cesaTestTraceIdx].type = type;
+ cesaTestTrace[cesaTestTraceIdx].realCause = MV_REG_READ(MV_CESA_ISR_CAUSE_REG);
+ //cesaTestTrace[cesaTestTraceIdx].idmaCause = MV_REG_READ(IDMA_CAUSE_REG);
+ cesaTestTrace[cesaTestTraceIdx].resources = cesaReqResources;
+ cesaTestTrace[cesaTestTraceIdx].pReqReady = pCesaReqReady;
+ cesaTestTrace[cesaTestTraceIdx].pReqEmpty = pCesaReqEmpty;
+ cesaTestTrace[cesaTestTraceIdx].pReqProcess = pCesaReqProcess;
+ cesaTestTrace[cesaTestTraceIdx].timeStamp = mvCntmrRead(MV_CESA_USE_TIMER_ID);
+ cesaTestTraceIdx++;
+ if(cesaTestTraceIdx == MV_CESA_TEST_TRACE_SIZE)
+ cesaTestTraceIdx = 0;
+}
+
+#else /* CESA_OCF_TRACE_DEBUG */
+
+#define cesaTestTraceAdd(x)
+
+#endif /* CESA_OCF_TRACE_DEBUG */
+
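+/* Simple timing helper: get_usec(1) records a start timestamp, get_usec(0) prints and
+   returns the elapsed time in microseconds. */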
+unsigned int
+get_usec(unsigned int start)
+{
+ if(start) {
+ do_gettimeofday (&tt_start);
+ return 0;
+ }
+ else {
+ do_gettimeofday (&tt_end);
+ tt_end.tv_sec -= tt_start.tv_sec;
+ tt_end.tv_usec -= tt_start.tv_usec;
+ if (tt_end.tv_usec < 0) {
+ tt_end.tv_usec += 1000 * 1000;
+ tt_end.tv_sec -= 1;
+ }
+ }
+ printk("time taken is %d\n", (unsigned int)(tt_end.tv_usec + tt_end.tv_sec * 1000000));
+ return (tt_end.tv_usec + tt_end.tv_sec * 1000000);
+}
+
+#ifdef RT_DEBUG
+/*
+ * check that the crp action match the current session
+ */
+static int
+ocf_check_action(struct cryptop *crp, struct cesa_ocf_data *cesa_ocf_cur_ses) {
+ int count = 0;
+ int encrypt = 0, decrypt = 0, auth = 0;
+ struct cryptodesc *crd;
+
+ /* Go through crypto descriptors, processing as we go */
+ for (crd = crp->crp_desc; crd; crd = crd->crd_next, count++) {
+ if(count > 2) {
+ printk("%s,%d: session mode is not supported.\n", __FILE__, __LINE__);
+ return 1;
+ }
+
+ /* Encryption /Decryption */
+ if(crd->crd_alg == cesa_ocf_cur_ses->cipher_alg) {
+ /* check that the action is compatible with session */
+ if(encrypt || decrypt) {
+ printk("%s,%d: session mode is not supported.\n", __FILE__, __LINE__);
+ return 1;
+ }
+
+ if(crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
+ if( (count == 2) && (cesa_ocf_cur_ses->encrypt_tn_auth) ) {
+ printk("%s,%d: sequence isn't supported by this session.\n", __FILE__, __LINE__);
+ return 1;
+ }
+ encrypt++;
+ }
+ else { /* decrypt */
+ if( (count == 2) && !(cesa_ocf_cur_ses->auth_tn_decrypt) ) {
+ printk("%s,%d: sequence isn't supported by this session.\n", __FILE__, __LINE__);
+ return 1;
+ }
+ decrypt++;
+ }
+
+ }
+ /* Authentication */
+ else if(crd->crd_alg == cesa_ocf_cur_ses->auth_alg) {
+ /* check that the action is compatible with session */
+ if(auth) {
+ printk("%s,%d: session mode is not supported.\n", __FILE__, __LINE__);
+ return 1;
+ }
+ if( (count == 2) && (decrypt) && (cesa_ocf_cur_ses->auth_tn_decrypt)) {
+ printk("%s,%d: sequence isn't supported by this session.\n", __FILE__, __LINE__);
+ return 1;
+ }
+ if( (count == 2) && (encrypt) && !(cesa_ocf_cur_ses->encrypt_tn_auth)) {
+ printk("%s,%d: sequence isn't supported by this session.\n", __FILE__, __LINE__);
+ return 1;
+ }
+ auth++;
+ }
+ else {
+ printk("%s,%d: Alg isn't supported by this session.\n", __FILE__, __LINE__);
+ return 1;
+ }
+ }
+ return 0;
+
+}
+#endif
+
+/*
+ * Process a request.
+ */
+static int
+cesa_ocf_process(device_t dev, struct cryptop *crp, int hint)
+{
+ struct cesa_ocf_process *cesa_ocf_cmd = NULL;
+ struct cesa_ocf_process *cesa_ocf_cmd_wa = NULL;
+ MV_CESA_COMMAND *cesa_cmd;
+ struct cryptodesc *crd;
+ struct cesa_ocf_data *cesa_ocf_cur_ses;
+ int sid = 0, temp_len = 0, i;
+ int encrypt = 0, decrypt = 0, auth = 0;
+ int status;
+ struct sk_buff *skb = NULL;
+ struct uio *uiop = NULL;
+ unsigned char *ivp;
+ MV_BUF_INFO *p_buf_info;
+ MV_CESA_MBUF *p_mbuf_info;
+ unsigned long flags;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ if( cesaReqResources <= 1 ) {
+ dprintk("%s,%d: ERESTART\n", __FILE__, __LINE__);
+ return ERESTART;
+ }
+
+#ifdef RT_DEBUG
+ /* Sanity check */
+ if (crp == NULL) {
+ printk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ if (crp->crp_desc == NULL || crp->crp_buf == NULL ) {
+ printk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ crp->crp_etype = EINVAL;
+ return EINVAL;
+ }
+
+ sid = crp->crp_sid & 0xffffffff;
+ if ((sid >= CESA_OCF_MAX_SES) || (cesa_ocf_sessions[sid] == NULL)) {
+ crp->crp_etype = ENOENT;
+ printk("%s,%d: ENOENT session %d \n", __FILE__, __LINE__, sid);
+ return EINVAL;
+ }
+#endif
+
+ sid = crp->crp_sid & 0xffffffff;
+ crp->crp_etype = 0;
+ cesa_ocf_cur_ses = cesa_ocf_sessions[sid];
+
+#ifdef RT_DEBUG
+ if(ocf_check_action(crp, cesa_ocf_cur_ses)){
+ goto p_error;
+ }
+#endif
+
+ /* malloc a new cesa process */
+ cesa_ocf_cmd = kmalloc(sizeof(struct cesa_ocf_process), GFP_ATOMIC);
+
+ if (cesa_ocf_cmd == NULL) {
+ printk("%s,%d: ENOBUFS \n", __FILE__, __LINE__);
+ goto p_error;
+ }
+ memset(cesa_ocf_cmd, 0, sizeof(struct cesa_ocf_process));
+
+ /* init cesa_process */
+ cesa_ocf_cmd->crp = crp;
+ /* always call callback */
+ cesa_ocf_cmd->need_cb = 1;
+
+ /* init cesa_cmd for usage of the HALs */
+ cesa_cmd = &cesa_ocf_cmd->cesa_cmd;
+ cesa_cmd->pReqPrv = (void *)cesa_ocf_cmd;
+	cesa_cmd->sessionId = cesa_ocf_cur_ses->sid_encrypt; /* default to the encrypt session */
+
+ /* prepare src buffer */
+	/* we send the entire buffer to the HAL, even if only part of it should be encrypted/authenticated. */
+	/* if sessions are not used for both encrypt and auth, it would be wiser to copy only */
+	/* the range from skip to crd_len. */
+ p_buf_info = cesa_ocf_cmd->cesa_bufs;
+ p_mbuf_info = &cesa_ocf_cmd->cesa_mbuf;
+
+	p_buf_info += 2; /* reserve the first 2 buffers for IV and digest -
+			    we won't append them to the end, since they
+			    might be placed at unaligned addresses. */
+
+ p_mbuf_info->pFrags = p_buf_info;
+ temp_len = 0;
+
+ /* handle SKB */
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+
+ dprintk("%s,%d: handle SKB.\n", __FILE__, __LINE__);
+ skb = (struct sk_buff *) crp->crp_buf;
+
+ if (skb_shinfo(skb)->nr_frags >= (MV_CESA_MAX_MBUF_FRAGS - 1)) {
+			printk("%s,%d: %d nr_frags > MV_CESA_MAX_MBUF_FRAGS\n", __FILE__, __LINE__, skb_shinfo(skb)->nr_frags);
+ goto p_error;
+ }
+
+ p_mbuf_info->mbufSize = skb->len;
+ temp_len = skb->len;
+ /* first skb fragment */
+ p_buf_info->bufSize = skb_headlen(skb);
+ p_buf_info->bufVirtPtr = skb->data;
+ p_buf_info++;
+
+ /* now handle all other skb fragments */
+ for ( i = 0; i < skb_shinfo(skb)->nr_frags; i++ ) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ p_buf_info->bufSize = frag->size;
+ p_buf_info->bufVirtPtr = page_address(FRAG_PAGE(frag->page)) + frag->page_offset;
+ p_buf_info++;
+ }
+ p_mbuf_info->numFrags = skb_shinfo(skb)->nr_frags + 1;
+ }
+ /* handle UIO */
+ else if(crp->crp_flags & CRYPTO_F_IOV) {
+
+ dprintk("%s,%d: handle UIO.\n", __FILE__, __LINE__);
+ uiop = (struct uio *) crp->crp_buf;
+
+ if (uiop->uio_iovcnt > (MV_CESA_MAX_MBUF_FRAGS - 1)) {
+ printk("%s,%d: %d uio_iovcnt > MV_CESA_MAX_MBUF_FRAGS \n", __FILE__, __LINE__, uiop->uio_iovcnt);
+ goto p_error;
+ }
+
+ p_mbuf_info->mbufSize = crp->crp_ilen;
+ p_mbuf_info->numFrags = uiop->uio_iovcnt;
+ for(i = 0; i < uiop->uio_iovcnt; i++) {
+ p_buf_info->bufVirtPtr = uiop->uio_iov[i].iov_base;
+ p_buf_info->bufSize = uiop->uio_iov[i].iov_len;
+ temp_len += p_buf_info->bufSize;
+ dprintk("%s,%d: buf %x-> addr %x, size %x \n"
+ , __FILE__, __LINE__, i, (unsigned int)p_buf_info->bufVirtPtr, p_buf_info->bufSize);
+ p_buf_info++;
+ }
+
+ }
+ /* handle CONTIG */
+ else {
+ dprintk("%s,%d: handle CONTIG.\n", __FILE__, __LINE__);
+ p_mbuf_info->numFrags = 1;
+ p_mbuf_info->mbufSize = crp->crp_ilen;
+ p_buf_info->bufVirtPtr = crp->crp_buf;
+ p_buf_info->bufSize = crp->crp_ilen;
+ temp_len = crp->crp_ilen;
+ p_buf_info++;
+ }
+
+	/* only buffers of up to 64KB are supported */
+ if(crp->crp_ilen > 64*1024) {
+ printk("%s,%d: buf too big %x \n", __FILE__, __LINE__, crp->crp_ilen);
+ goto p_error;
+ }
+
+	if( temp_len != crp->crp_ilen ) {
+		printk("%s,%d: warning: sizes don't match (%x %x) \n", __FILE__, __LINE__, temp_len, crp->crp_ilen);
+	}
+
+ cesa_cmd->pSrc = p_mbuf_info;
+ cesa_cmd->pDst = p_mbuf_info;
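+	/* in-place operation: source and destination share the same mbuf */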
+
+ /* restore p_buf_info to point to first available buf */
+ p_buf_info = cesa_ocf_cmd->cesa_bufs;
+ p_buf_info += 1;
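+	/* p_buf_info now points at the reserved slot just before the data fragments,
+	   so an IV fragment can be prepended to the mbuf when needed */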
+
+
+ /* Go through crypto descriptors, processing as we go */
+ for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+
+ /* Encryption /Decryption */
+ if(crd->crd_alg == cesa_ocf_cur_ses->cipher_alg) {
+
+ dprintk("%s,%d: cipher", __FILE__, __LINE__);
+
+ cesa_cmd->cryptoOffset = crd->crd_skip;
+ cesa_cmd->cryptoLength = crd->crd_len;
+
+ if(crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
+ dprintk(" encrypt \n");
+ encrypt++;
+
+ /* handle IV */
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT) { /* IV from USER */
+ dprintk("%s,%d: IV from USER (offset %x) \n", __FILE__, __LINE__, crd->crd_inject);
+ cesa_cmd->ivFromUser = 1;
+ ivp = crd->crd_iv;
+
+ /*
+ * do we have to copy the IV back to the buffer ?
+ */
+ if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+ dprintk("%s,%d: copy the IV back to the buffer\n", __FILE__, __LINE__);
+ cesa_cmd->ivOffset = crd->crd_inject;
+ crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, cesa_ocf_cur_ses->ivlen, ivp);
+ }
+ else {
+ dprintk("%s,%d: don't copy the IV back to the buffer \n", __FILE__, __LINE__);
+ p_mbuf_info->numFrags++;
+ p_mbuf_info->mbufSize += cesa_ocf_cur_ses->ivlen;
+ p_mbuf_info->pFrags = p_buf_info;
+
+ p_buf_info->bufVirtPtr = ivp;
+ p_buf_info->bufSize = cesa_ocf_cur_ses->ivlen;
+ p_buf_info--;
+
+ /* offsets */
+ cesa_cmd->ivOffset = 0;
+ cesa_cmd->cryptoOffset += cesa_ocf_cur_ses->ivlen;
+ if(auth) {
+ cesa_cmd->macOffset += cesa_ocf_cur_ses->ivlen;
+ cesa_cmd->digestOffset += cesa_ocf_cur_ses->ivlen;
+ }
+ }
+ }
+ else { /* random IV */
+ dprintk("%s,%d: random IV \n", __FILE__, __LINE__);
+ cesa_cmd->ivFromUser = 0;
+
+ /*
+ * do we have to copy the IV back to the buffer ?
+ */
+ /* in this mode the HAL will always copy the IV */
+ /* given by the session to the ivOffset */
+ if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+ cesa_cmd->ivOffset = crd->crd_inject;
+ }
+ else {
+					/* if the IV isn't copied back, the user has no way to know which IV was used */
+ printk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ goto p_error;
+ }
+ }
+ }
+ else { /* decrypt */
+ dprintk(" decrypt \n");
+ decrypt++;
+ cesa_cmd->sessionId = cesa_ocf_cur_ses->sid_decrypt;
+
+ /* handle IV */
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
+ dprintk("%s,%d: IV from USER \n", __FILE__, __LINE__);
+ /* append the IV buf to the mbuf */
+ cesa_cmd->ivFromUser = 1;
+ p_mbuf_info->numFrags++;
+ p_mbuf_info->mbufSize += cesa_ocf_cur_ses->ivlen;
+ p_mbuf_info->pFrags = p_buf_info;
+
+ p_buf_info->bufVirtPtr = crd->crd_iv;
+ p_buf_info->bufSize = cesa_ocf_cur_ses->ivlen;
+ p_buf_info--;
+
+ /* offsets */
+ cesa_cmd->ivOffset = 0;
+ cesa_cmd->cryptoOffset += cesa_ocf_cur_ses->ivlen;
+ if(auth) {
+ cesa_cmd->macOffset += cesa_ocf_cur_ses->ivlen;
+ cesa_cmd->digestOffset += cesa_ocf_cur_ses->ivlen;
+ }
+ }
+ else {
+ dprintk("%s,%d: IV inside the buffer \n", __FILE__, __LINE__);
+ cesa_cmd->ivFromUser = 0;
+ cesa_cmd->ivOffset = crd->crd_inject;
+ }
+ }
+
+ }
+ /* Authentication */
+ else if(crd->crd_alg == cesa_ocf_cur_ses->auth_alg) {
+ dprintk("%s,%d: Authentication \n", __FILE__, __LINE__);
+ auth++;
+ cesa_cmd->macOffset = crd->crd_skip;
+ cesa_cmd->macLength = crd->crd_len;
+
+ /* digest + mac */
+ cesa_cmd->digestOffset = crd->crd_inject;
+ }
+ else {
+ printk("%s,%d: Alg isn't supported by this session.\n", __FILE__, __LINE__);
+ goto p_error;
+ }
+ }
+
+ dprintk("\n");
+ dprintk("%s,%d: Sending Action: \n", __FILE__, __LINE__);
+ dprintk("%s,%d: IV from user: %d. IV offset %x \n", __FILE__, __LINE__, cesa_cmd->ivFromUser, cesa_cmd->ivOffset);
+ dprintk("%s,%d: crypt offset %x len %x \n", __FILE__, __LINE__, cesa_cmd->cryptoOffset, cesa_cmd->cryptoLength);
+ dprintk("%s,%d: Auth offset %x len %x \n", __FILE__, __LINE__, cesa_cmd->macOffset, cesa_cmd->macLength);
+ dprintk("%s,%d: set digest in offset %x . \n", __FILE__, __LINE__, cesa_cmd->digestOffset);
+ if(debug) {
+ mvCesaDebugMbuf("SRC BUFFER", cesa_cmd->pSrc, 0, cesa_cmd->pSrc->mbufSize);
+ }
+
+
+ /* send action to HAL */
+ spin_lock_irqsave(&cesa_lock, flags);
+ status = mvCesaAction(cesa_cmd);
+ spin_unlock_irqrestore(&cesa_lock, flags);
+
+ /* action not allowed */
+ if(status == MV_NOT_ALLOWED) {
+#ifdef CESA_OCF_SPLIT
+ /* if both encrypt and auth try to split */
+ if(auth && (encrypt || decrypt)) {
+ MV_CESA_COMMAND *cesa_cmd_wa;
+
+ /* malloc a new cesa process and init it */
+ cesa_ocf_cmd_wa = kmalloc(sizeof(struct cesa_ocf_process), GFP_ATOMIC);
+
+ if (cesa_ocf_cmd_wa == NULL) {
+ printk("%s,%d: ENOBUFS \n", __FILE__, __LINE__);
+ goto p_error;
+ }
+ memcpy(cesa_ocf_cmd_wa, cesa_ocf_cmd, sizeof(struct cesa_ocf_process));
+ cesa_cmd_wa = &cesa_ocf_cmd_wa->cesa_cmd;
+ cesa_cmd_wa->pReqPrv = (void *)cesa_ocf_cmd_wa;
+ cesa_ocf_cmd_wa->need_cb = 0;
+
+ /* break requests to two operation, first operation completion won't call callback */
+ if((decrypt) && (cesa_ocf_cur_ses->auth_tn_decrypt)) {
+ cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
+ cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_decrypt;
+ }
+ else if((decrypt) && !(cesa_ocf_cur_ses->auth_tn_decrypt)) {
+ cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_decrypt;
+ cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
+ }
+ else if((encrypt) && (cesa_ocf_cur_ses->encrypt_tn_auth)) {
+ cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_encrypt;
+ cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
+ }
+ else if((encrypt) && !(cesa_ocf_cur_ses->encrypt_tn_auth)){
+ cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
+ cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_encrypt;
+ }
+ else {
+				printk("%s,%d: Unsupported fragment workaround mode \n", __FILE__, __LINE__);
+ goto p_error;
+ }
+
+ /* send the 2 actions to the HAL */
+ spin_lock_irqsave(&cesa_lock, flags);
+ status = mvCesaAction(cesa_cmd_wa);
+ spin_unlock_irqrestore(&cesa_lock, flags);
+
+ if((status != MV_NO_MORE) && (status != MV_OK)) {
+ printk("%s,%d: cesa action failed, status = 0x%x\n", __FILE__, __LINE__, status);
+ goto p_error;
+ }
+ spin_lock_irqsave(&cesa_lock, flags);
+ status = mvCesaAction(cesa_cmd);
+ spin_unlock_irqrestore(&cesa_lock, flags);
+
+ }
+ /* action not allowed and can't split */
+ else
+#endif
+ {
+ goto p_error;
+ }
+ }
+
+	/* HAL queue is full, caller must resubmit. This should never happen */
+ if(status == MV_NO_RESOURCE) {
+ printk("%s,%d: cesa no more resources \n", __FILE__, __LINE__);
+ if(cesa_ocf_cmd)
+ kfree(cesa_ocf_cmd);
+ if(cesa_ocf_cmd_wa)
+ kfree(cesa_ocf_cmd_wa);
+ return ERESTART;
+ }
+ else if((status != MV_NO_MORE) && (status != MV_OK)) {
+ printk("%s,%d: cesa action failed, status = 0x%x\n", __FILE__, __LINE__, status);
+ goto p_error;
+ }
+
+
+#ifdef CESA_OCF_POLLING
+ cesa_interrupt_polling();
+#endif
+ cesaTestTraceAdd(5);
+
+ return 0;
+p_error:
+ crp->crp_etype = EINVAL;
+ if(cesa_ocf_cmd)
+ kfree(cesa_ocf_cmd);
+ if(cesa_ocf_cmd_wa)
+ kfree(cesa_ocf_cmd_wa);
+ return EINVAL;
+}
+
+/*
+ * cesa callback.
+ */
+static void
+cesa_callback(unsigned long dummy)
+{
+ struct cesa_ocf_process *cesa_ocf_cmd = NULL;
+ struct cryptop *crp = NULL;
+ MV_CESA_RESULT result[MV_CESA_MAX_CHAN];
+ int res_idx = 0,i;
+ MV_STATUS status;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+#ifdef CESA_OCF_TASKLET
+ disable_irq(cesa_device.irq);
+#endif
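+	/* collect completed requests from the HAL; the loop below fetches a single result per invocation */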
+ while(MV_TRUE) {
+
+ /* Get Ready requests */
+ spin_lock(&cesa_lock);
+ status = mvCesaReadyGet(&result[res_idx]);
+ spin_unlock(&cesa_lock);
+
+ cesaTestTraceAdd(2);
+
+ if(status != MV_OK) {
+#ifdef CESA_OCF_POLLING
+ if(status == MV_BUSY) { /* Fragment */
+ cesa_interrupt_polling();
+ return;
+ }
+#endif
+ break;
+ }
+ res_idx++;
+ break;
+ }
+
+ for(i = 0; i < res_idx; i++) {
+
+ if(!result[i].pReqPrv) {
+ printk("%s,%d: warning private is NULL\n", __FILE__, __LINE__);
+ break;
+ }
+
+ cesa_ocf_cmd = result[i].pReqPrv;
+ crp = cesa_ocf_cmd->crp;
+
+ // ignore HMAC error.
+ //if(result->retCode)
+ // crp->crp_etype = EIO;
+
+#if defined(CESA_OCF_POLLING)
+ if(!cesa_ocf_cmd->need_cb){
+ cesa_interrupt_polling();
+ }
+#endif
+ if(cesa_ocf_cmd->need_cb) {
+ if(debug) {
+ mvCesaDebugMbuf("DST BUFFER", cesa_ocf_cmd->cesa_cmd.pDst, 0, cesa_ocf_cmd->cesa_cmd.pDst->mbufSize);
+ }
+ crypto_done(crp);
+ }
+ kfree(cesa_ocf_cmd);
+ }
+#ifdef CESA_OCF_TASKLET
+ enable_irq(cesa_device.irq);
+#endif
+
+ cesaTestTraceAdd(3);
+
+ return;
+}
+
+#ifdef CESA_OCF_POLLING
+static void
+cesa_interrupt_polling(void)
+{
+ u32 cause;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ /* Read cause register */
+ do {
+ cause = MV_REG_READ(MV_CESA_ISR_CAUSE_REG);
+ cause &= MV_CESA_CAUSE_ACC_DMA_ALL_MASK;
+
+ } while (cause == 0);
+
+ /* clear interrupts */
+ MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG, 0);
+
+ cesa_callback(0);
+
+ return;
+}
+
+#endif
+
+/*
+ * cesa interrupt handler routine.
+ */
+static irqreturn_t
+cesa_interrupt_handler(int irq, void *arg)
+{
+ u32 cause;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ cesaTestTraceAdd(0);
+
+ /* Read cause register */
+ cause = MV_REG_READ(MV_CESA_ISR_CAUSE_REG);
+
+ if( (cause & MV_CESA_CAUSE_ACC_DMA_ALL_MASK) == 0)
+ {
+ /* Empty interrupt */
+ dprintk("%s,%d: cesaTestReadyIsr: cause=0x%x\n", __FILE__, __LINE__, cause);
+ return IRQ_HANDLED;
+ }
+
+ /* clear interrupts */
+ MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG, 0);
+
+ cesaTestTraceAdd(1);
+#ifdef CESA_OCF_TASKLET
+ tasklet_hi_schedule(&cesa_ocf_tasklet);
+#else
+ cesa_callback(0);
+#endif
+ return IRQ_HANDLED;
+}
+
+/*
+ * Open a session.
+ */
+static int
+/*cesa_ocf_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)*/
+cesa_ocf_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
+{
+ u32 status = 0, i;
+ u32 count = 0, auth = 0, encrypt =0;
+ struct cesa_ocf_data *cesa_ocf_cur_ses;
+ MV_CESA_OPEN_SESSION cesa_session;
+ MV_CESA_OPEN_SESSION *cesa_ses = &cesa_session;
+
+
+ dprintk("%s()\n", __FUNCTION__);
+ if (sid == NULL || cri == NULL) {
+ printk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ /* leave first empty like in other implementations */
+ for (i = 1; i < CESA_OCF_MAX_SES; i++) {
+ if (cesa_ocf_sessions[i] == NULL)
+ break;
+ }
+
+ if(i >= CESA_OCF_MAX_SES) {
+ printk("%s,%d: no more sessions \n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ cesa_ocf_sessions[i] = (struct cesa_ocf_data *) kmalloc(sizeof(struct cesa_ocf_data), GFP_ATOMIC);
+ if (cesa_ocf_sessions[i] == NULL) {
+ cesa_ocf_freesession(NULL, i);
+ printk("%s,%d: ENOBUFS \n", __FILE__, __LINE__);
+ return ENOBUFS;
+ }
+ dprintk("%s,%d: new session %d \n", __FILE__, __LINE__, i);
+
+ *sid = i;
+ cesa_ocf_cur_ses = cesa_ocf_sessions[i];
+ memset(cesa_ocf_cur_ses, 0, sizeof(struct cesa_ocf_data));
+ cesa_ocf_cur_ses->sid_encrypt = -1;
+ cesa_ocf_cur_ses->sid_decrypt = -1;
+ cesa_ocf_cur_ses->frag_wa_encrypt = -1;
+ cesa_ocf_cur_ses->frag_wa_decrypt = -1;
+ cesa_ocf_cur_ses->frag_wa_auth = -1;
+
+ /* init the session */
+ memset(cesa_ses, 0, sizeof(MV_CESA_OPEN_SESSION));
+ count = 1;
+ while (cri) {
+ if(count > 2) {
+			printk("%s,%d: no more than 2 operations are supported\n", __FILE__, __LINE__);
+ goto error;
+ }
+ switch (cri->cri_alg) {
+ case CRYPTO_AES_CBC:
+ dprintk("%s,%d: (%d) AES CBC \n", __FILE__, __LINE__, count);
+ cesa_ocf_cur_ses->cipher_alg = cri->cri_alg;
+ cesa_ocf_cur_ses->ivlen = MV_CESA_AES_BLOCK_SIZE;
+ cesa_ses->cryptoAlgorithm = MV_CESA_CRYPTO_AES;
+ cesa_ses->cryptoMode = MV_CESA_CRYPTO_CBC;
+ if(cri->cri_klen/8 > MV_CESA_MAX_CRYPTO_KEY_LENGTH) {
+ printk("%s,%d: CRYPTO key too long.\n", __FILE__, __LINE__);
+ goto error;
+ }
+ memcpy(cesa_ses->cryptoKey, cri->cri_key, cri->cri_klen/8);
+ dprintk("%s,%d: key length %d \n", __FILE__, __LINE__, cri->cri_klen/8);
+ cesa_ses->cryptoKeyLength = cri->cri_klen/8;
+ encrypt += count;
+ break;
+ case CRYPTO_3DES_CBC:
+ dprintk("%s,%d: (%d) 3DES CBC \n", __FILE__, __LINE__, count);
+ cesa_ocf_cur_ses->cipher_alg = cri->cri_alg;
+ cesa_ocf_cur_ses->ivlen = MV_CESA_3DES_BLOCK_SIZE;
+ cesa_ses->cryptoAlgorithm = MV_CESA_CRYPTO_3DES;
+ cesa_ses->cryptoMode = MV_CESA_CRYPTO_CBC;
+ if(cri->cri_klen/8 > MV_CESA_MAX_CRYPTO_KEY_LENGTH) {
+ printk("%s,%d: CRYPTO key too long.\n", __FILE__, __LINE__);
+ goto error;
+ }
+ memcpy(cesa_ses->cryptoKey, cri->cri_key, cri->cri_klen/8);
+ cesa_ses->cryptoKeyLength = cri->cri_klen/8;
+ encrypt += count;
+ break;
+ case CRYPTO_DES_CBC:
+ dprintk("%s,%d: (%d) DES CBC \n", __FILE__, __LINE__, count);
+ cesa_ocf_cur_ses->cipher_alg = cri->cri_alg;
+ cesa_ocf_cur_ses->ivlen = MV_CESA_DES_BLOCK_SIZE;
+ cesa_ses->cryptoAlgorithm = MV_CESA_CRYPTO_DES;
+ cesa_ses->cryptoMode = MV_CESA_CRYPTO_CBC;
+ if(cri->cri_klen/8 > MV_CESA_MAX_CRYPTO_KEY_LENGTH) {
+ printk("%s,%d: CRYPTO key too long.\n", __FILE__, __LINE__);
+ goto error;
+ }
+ memcpy(cesa_ses->cryptoKey, cri->cri_key, cri->cri_klen/8);
+ cesa_ses->cryptoKeyLength = cri->cri_klen/8;
+ encrypt += count;
+ break;
+ case CRYPTO_MD5:
+ case CRYPTO_MD5_HMAC:
+			dprintk("%s,%d: (%d) %sMD5 \n", __FILE__, __LINE__, count, (cri->cri_alg != CRYPTO_MD5)? "H-":" ");
+ cesa_ocf_cur_ses->auth_alg = cri->cri_alg;
+ cesa_ocf_cur_ses->digestlen = (cri->cri_alg == CRYPTO_MD5)? MV_CESA_MD5_DIGEST_SIZE : 12;
+ cesa_ses->macMode = (cri->cri_alg == CRYPTO_MD5)? MV_CESA_MAC_MD5 : MV_CESA_MAC_HMAC_MD5;
+ if(cri->cri_klen/8 > MV_CESA_MAX_CRYPTO_KEY_LENGTH) {
+ printk("%s,%d: MAC key too long. \n", __FILE__, __LINE__);
+ goto error;
+ }
+ cesa_ses->macKeyLength = cri->cri_klen/8;
+ memcpy(cesa_ses->macKey, cri->cri_key, cri->cri_klen/8);
+ cesa_ses->digestSize = cesa_ocf_cur_ses->digestlen;
+ auth += count;
+ break;
+ case CRYPTO_SHA1:
+ case CRYPTO_SHA1_HMAC:
+			dprintk("%s,%d: (%d) %sSHA1 \n", __FILE__, __LINE__, count, (cri->cri_alg != CRYPTO_SHA1)? "H-":" ");
+ cesa_ocf_cur_ses->auth_alg = cri->cri_alg;
+ cesa_ocf_cur_ses->digestlen = (cri->cri_alg == CRYPTO_SHA1)? MV_CESA_SHA1_DIGEST_SIZE : 12;
+ cesa_ses->macMode = (cri->cri_alg == CRYPTO_SHA1)? MV_CESA_MAC_SHA1 : MV_CESA_MAC_HMAC_SHA1;
+ if(cri->cri_klen/8 > MV_CESA_MAX_CRYPTO_KEY_LENGTH) {
+ printk("%s,%d: MAC key too long. \n", __FILE__, __LINE__);
+ goto error;
+ }
+ cesa_ses->macKeyLength = cri->cri_klen/8;
+ memcpy(cesa_ses->macKey, cri->cri_key, cri->cri_klen/8);
+ cesa_ses->digestSize = cesa_ocf_cur_ses->digestlen;
+ auth += count;
+ break;
+ default:
+ printk("%s,%d: unknown algo 0x%x\n", __FILE__, __LINE__, cri->cri_alg);
+ goto error;
+ }
+ cri = cri->cri_next;
+ count++;
+ }
+
+ if((encrypt > 2) || (auth > 2)) {
+ printk("%s,%d: session mode is not supported.\n", __FILE__, __LINE__);
+ goto error;
+ }
+ /* create new sessions in HAL */
+ if(encrypt) {
+ cesa_ses->operation = MV_CESA_CRYPTO_ONLY;
+ /* encrypt session */
+ if(auth == 1) {
+ cesa_ses->operation = MV_CESA_MAC_THEN_CRYPTO;
+ }
+ else if(auth == 2) {
+ cesa_ses->operation = MV_CESA_CRYPTO_THEN_MAC;
+ cesa_ocf_cur_ses->encrypt_tn_auth = 1;
+ }
+ else {
+ cesa_ses->operation = MV_CESA_CRYPTO_ONLY;
+ }
+ cesa_ses->direction = MV_CESA_DIR_ENCODE;
+ status = mvCesaSessionOpen(cesa_ses, &cesa_ocf_cur_ses->sid_encrypt);
+ if(status != MV_OK) {
+ printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+ goto error;
+ }
+ /* decrypt session */
+ if( cesa_ses->operation == MV_CESA_MAC_THEN_CRYPTO ) {
+ cesa_ses->operation = MV_CESA_CRYPTO_THEN_MAC;
+ }
+ else if( cesa_ses->operation == MV_CESA_CRYPTO_THEN_MAC ) {
+ cesa_ses->operation = MV_CESA_MAC_THEN_CRYPTO;
+ }
+ cesa_ses->direction = MV_CESA_DIR_DECODE;
+ status = mvCesaSessionOpen(cesa_ses, &cesa_ocf_cur_ses->sid_decrypt);
+ if(status != MV_OK) {
+ printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+ goto error;
+ }
+
+		/* prepare single-operation sessions in case we need to split an action */
+#ifdef CESA_OCF_SPLIT
+ if(( cesa_ses->operation == MV_CESA_MAC_THEN_CRYPTO ) ||
+ ( cesa_ses->operation == MV_CESA_CRYPTO_THEN_MAC )) {
+ /* open one session for encode and one for decode */
+ cesa_ses->operation = MV_CESA_CRYPTO_ONLY;
+ cesa_ses->direction = MV_CESA_DIR_ENCODE;
+ status = mvCesaSessionOpen(cesa_ses, &cesa_ocf_cur_ses->frag_wa_encrypt);
+ if(status != MV_OK) {
+ printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+ goto error;
+ }
+
+ cesa_ses->direction = MV_CESA_DIR_DECODE;
+ status = mvCesaSessionOpen(cesa_ses, &cesa_ocf_cur_ses->frag_wa_decrypt);
+ if(status != MV_OK) {
+ printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+ goto error;
+ }
+ /* open one session for auth */
+ cesa_ses->operation = MV_CESA_MAC_ONLY;
+ cesa_ses->direction = MV_CESA_DIR_ENCODE;
+ status = mvCesaSessionOpen(cesa_ses, &cesa_ocf_cur_ses->frag_wa_auth);
+ if(status != MV_OK) {
+ printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+ goto error;
+ }
+ }
+#endif
+ }
+ else { /* only auth */
+ cesa_ses->operation = MV_CESA_MAC_ONLY;
+ cesa_ses->direction = MV_CESA_DIR_ENCODE;
+ status = mvCesaSessionOpen(cesa_ses, &cesa_ocf_cur_ses->sid_encrypt);
+ if(status != MV_OK) {
+ printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+ goto error;
+ }
+ }
+
+ return 0;
+error:
+ cesa_ocf_freesession(NULL, *sid);
+ return EINVAL;
+
+}
+
+
+/*
+ * Free a session.
+ */
+static int
+cesa_ocf_freesession(device_t dev, u_int64_t tid)
+{
+ struct cesa_ocf_data *cesa_ocf_cur_ses;
+ u_int32_t sid = CRYPTO_SESID2LID(tid);
+ //unsigned long flags;
+
+ dprintk("%s() %d \n", __FUNCTION__, sid);
+ if ( (sid >= CESA_OCF_MAX_SES) || (cesa_ocf_sessions[sid] == NULL) ) {
+ printk("%s,%d: EINVAL can't free session %d \n", __FILE__, __LINE__, sid);
+ return(EINVAL);
+ }
+
+ /* Silently accept and return */
+ if (sid == 0)
+ return(0);
+
+ /* release session from HAL */
+ cesa_ocf_cur_ses = cesa_ocf_sessions[sid];
+ if (cesa_ocf_cur_ses->sid_encrypt != -1) {
+ mvCesaSessionClose(cesa_ocf_cur_ses->sid_encrypt);
+ }
+ if (cesa_ocf_cur_ses->sid_decrypt != -1) {
+ mvCesaSessionClose(cesa_ocf_cur_ses->sid_decrypt);
+ }
+ if (cesa_ocf_cur_ses->frag_wa_encrypt != -1) {
+ mvCesaSessionClose(cesa_ocf_cur_ses->frag_wa_encrypt);
+ }
+ if (cesa_ocf_cur_ses->frag_wa_decrypt != -1) {
+ mvCesaSessionClose(cesa_ocf_cur_ses->frag_wa_decrypt);
+ }
+ if (cesa_ocf_cur_ses->frag_wa_auth != -1) {
+ mvCesaSessionClose(cesa_ocf_cur_ses->frag_wa_auth);
+ }
+
+ kfree(cesa_ocf_cur_ses);
+ cesa_ocf_sessions[sid] = NULL;
+
+ return 0;
+}
+
+
+/* TDMA Window setup */
+
+static void __init
+setup_tdma_mbus_windows(struct cesa_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ writel(0, dev->reg + WINDOW_BASE(i));
+ writel(0, dev->reg + WINDOW_CTRL(i));
+ }
+
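+	/* program one decode window per DRAM chip-select: size mask, mbus attributes, target id and enable bit */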
+ for (i = 0; i < dev->plat_data->dram->num_cs; i++) {
+ struct mbus_dram_window *cs = dev->plat_data->dram->cs + i;
+ writel(
+ ((cs->size - 1) & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ (dev->plat_data->dram->mbus_dram_target_id << 4) | 1,
+ dev->reg + WINDOW_CTRL(i)
+ );
+ writel(cs->base, dev->reg + WINDOW_BASE(i));
+ }
+}
+
+/*
+ * our driver startup and shutdown routines
+ */
+static int
+mv_cesa_ocf_init(struct platform_device *pdev)
+{
+#if defined(CONFIG_MV78200) || defined(CONFIG_MV632X)
+ if (MV_FALSE == mvSocUnitIsMappedToThisCpu(CESA))
+ {
+ dprintk("CESA is not mapped to this CPU\n");
+ return -ENODEV;
+ }
+#endif
+
+ dprintk("%s\n", __FUNCTION__);
+ memset(&mv_cesa_dev, 0, sizeof(mv_cesa_dev));
+ softc_device_init(&mv_cesa_dev, "MV CESA", 0, mv_cesa_methods);
+ cesa_ocf_id = crypto_get_driverid(softc_get_device(&mv_cesa_dev),CRYPTOCAP_F_HARDWARE);
+
+ if (cesa_ocf_id < 0)
+ panic("MV CESA crypto device cannot initialize!");
+
+ dprintk("%s,%d: cesa ocf device id is %d \n", __FILE__, __LINE__, cesa_ocf_id);
+
+ /* CESA unit is auto power on off */
+#if 0
+ if (MV_FALSE == mvCtrlPwrClckGet(CESA_UNIT_ID,0))
+ {
+ printk("\nWarning CESA %d is Powered Off\n",0);
+ return EINVAL;
+ }
+#endif
+
+ memset(&cesa_device, 0, sizeof(struct cesa_dev));
+ /* Get the IRQ, and crypto memory regions */
+ {
+ struct resource *res;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+
+ if (!res)
+ return -ENXIO;
+
+ cesa_device.sram = ioremap(res->start, res->end - res->start + 1);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+
+ if (!res) {
+ iounmap(cesa_device.sram);
+ return -ENXIO;
+ }
+ cesa_device.reg = ioremap(res->start, res->end - res->start + 1);
+ cesa_device.irq = platform_get_irq(pdev, 0);
+ cesa_device.plat_data = pdev->dev.platform_data;
+ setup_tdma_mbus_windows(&cesa_device);
+
+ }
+
+
+ if( MV_OK != mvCesaInit(CESA_OCF_MAX_SES*5, CESA_Q_SIZE, cesa_device.reg,
+ NULL) ) {
+ printk("%s,%d: mvCesaInit Failed. \n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ /* clear and unmask Int */
+ MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG, 0);
+#ifndef CESA_OCF_POLLING
+ MV_REG_WRITE( MV_CESA_ISR_MASK_REG, MV_CESA_CAUSE_ACC_DMA_MASK);
+#endif
+#ifdef CESA_OCF_TASKLET
+	tasklet_init(&cesa_ocf_tasklet, cesa_callback, (unsigned long) 0);
+#endif
+ /* register interrupt */
+ if( request_irq( cesa_device.irq, cesa_interrupt_handler,
+ (IRQF_DISABLED) , "cesa", &cesa_ocf_id) < 0) {
+		printk("%s,%d: cannot assign irq %d\n", __FILE__, __LINE__, cesa_device.irq);
+ return EINVAL;
+ }
+
+
+ memset(cesa_ocf_sessions, 0, sizeof(struct cesa_ocf_data *) * CESA_OCF_MAX_SES);
+
+#define REGISTER(alg) \
+ crypto_register(cesa_ocf_id, alg, 0,0)
+ REGISTER(CRYPTO_AES_CBC);
+ REGISTER(CRYPTO_DES_CBC);
+ REGISTER(CRYPTO_3DES_CBC);
+ REGISTER(CRYPTO_MD5);
+ REGISTER(CRYPTO_MD5_HMAC);
+ REGISTER(CRYPTO_SHA1);
+ REGISTER(CRYPTO_SHA1_HMAC);
+#undef REGISTER
+
+ return 0;
+}
+
+static void
+mv_cesa_ocf_exit(struct platform_device *pdev)
+{
+ dprintk("%s()\n", __FUNCTION__);
+
+ crypto_unregister_all(cesa_ocf_id);
+ cesa_ocf_id = -1;
+ iounmap(cesa_device.reg);
+ iounmap(cesa_device.sram);
+ free_irq(cesa_device.irq, NULL);
+
+ /* mask and clear Int */
+ MV_REG_WRITE( MV_CESA_ISR_MASK_REG, 0);
+ MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG, 0);
+
+
+ if( MV_OK != mvCesaFinish() ) {
+ printk("%s,%d: mvCesaFinish Failed. \n", __FILE__, __LINE__);
+ return;
+ }
+}
+
+
+void cesa_ocf_debug(void)
+{
+
+#ifdef CESA_OCF_TRACE_DEBUG
+ {
+ int i, j;
+ j = cesaTestTraceIdx;
+	mvOsPrintf("No  Type  rCause   dCause  Res  Time      pReady      pProc       pEmpty\n");
+	for(i=0; i<MV_CESA_TEST_TRACE_SIZE; i++)
+	{
+	    mvOsPrintf("%02d.  %d    0x%04x   0x%04x  %02d   0x%06x  %p  %p  %p\n",
+			j, cesaTestTrace[j].type, cesaTestTrace[j].realCause,
+			cesaTestTrace[j].dmaCause,
+			cesaTestTrace[j].resources, cesaTestTrace[j].timeStamp,
+			cesaTestTrace[j].pReqReady, cesaTestTrace[j].pReqProcess, cesaTestTrace[j].pReqEmpty);
+ j++;
+ if(j == MV_CESA_TEST_TRACE_SIZE)
+ j = 0;
+ }
+ }
+#endif
+
+}
+
+static struct platform_driver marvell_cesa = {
+ .probe = mv_cesa_ocf_init,
+ .remove = mv_cesa_ocf_exit,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mv_crypto",
+ },
+};
+
+MODULE_ALIAS("platform:mv_crypto");
+
+static int __init mv_cesa_init(void)
+{
+ return platform_driver_register(&marvell_cesa);
+}
+
+module_init(mv_cesa_init);
+
+static void __exit mv_cesa_exit(void)
+{
+ platform_driver_unregister(&marvell_cesa);
+}
+
+module_exit(mv_cesa_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ronen Shitrit");
+MODULE_DESCRIPTION("OCF module for Orion CESA crypto");
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mv802_3.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mv802_3.h
new file mode 100644
index 000000000..3769dde2f
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mv802_3.h
@@ -0,0 +1,213 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmv802_3h
+#define __INCmv802_3h
+
+
+/* includes */
+#include "mvTypes.h"
+
+/* Defines */
+#define MV_MAX_ETH_DATA 1500
+
+/* 802.3 types */
+#define MV_IP_TYPE 0x0800
+#define MV_IP_ARP_TYPE 0x0806
+#define MV_APPLE_TALK_ARP_TYPE 0x80F3
+#define MV_NOVELL_IPX_TYPE 0x8137
+#define MV_EAPOL_TYPE 0x888e
+
+
+
+/* Encapsulation header for RFC1042 and Ethernet_tunnel */
+
+#define MV_RFC1042_SNAP_HEADER {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00}
+
+#define MV_ETH_SNAP_LSB 0xF8
+
+
+#define MV_MAC_ADDR_SIZE (6)
+#define MV_MAC_STR_SIZE (20)
+#define MV_VLAN_HLEN (4)
+
+/* This macro checks for a multicast mac address */
+#define MV_IS_MULTICAST_MAC(mac) (((mac)[0] & 0x1) == 1)
+
+
+/* This macro checks for an broadcast mac address */
+#define MV_IS_BROADCAST_MAC(mac) \
+ (((mac)[0] == 0xFF) && \
+ ((mac)[1] == 0xFF) && \
+ ((mac)[2] == 0xFF) && \
+ ((mac)[3] == 0xFF) && \
+ ((mac)[4] == 0xFF) && \
+ ((mac)[5] == 0xFF))
+
+
+/* Typedefs */
+typedef struct
+{
+ MV_U8 pDA[MV_MAC_ADDR_SIZE];
+ MV_U8 pSA[MV_MAC_ADDR_SIZE];
+ MV_U16 typeOrLen;
+
+} MV_802_3_HEADER;
+
+enum {
+ MV_IP_PROTO_NULL = 0, /* Dummy protocol for TCP */
+ MV_IP_PROTO_ICMP = 1, /* Internet Control Message Protocol */
+ MV_IP_PROTO_IGMP = 2, /* Internet Group Management Protocol */
+ MV_IP_PROTO_IPIP = 4, /* IPIP tunnels (older KA9Q tunnels use 94) */
+ MV_IP_PROTO_TCP = 6, /* Transmission Control Protocol */
+ MV_IP_PROTO_EGP = 8, /* Exterior Gateway Protocol */
+ MV_IP_PROTO_PUP = 12, /* PUP protocol */
+ MV_IP_PROTO_UDP = 17, /* User Datagram Protocol */
+ MV_IP_PROTO_IDP = 22, /* XNS IDP protocol */
+ MV_IP_PROTO_DCCP = 33, /* Datagram Congestion Control Protocol */
+ MV_IP_PROTO_IPV6 = 41, /* IPv6-in-IPv4 tunnelling */
+ MV_IP_PROTO_RSVP = 46, /* RSVP protocol */
+ MV_IP_PROTO_GRE = 47, /* Cisco GRE tunnels (rfc 1701,1702) */
+ MV_IP_PROTO_ESP = 50, /* Encapsulation Security Payload protocol */
+ MV_IP_PROTO_AH = 51, /* Authentication Header protocol */
+ MV_IP_PROTO_BEETPH = 94, /* IP option pseudo header for BEET */
+ MV_IP_PROTO_PIM = 103,
+ MV_IP_PROTO_COMP = 108, /* Compression Header protocol */
+ MV_IP_PROTO_ZERO_HOP = 114, /* Any 0 hop protocol (IANA) */
+ MV_IP_PROTO_SCTP = 132, /* Stream Control Transport Protocol */
+ MV_IP_PROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */
+
+ MV_IP_PROTO_RAW = 255, /* Raw IP packets */
+ MV_IP_PROTO_MAX
+};
+
+typedef struct
+{
+ MV_U8 version;
+ MV_U8 tos;
+ MV_U16 totalLength;
+ MV_U16 identifier;
+ MV_U16 fragmentCtrl;
+ MV_U8 ttl;
+ MV_U8 protocol;
+ MV_U16 checksum;
+ MV_U32 srcIP;
+ MV_U32 dstIP;
+
+} MV_IP_HEADER;
+
+typedef struct
+{
+ MV_U32 spi;
+ MV_U32 seqNum;
+} MV_ESP_HEADER;
+
+#define MV_ICMP_ECHOREPLY 0 /* Echo Reply */
+#define MV_ICMP_DEST_UNREACH 3 /* Destination Unreachable */
+#define MV_ICMP_SOURCE_QUENCH 4 /* Source Quench */
+#define MV_ICMP_REDIRECT 5 /* Redirect (change route) */
+#define MV_ICMP_ECHO 8 /* Echo Request */
+#define MV_ICMP_TIME_EXCEEDED 11 /* Time Exceeded */
+#define MV_ICMP_PARAMETERPROB 12 /* Parameter Problem */
+#define MV_ICMP_TIMESTAMP 13 /* Timestamp Request */
+#define MV_ICMP_TIMESTAMPREPLY 14 /* Timestamp Reply */
+#define MV_ICMP_INFO_REQUEST 15 /* Information Request */
+#define MV_ICMP_INFO_REPLY 16 /* Information Reply */
+#define MV_ICMP_ADDRESS 17 /* Address Mask Request */
+#define MV_ICMP_ADDRESSREPLY 18 /* Address Mask Reply */
+
+typedef struct
+{
+ MV_U8 type;
+ MV_U8 code;
+ MV_U16 checksum;
+ MV_U16 id;
+ MV_U16 sequence;
+
+} MV_ICMP_ECHO_HEADER;
+
+typedef struct
+{
+ MV_U16 source;
+ MV_U16 dest;
+ MV_U32 seq;
+ MV_U32 ack_seq;
+ MV_U16 flags;
+ MV_U16 window;
+ MV_U16 chksum;
+ MV_U16 urg_offset;
+
+} MV_TCP_HEADER;
+
+typedef struct
+{
+ MV_U16 source;
+ MV_U16 dest;
+ MV_U16 len;
+ MV_U16 check;
+
+} MV_UDP_HEADER;
+
+#endif /* __INCmv802_3h */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvCommon.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvCommon.c
new file mode 100644
index 000000000..dc0e0cfc4
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvCommon.c
@@ -0,0 +1,277 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mv802_3.h"
+#include "mvCommon.h"
+
+
+/*******************************************************************************
+* mvMacStrToHex - Convert MAC format string to hex.
+*
+* DESCRIPTION:
+*	This function converts a MAC address string to hex format.
+*
+* INPUT:
+*	macStr - MAC address string. Format of the address string is
+*		 uu:vv:ww:xx:yy:zz, where ":" can be any delimiter.
+*
+* OUTPUT:
+* macHex - MAC in hex format.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_STATUS mvMacStrToHex(const char* macStr, MV_U8* macHex)
+{
+ int i;
+ char tmp[3];
+
+ for(i = 0; i < MV_MAC_ADDR_SIZE; i++)
+ {
+ tmp[0] = macStr[(i * 3) + 0];
+ tmp[1] = macStr[(i * 3) + 1];
+ tmp[2] = '\0';
+ macHex[i] = (MV_U8) (strtol(tmp, NULL, 16));
+ }
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvMacHexToStr - Convert MAC in hex format to string format.
+*
+* DESCRIPTION:
+*	This function converts a MAC address in hex format to string format.
+*
+* INPUT:
+* macHex - MAC in hex format.
+*
+* OUTPUT:
+* macStr - MAC address string. String format is uu:vv:ww:xx:yy:zz.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_STATUS mvMacHexToStr(MV_U8* macHex, char* macStr)
+{
+ int i;
+
+ for(i = 0; i < MV_MAC_ADDR_SIZE; i++)
+ {
+ mvOsSPrintf(&macStr[i * 3], "%02x:", macHex[i]);
+ }
+ macStr[(i * 3) - 1] = '\0';
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvSizePrint - Print the given size with size unit description.
+*
+* DESCRIPTION:
+*	This function prints the given size with a size unit description.
+*	For example, when the size parameter is 0x180000, the function prints:
+*	"size 1MB + 512KB"
+*
+* INPUT:
+* size - Size in bytes.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID mvSizePrint(MV_U32 size)
+{
+ mvOsOutput("size ");
+
+ if(size >= _1G)
+ {
+ mvOsOutput("%3dGB ", size / _1G);
+ size %= _1G;
+ if(size)
+ mvOsOutput("+");
+ }
+ if(size >= _1M )
+ {
+ mvOsOutput("%3dMB ", size / _1M);
+ size %= _1M;
+ if(size)
+ mvOsOutput("+");
+ }
+ if(size >= _1K)
+ {
+ mvOsOutput("%3dKB ", size / _1K);
+ size %= _1K;
+ if(size)
+ mvOsOutput("+");
+ }
+ if(size > 0)
+ {
+ mvOsOutput("%3dB ", size);
+ }
+}
+
+/*******************************************************************************
+* mvHexToBin - Convert hex to binary
+*
+* DESCRIPTION:
+* This function converts a hex string to binary.
+*
+* INPUT:
+* pHexStr - hex buffer pointer.
+* size - Size to convert.
+*
+* OUTPUT:
+* pBin - Binary buffer pointer.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID mvHexToBin(const char* pHexStr, MV_U8* pBin, int size)
+{
+ int j, i;
+ char tmp[3];
+ MV_U8 byte;
+
+ for(j=0, i=0; j<size; j++, i+=2)
+ {
+ tmp[0] = pHexStr[i];
+ tmp[1] = pHexStr[i+1];
+ tmp[2] = '\0';
+ byte = (MV_U8) (strtol(tmp, NULL, 16) & 0xFF);
+ pBin[j] = byte;
+ }
+}
+
+void mvAsciiToHex(const char* asciiStr, char* hexStr)
+{
+ int i=0;
+
+ while(asciiStr[i] != 0)
+ {
+ mvOsSPrintf(&hexStr[i*2], "%02x", asciiStr[i]);
+ i++;
+ }
+ hexStr[i*2] = 0;
+}
+
+
+void mvBinToHex(const MV_U8* bin, char* hexStr, int size)
+{
+ int i;
+
+ for(i=0; i<size; i++)
+ {
+ mvOsSPrintf(&hexStr[i*2], "%02x", bin[i]);
+ }
+ hexStr[i*2] = '\0';
+}
+
+void mvBinToAscii(const MV_U8* bin, char* asciiStr, int size)
+{
+ int i;
+
+ for(i=0; i<size; i++)
+ {
+ mvOsSPrintf(&asciiStr[i*2], "%c", bin[i]);
+ }
+ asciiStr[i*2] = '\0';
+}
+
+/*******************************************************************************
+* mvLog2 - Calculate the integer Log2 of a given number.
+*
+* DESCRIPTION:
+* Calculate the Log2 of a given number.
+*
+* INPUT:
+* num - A number to calculate the Log2 for.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Log 2 of the input number, or 0xFFFFFFFF if input is 0.
+*
+*******************************************************************************/
+MV_U32 mvLog2(MV_U32 num)
+{
+ MV_U32 result = 0;
+ if(num == 0)
+ return 0xFFFFFFFF;
+ while(num != 1)
+ {
+ num = num >> 1;
+ result++;
+ }
+ return result;
+}
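+
+/*******************************************************************************
+* Illustrative usage sketch (editorial addition, not part of the original
+* Marvell HAL sources). Shows the expected output of the size/log helpers
+* above for a few sample values; kept under "#if 0" so it does not affect
+* the build.
+*******************************************************************************/
+#if 0
+static void mvCommonExamples(void)
+{
+    mvSizePrint(0x180000); /* prints "size   1MB +512KB " */
+    mvOsOutput("\n");
+    mvOsOutput("log2(0x4000) = %u\n", mvLog2(0x4000)); /* 14         */
+    mvOsOutput("log2(0)      = 0x%x\n", mvLog2(0));    /* 0xFFFFFFFF */
+}
+#endif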
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvCommon.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvCommon.h
new file mode 100644
index 000000000..c8e9ce100
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvCommon.h
@@ -0,0 +1,308 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+
+#ifndef __INCmvCommonh
+#define __INCmvCommonh
+
+#include "mvTypes.h"
+
+/* Swap tool */
+
+/* 16bit nibble swap. For example 0x1234 -> 0x2143 */
+#define MV_NIBBLE_SWAP_16BIT(X) (((X&0xf) << 4) | \
+ ((X&0xf0) >> 4) | \
+ ((X&0xf00) << 4) | \
+ ((X&0xf000) >> 4))
+
+/* 32bit nibble swap. For example 0x12345678 -> 0x21436587 */
+#define MV_NIBBLE_SWAP_32BIT(X) (((X&0xf) << 4) | \
+ ((X&0xf0) >> 4) | \
+ ((X&0xf00) << 4) | \
+ ((X&0xf000) >> 4) | \
+ ((X&0xf0000) << 4) | \
+ ((X&0xf00000) >> 4) | \
+ ((X&0xf000000) << 4) | \
+ ((X&0xf0000000) >> 4))
+
+/* 16bit byte swap. For example 0x1122 -> 0x2211 */
+#define MV_BYTE_SWAP_16BIT(X) ((((X)&0xff)<<8) | (((X)&0xff00)>>8))
+
+/* 32bit byte swap. For example 0x11223344 -> 0x44332211 */
+#define MV_BYTE_SWAP_32BIT(X) ((((X)&0xff)<<24) | \
+ (((X)&0xff00)<<8) | \
+ (((X)&0xff0000)>>8) | \
+ (((X)&0xff000000)>>24))
+
+/* 64bit byte swap. For example 0x11223344.55667788 -> 0x88776655.44332211 */
+#define MV_BYTE_SWAP_64BIT(X) ((l64) ((((X)&0xffULL)<<56) | \
+ (((X)&0xff00ULL)<<40) | \
+ (((X)&0xff0000ULL)<<24) | \
+ (((X)&0xff000000ULL)<<8) | \
+ (((X)&0xff00000000ULL)>>8) | \
+ (((X)&0xff0000000000ULL)>>24) | \
+ (((X)&0xff000000000000ULL)>>40) | \
+ (((X)&0xff00000000000000ULL)>>56)))
+
+/* Endianness macros. */
+#if defined(MV_CPU_LE)
+ #define MV_16BIT_LE(X) (X)
+ #define MV_32BIT_LE(X) (X)
+ #define MV_64BIT_LE(X) (X)
+ #define MV_16BIT_BE(X) MV_BYTE_SWAP_16BIT(X)
+ #define MV_32BIT_BE(X) MV_BYTE_SWAP_32BIT(X)
+ #define MV_64BIT_BE(X) MV_BYTE_SWAP_64BIT(X)
+#elif defined(MV_CPU_BE)
+ #define MV_16BIT_LE(X) MV_BYTE_SWAP_16BIT(X)
+ #define MV_32BIT_LE(X) MV_BYTE_SWAP_32BIT(X)
+ #define MV_64BIT_LE(X) MV_BYTE_SWAP_64BIT(X)
+ #define MV_16BIT_BE(X) (X)
+ #define MV_32BIT_BE(X) (X)
+ #define MV_64BIT_BE(X) (X)
+#else
+ #error "CPU endianness isn't defined!\n"
+#endif
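+
+/* Illustrative usage note (editorial addition, not part of the original
+ * Marvell HAL sources): the MV_nnBIT_LE/BE macros convert between CPU byte
+ * order and a fixed target byte order, compiling to a no-op when the CPU
+ * already matches. For example, to produce a 32-bit value in little-endian
+ * layout regardless of CPU endianness:
+ *
+ *     MV_U32 leVal = MV_32BIT_LE(cpuVal);
+ */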
+
+
+/* Bit field definitions */
+#define NO_BIT 0x00000000
+#define BIT0 0x00000001
+#define BIT1 0x00000002
+#define BIT2 0x00000004
+#define BIT3 0x00000008
+#define BIT4 0x00000010
+#define BIT5 0x00000020
+#define BIT6 0x00000040
+#define BIT7 0x00000080
+#define BIT8 0x00000100
+#define BIT9 0x00000200
+#define BIT10 0x00000400
+#define BIT11 0x00000800
+#define BIT12 0x00001000
+#define BIT13 0x00002000
+#define BIT14 0x00004000
+#define BIT15 0x00008000
+#define BIT16 0x00010000
+#define BIT17 0x00020000
+#define BIT18 0x00040000
+#define BIT19 0x00080000
+#define BIT20 0x00100000
+#define BIT21 0x00200000
+#define BIT22 0x00400000
+#define BIT23 0x00800000
+#define BIT24 0x01000000
+#define BIT25 0x02000000
+#define BIT26 0x04000000
+#define BIT27 0x08000000
+#define BIT28 0x10000000
+#define BIT29 0x20000000
+#define BIT30 0x40000000
+#define BIT31 0x80000000
+
+/* Handy sizes */
+#define _1K 0x00000400
+#define _2K 0x00000800
+#define _4K 0x00001000
+#define _8K 0x00002000
+#define _16K 0x00004000
+#define _32K 0x00008000
+#define _64K 0x00010000
+#define _128K 0x00020000
+#define _256K 0x00040000
+#define _512K 0x00080000
+
+#define _1M 0x00100000
+#define _2M 0x00200000
+#define _4M 0x00400000
+#define _8M 0x00800000
+#define _16M 0x01000000
+#define _32M 0x02000000
+#define _64M 0x04000000
+#define _128M 0x08000000
+#define _256M 0x10000000
+#define _512M 0x20000000
+
+#define _1G 0x40000000
+#define _2G 0x80000000
+
+/* Tclock and Sys clock define */
+#define _100MHz 100000000
+#define _125MHz 125000000
+#define _133MHz 133333334
+#define _150MHz 150000000
+#define _160MHz 160000000
+#define _166MHz 166666667
+#define _175MHz 175000000
+#define _178MHz 178000000
+#define _183MHz 183333334
+#define _187MHz 187000000
+#define _192MHz 192000000
+#define _194MHz 194000000
+#define _200MHz 200000000
+#define _233MHz 233333334
+#define _250MHz 250000000
+#define _266MHz 266666667
+#define _300MHz 300000000
+
+/* For better address window table readability */
+#define EN MV_TRUE
+#define DIS MV_FALSE
+#define N_A -1 /* Not applicable */
+
+/* Cache configuration options for memory (DRAM, SRAM, ... ) */
+
+/* Memory uncached, HW or SW cache coherency is not needed */
+#define MV_UNCACHED 0
+/* Memory cached, HW cache coherency supported in WriteThrough mode */
+#define MV_CACHE_COHER_HW_WT 1
+/* Memory cached, HW cache coherency supported in WriteBack mode */
+#define MV_CACHE_COHER_HW_WB 2
+/* Memory cached, No HW cache coherency, Cache coherency must be in SW */
+#define MV_CACHE_COHER_SW 3
+
+
+/* Macro for testing alignment. Nonzero if the number is NOT aligned */
+#define MV_IS_NOT_ALIGN(number, align) ((number) & ((align) - 1))
+
+/* Macro for alignment up. For example, MV_ALIGN_UP(0x0330, 0x20) = 0x0340 */
+#define MV_ALIGN_UP(number, align) \
+(((number) & ((align) - 1)) ? (((number) + (align)) & ~((align)-1)) : (number))
+
+/* Macro for alignment down. For example, MV_ALIGN_DOWN(0x0330, 0x20) = 0x0320 */
+#define MV_ALIGN_DOWN(number, align) ((number) & ~((align)-1))
+
+/* This macro returns absolute value */
+#define MV_ABS(number) (((int)(number) < 0) ? -(int)(number) : (int)(number))
+
+
+/* Bit fields manipulation macros */
+
+/* An integer word with bit 'x' set */
+#define MV_BIT_MASK(bitNum) (1 << (bitNum) )
+
+/* Checks whether bit 'x' in an integer word is set */
+#define MV_BIT_CHECK(word, bitNum) ( (word) & MV_BIT_MASK(bitNum) )
+
+/* Clear (reset) bit 'x' in integer word (RMW - Read-Modify-Write) */
+#define MV_BIT_CLEAR(word, bitNum) ( (word) &= ~(MV_BIT_MASK(bitNum)) )
+
+/* Set bit 'x' in integer word (RMW) */
+#define MV_BIT_SET(word, bitNum) ( (word) |= MV_BIT_MASK(bitNum) )
+
+/* Invert bit 'x' in integer word (RMW) */
+#define MV_BIT_INV(word, bitNum) ( (word) ^= MV_BIT_MASK(bitNum) )
+
+/* Get the min between 'a' or 'b' */
+#define MV_MIN(a,b) (((a) < (b)) ? (a) : (b))
+
+/* Get the max between 'a' or 'b' */
+#define MV_MAX(a,b) (((a) < (b)) ? (b) : (a))
+
+/* Temporary */
+#define mvOsDivide(num, div) \
+({ \
+ int i=0, rem=(num); \
+ \
+ while(rem >= (div)) \
+ { \
+ rem -= (div); \
+ i++; \
+ } \
+ (i); \
+})
+
+/* Temporary */
+#define mvOsReminder(num, div) \
+({ \
+ int rem = (num); \
+ \
+ while(rem >= (div)) \
+ rem -= (div); \
+ (rem); \
+})
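+
+/* Illustrative note (editorial addition, not part of the original Marvell
+ * HAL sources): the two "Temporary" macros above implement division and
+ * remainder by repeated subtraction, so they run in O(num/div) time and are
+ * only suitable for small quotients. For example, with integer operands:
+ *
+ *     int q = mvOsDivide(100, 30);    // q = 3
+ *     int r = mvOsReminder(100, 30);  // r = 10
+ */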
+
+#define MV_IP_QUAD(ipAddr) ((ipAddr >> 24) & 0xFF), ((ipAddr >> 16) & 0xFF), \
+ ((ipAddr >> 8) & 0xFF), ((ipAddr >> 0) & 0xFF)
+
+#define MV_IS_POWER_OF_2(num) ((num != 0) && ((num & (num - 1)) == 0))
+
+#ifndef MV_ASMLANGUAGE
+/* mvCommon API list */
+
+MV_VOID mvHexToBin(const char* pHexStr, MV_U8* pBin, int size);
+void mvAsciiToHex(const char* asciiStr, char* hexStr);
+void mvBinToHex(const MV_U8* bin, char* hexStr, int size);
+void mvBinToAscii(const MV_U8* bin, char* asciiStr, int size);
+
+MV_STATUS mvMacStrToHex(const char* macStr, MV_U8* macHex);
+MV_STATUS mvMacHexToStr(MV_U8* macHex, char* macStr);
+void mvSizePrint(MV_U32);
+
+MV_U32 mvLog2(MV_U32 num);
+
+#endif /* MV_ASMLANGUAGE */
+
+
+#endif /* __INCmvCommonh */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvCompVer.txt b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvCompVer.txt
new file mode 100644
index 000000000..38a926440
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvCompVer.txt
@@ -0,0 +1,4 @@
+Global HAL Version: FEROCEON_HAL_3_1_7
+Unit HAL Version: 3.1.4
+Description: This component includes an implementation of the unit HAL drivers
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvDebug.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvDebug.c
new file mode 100644
index 000000000..087f36d32
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvDebug.c
@@ -0,0 +1,326 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+
+/* includes */
+#include "mvOs.h"
+#include "mv802_3.h"
+#include "mvCommon.h"
+#include "mvDebug.h"
+
+/* Global variables that control the MV_DEBUG_PRINT and MV_DEBUG_CODE macros:
+ * mvDebug        - bitmap (one bit per module); bit=1 enables debug
+ *                  code and messages for that module
+ * mvDebugModules - array of 32-bit flag words, one for each module
+ */
+MV_U32 mvDebug = 0;
+MV_U32 mvDebugModules[MV_MODULE_MAX];
+
+/* Init mvModuleDebug array to default values */
+void mvDebugInit(void)
+{
+ int bit;
+
+ mvDebug = 0;
+ for(bit=0; bit<MV_MODULE_MAX; bit++)
+ {
+ mvDebugModules[bit] = MV_DEBUG_FLAG_ERR | MV_DEBUG_FLAG_STATS;
+ mvDebug |= MV_BIT_MASK(bit);
+ }
+}
+
+void mvDebugModuleEnable(MV_MODULE_ID module, MV_BOOL isEnable)
+{
+ if (isEnable)
+ {
+ MV_BIT_SET(mvDebug, module);
+ }
+ else
+ MV_BIT_CLEAR(mvDebug, module);
+}
+
+void mvDebugModuleSetFlags(MV_MODULE_ID module, MV_U32 flags)
+{
+ mvDebugModules[module] |= flags;
+}
+
+void mvDebugModuleClearFlags(MV_MODULE_ID module, MV_U32 flags)
+{
+ mvDebugModules[module] &= ~flags;
+}
+
+/* Dump memory in specific format:
+ * address: X1X1X1X1 X2X2X2X2 ... X8X8X8X8
+ */
+void mvDebugMemDump(void* addr, int size, int access)
+{
+ int i, j;
+ MV_U32 memAddr = (MV_U32)addr;
+
+ if(access == 0)
+ access = 1;
+
+ if( (access != 4) && (access != 2) && (access != 1) )
+ {
+ mvOsPrintf("%d wrong access size. Access must be 1 or 2 or 4\n",
+ access);
+ return;
+ }
+ memAddr = MV_ALIGN_DOWN( (unsigned int)addr, 4);
+ size = MV_ALIGN_UP(size, 4);
+ addr = (void*)MV_ALIGN_DOWN( (unsigned int)addr, access);
+ while(size > 0)
+ {
+ mvOsPrintf("%08x: ", memAddr);
+ i = 0;
+ /* 32 bytes in the line */
+ while(i < 32)
+ {
+ if(memAddr >= (MV_U32)addr)
+ {
+ switch(access)
+ {
+ case 1:
+ if( memAddr == CPU_PHY_MEM(memAddr) )
+ {
+ mvOsPrintf("%02x ", MV_MEMIO8_READ(memAddr));
+ }
+ else
+ {
+ mvOsPrintf("%02x ", *((MV_U8*)memAddr));
+ }
+ break;
+
+ case 2:
+ if( memAddr == CPU_PHY_MEM(memAddr) )
+ {
+ mvOsPrintf("%04x ", MV_MEMIO16_READ(memAddr));
+ }
+ else
+ {
+ mvOsPrintf("%04x ", *((MV_U16*)memAddr));
+ }
+ break;
+
+ case 4:
+ if( memAddr == CPU_PHY_MEM(memAddr) )
+ {
+ mvOsPrintf("%08x ", MV_MEMIO32_READ(memAddr));
+ }
+ else
+ {
+ mvOsPrintf("%08x ", *((MV_U32*)memAddr));
+ }
+ break;
+ }
+ }
+ else
+ {
+ for(j=0; j<(access*2+1); j++)
+ mvOsPrintf(" ");
+ }
+ i += access;
+ memAddr += access;
+ size -= access;
+ if(size <= 0)
+ break;
+ }
+ mvOsPrintf("\n");
+ }
+}
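+
+/* Illustrative usage sketch (editorial addition, not part of the original
+ * Marvell HAL sources); kept under "#if 0" so it does not affect the build.
+ */
+#if 0
+static void mvDebugMemDumpExample(void)
+{
+    MV_U8 buf[64];
+
+    memset(buf, 0xAB, sizeof(buf));
+    /* Dump 64 bytes as 32-bit words, 32 bytes per output line */
+    mvDebugMemDump(buf, sizeof(buf), 4);
+}
+#endif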
+
+void mvDebugPrintBufInfo(BUF_INFO* pBufInfo, int size, int access)
+{
+ if(pBufInfo == NULL)
+ {
+ mvOsPrintf("\n!!! pBufInfo = NULL\n");
+ return;
+ }
+ mvOsPrintf("\n*** pBufInfo=0x%x, cmdSts=0x%08x, pBuf=0x%x, bufSize=%d\n",
+ (unsigned int)pBufInfo,
+ (unsigned int)pBufInfo->cmdSts,
+ (unsigned int)pBufInfo->pBuff,
+ (unsigned int)pBufInfo->bufSize);
+ mvOsPrintf("pData=0x%x, byteCnt=%d, pNext=0x%x, uInfo1=0x%x, uInfo2=0x%x\n",
+ (unsigned int)pBufInfo->pData,
+ (unsigned int)pBufInfo->byteCnt,
+ (unsigned int)pBufInfo->pNextBufInfo,
+ (unsigned int)pBufInfo->userInfo1,
+ (unsigned int)pBufInfo->userInfo2);
+ if(pBufInfo->pData != NULL)
+ {
+ if(size > pBufInfo->byteCnt)
+ size = pBufInfo->byteCnt;
+ mvDebugMemDump(pBufInfo->pData, size, access);
+ }
+}
+
+void mvDebugPrintPktInfo(MV_PKT_INFO* pPktInfo, int size, int access)
+{
+ int frag, len;
+
+ if(pPktInfo == NULL)
+ {
+ mvOsPrintf("\n!!! pPktInfo = NULL\n");
+ return;
+ }
+ mvOsPrintf("\npPkt=%p, stat=0x%08x, numFr=%d, size=%d, pFr=%p, osInfo=0x%lx\n",
+ pPktInfo, pPktInfo->status, pPktInfo->numFrags, pPktInfo->pktSize,
+ pPktInfo->pFrags, pPktInfo->osInfo);
+
+ for(frag=0; frag<pPktInfo->numFrags; frag++)
+ {
+ mvOsPrintf("#%2d. bufVirt=%p, bufSize=%d\n",
+ frag, pPktInfo->pFrags[frag].bufVirtPtr,
+ pPktInfo->pFrags[frag].bufSize);
+ if(size > 0)
+ {
+ len = MV_MIN((int)pPktInfo->pFrags[frag].bufSize, size);
+ mvDebugMemDump(pPktInfo->pFrags[frag].bufVirtPtr, len, access);
+ size -= len;
+ }
+ }
+
+}
+
+void mvDebugPrintIpAddr(MV_U32 ipAddr)
+{
+ mvOsPrintf("%d.%d.%d.%d", ((ipAddr >> 24) & 0xFF), ((ipAddr >> 16) & 0xFF),
+ ((ipAddr >> 8) & 0xFF), ((ipAddr >> 0) & 0xFF));
+}
+
+void mvDebugPrintMacAddr(const MV_U8* pMacAddr)
+{
+ int i;
+
+ mvOsPrintf("%02x", (unsigned int)pMacAddr[0]);
+ for(i=1; i<MV_MAC_ADDR_SIZE; i++)
+ {
+ mvOsPrintf(":%02x", pMacAddr[i]);
+ }
+ /* mvOsPrintf("\n");*/
+}
+
+
+/******* There are three functions that deal with the MV_DEBUG_TIMES structure ********/
+
+/* Reset MV_DEBUG_TIMES entry */
+void mvDebugResetTimeEntry(MV_DEBUG_TIMES* pTimeEntry, int count, char* pName)
+{
+ pTimeEntry->begin = 0;
+ pTimeEntry->count = count;
+ pTimeEntry->end = 0;
+ pTimeEntry->left = pTimeEntry->count;
+ pTimeEntry->total = 0;
+ pTimeEntry->min = 0xFFFFFFFF;
+ pTimeEntry->max = 0x0;
+ strncpy(pTimeEntry->name, pName, sizeof(pTimeEntry->name)-1);
+ pTimeEntry->name[sizeof(pTimeEntry->name)-1] = '\0';
+}
+
+/* Print out MV_DEBUG_TIMES entry */
+void mvDebugPrintTimeEntry(MV_DEBUG_TIMES* pTimeEntry, MV_BOOL isTitle)
+{
+ int num;
+
+ if(isTitle == MV_TRUE)
+ mvOsPrintf("Event NumOfEvents TotalTime Average Min Max\n");
+
+ num = pTimeEntry->count-pTimeEntry->left;
+ if(num > 0)
+ {
+ mvOsPrintf("%-11s %6u 0x%08lx %6lu %6lu %6lu\n",
+ pTimeEntry->name, num, pTimeEntry->total, pTimeEntry->total/num,
+ pTimeEntry->min, pTimeEntry->max);
+ }
+}
+
+/* Update MV_DEBUG_TIMES entry */
+void mvDebugUpdateTimeEntry(MV_DEBUG_TIMES* pTimeEntry)
+{
+ MV_U32 delta;
+
+ if(pTimeEntry->left > 0)
+ {
+ if(pTimeEntry->end <= pTimeEntry->begin)
+ {
+ delta = pTimeEntry->begin - pTimeEntry->end;
+ }
+ else
+ {
+ delta = ((MV_U32)0x10000 - pTimeEntry->end) + pTimeEntry->begin;
+ }
+ pTimeEntry->total += delta;
+
+ if(delta < pTimeEntry->min)
+ pTimeEntry->min = delta;
+
+ if(delta > pTimeEntry->max)
+ pTimeEntry->max = delta;
+
+ pTimeEntry->left--;
+ }
+}
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvDebug.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvDebug.h
new file mode 100644
index 000000000..e4975bed5
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvDebug.h
@@ -0,0 +1,178 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+
+#ifndef __INCmvDebugh
+#define __INCmvDebugh
+
+/* includes */
+#include "mvTypes.h"
+
+typedef enum
+{
+ MV_MODULE_INVALID = -1,
+ MV_MODULE_ETH = 0,
+ MV_MODULE_IDMA,
+ MV_MODULE_XOR,
+ MV_MODULE_TWASI,
+ MV_MODULE_MGI,
+ MV_MODULE_USB,
+ MV_MODULE_CESA,
+
+ MV_MODULE_MAX
+}MV_MODULE_ID;
+
+/* Define generic flags useful for most of modules */
+#define MV_DEBUG_FLAG_ALL (0)
+#define MV_DEBUG_FLAG_INIT (1 << 0)
+#define MV_DEBUG_FLAG_RX (1 << 1)
+#define MV_DEBUG_FLAG_TX (1 << 2)
+#define MV_DEBUG_FLAG_ERR (1 << 3)
+#define MV_DEBUG_FLAG_TRACE (1 << 4)
+#define MV_DEBUG_FLAG_DUMP (1 << 5)
+#define MV_DEBUG_FLAG_CACHE (1 << 6)
+#define MV_DEBUG_FLAG_IOCTL (1 << 7)
+#define MV_DEBUG_FLAG_STATS (1 << 8)
+
+extern MV_U32 mvDebug;
+extern MV_U32 mvDebugModules[MV_MODULE_MAX];
+
+#ifdef MV_DEBUG
+# define MV_DEBUG_PRINT(module, flags, msg) mvOsPrintf msg
+# define MV_DEBUG_CODE(module, flags, code) code
+#elif defined(MV_RT_DEBUG)
+# define MV_DEBUG_PRINT(module, flags, msg) \
+ if( (mvDebug & (1<<(module))) && \
+ ((mvDebugModules[(module)] & (flags)) == (flags)) ) \
+ mvOsPrintf msg
+# define MV_DEBUG_CODE(module, flags, code) \
+ if( (mvDebug & (1<<(module))) && \
+ ((mvDebugModules[(module)] & (flags)) == (flags)) ) \
+ code
+#else
+# define MV_DEBUG_PRINT(module, flags, msg)
+# define MV_DEBUG_CODE(module, flags, code)
+#endif
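+
+/* Illustrative usage note (editorial addition, not part of the original
+ * Marvell HAL sources): because MV_DEBUG_PRINT() expands to "mvOsPrintf msg",
+ * the message argument must carry its own parentheses, e.g.:
+ *
+ *     MV_DEBUG_PRINT(MV_MODULE_CESA, MV_DEBUG_FLAG_RX,
+ *                    ("cesa: rx len=%d\n", len));
+ *
+ * MV_DEBUG_CODE() wraps a statement the same way, e.g.:
+ *
+ *     MV_DEBUG_CODE(MV_MODULE_CESA, MV_DEBUG_FLAG_STATS, stats.rx++;);
+ */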
+
+
+
+/* typedefs */
+
+/* time measurement structure used to check how much time passes between
+ * two points
+ */
+typedef struct {
+ char name[20]; /* name of the entry */
+ unsigned long begin; /* time measured on begin point */
+ unsigned long end; /* time measured on end point */
+ unsigned long total; /* Accumulated time */
+ unsigned long left; /* Remaining measurement actions */
+ unsigned long count; /* Maximum measurement actions */
+ unsigned long min; /* Minimum time from begin to end */
+ unsigned long max; /* Maximum time from begin to end */
+} MV_DEBUG_TIMES;
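+
+/* Illustrative usage sketch (editorial addition, not part of the original
+ * Marvell HAL sources). Typical measurement flow, assuming a hypothetical
+ * readTimer() that returns the value of a down-counting 16-bit timer (the
+ * wrap handling in mvDebugUpdateTimeEntry() assumes such a timer):
+ *
+ *     MV_DEBUG_TIMES t;
+ *
+ *     mvDebugResetTimeEntry(&t, 100, "cesaReq");
+ *     // per measured event:
+ *     t.begin = readTimer();
+ *     // ... code being measured ...
+ *     t.end = readTimer();
+ *     mvDebugUpdateTimeEntry(&t);
+ *     // after all events:
+ *     mvDebugPrintTimeEntry(&t, MV_TRUE);
+ */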
+
+
+/* mvDebug.h API list */
+
+/****** Error Recording ******/
+
+/* Dump memory in specific format:
+ * address: X1X1X1X1 X2X2X2X2 ... X8X8X8X8
+ */
+void mvDebugMemDump(void* addr, int size, int access);
+
+void mvDebugPrintBufInfo(BUF_INFO* pBufInfo, int size, int access);
+
+void mvDebugPrintPktInfo(MV_PKT_INFO* pPktInfo, int size, int access);
+
+void mvDebugPrintIpAddr(MV_U32 ipAddr);
+
+void mvDebugPrintMacAddr(const MV_U8* pMacAddr);
+
+/**** There are three functions that deal with the MV_DEBUG_TIMES structure ****/
+
+/* Reset MV_DEBUG_TIMES entry */
+void mvDebugResetTimeEntry(MV_DEBUG_TIMES* pTimeEntry, int count, char* name);
+
+/* Update MV_DEBUG_TIMES entry */
+void mvDebugUpdateTimeEntry(MV_DEBUG_TIMES* pTimeEntry);
+
+/* Print out MV_DEBUG_TIMES entry */
+void mvDebugPrintTimeEntry(MV_DEBUG_TIMES* pTimeEntry, MV_BOOL isTitle);
+
+
+/******** General ***********/
+
+/* Control the mvDebug / mvDebugModules global variables */
+
+void mvDebugInit(void);
+void mvDebugModuleEnable(MV_MODULE_ID module, MV_BOOL isEnable);
+void mvDebugModuleSetFlags(MV_MODULE_ID module, MV_U32 flags);
+void mvDebugModuleClearFlags(MV_MODULE_ID module, MV_U32 flags);
+
+
+#endif /* __INCmvDebugh */
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvDeviceId.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvDeviceId.h
new file mode 100644
index 000000000..478209407
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvDeviceId.h
@@ -0,0 +1,225 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDeviceIdh
+#define __INCmvDeviceIdh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* defines */
+#define MARVELL_VEN_ID 0x11ab
+
+/* Disco-3 */
+#define MV64460_DEV_ID 0x6480
+#define MV64460B_DEV_ID 0x6485
+#define MV64430_DEV_ID 0x6420
+
+/* Disco-5 */
+#define MV64560_DEV_ID 0x6450
+
+/* Disco-6 */
+#define MV64660_DEV_ID 0x6460
+
+/* Orion */
+#define MV_1181_DEV_ID 0x1181
+#define MV_5181_DEV_ID 0x5181
+#define MV_5281_DEV_ID 0x5281
+#define MV_5182_DEV_ID 0x5182
+#define MV_8660_DEV_ID 0x8660
+#define MV_5180_DEV_ID 0x5180
+#define MV_5082_DEV_ID 0x5082
+#define MV_1281_DEV_ID 0x1281
+#define MV_6082_DEV_ID 0x6082
+#define MV_6183_DEV_ID 0x6183
+#define MV_6183L_DEV_ID 0x6083
+
+#define MV_5281_D0_REV 0x4
+#define MV_5281_D0_ID ((MV_5281_DEV_ID << 16) | MV_5281_D0_REV)
+#define MV_5281_D0_NAME "88F5281 D0"
+
+#define MV_5281_D1_REV 0x5
+#define MV_5281_D1_ID ((MV_5281_DEV_ID << 16) | MV_5281_D1_REV)
+#define MV_5281_D1_NAME "88F5281 D1"
+
+#define MV_5281_D2_REV 0x6
+#define MV_5281_D2_ID ((MV_5281_DEV_ID << 16) | MV_5281_D2_REV)
+#define MV_5281_D2_NAME "88F5281 D2"
+
+
+#define MV_5181L_A0_REV 0x8 /* needed for PCIe erratum */
+#define MV_5181_A1_REV 0x1 /* for USB erratum ..*/
+#define MV_5181_B0_REV 0x2
+#define MV_5181_B1_REV 0x3
+#define MV_5182_A1_REV 0x1
+#define MV_5180N_B1_REV 0x3
+#define MV_5181L_A0_ID ((MV_5181_DEV_ID << 16) | MV_5181L_A0_REV)
+
+
+
+/* kw */
+#define MV_6281_DEV_ID 0x6281
+#define MV_6192_DEV_ID 0x6192
+#define MV_6190_DEV_ID 0x6190
+#define MV_6180_DEV_ID 0x6180
+
+#define MV_6281_A0_REV 0x2
+#define MV_6281_A0_ID ((MV_6281_DEV_ID << 16) | MV_6281_A0_REV)
+#define MV_6281_A0_NAME "88F6281 A0"
+
+#define MV_6192_A0_REV 0x2
+#define MV_6192_A0_ID ((MV_6192_DEV_ID << 16) | MV_6192_A0_REV)
+#define MV_6192_A0_NAME "88F6192 A0"
+
+#define MV_6190_A0_REV 0x2
+#define MV_6190_A0_ID ((MV_6190_DEV_ID << 16) | MV_6190_A0_REV)
+#define MV_6190_A0_NAME "88F6190 A0"
+
+#define MV_6180_A0_REV 0x2
+#define MV_6180_A0_ID ((MV_6180_DEV_ID << 16) | MV_6180_A0_REV)
+#define MV_6180_A0_NAME "88F6180 A0"
+
+#define MV_6281_A1_REV 0x3
+#define MV_6281_A1_ID ((MV_6281_DEV_ID << 16) | MV_6281_A1_REV)
+#define MV_6281_A1_NAME "88F6281 A1"
+
+#define MV_6192_A1_REV 0x3
+#define MV_6192_A1_ID ((MV_6192_DEV_ID << 16) | MV_6192_A1_REV)
+#define MV_6192_A1_NAME "88F6192 A1"
+
+#define MV_6190_A1_REV 0x3
+#define MV_6190_A1_ID ((MV_6190_DEV_ID << 16) | MV_6190_A1_REV)
+#define MV_6190_A1_NAME "88F6190 A1"
+
+#define MV_6180_A1_REV 0x3
+#define MV_6180_A1_ID ((MV_6180_DEV_ID << 16) | MV_6180_A1_REV)
+#define MV_6180_A1_NAME "88F6180 A1"
+
+#define MV_88F6XXX_A0_REV 0x2
+#define MV_88F6XXX_A1_REV 0x3
+/* Disco-Duo */
+#define MV_78XX0_ZY_DEV_ID 0x6381
+#define MV_78XX0_ZY_NAME "MV78X00"
+
+#define MV_78XX0_Z0_REV 0x1
+#define MV_78XX0_Z0_ID ((MV_78XX0_ZY_DEV_ID << 16) | MV_78XX0_Z0_REV)
+#define MV_78XX0_Z0_NAME "78X00 Z0"
+
+#define MV_78XX0_Y0_REV 0x2
+#define MV_78XX0_Y0_ID ((MV_78XX0_ZY_DEV_ID << 16) | MV_78XX0_Y0_REV)
+#define MV_78XX0_Y0_NAME "78X00 Y0"
+
+#define MV_78XX0_DEV_ID 0x7800
+#define MV_78XX0_NAME "MV78X00"
+
+#define MV_76100_DEV_ID 0x7610
+#define MV_78200_DEV_ID 0x7820
+#define MV_78100_DEV_ID 0x7810
+#define MV_78XX0_A0_REV 0x1
+#define MV_78XX0_A1_REV 0x2
+
+#define MV_76100_NAME "MV76100"
+#define MV_78100_NAME "MV78100"
+#define MV_78200_NAME "MV78200"
+
+#define MV_76100_A0_ID ((MV_76100_DEV_ID << 16) | MV_78XX0_A0_REV)
+#define MV_78100_A0_ID ((MV_78100_DEV_ID << 16) | MV_78XX0_A0_REV)
+#define MV_78200_A0_ID ((MV_78200_DEV_ID << 16) | MV_78XX0_A0_REV)
+
+#define MV_76100_A1_ID ((MV_76100_DEV_ID << 16) | MV_78XX0_A1_REV)
+#define MV_78100_A1_ID ((MV_78100_DEV_ID << 16) | MV_78XX0_A1_REV)
+#define MV_78200_A1_ID ((MV_78200_DEV_ID << 16) | MV_78XX0_A1_REV)
+
+#define MV_76100_A0_NAME "MV76100 A0"
+#define MV_78100_A0_NAME "MV78100 A0"
+#define MV_78200_A0_NAME "MV78200 A0"
+#define MV_78XX0_A0_NAME "MV78XX0 A0"
+
+#define MV_76100_A1_NAME "MV76100 A1"
+#define MV_78100_A1_NAME "MV78100 A1"
+#define MV_78200_A1_NAME "MV78200 A1"
+#define MV_78XX0_A1_NAME "MV78XX0 A1"
+
+/*MV88F632X family*/
+#define MV_6321_DEV_ID 0x6321
+#define MV_6322_DEV_ID 0x6322
+#define MV_6323_DEV_ID 0x6323
+
+#define MV_6321_NAME "88F6321"
+#define MV_6322_NAME "88F6322"
+#define MV_6323_NAME "88F6323"
+
+#define MV_632X_A1_REV 0x2
+
+#define MV_6321_A1_ID ((MV_6321_DEV_ID << 16) | MV_632X_A1_REV)
+#define MV_6322_A1_ID ((MV_6322_DEV_ID << 16) | MV_632X_A1_REV)
+#define MV_6323_A1_ID ((MV_6323_DEV_ID << 16) | MV_632X_A1_REV)
+
+#define MV_6321_A1_NAME "88F6321 A1"
+#define MV_6322_A1_NAME "88F6322 A1"
+#define MV_6323_A1_NAME "88F6323 A1"
+
+
+#endif /* __INCmvDeviceIdh */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvHalVer.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvHalVer.h
new file mode 100644
index 000000000..3bfcfe19e
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvHalVer.h
@@ -0,0 +1,73 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvHalVerh
+#define __INCmvHalVerh
+
+/* Defines */
+#define MV_HAL_VERSION "FEROCEON_HAL_3_1_7"
+#define MV_RELEASE_BASELINE "SoCandControllers_FEROCEON_RELEASE_7_9_2009_KW_4_3_4_DD_2_1_4_6183_1_1_4"
+
+#endif /* __INCmvHalVerh */
\ No newline at end of file
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvStack.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvStack.c
new file mode 100644
index 000000000..41ca7ceba
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvStack.c
@@ -0,0 +1,100 @@
+/*******************************************************************************
+* Copyright 2003, Marvell Semiconductor Israel LTD. *
+* THIS CODE CONTAINS CONFIDENTIAL INFORMATION OF MARVELL. *
+* NO RIGHTS ARE GRANTED HEREIN UNDER ANY PATENT, MASK WORK RIGHT OR COPYRIGHT *
+* OF MARVELL OR ANY THIRD PARTY. MARVELL RESERVES THE RIGHT AT ITS SOLE *
+* DISCRETION TO REQUEST THAT THIS CODE BE IMMEDIATELY RETURNED TO MARVELL. *
+* THIS CODE IS PROVIDED "AS IS". MARVELL MAKES NO WARRANTIES, EXPRESSED, *
+* IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, COMPLETENESS OR PERFORMANCE. *
+* *
+* MARVELL COMPRISES MARVELL TECHNOLOGY GROUP LTD. (MTGL) AND ITS SUBSIDIARIES, *
+* MARVELL INTERNATIONAL LTD. (MIL), MARVELL TECHNOLOGY, INC. (MTI), MARVELL *
+* SEMICONDUCTOR, INC. (MSI), MARVELL ASIA PTE LTD. (MAPL), MARVELL JAPAN K.K. *
+* (MJKK), MARVELL SEMICONDUCTOR ISRAEL LTD (MSIL). *
+********************************************************************************
+* mvStack.c
+*
+* FILENAME: $Workfile: mvStack.c $
+* REVISION: $Revision: 1.1 $
+* LAST UPDATE: $Modtime: $
+*
+* DESCRIPTION:
+* This file implements simple Stack LIFO functionality.
+*******************************************************************************/
+
+/* includes */
+#include "mvOs.h"
+#include "mvTypes.h"
+#include "mvDebug.h"
+#include "mvStack.h"
+
+/* defines */
+
+
+/* Public functions */
+
+
+/* Purpose: Create new stack
+ * Inputs:
+ * - MV_U32 numOfElements - maximum number of elements in the stack.
+ * Each element is 4 bytes.
+ * Return: void* - pointer to created stack.
+ */
+void* mvStackCreate(int numOfElements)
+{
+ MV_STACK* pStack;
+ MV_U32* pStackElements;
+
+ pStack = (MV_STACK*)mvOsMalloc(sizeof(MV_STACK));
+ pStackElements = (MV_U32*)mvOsMalloc(numOfElements*sizeof(MV_U32));
+ if( (pStack == NULL) || (pStackElements == NULL) )
+ {
+ mvOsPrintf("mvStack: Can't create new stack\n");
+ return NULL;
+ }
+ memset(pStackElements, 0, numOfElements*sizeof(MV_U32));
+ pStack->numOfElements = numOfElements;
+ pStack->stackIdx = 0;
+ pStack->stackElements = pStackElements;
+
+ return pStack;
+}
+
+/* Purpose: Delete existing stack
+ * Inputs:
+ * - void* stackHndl - Stack handle as returned by "mvStackCreate()" function
+ *
+ * Return: MV_STATUS MV_NOT_FOUND - Failure. StackHandle is not valid.
+ * MV_OK - Success.
+ */
+MV_STATUS mvStackDelete(void* stackHndl)
+{
+ MV_STACK* pStack = (MV_STACK*)stackHndl;
+
+ if( (pStack == NULL) || (pStack->stackElements == NULL) )
+ return MV_NOT_FOUND;
+
+ mvOsFree(pStack->stackElements);
+ mvOsFree(pStack);
+
+ return MV_OK;
+}
+
+
+/* Print out the status of the stack */
+void mvStackStatus(void* stackHndl, MV_BOOL isPrintElements)
+{
+ int i;
+ MV_STACK* pStack = (MV_STACK*)stackHndl;
+
+ mvOsPrintf("StackHandle=%p, pElements=%p, numElements=%d, stackIdx=%d\n",
+ stackHndl, pStack->stackElements, pStack->numOfElements,
+ pStack->stackIdx);
+ if(isPrintElements == MV_TRUE)
+ {
+ for(i=0; i<pStack->stackIdx; i++)
+ {
+ mvOsPrintf("%3d. Value=0x%x\n", i, pStack->stackElements[i]);
+ }
+ }
+}
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvStack.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvStack.h
new file mode 100644
index 000000000..e247e61c2
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvStack.h
@@ -0,0 +1,140 @@
+/*******************************************************************************
+* Copyright 2003, Marvell Semiconductor Israel LTD. *
+* THIS CODE CONTAINS CONFIDENTIAL INFORMATION OF MARVELL. *
+* NO RIGHTS ARE GRANTED HEREIN UNDER ANY PATENT, MASK WORK RIGHT OR COPYRIGHT *
+* OF MARVELL OR ANY THIRD PARTY. MARVELL RESERVES THE RIGHT AT ITS SOLE *
+* DISCRETION TO REQUEST THAT THIS CODE BE IMMEDIATELY RETURNED TO MARVELL. *
+* THIS CODE IS PROVIDED "AS IS". MARVELL MAKES NO WARRANTIES, EXPRESSED, *
+* IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, COMPLETENESS OR PERFORMANCE. *
+* *
+* MARVELL COMPRISES MARVELL TECHNOLOGY GROUP LTD. (MTGL) AND ITS SUBSIDIARIES, *
+* MARVELL INTERNATIONAL LTD. (MIL), MARVELL TECHNOLOGY, INC. (MTI), MARVELL *
+* SEMICONDUCTOR, INC. (MSI), MARVELL ASIA PTE LTD. (MAPL), MARVELL JAPAN K.K. *
+* (MJKK), MARVELL SEMICONDUCTOR ISRAEL LTD (MSIL). *
+********************************************************************************
+* mvStack.h - Header File for the simple Stack (LIFO) implementation
+*
+* FILENAME: $Workfile: mvStack.h $
+* REVISION: $Revision: 1.1 $
+* LAST UPDATE: $Modtime: $
+*
+* DESCRIPTION:
+* This file defines simple Stack (LIFO) functionality.
+*
+*******************************************************************************/
+
+#ifndef __mvStack_h__
+#define __mvStack_h__
+
+
+/* includes */
+#include "mvTypes.h"
+
+
+/* defines */
+
+
+/* typedefs */
+/* Data structure describes general purpose Stack */
+typedef struct
+{
+ int stackIdx;
+ int numOfElements;
+ MV_U32* stackElements;
+} MV_STACK;
+
+static INLINE MV_BOOL mvStackIsFull(void* stackHndl)
+{
+ MV_STACK* pStack = (MV_STACK*)stackHndl;
+
+ if(pStack->stackIdx == pStack->numOfElements)
+ return MV_TRUE;
+
+ return MV_FALSE;
+}
+
+static INLINE MV_BOOL mvStackIsEmpty(void* stackHndl)
+{
+ MV_STACK* pStack = (MV_STACK*)stackHndl;
+
+ if(pStack->stackIdx == 0)
+ return MV_TRUE;
+
+ return MV_FALSE;
+}
+/* Purpose: Push new element to stack
+ * Inputs:
+ * - void* stackHndl - Stack handle as returned by "mvStackCreate()" function.
+ * - MV_U32 value - New element.
+ *
+ * Return: None. In MV_RT_DEBUG builds an error is printed and the push is
+ * dropped if the stack is already full.
+ */
+static INLINE void mvStackPush(void* stackHndl, MV_U32 value)
+{
+ MV_STACK* pStack = (MV_STACK*)stackHndl;
+
+#ifdef MV_RT_DEBUG
+ if(pStack->stackIdx == pStack->numOfElements)
+ {
+ mvOsPrintf("mvStackPush: Stack is FULL\n");
+ return;
+ }
+#endif /* MV_RT_DEBUG */
+
+ pStack->stackElements[pStack->stackIdx] = value;
+ pStack->stackIdx++;
+}
+
+/* Purpose: Pop the element from the top of the stack and return it
+ * Inputs:
+ * - void* stackHndl - Stack handle as returned by "mvStackCreate()" function.
+ *
+ * Return: MV_U32 - Element removed from the top of the stack. In MV_RT_DEBUG
+ * builds 0 is returned if the stack is empty.
+ */
+static INLINE MV_U32 mvStackPop(void* stackHndl)
+{
+ MV_STACK* pStack = (MV_STACK*)stackHndl;
+
+#ifdef MV_RT_DEBUG
+ if(pStack->stackIdx == 0)
+ {
+ mvOsPrintf("mvStackPop: Stack is EMPTY\n");
+ return 0;
+ }
+#endif /* MV_RT_DEBUG */
+
+ pStack->stackIdx--;
+ return pStack->stackElements[pStack->stackIdx];
+}
+
+static INLINE int mvStackIndex(void* stackHndl)
+{
+ MV_STACK* pStack = (MV_STACK*)stackHndl;
+
+ return pStack->stackIdx;
+}
+
+static INLINE int mvStackFreeElements(void* stackHndl)
+{
+ MV_STACK* pStack = (MV_STACK*)stackHndl;
+
+ return (pStack->numOfElements - pStack->stackIdx);
+}
+
+/* mvStack.h API list */
+
+/* Create new Stack */
+void* mvStackCreate(int numOfElements);
+
+/* Delete existing stack */
+MV_STATUS mvStackDelete(void* stackHndl);
+
+/* Print status of the stack */
+void mvStackStatus(void* stackHndl, MV_BOOL isPrintElements);
+
+#endif /* __mvStack_h__ */
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvTypes.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvTypes.h
new file mode 100644
index 000000000..de212a141
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/common/mvTypes.h
@@ -0,0 +1,245 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvTypesh
+#define __INCmvTypesh
+
+/* Defines */
+
+/* The following is a list of Marvell status */
+#define MV_ERROR (-1)
+#define MV_OK (0x00) /* Operation succeeded */
+#define MV_FAIL (0x01) /* Operation failed */
+#define MV_BAD_VALUE (0x02) /* Illegal value (general) */
+#define MV_OUT_OF_RANGE (0x03) /* The value is out of range */
+#define MV_BAD_PARAM (0x04) /* Illegal parameter in function called */
+#define MV_BAD_PTR (0x05) /* Illegal pointer value */
+#define MV_BAD_SIZE (0x06) /* Illegal size */
+#define MV_BAD_STATE (0x07) /* Illegal state of state machine */
+#define MV_SET_ERROR (0x08) /* Set operation failed */
+#define MV_GET_ERROR (0x09) /* Get operation failed */
+#define MV_CREATE_ERROR (0x0A) /* Fail while creating an item */
+#define MV_NOT_FOUND (0x0B) /* Item not found */
+#define MV_NO_MORE (0x0C) /* No more items found */
+#define MV_NO_SUCH (0x0D) /* No such item */
+#define MV_TIMEOUT (0x0E) /* Time Out */
+#define MV_NO_CHANGE (0x0F) /* Parameter(s) already has this value */
+#define MV_NOT_SUPPORTED (0x10) /* This request is not supported */
+#define MV_NOT_IMPLEMENTED (0x11) /* Request supported but not implemented */
+#define MV_NOT_INITIALIZED (0x12) /* The item is not initialized */
+#define MV_NO_RESOURCE (0x13) /* Resource not available (memory ...) */
+#define MV_FULL (0x14) /* Item is full (Queue or table etc...) */
+#define MV_EMPTY (0x15) /* Item is empty (Queue or table etc...) */
+#define MV_INIT_ERROR (0x16) /* Error occurred during INIT process */
+#define MV_HW_ERROR (0x17) /* Hardware error */
+#define MV_TX_ERROR (0x18) /* Transmit operation did not succeed */
+#define MV_RX_ERROR (0x19) /* Receive operation did not succeed */
+#define MV_NOT_READY (0x1A) /* The other side is not ready yet */
+#define MV_ALREADY_EXIST (0x1B) /* Tried to create existing item */
+#define MV_OUT_OF_CPU_MEM (0x1C) /* Cpu memory allocation failed. */
+#define MV_NOT_STARTED (0x1D) /* Not started yet */
+#define MV_BUSY (0x1E) /* Item is busy. */
+#define MV_TERMINATE (0x1F) /* Item terminates its work. */
+#define MV_NOT_ALIGNED (0x20) /* Wrong alignment */
+#define MV_NOT_ALLOWED (0x21) /* Operation NOT allowed */
+#define MV_WRITE_PROTECT (0x22) /* Write protected */
+
+
+#define MV_INVALID (int)(-1)
+
+#define MV_FALSE 0
+#define MV_TRUE (!(MV_FALSE))
+
+
+#ifndef NULL
+#define NULL ((void*)0)
+#endif
+
+
+#ifndef MV_ASMLANGUAGE
+/* typedefs */
+
+typedef char MV_8;
+typedef unsigned char MV_U8;
+
+typedef int MV_32;
+typedef unsigned int MV_U32;
+
+typedef short MV_16;
+typedef unsigned short MV_U16;
+
+#ifdef MV_PPC64
+typedef long MV_64;
+typedef unsigned long MV_U64;
+#else
+typedef long long MV_64;
+typedef unsigned long long MV_U64;
+#endif
+
+typedef long MV_LONG; /* 32/64 */
+typedef unsigned long MV_ULONG; /* 32/64 */
+
+typedef int MV_STATUS;
+typedef int MV_BOOL;
+typedef void MV_VOID;
+typedef float MV_FLOAT;
+
+typedef int (*MV_FUNCPTR) (void); /* ptr to function returning int */
+typedef void (*MV_VOIDFUNCPTR) (void); /* ptr to function returning void */
+typedef double (*MV_DBLFUNCPTR) (void); /* ptr to function returning double*/
+typedef float (*MV_FLTFUNCPTR) (void); /* ptr to function returning float */
+
+typedef MV_U32 MV_KHZ;
+typedef MV_U32 MV_MHZ;
+typedef MV_U32 MV_HZ;
+
+
+/* This enumerator describes the set of commands that can be applied on */
+/* an engine (e.g. IDMA, XOR). Applying a command depends on the current */
+/* status (see MV_STATE enumerator) */
+/* Start can be applied only when status is IDLE */
+/* Stop can be applied only when status is IDLE, ACTIVE or PAUSED */
+/* Pause can be applied only when status is ACTIVE */
+/* Restart can be applied only when status is PAUSED */
+typedef enum _mvCommand
+{
+ MV_START, /* Start */
+ MV_STOP, /* Stop */
+ MV_PAUSE, /* Pause */
+ MV_RESTART /* Restart */
+} MV_COMMAND;
+
+/* This enumerator describes the set of state conditions. */
+/* Moving from one state to another is restricted. */
+typedef enum _mvState
+{
+ MV_IDLE,
+ MV_ACTIVE,
+ MV_PAUSED,
+ MV_UNDEFINED_STATE
+} MV_STATE;
+
+
+/* This structure describes address space window. Window base can be */
+/* 64 bit, window size up to 4GB */
+typedef struct _mvAddrWin
+{
+ MV_U32 baseLow; /* 32bit base low */
+ MV_U32 baseHigh; /* 32bit base high */
+ MV_U32 size; /* 32bit size */
+}MV_ADDR_WIN;
+
+/* This binary enumerator describes protection attribute status */
+typedef enum _mvProtRight
+{
+ ALLOWED, /* Protection attribute allowed */
+ FORBIDDEN /* Protection attribute forbidden */
+}MV_PROT_RIGHT;
+
+/* Unified struct for Rx and Tx packet operations. The user is required to */
+/* be familiar only with the Tx/Rx descriptor command status. */
+typedef struct _bufInfo
+{
+ MV_U32 cmdSts; /* Tx/Rx command status */
+ MV_U16 byteCnt; /* Size of valid data in the buffer */
+ MV_U16 bufSize; /* Total size of the buffer */
+ MV_U8 *pBuff; /* Pointer to Buffer */
+ MV_U8 *pData; /* Pointer to data in the Buffer */
+ MV_U32 userInfo1; /* Tx/Rx attached user information 1 */
+ MV_U32 userInfo2; /* Tx/Rx attached user information 2 */
+ struct _bufInfo *pNextBufInfo; /* Next buffer in packet */
+} BUF_INFO;
+
+/* This structure contains information describing one of the buffers
+ * (fragments) from which an Ethernet packet is built.
+ */
+typedef struct
+{
+ MV_U8* bufVirtPtr;
+ MV_ULONG bufPhysAddr;
+ MV_U32 bufSize;
+ MV_U32 dataSize;
+ MV_U32 memHandle;
+ MV_32 bufAddrShift;
+} MV_BUF_INFO;
+
+/* This structure contains information describing Ethernet packet.
+ * The packet can be divided into several buffers (fragments)
+ */
+typedef struct
+{
+ MV_ULONG osInfo;
+ MV_BUF_INFO *pFrags;
+ MV_U32 status;
+ MV_U16 pktSize;
+ MV_U16 numFrags;
+ MV_U32 ownerId;
+ MV_U32 fragIP;
+} MV_PKT_INFO;
+
+#endif /* MV_ASMLANGUAGE */
+
+#endif /* __INCmvTypesh */
+
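The MV_COMMAND/MV_STATE restrictions spelled out in the comments above lend themselves to a simple validity check. The sketch below is illustrative only and is not part of this patch; mvCmdAllowed is a hypothetical helper built purely from the types and status codes defined in mvTypes.h:

/* Illustrative sketch (not part of the patch): gate an engine command
 * against the current state, per the rules documented above. */
static MV_STATUS mvCmdAllowed(MV_COMMAND cmd, MV_STATE state)
{
	switch (cmd)
	{
		case MV_START:   /* allowed only from IDLE */
			return (state == MV_IDLE) ? MV_OK : MV_BAD_STATE;
		case MV_STOP:    /* allowed from IDLE, ACTIVE or PAUSED */
			return (state == MV_IDLE || state == MV_ACTIVE ||
			        state == MV_PAUSED) ? MV_OK : MV_BAD_STATE;
		case MV_PAUSE:   /* allowed only from ACTIVE */
			return (state == MV_ACTIVE) ? MV_OK : MV_BAD_STATE;
		case MV_RESTART: /* allowed only from PAUSED */
			return (state == MV_PAUSED) ? MV_OK : MV_BAD_STATE;
		default:
			return MV_BAD_PARAM;
	}
}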
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/dbg-trace.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/dbg-trace.c
new file mode 100644
index 000000000..644fd02da
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/dbg-trace.c
@@ -0,0 +1,110 @@
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include "dbg-trace.h"
+
+#define TRACE_ARR_LEN 800
+#define STR_LEN 128
+struct trace {
+ struct timeval tv;
+ char str[STR_LEN];
+ unsigned int callback_val1;
+ unsigned int callback_val2;
+ char valid;
+};
+static unsigned int (*trc_callback1) (unsigned char) = NULL;
+static unsigned int (*trc_callback2) (unsigned char) = NULL;
+static unsigned char trc_param1 = 0;
+static unsigned char trc_param2 = 0;
+struct trace *trc_arr;
+static int trc_index;
+static int trc_active = 0;
+
+void TRC_START()
+{
+ trc_active = 1;
+}
+
+void TRC_STOP()
+{
+ trc_active = 0;
+}
+
+void TRC_INIT(void *callback1, void *callback2, unsigned char callback1_param, unsigned char callback2_param)
+{
+ printk("Marvell debug tracing is on\n");
+ trc_arr = (struct trace *)kmalloc(TRACE_ARR_LEN*sizeof(struct trace),GFP_KERNEL);
+ if(trc_arr == NULL)
+ {
+ printk("Can't allocate Debug Trace buffer\n");
+ return;
+ }
+ memset(trc_arr,0,TRACE_ARR_LEN*sizeof(struct trace));
+ trc_index = 0;
+ trc_callback1 = callback1;
+ trc_callback2 = callback2;
+ trc_param1 = callback1_param;
+ trc_param2 = callback2_param;
+}
+void TRC_REC(char *fmt,...)
+{
+ va_list args;
+ struct trace *trc = &trc_arr[trc_index];
+
+ if(trc_active == 0)
+ return;
+
+ do_gettimeofday(&trc->tv);
+ if(trc_callback1)
+ trc->callback_val1 = trc_callback1(trc_param1);
+ if(trc_callback2)
+ trc->callback_val2 = trc_callback2(trc_param2);
+ va_start(args, fmt);
+ vsprintf(trc->str,fmt,args);
+ va_end(args);
+ trc->valid = 1;
+ if((++trc_index) == TRACE_ARR_LEN) {
+ trc_index = 0;
+ }
+}
+void TRC_OUTPUT(void)
+{
+ int i,j;
+ struct trace *p;
+ printk("\n\nTrace %d items\n",TRACE_ARR_LEN);
+ for(i=0,j=trc_index; i<TRACE_ARR_LEN; i++,j++) {
+ if(j == TRACE_ARR_LEN)
+ j = 0;
+ p = &trc_arr[j];
+ if(p->valid) {
+ unsigned long uoffs;
+ struct trace *plast;
+ if(p == &trc_arr[0])
+ plast = &trc_arr[TRACE_ARR_LEN-1];
+ else
+ plast = p-1;
+ if(p->tv.tv_sec == ((plast)->tv.tv_sec))
+ uoffs = (p->tv.tv_usec - ((plast)->tv.tv_usec));
+ else
+ uoffs = (1000000 - ((plast)->tv.tv_usec)) +
+ ((p->tv.tv_sec - ((plast)->tv.tv_sec) - 1) * 1000000) +
+ p->tv.tv_usec;
+ printk("%03d: [+%ld usec]", j, (unsigned long)uoffs);
+ if(trc_callback1)
+ printk("[%u]",p->callback_val1);
+ if(trc_callback2)
+ printk("[%u]",p->callback_val2);
+ printk(": %s",p->str);
+ }
+ p->valid = 0;
+ }
+ memset(trc_arr,0,TRACE_ARR_LEN*sizeof(struct trace));
+ trc_index = 0;
+}
+void TRC_RELEASE(void)
+{
+ kfree(trc_arr);
+ trc_index = 0;
+}
+
+
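For context, a caller drives this trace facility in four steps: allocate the 800-entry ring with TRC_INIT (optionally passing two callbacks whose return values are captured with every record), enable recording with TRC_START, call TRC_REC on the paths of interest, then dump and free with TRC_OUTPUT and TRC_RELEASE. A minimal usage sketch, with a hypothetical callback invented for illustration:

/* Illustrative sketch (not part of the patch). */
static unsigned int read_status_reg(unsigned char unit)
{
	return 0;	/* e.g. a hardware status register read keyed by 'unit' */
}

static void trace_example(void)
{
	TRC_INIT(read_status_reg, NULL, 0, 0);	/* allocate the ring buffer */
	TRC_START();

	TRC_REC("queued frame, len=%d\n", 1514);

	TRC_STOP();
	TRC_OUTPUT();	/* print every valid record with usec deltas */
	TRC_RELEASE();	/* free the ring buffer */
}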
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/dbg-trace.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/dbg-trace.h
new file mode 100644
index 000000000..a5aac2673
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/dbg-trace.h
@@ -0,0 +1,24 @@
+
+#ifndef _MV_DBG_TRCE_H_
+#define _MV_DBG_TRCE_H_
+
+#ifdef CONFIG_MV_DBG_TRACE
+void TRC_INIT(void *callback1, void *callback2,
+ unsigned char callback1_param, unsigned char callback2_param);
+void TRC_REC(char *fmt,...);
+void TRC_OUTPUT(void);
+void TRC_RELEASE(void);
+void TRC_START(void);
+void TRC_STOP(void);
+
+#else
+#define TRC_INIT(x1,x2,x3,x4)
+#define TRC_REC(X...)
+#define TRC_OUTPUT()
+#define TRC_RELEASE()
+#define TRC_START()
+#define TRC_STOP()
+#endif
+
+
+#endif
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.c
new file mode 100644
index 000000000..5f6278447
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.c
@@ -0,0 +1,2513 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "boardEnv/mvBoardEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "cpu/mvCpu.h"
+#include "cntmr/mvCntmr.h"
+#include "gpp/mvGpp.h"
+#include "twsi/mvTwsi.h"
+#include "pex/mvPex.h"
+#include "device/mvDevice.h"
+#include "eth/gbe/mvEthRegs.h"
+
+/* defines */
+/* #define MV_DEBUG */
+#ifdef MV_DEBUG
+ #define DB(x) x
+#else
+ #define DB(x)
+#endif
+
+extern MV_CPU_ARM_CLK _cpuARMDDRCLK[];
+
+#define CODE_IN_ROM MV_FALSE
+#define CODE_IN_RAM MV_TRUE
+
+extern MV_BOARD_INFO* boardInfoTbl[];
+#define BOARD_INFO(boardId) boardInfoTbl[boardId - BOARD_ID_BASE]
+
+/* Locals */
+static MV_DEV_CS_INFO* boardGetDevEntry(MV_32 devNum, MV_BOARD_DEV_CLASS devClass);
+
+MV_U32 tClkRate = -1;
+
+
+/*******************************************************************************
+* mvBoardEnvInit - Init board
+*
+* DESCRIPTION:
+* In this function the board environment takes care of device bank
+* initialization.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID mvBoardEnvInit(MV_VOID)
+{
+ MV_U32 boardId= mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("mvBoardEnvInit:Board unknown.\n");
+ return;
+
+ }
+
+ /* Set GPP Out value */
+ MV_REG_WRITE(GPP_DATA_OUT_REG(0), BOARD_INFO(boardId)->gppOutValLow);
+ MV_REG_WRITE(GPP_DATA_OUT_REG(1), BOARD_INFO(boardId)->gppOutValHigh);
+
+ /* set GPP polarity */
+ mvGppPolaritySet(0, 0xFFFFFFFF, BOARD_INFO(boardId)->gppPolarityValLow);
+ mvGppPolaritySet(1, 0xFFFFFFFF, BOARD_INFO(boardId)->gppPolarityValHigh);
+
+ /* Workaround for Erratum FE-MISC-70*/
+ if(mvCtrlRevGet()==MV_88F6XXX_A0_REV)
+ {
+ BOARD_INFO(boardId)->gppOutEnValLow &= 0xfffffffd;
+ BOARD_INFO(boardId)->gppOutEnValLow |= (BOARD_INFO(boardId)->gppOutEnValHigh) & 0x00000002;
+ } /*End of WA*/
+
+ /* Set GPP Out Enable*/
+ mvGppTypeSet(0, 0xFFFFFFFF, BOARD_INFO(boardId)->gppOutEnValLow);
+ mvGppTypeSet(1, 0xFFFFFFFF, BOARD_INFO(boardId)->gppOutEnValHigh);
+
+ /* Nand CE */
+ MV_REG_BIT_SET(NAND_CTRL_REG, NAND_ACTCEBOOT_BIT);
+}
+
+/*******************************************************************************
+* mvBoardModelGet - Get Board model
+*
+* DESCRIPTION:
+* This function returns a 16-bit value describing the board model.
+* The board model is the upper 16 bits of the 32-bit board ID
+* (0xMMMM of 0xMMMMRRRR, see mvBoardIdGet).
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 16-bit board model number.
+*
+*******************************************************************************/
+MV_U16 mvBoardModelGet(MV_VOID)
+{
+ return (mvBoardIdGet() >> 16);
+}
+
+/*******************************************************************************
+* mvBoardRevGet - Get Board revision
+*
+* DESCRIPTION:
+* This function returns a 16-bit value describing the board revision.
+* The board revision is the lower 16 bits of the 32-bit board ID
+* (0xRRRR of 0xMMMMRRRR, see mvBoardIdGet).
+* For example, for a board ID of 0x00030004 the function will return
+* 0x0004.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 16-bit board revision number.
+*
+*******************************************************************************/
+MV_U16 mvBoardRevGet(MV_VOID)
+{
+ return (mvBoardIdGet() & 0xFFFF);
+}
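Taken together with mvBoardIdGet() (documented further below), these two getters simply split the 32-bit 0xMMMMRRRR board ID. A short sketch, with a hypothetical helper name and ID value chosen only for illustration:

/* Illustrative sketch (not part of the patch). */
static MV_VOID boardIdShowExample(MV_VOID)
{
	MV_U32 boardId = mvBoardIdGet();	/* e.g. 0x00030004 (hypothetical) */
	MV_U16 model   = mvBoardModelGet();	/* 0x0003 - upper 16 bits of the ID */
	MV_U16 rev     = mvBoardRevGet();	/* 0x0004 - lower 16 bits of the ID */

	mvOsPrintf("Board ID 0x%08x: model 0x%04x rev 0x%04x\n", boardId, model, rev);
}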
+
+/*******************************************************************************
+* mvBoardNameGet - Get Board name
+*
+* DESCRIPTION:
+* This function returns a string describing the board model and revision.
+* String is extracted from board I2C EEPROM.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* pNameBuff - Buffer to contain board name string. Minimum size 32 chars.
+*
+* RETURN:
+*
+* MV_ERROR if the information cannot be read.
+*******************************************************************************/
+MV_STATUS mvBoardNameGet(char *pNameBuff)
+{
+ MV_U32 boardId= mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsSPrintf (pNameBuff, "Board unknown.\n");
+ return MV_ERROR;
+
+ }
+
+ mvOsSPrintf (pNameBuff, "%s",BOARD_INFO(boardId)->boardName);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvBoardIsPortInSgmii -
+*
+* DESCRIPTION:
+* This routine returns MV_TRUE if the given port works in SGMII mode,
+* or MV_FALSE for all other options.
+*
+* INPUT:
+* ethPortNum - Ethernet port number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE - port in SGMII.
+* MV_FALSE - other.
+*
+*******************************************************************************/
+MV_BOOL mvBoardIsPortInSgmii(MV_U32 ethPortNum)
+{
+ MV_BOOL ethPortSgmiiSupport[BOARD_ETH_PORT_NUM] = MV_ETH_PORT_SGMII;
+
+ if(ethPortNum >= BOARD_ETH_PORT_NUM)
+ {
+ mvOsPrintf ("Invalid portNo=%d\n", ethPortNum);
+ return MV_FALSE;
+ }
+ return ethPortSgmiiSupport[ethPortNum];
+}
+
+/*******************************************************************************
+* mvBoardIsPortInGmii -
+*
+* DESCRIPTION:
+* This routine returns MV_TRUE if the port works in GMII mode,
+* or MV_FALSE for all other options.
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE - port in GMII.
+* MV_FALSE - other.
+*
+*******************************************************************************/
+MV_BOOL mvBoardIsPortInGmii(MV_VOID)
+{
+ MV_U32 devClassId, devClass = 0;
+ if (mvBoardMppGroupTypeGet(devClass) == MV_BOARD_AUTO)
+ {
+ /* Get MPP module ID */
+ devClassId = mvBoarModuleTypeGet(devClass);
+ if (MV_BOARD_MODULE_GMII_ID == devClassId)
+ return MV_TRUE;
+ }
+ else if (mvBoardMppGroupTypeGet(devClass) == MV_BOARD_GMII)
+ return MV_TRUE;
+
+ return MV_FALSE;
+}
+/*******************************************************************************
+* mvBoardPhyAddrGet - Get the phy address
+*
+* DESCRIPTION:
+* This routine returns the Phy address of a given ethernet port.
+*
+* INPUT:
+* ethPortNum - Ethernet port number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32bit describing Phy address, -1 if the port number is wrong.
+*
+*******************************************************************************/
+MV_32 mvBoardPhyAddrGet(MV_U32 ethPortNum)
+{
+ MV_U32 boardId= mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("mvBoardPhyAddrGet: Board unknown.\n");
+ return MV_ERROR;
+ }
+
+ return BOARD_INFO(boardId)->pBoardMacInfo[ethPortNum].boardEthSmiAddr;
+}
+
+/*******************************************************************************
+* mvBoardMacSpeedGet - Get the Mac speed
+*
+* DESCRIPTION:
+* This routine returns the MAC speed, if predefined, of a given Ethernet port.
+*
+* INPUT:
+* ethPortNum - Ethernet port number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BOARD_MAC_SPEED, -1 if the port number is wrong.
+*
+*******************************************************************************/
+MV_BOARD_MAC_SPEED mvBoardMacSpeedGet(MV_U32 ethPortNum)
+{
+ MV_U32 boardId= mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("mvBoardMacSpeedGet: Board unknown.\n");
+ return MV_ERROR;
+ }
+
+ return BOARD_INFO(boardId)->pBoardMacInfo[ethPortNum].boardMacSpeed;
+}
+
+/*******************************************************************************
+* mvBoardLinkStatusIrqGet - Get the IRQ number for the link status indication
+*
+* DESCRIPTION:
+* This routine returns the IRQ number for the link status indication.
+*
+* INPUT:
+* ethPortNum - Ethernet port number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* the number of the IRQ for the link status indication, -1 if the port
+* number is wrong or if not relevant.
+*
+*******************************************************************************/
+MV_32 mvBoardLinkStatusIrqGet(MV_U32 ethPortNum)
+{
+ MV_U32 boardId = mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("mvBoardLinkStatusIrqGet: Board unknown.\n");
+ return MV_ERROR;
+ }
+
+ return BOARD_INFO(boardId)->pSwitchInfo[ethPortNum].linkStatusIrq;
+}
+
+/*******************************************************************************
+* mvBoardSwitchPortGet - Get the mapping between the board connector and the
+* Ethernet Switch port
+*
+* DESCRIPTION:
+* This routine returns the matching Switch port.
+*
+* INPUT:
+* ethPortNum - Ethernet port number.
+* boardPortNum - logical number of the connector on the board
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* the matching Switch port, -1 if the port number is wrong or if not relevant.
+*
+*******************************************************************************/
+MV_32 mvBoardSwitchPortGet(MV_U32 ethPortNum, MV_U8 boardPortNum)
+{
+ MV_U32 boardId = mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("mvBoardSwitchPortGet: Board unknown.\n");
+ return MV_ERROR;
+ }
+ if (boardPortNum >= BOARD_ETH_SWITCH_PORT_NUM)
+ {
+ mvOsPrintf("mvBoardSwitchPortGet: Illegal board port number.\n");
+ return MV_ERROR;
+ }
+
+ return BOARD_INFO(boardId)->pSwitchInfo[ethPortNum].qdPort[boardPortNum];
+}
+
+/*******************************************************************************
+* mvBoardSwitchCpuPortGet - Get the Ethernet Switch CPU port
+*
+* DESCRIPTION:
+* This routine returns the Switch CPU port.
+*
+* INPUT:
+* ethPortNum - Ethernet port number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* the Switch CPU port, -1 if the port number is wrong or if not relevant.
+*
+*******************************************************************************/
+MV_32 mvBoardSwitchCpuPortGet(MV_U32 ethPortNum)
+{
+ MV_U32 boardId = mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("mvBoardSwitchCpuPortGet: Board unknown.\n");
+ return MV_ERROR;
+ }
+
+ return BOARD_INFO(boardId)->pSwitchInfo[ethPortNum].qdCpuPort;
+}
+
+/*******************************************************************************
+* mvBoardIsSwitchConnected - Get switch connection status
+* DESCRIPTION:
+* This routine returns the port's connection status.
+*
+* INPUT:
+* ethPortNum - Ethernet port number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 1 - if ethPortNum is connected to switch, 0 otherwise
+*
+*******************************************************************************/
+MV_32 mvBoardIsSwitchConnected(MV_U32 ethPortNum)
+{
+ MV_U32 boardId = mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("mvBoardIsSwitchConnected: Board unknown.\n");
+ return MV_ERROR;
+ }
+
+ if(ethPortNum >= BOARD_INFO(boardId)->numBoardMacInfo)
+ {
+ mvOsPrintf("mvBoardIsSwitchConnected: Illegal port number(%u)\n", ethPortNum);
+ return MV_ERROR;
+ }
+
+ if((MV_32)(BOARD_INFO(boardId)->pSwitchInfo))
+ return (MV_32)(BOARD_INFO(boardId)->pSwitchInfo[ethPortNum].switchOnPort == ethPortNum);
+ else
+ return 0;
+}
+/*******************************************************************************
+* mvBoardSmiScanModeGet - Get Switch SMI scan mode
+*
+* DESCRIPTION:
+* This routine returns Switch SMI scan mode.
+*
+* INPUT:
+* ethPortNum - Ethernet port number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 1 for SMI_MANUAL_MODE, -1 if the port number is wrong or if not relevant.
+*
+*******************************************************************************/
+MV_32 mvBoardSmiScanModeGet(MV_U32 ethPortNum)
+{
+ MV_U32 boardId = mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("mvBoardSmiScanModeGet: Board unknown.\n");
+ return MV_ERROR;
+ }
+
+ return BOARD_INFO(boardId)->pSwitchInfo[ethPortNum].smiScanMode;
+}
+/*******************************************************************************
+* mvBoardSpecInitGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN: Returns MV_TRUE and fills the parameters if the board needs a specific
+* PHY init; otherwise returns MV_FALSE.
+*
+*
+*******************************************************************************/
+
+MV_BOOL mvBoardSpecInitGet(MV_U32* regOff, MV_U32* data)
+{
+ return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvBoardTclkGet - Get the board Tclk (Controller clock)
+*
+* DESCRIPTION:
+* This routine extracts the controller core clock.
+* This function uses the controller counters to make the identification.
+* Note: In order to avoid interference, make sure task context switches
+* and interrupts will not occur during this function's operation.
+*
+* INPUT:
+* countNum - Counter number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32bit clock cycles in Hertz.
+*
+*******************************************************************************/
+MV_U32 mvBoardTclkGet(MV_VOID)
+{
+ if(mvCtrlModelGet()==MV_6281_DEV_ID)
+ {
+#if defined(TCLK_AUTO_DETECT)
+ MV_U32 tmpTClkRate = MV_BOARD_TCLK_166MHZ;
+
+ tmpTClkRate = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+ tmpTClkRate &= MSAR_TCLCK_MASK;
+
+ switch (tmpTClkRate)
+ {
+ case MSAR_TCLCK_166:
+ return MV_BOARD_TCLK_166MHZ;
+ break;
+ case MSAR_TCLCK_200:
+ return MV_BOARD_TCLK_200MHZ;
+ break;
+ }
+#else
+ return MV_BOARD_TCLK_200MHZ;
+#endif
+ }
+
+ return MV_BOARD_TCLK_166MHZ;
+
+}
+/*******************************************************************************
+* mvBoardSysClkGet - Get the board SysClk (CPU bus clock)
+*
+* DESCRIPTION:
+* This routine extracts the CPU bus clock.
+*
+* INPUT:
+* countNum - Counter number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32bit clock cycles in Hertz.
+*
+*******************************************************************************/
+static MV_U32 mvBoard6180SysClkGet(MV_VOID)
+{
+ MV_U32 sysClkRate=0;
+ MV_CPU_ARM_CLK _cpu6180_ddr_l2_CLK[] = MV_CPU6180_DDR_L2_CLCK_TBL;
+
+ sysClkRate = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+ sysClkRate = sysClkRate & MSAR_CPUCLCK_MASK_6180;
+ sysClkRate = sysClkRate >> MSAR_CPUCLCK_OFFS_6180;
+
+ sysClkRate = _cpu6180_ddr_l2_CLK[sysClkRate].ddrClk;
+
+ return sysClkRate;
+
+}
+
+MV_U32 mvBoardSysClkGet(MV_VOID)
+{
+#ifdef SYSCLK_AUTO_DETECT
+ MV_U32 sysClkRate, tmp, pClkRate, indexDdrRtio;
+ MV_U32 cpuCLK[] = MV_CPU_CLCK_TBL;
+ MV_U32 ddrRtio[][2] = MV_DDR_CLCK_RTIO_TBL;
+
+ if(mvCtrlModelGet() == MV_6180_DEV_ID)
+ return mvBoard6180SysClkGet();
+
+ tmp = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+ pClkRate = MSAR_CPUCLCK_EXTRACT(tmp);
+ pClkRate = cpuCLK[pClkRate];
+
+ indexDdrRtio = tmp & MSAR_DDRCLCK_RTIO_MASK;
+ indexDdrRtio = indexDdrRtio >> MSAR_DDRCLCK_RTIO_OFFS;
+ if(ddrRtio[indexDdrRtio][0] != 0)
+ sysClkRate = ((pClkRate * ddrRtio[indexDdrRtio][1]) / ddrRtio[indexDdrRtio][0]);
+ else
+ sysClkRate = 0;
+ return sysClkRate;
+#else
+ return MV_BOARD_DEFAULT_SYSCLK;
+#endif
+}
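As a concrete reading of the formula above: the sample-at-reset register yields a decoded CPU clock and a DDR ratio table entry, and the system clock is cpuClk * ratio[1] / ratio[0]. The numbers below are hypothetical, chosen only to illustrate the arithmetic (the real values come from MV_CPU_CLCK_TBL and MV_DDR_CLCK_RTIO_TBL):

/* Illustrative sketch (not part of the patch), hypothetical values. */
static MV_U32 sysClkExample(MV_VOID)
{
	MV_U32 pClkRate = 1200000000;	/* decoded CPU clock, 1.2 GHz (hypothetical) */
	MV_U32 ratio[2] = { 3, 1 };	/* hypothetical ddrRtio entry: divide by 3 */

	return (pClkRate * ratio[1]) / ratio[0];	/* 400000000 Hz */
}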
+
+
+/*******************************************************************************
+* mvBoardPexBridgeIntPinGet - Get PEX to PCI bridge interrupt pin number
+*
+* DESCRIPTION:
+* Multi-ported PCI Express bridges implemented on the board
+* collapse interrupts across multiple conventional PCI/PCI-X buses.
+* A dual-headed PCI Express bridge would map (or "swizzle") the
+* interrupts per the following table (in accordance with the respective
+* logical PCI/PCI-X bridge's Device Number), collapse the INTA#-INTD#
+* signals from its two logical PCI/PCI-X bridges, collapse the
+* INTA#-INTD# signals from any internal sources, and convert the
+* signals to in-band PCI Express messages.
+* This function returns the upstream interrupt as it was converted by
+* the bridge, according to board configuration and the following table:
+*                          PCI dev num
+*   Interrupt pin           7    8    9
+*        A        ->        A    D    C
+*        B        ->        B    A    D
+*        C        ->        C    B    A
+*        D        ->        D    C    B
+*
+*
+* INPUT:
+* devNum - PCI/PCIX device number.
+* intPin - PCI Int pin
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Int pin connected to the Interrupt controller
+*
+*******************************************************************************/
+MV_U32 mvBoardPexBridgeIntPinGet(MV_U32 devNum, MV_U32 intPin)
+{
+ MV_U32 realIntPin = ((intPin + (3 - (devNum % 4))) %4 );
+
+ if (realIntPin == 0) return 4;
+ else return realIntPin;
+
+}
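To make the swizzle concrete (with INTA#..INTD# encoded as 1..4): for device 8 and INTA#, ((1 + (3 - 8 % 4)) % 4) = 0, which the function maps to 4, i.e. INTD#, matching the 'A -> D' entry in the dev-8 column of the table above. A quick check, with values and the wrapper name chosen only for illustration:

/* Illustrative sketch (not part of the patch). */
static MV_VOID pexSwizzleExample(MV_VOID)
{
	/* INTA#..INTD# are encoded as 1..4 */
	mvOsPrintf("dev 7 INTA# -> %u\n", mvBoardPexBridgeIntPinGet(7, 1));	/* 1 (A) */
	mvOsPrintf("dev 8 INTA# -> %u\n", mvBoardPexBridgeIntPinGet(8, 1));	/* 4 (D) */
	mvOsPrintf("dev 9 INTA# -> %u\n", mvBoardPexBridgeIntPinGet(9, 1));	/* 3 (C) */
}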
+
+/*******************************************************************************
+* mvBoardDebugLedNumGet - Get number of debug Leds
+*
+* DESCRIPTION:
+* INPUT:
+* boardId
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_U32 mvBoardDebugLedNumGet(MV_U32 boardId)
+{
+ return BOARD_INFO(boardId)->activeLedsNumber;
+}
+
+/*******************************************************************************
+* mvBoardDebugLed - Set the board debug LEDs
+*
+* DESCRIPTION: Turn the status LEDs on/off.
+* Note: assumes the MPP LEDs are part of group 0 only.
+*
+* INPUT:
+* hexNum - Number to be displayed in hex by the LEDs.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID mvBoardDebugLed(MV_U32 hexNum)
+{
+ MV_U32 val = 0,totalMask, currentBitMask = 1,i;
+ MV_U32 boardId= mvBoardIdGet();
+
+ if (BOARD_INFO(boardId)->pLedGppPin == NULL)
+ return;
+
+ totalMask = (1 << BOARD_INFO(boardId)->activeLedsNumber) -1;
+ hexNum &= totalMask;
+ totalMask = 0;
+
+ for (i = 0 ; i < BOARD_INFO(boardId)->activeLedsNumber ; i++)
+ {
+ if (hexNum & currentBitMask)
+ {
+ val |= (1 << BOARD_INFO(boardId)->pLedGppPin[i]);
+ }
+
+ totalMask |= (1 << BOARD_INFO(boardId)->pLedGppPin[i]);
+
+ currentBitMask = (currentBitMask << 1);
+ }
+
+ if (BOARD_INFO(boardId)->ledsPolarity)
+ {
+ mvGppValueSet(0, totalMask, val);
+ }
+ else
+ {
+ mvGppValueSet(0, totalMask, ~val);
+ }
+}
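In other words, bit i of hexNum drives the GPIO listed in pLedGppPin[i], with the on/off sense taken from ledsPolarity. On a hypothetical board with three debug LEDs on GPP pins 12, 13 and 14, a call like the following would light the first and third LEDs:

/* Illustrative sketch (not part of the patch), hypothetical LED wiring. */
mvBoardDebugLed(0x5);	/* bits 0 and 2 set -> GPP 12 and GPP 14 driven, GPP 13 off */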
+
+
+/*******************************************************************************
+* mvBoarGpioPinNumGet - Get a GPIO pin number by class and index
+*
+* DESCRIPTION:
+*
+* INPUT:
+* class - MV_BOARD_GPP_CLASS enum.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* GPIO pin number. The function returns -1 for bad parameters.
+*
+*******************************************************************************/
+MV_32 mvBoarGpioPinNumGet(MV_BOARD_GPP_CLASS class, MV_U32 index)
+{
+ MV_U32 boardId, i;
+ MV_U32 indexFound = 0;
+
+ boardId = mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("mvBoardRTCGpioPinGet:Board unknown.\n");
+ return MV_ERROR;
+
+ }
+
+ for (i = 0; i < BOARD_INFO(boardId)->numBoardGppInfo; i++)
+ if (BOARD_INFO(boardId)->pBoardGppInfo[i].devClass == class) {
+ if (indexFound == index)
+ return (MV_U32)BOARD_INFO(boardId)->pBoardGppInfo[i].gppPinNum;
+ else
+ indexFound++;
+
+ }
+
+ return MV_ERROR;
+}
+
+
+/*******************************************************************************
+* mvBoardRTCGpioPinGet - mvBoardRTCGpioPinGet
+*
+* DESCRIPTION:
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* GPIO pin number. The function returns -1 for bad parameters.
+*
+*******************************************************************************/
+MV_32 mvBoardRTCGpioPinGet(MV_VOID)
+{
+ return mvBoarGpioPinNumGet(BOARD_GPP_RTC, 0);
+}
+
+
+/*******************************************************************************
+* mvBoardReset - mvBoardReset
+*
+* DESCRIPTION:
+* Reset the board
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None
+*
+*******************************************************************************/
+MV_VOID mvBoardReset(MV_VOID)
+{
+ MV_32 resetPin;
+
+ /* Get gpp reset pin if define */
+ resetPin = mvBoardResetGpioPinGet();
+ if (resetPin != MV_ERROR)
+ {
+ MV_REG_BIT_RESET( GPP_DATA_OUT_REG(0) ,(1 << resetPin));
+ MV_REG_BIT_RESET( GPP_DATA_OUT_EN_REG(0) ,(1 << resetPin));
+
+ }
+ else
+ {
+		/* No gpp reset pin was found, try to reset using
+ system reset out */
+ MV_REG_BIT_SET( CPU_RSTOUTN_MASK_REG , BIT2);
+ MV_REG_BIT_SET( CPU_SYS_SOFT_RST_REG , BIT0);
+ }
+}
+
+/*******************************************************************************
+* mvBoardResetGpioPinGet - mvBoardResetGpioPinGet
+*
+* DESCRIPTION:
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* GPIO pin number. The function returns -1 for bad parameters.
+*
+*******************************************************************************/
+MV_32 mvBoardResetGpioPinGet(MV_VOID)
+{
+ return mvBoarGpioPinNumGet(BOARD_GPP_RESET, 0);
+}
+/*******************************************************************************
+* mvBoardSDIOGpioPinGet - mvBoardSDIOGpioPinGet
+*
+* DESCRIPTION:
+* used for hotswap detection
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* GPIO pin number. The function returns -1 for bad parameters.
+*
+*******************************************************************************/
+MV_32 mvBoardSDIOGpioPinGet(MV_VOID)
+{
+ return mvBoarGpioPinNumGet(BOARD_GPP_SDIO_DETECT, 0);
+}
+
+/*******************************************************************************
+* mvBoardUSBVbusGpioPinGet - return Vbus input GPP
+*
+* DESCRIPTION:
+*
+* INPUT:
+* int devNo.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* GPIO pin number. The function returns -1 for bad parameters.
+*
+*******************************************************************************/
+MV_32 mvBoardUSBVbusGpioPinGet(MV_32 devId)
+{
+ return mvBoarGpioPinNumGet(BOARD_GPP_USB_VBUS, devId);
+}
+
+/*******************************************************************************
+* mvBoardUSBVbusEnGpioPinGet - return Vbus Enable output GPP
+*
+* DESCRIPTION:
+*
+* INPUT:
+* int devNo.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* GPIO pin number. The function returns -1 for bad parameters.
+*
+*******************************************************************************/
+MV_32 mvBoardUSBVbusEnGpioPinGet(MV_32 devId)
+{
+ return mvBoarGpioPinNumGet(BOARD_GPP_USB_VBUS_EN, devId);
+}
+
+
+/*******************************************************************************
+* mvBoardGpioIntMaskGet - Get GPIO mask for interrupt pins
+*
+* DESCRIPTION:
+* This function returns a 32-bit mask of GPP pins that are connected to
+* interrupt-generating sources on the board.
+* For example, if UART channel A is hardwired to GPP pin 8 and
+* UART channel B is hardwired to GPP pin 4, the function will return
+* the value 0x00000110.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* See description. The function returns -1 if the board is not identified.
+*
+*******************************************************************************/
+MV_32 mvBoardGpioIntMaskLowGet(MV_VOID)
+{
+ MV_U32 boardId;
+
+ boardId = mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("mvBoardGpioIntMaskGet:Board unknown.\n");
+ return MV_ERROR;
+
+ }
+
+ return BOARD_INFO(boardId)->intsGppMaskLow;
+}
+MV_32 mvBoardGpioIntMaskHighGet(MV_VOID)
+{
+ MV_U32 boardId;
+
+ boardId = mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("mvBoardGpioIntMaskGet:Board unknown.\n");
+ return MV_ERROR;
+
+ }
+
+ return BOARD_INFO(boardId)->intsGppMaskHigh;
+}
+
+
+/*******************************************************************************
+* mvBoardMppGet - Get board dependent MPP register value
+*
+* DESCRIPTION:
+* MPP settings are derived from board design.
+* An MPP group consists of 8 MPPs, and each MPP group is represented by an MPP
+* control register.
+* This function retrieves the board-dependent MPP register value.
+*
+* INPUT:
+* mppGroupNum - MPP group number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32bit value describing MPP control register value.
+*
+*******************************************************************************/
+MV_32 mvBoardMppGet(MV_U32 mppGroupNum)
+{
+ MV_U32 boardId;
+
+ boardId = mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("mvBoardMppGet:Board unknown.\n");
+ return MV_ERROR;
+
+ }
+
+ return BOARD_INFO(boardId)->pBoardMppConfigValue[0].mppGroup[mppGroupNum];
+}
+
+
+/*******************************************************************************
+* mvBoardMppGroupIdUpdate - If the MPP group type is AUTO, identify it using TWSI
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID mvBoardMppGroupIdUpdate(MV_VOID)
+{
+
+ MV_BOARD_MPP_GROUP_CLASS devClass;
+ MV_BOARD_MODULE_ID_CLASS devClassId;
+ MV_BOARD_MPP_TYPE_CLASS mppGroupType;
+ MV_U32 devId;
+ MV_U32 maxMppGrp = 1;
+
+ devId = mvCtrlModelGet();
+
+ switch(devId){
+ case MV_6281_DEV_ID:
+ maxMppGrp = MV_6281_MPP_MAX_MODULE;
+ break;
+ case MV_6192_DEV_ID:
+ maxMppGrp = MV_6192_MPP_MAX_MODULE;
+ break;
+ case MV_6190_DEV_ID:
+ maxMppGrp = MV_6190_MPP_MAX_MODULE;
+ break;
+ case MV_6180_DEV_ID:
+ maxMppGrp = MV_6180_MPP_MAX_MODULE;
+ break;
+ }
+
+ for (devClass = 0; devClass < maxMppGrp; devClass++)
+ {
+ /* If MPP group can be defined by the module connected to it */
+ if (mvBoardMppGroupTypeGet(devClass) == MV_BOARD_AUTO)
+ {
+ /* Get MPP module ID */
+ devClassId = mvBoarModuleTypeGet(devClass);
+ if (MV_ERROR != devClassId)
+ {
+ switch(devClassId)
+ {
+ case MV_BOARD_MODULE_TDM_ID:
+ case MV_BOARD_MODULE_TDM_5CHAN_ID:
+ mppGroupType = MV_BOARD_TDM;
+ break;
+ case MV_BOARD_MODULE_AUDIO_ID:
+ mppGroupType = MV_BOARD_AUDIO;
+ break;
+ case MV_BOARD_MODULE_RGMII_ID:
+ mppGroupType = MV_BOARD_RGMII;
+ break;
+ case MV_BOARD_MODULE_GMII_ID:
+ mppGroupType = MV_BOARD_GMII;
+ break;
+ case MV_BOARD_MODULE_TS_ID:
+ mppGroupType = MV_BOARD_TS;
+ break;
+ case MV_BOARD_MODULE_MII_ID:
+ mppGroupType = MV_BOARD_MII;
+ break;
+ default:
+ mppGroupType = MV_BOARD_OTHER;
+ break;
+ }
+ }
+ else
+ /* The module bay is empty */
+ mppGroupType = MV_BOARD_OTHER;
+
+ /* Update MPP group type */
+ mvBoardMppGroupTypeSet(devClass, mppGroupType);
+ }
+
+ /* Update MPP output voltage for RGMII 1.8V. Set port to GMII for GMII module */
+ if ((mvBoardMppGroupTypeGet(devClass) == MV_BOARD_RGMII))
+ MV_REG_BIT_SET(MPP_OUTPUT_DRIVE_REG,MPP_1_8_RGMII1_OUTPUT_DRIVE | MPP_1_8_RGMII0_OUTPUT_DRIVE);
+ else
+ {
+ if ((mvBoardMppGroupTypeGet(devClass) == MV_BOARD_GMII))
+ {
+ MV_REG_BIT_RESET(MPP_OUTPUT_DRIVE_REG, BIT7 | BIT15);
+ MV_REG_BIT_RESET(ETH_PORT_SERIAL_CTRL_1_REG(0),BIT3);
+ MV_REG_BIT_RESET(ETH_PORT_SERIAL_CTRL_1_REG(1),BIT3);
+ }
+ else if ((mvBoardMppGroupTypeGet(devClass) == MV_BOARD_MII))
+ {
+ /* Assumption that the MDC & MDIO should be 3.3V */
+ MV_REG_BIT_RESET(MPP_OUTPUT_DRIVE_REG, BIT7 | BIT15);
+ /* Assumption that only ETH1 can be MII when using modules on DB */
+ MV_REG_BIT_RESET(ETH_PORT_SERIAL_CTRL_1_REG(1),BIT3);
+ }
+ }
+ }
+}
+
+/*******************************************************************************
+* mvBoardMppGroupTypeGet
+*
+* DESCRIPTION:
+*
+* INPUT:
+* mppGroupClass - MPP group number 0 for MPP[35:20] or 1 for MPP[49:36].
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_BOARD_MPP_TYPE_CLASS mvBoardMppGroupTypeGet(MV_BOARD_MPP_GROUP_CLASS mppGroupClass)
+{
+ MV_U32 boardId;
+
+ boardId = mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("mvBoardMppGet:Board unknown.\n");
+ return MV_ERROR;
+
+ }
+
+ if (mppGroupClass == MV_BOARD_MPP_GROUP_1)
+ return BOARD_INFO(boardId)->pBoardMppTypeValue[0].boardMppGroup1;
+ else
+ return BOARD_INFO(boardId)->pBoardMppTypeValue[0].boardMppGroup2;
+}
+
+/*******************************************************************************
+* mvBoardMppGroupTypeSet
+*
+* DESCRIPTION:
+*
+* INPUT:
+* mppGroupClass - MPP group number 0 for MPP[35:20] or 1 for MPP[49:36].
+* mppGroupType - MPP group type for MPP[35:20] or for MPP[49:36].
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID mvBoardMppGroupTypeSet(MV_BOARD_MPP_GROUP_CLASS mppGroupClass,
+ MV_BOARD_MPP_TYPE_CLASS mppGroupType)
+{
+ MV_U32 boardId;
+
+ boardId = mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("mvBoardMppGet:Board unknown.\n");
+ }
+
+ if (mppGroupClass == MV_BOARD_MPP_GROUP_1)
+ BOARD_INFO(boardId)->pBoardMppTypeValue[0].boardMppGroup1 = mppGroupType;
+ else
+ BOARD_INFO(boardId)->pBoardMppTypeValue[0].boardMppGroup2 = mppGroupType;
+
+}
+
+/*******************************************************************************
+* mvBoardMppMuxSet - Update MPP mux
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID mvBoardMppMuxSet(MV_VOID)
+{
+
+ MV_BOARD_MPP_GROUP_CLASS devClass;
+ MV_BOARD_MPP_TYPE_CLASS mppGroupType;
+ MV_U32 devId;
+ MV_U8 muxVal = 0xf;
+ MV_U32 maxMppGrp = 1;
+ MV_TWSI_SLAVE twsiSlave;
+ MV_TWSI_ADDR slave;
+
+ devId = mvCtrlModelGet();
+
+ switch(devId){
+ case MV_6281_DEV_ID:
+ maxMppGrp = MV_6281_MPP_MAX_MODULE;
+ break;
+ case MV_6192_DEV_ID:
+ maxMppGrp = MV_6192_MPP_MAX_MODULE;
+ break;
+ case MV_6190_DEV_ID:
+ maxMppGrp = MV_6190_MPP_MAX_MODULE;
+ break;
+ case MV_6180_DEV_ID:
+ maxMppGrp = MV_6180_MPP_MAX_MODULE;
+ break;
+ }
+
+ for (devClass = 0; devClass < maxMppGrp; devClass++)
+ {
+ mppGroupType = mvBoardMppGroupTypeGet(devClass);
+
+ switch(mppGroupType)
+ {
+ case MV_BOARD_TDM:
+ muxVal &= ~(devClass ? (0x2 << (devClass * 2)):0x0);
+ break;
+ case MV_BOARD_AUDIO:
+ muxVal &= ~(devClass ? 0x7 : 0x0); /*old Z0 value 0xd:0x0*/
+ break;
+ case MV_BOARD_TS:
+ muxVal &= ~(devClass ? (0x2 << (devClass * 2)):0x0);
+ break;
+ default:
+ muxVal |= (devClass ? 0xf : 0);
+ break;
+ }
+ }
+
+ /* TWSI init */
+ slave.type = ADDR7_BIT;
+ slave.address = 0;
+ mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+ /* Read MPP module ID */
+ DB(mvOsPrintf("Board: twsi exp set\n"));
+ twsiSlave.slaveAddr.address = mvBoardTwsiExpAddrGet(MV_BOARD_MUX_I2C_ADDR_ENTRY);
+ twsiSlave.slaveAddr.type = mvBoardTwsiExpAddrTypeGet(MV_BOARD_MUX_I2C_ADDR_ENTRY);
+ twsiSlave.validOffset = MV_TRUE;
+	/* Offset is the first command after the address, which indicates the register number to be read
+	   in the next operation */
+ twsiSlave.offset = 2;
+ twsiSlave.moreThen256 = MV_FALSE;
+
+
+
+ if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+ {
+ DB(mvOsPrintf("Board: twsi exp out val fail\n"));
+ return;
+ }
+	DB(mvOsPrintf("Board: twsi exp out val succeeded\n"));
+
+ /* Change twsi exp to output */
+ twsiSlave.offset = 6;
+ muxVal = 0;
+ if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+ {
+ DB(mvOsPrintf("Board: twsi exp change to out fail\n"));
+ return;
+ }
+	DB(mvOsPrintf("Board: twsi exp change to out succeeded\n"));
+
+}
+
+/*******************************************************************************
+* mvBoardTdmMppSet - set MPPs in TDM module
+*
+* DESCRIPTION:
+*
+* INPUT: type of second telephony device
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID mvBoardTdmMppSet(MV_32 chType)
+{
+
+ MV_BOARD_MPP_GROUP_CLASS devClass;
+ MV_BOARD_MPP_TYPE_CLASS mppGroupType;
+ MV_U32 devId;
+ MV_U8 muxVal = 1;
+ MV_U8 muxValMask = 1;
+ MV_U8 twsiVal;
+ MV_U32 maxMppGrp = 1;
+ MV_TWSI_SLAVE twsiSlave;
+ MV_TWSI_ADDR slave;
+
+ devId = mvCtrlModelGet();
+
+ switch(devId){
+ case MV_6281_DEV_ID:
+ maxMppGrp = MV_6281_MPP_MAX_MODULE;
+ break;
+ case MV_6192_DEV_ID:
+ maxMppGrp = MV_6192_MPP_MAX_MODULE;
+ break;
+ case MV_6190_DEV_ID:
+ maxMppGrp = MV_6190_MPP_MAX_MODULE;
+ break;
+ case MV_6180_DEV_ID:
+ maxMppGrp = MV_6180_MPP_MAX_MODULE;
+ break;
+ }
+
+ for (devClass = 0; devClass < maxMppGrp; devClass++)
+ {
+ mppGroupType = mvBoardMppGroupTypeGet(devClass);
+ if(mppGroupType == MV_BOARD_TDM)
+ break;
+ }
+
+ if(devClass == maxMppGrp)
+ return; /* TDM module not found */
+
+ /* TWSI init */
+ slave.type = ADDR7_BIT;
+ slave.address = 0;
+ mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+ /* Read MPP module ID */
+ DB(mvOsPrintf("Board: twsi exp set\n"));
+ twsiSlave.slaveAddr.address = mvBoardTwsiExpAddrGet(devClass);
+ twsiSlave.slaveAddr.type = ADDR7_BIT;
+ twsiSlave.validOffset = MV_TRUE;
+	/* Offset is the first command after the address, which indicates the register number to be read
+	   in the next operation */
+ twsiSlave.offset = 3;
+ twsiSlave.moreThen256 = MV_FALSE;
+
+ if(mvBoardIdGet() == RD_88F6281A_ID)
+ {
+ muxVal = 0xc;
+ muxValMask = 0xf3;
+ }
+
+ mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+ muxVal = (twsiVal & muxValMask) | muxVal;
+
+ if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+ {
+ mvOsPrintf("Board: twsi exp out val fail\n");
+ return;
+ }
+	DB(mvOsPrintf("Board: twsi exp out val succeeded\n"));
+
+ /* Change twsi exp to output */
+ twsiSlave.offset = 7;
+ muxVal = 0xfe;
+ if(mvBoardIdGet() == RD_88F6281A_ID)
+ muxVal = 0xf3;
+
+ mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+ muxVal = (twsiVal & muxVal);
+
+ if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+ {
+ mvOsPrintf("Board: twsi exp change to out fail\n");
+ return;
+ }
+	DB(mvOsPrintf("Board: twsi exp change to out succeeded\n"));
+ /* reset the line to 0 */
+ twsiSlave.offset = 3;
+ muxVal = 0;
+ muxValMask = 1;
+
+ if(mvBoardIdGet() == RD_88F6281A_ID) {
+ muxVal = 0x0;
+ muxValMask = 0xf3;
+ }
+
+ mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+ muxVal = (twsiVal & muxValMask) | muxVal;
+
+ if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+ {
+ mvOsPrintf("Board: twsi exp out val fail\n");
+ return;
+ }
+	DB(mvOsPrintf("Board: twsi exp out val succeeded\n"));
+
+ mvOsDelay(20);
+
+ /* set the line to 1 */
+ twsiSlave.offset = 3;
+ muxVal = 1;
+ muxValMask = 1;
+
+ if(mvBoardIdGet() == RD_88F6281A_ID)
+ {
+ muxVal = 0xc;
+ muxValMask = 0xf3;
+ if(chType) /* FXS - issue reset properly */
+ {
+ MV_REG_BIT_SET(GPP_DATA_OUT_REG(1), MV_GPP12);
+ mvOsDelay(50);
+ MV_REG_BIT_RESET(GPP_DATA_OUT_REG(1), MV_GPP12);
+ }
+ else /* FXO - issue reset via TDM_CODEC_RST*/
+ {
+ /* change MPP44 type to TDM_CODEC_RST(0x2) */
+ MV_REG_WRITE(MPP_CONTROL_REG5, ((MV_REG_READ(MPP_CONTROL_REG5) & 0xFFF0FFFF) | BIT17));
+ }
+ }
+
+ mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+ muxVal = (twsiVal & muxValMask) | muxVal;
+
+ if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+ {
+ mvOsPrintf("Board: twsi exp out val fail\n");
+ return;
+ }
+
+ /* TBD - 5 channels */
+#if defined(MV_TDM_5CHANNELS)
+ /* change MPP38 type to GPIO(0x0) & polarity for TDM_STROBE */
+ MV_REG_WRITE(MPP_CONTROL_REG4, (MV_REG_READ(MPP_CONTROL_REG4) & 0xF0FFFFFF));
+ mvGppPolaritySet(1, MV_GPP6, 0);
+
+ twsiSlave.offset = 6;
+ twsiSlave.slaveAddr.address = mvBoardTwsiExpAddrGet(2);
+
+ mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+ muxVal = (twsiVal & ~BIT2);
+
+ if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+ {
+ mvOsPrintf("Board: twsi exp change to out fail\n");
+ return;
+ }
+
+
+ twsiSlave.offset = 2;
+
+ mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+ muxVal = (twsiVal & ~BIT2);
+
+ if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+ {
+ mvOsPrintf("Board: twsi exp change to out fail\n");
+ return;
+ }
+#endif
+	DB(mvOsPrintf("Board: twsi exp out val succeeded\n"));
+
+
+}
+/*******************************************************************************
+* mvBoardVoiceConnModeGet - return SLIC/DAA connection & interrupt modes
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*
+*******************************************************************************/
+
+MV_VOID mvBoardVoiceConnModeGet(MV_32* connMode, MV_32* irqMode)
+{
+ switch(mvBoardIdGet())
+ {
+ case RD_88F6281A_ID:
+ *connMode = DAISY_CHAIN_MODE;
+ *irqMode = INTERRUPT_TO_TDM;
+ break;
+ case DB_88F6281A_BP_ID:
+ *connMode = DUAL_CHIP_SELECT_MODE;
+ *irqMode = INTERRUPT_TO_TDM;
+ break;
+ case RD_88F6192A_ID:
+ *connMode = DUAL_CHIP_SELECT_MODE;
+ *irqMode = INTERRUPT_TO_TDM;
+ break;
+ case DB_88F6192A_BP_ID:
+ *connMode = DUAL_CHIP_SELECT_MODE;
+ *irqMode = INTERRUPT_TO_TDM;
+ break;
+ default:
+ *connMode = *irqMode = -1;
+ mvOsPrintf("mvBoardVoiceAssembleModeGet: TDM not supported(boardId=0x%x)\n",mvBoardIdGet());
+ }
+ return;
+
+}
+
+/*******************************************************************************
+* mvBoardMppModuleTypePrint - print module detect
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID mvBoardMppModuleTypePrint(MV_VOID)
+{
+
+ MV_BOARD_MPP_GROUP_CLASS devClass;
+ MV_BOARD_MPP_TYPE_CLASS mppGroupType;
+ MV_U32 devId;
+ MV_U32 maxMppGrp = 1;
+
+ devId = mvCtrlModelGet();
+
+ switch(devId){
+ case MV_6281_DEV_ID:
+ maxMppGrp = MV_6281_MPP_MAX_MODULE;
+ break;
+ case MV_6192_DEV_ID:
+ maxMppGrp = MV_6192_MPP_MAX_MODULE;
+ break;
+ case MV_6190_DEV_ID:
+ maxMppGrp = MV_6190_MPP_MAX_MODULE;
+ break;
+ case MV_6180_DEV_ID:
+ maxMppGrp = MV_6180_MPP_MAX_MODULE;
+ break;
+ }
+
+ for (devClass = 0; devClass < maxMppGrp; devClass++)
+ {
+ mppGroupType = mvBoardMppGroupTypeGet(devClass);
+
+ switch(mppGroupType)
+ {
+ case MV_BOARD_TDM:
+ if(devId != MV_6190_DEV_ID)
+ mvOsPrintf("Module %d is TDM\n", devClass);
+ break;
+ case MV_BOARD_AUDIO:
+ if(devId != MV_6190_DEV_ID)
+ mvOsPrintf("Module %d is AUDIO\n", devClass);
+ break;
+ case MV_BOARD_RGMII:
+ if(devId != MV_6190_DEV_ID)
+ mvOsPrintf("Module %d is RGMII\n", devClass);
+ break;
+ case MV_BOARD_GMII:
+ if(devId != MV_6190_DEV_ID)
+ mvOsPrintf("Module %d is GMII\n", devClass);
+ break;
+ case MV_BOARD_TS:
+ if(devId != MV_6190_DEV_ID)
+ mvOsPrintf("Module %d is TS\n", devClass);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/* Board devices API management */
+
+/*******************************************************************************
+* mvBoardGetDeviceNumber - Get number of device of some type on the board
+*
+* DESCRIPTION:
+*
+* INPUT:
+* devType - The device type ( Flash,RTC , etc .. )
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* If the device is found on the board, then the function returns the
+* number of such devices; otherwise the function returns 0.
+*
+*
+*******************************************************************************/
+MV_32 mvBoardGetDevicesNumber(MV_BOARD_DEV_CLASS devClass)
+{
+ MV_U32 foundIndex=0,devNum;
+ MV_U32 boardId= mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("mvBoardGetDeviceNumber:Board unknown.\n");
+ return 0xFFFFFFFF;
+
+ }
+
+ for (devNum = START_DEV_CS; devNum < BOARD_INFO(boardId)->numBoardDeviceIf; devNum++)
+ {
+ if (BOARD_INFO(boardId)->pDevCsInfo[devNum].devClass == devClass)
+ {
+ foundIndex++;
+ }
+ }
+
+ return foundIndex;
+
+}
+
+/*******************************************************************************
+* mvBoardGetDeviceBaseAddr - Get base address of a device existing on the board
+*
+* DESCRIPTION:
+*
+* INPUT:
+* devIndex - The device sequential number on the board
+* devType - The device type ( Flash,RTC , etc .. )
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* If the device is found on the board, then the function returns its
+* base address; otherwise the function returns 0xffffffff.
+*
+*
+*******************************************************************************/
+MV_32 mvBoardGetDeviceBaseAddr(MV_32 devNum, MV_BOARD_DEV_CLASS devClass)
+{
+ MV_DEV_CS_INFO* devEntry;
+ devEntry = boardGetDevEntry(devNum,devClass);
+ if (devEntry != NULL)
+ {
+ return mvCpuIfTargetWinBaseLowGet(DEV_TO_TARGET(devEntry->deviceCS));
+
+ }
+
+ return 0xFFFFFFFF;
+}
+
+/*******************************************************************************
+* mvBoardGetDeviceBusWidth - Get Bus width of a device existing on the board
+*
+* DESCRIPTION:
+*
+* INPUT:
+* devIndex - The device sequential number on the board
+* devType - The device type ( Flash,RTC , etc .. )
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* If the device is found on the board, then the function returns its
+* bus width; otherwise the function returns 0xffffffff.
+*
+*
+*******************************************************************************/
+MV_32 mvBoardGetDeviceBusWidth(MV_32 devNum, MV_BOARD_DEV_CLASS devClass)
+{
+ MV_DEV_CS_INFO* devEntry;
+
+ devEntry = boardGetDevEntry(devNum,devClass);
+ if (devEntry != NULL)
+ {
+ return 8;
+ }
+
+ return 0xFFFFFFFF;
+
+}
+
+/*******************************************************************************
+* mvBoardGetDeviceWidth - Get dev width of a device existing on the board
+*
+* DESCRIPTION:
+*
+* INPUT:
+* devIndex - The device sequential number on the board
+* devType - The device type ( Flash,RTC , etc .. )
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* If the device is found on the board, then the function returns its
+* device width; otherwise the function returns 0xffffffff.
+*
+*
+*******************************************************************************/
+MV_32 mvBoardGetDeviceWidth(MV_32 devNum, MV_BOARD_DEV_CLASS devClass)
+{
+ MV_DEV_CS_INFO* devEntry;
+ MV_U32 boardId= mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("Board unknown.\n");
+ return 0xFFFFFFFF;
+ }
+
+ devEntry = boardGetDevEntry(devNum,devClass);
+ if (devEntry != NULL)
+ return devEntry->devWidth;
+
+ return MV_ERROR;
+
+}
+
+/*******************************************************************************
+* mvBoardGetDeviceWinSize - Get the window size of a device existing on the board
+*
+* DESCRIPTION:
+*
+* INPUT:
+* devIndex - The device sequential number on the board
+* devType - The device type ( Flash,RTC , etc .. )
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* If the device is found on the board, then the function returns its
+* window size; otherwise the function returns 0xffffffff.
+*
+*
+*******************************************************************************/
+MV_32 mvBoardGetDeviceWinSize(MV_32 devNum, MV_BOARD_DEV_CLASS devClass)
+{
+ MV_DEV_CS_INFO* devEntry;
+ MV_U32 boardId = mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("Board unknown.\n");
+ return 0xFFFFFFFF;
+ }
+
+ devEntry = boardGetDevEntry(devNum,devClass);
+ if (devEntry != NULL)
+ {
+ return mvCpuIfTargetWinSizeGet(DEV_TO_TARGET(devEntry->deviceCS));
+ }
+
+ return 0xFFFFFFFF;
+}
+
+
+/*******************************************************************************
+* boardGetDevEntry - returns the entry pointer of a device on the board
+*
+* DESCRIPTION:
+*
+* INPUT:
+* devIndex - The device sequential number on the board
+* devType - The device type ( Flash,RTC , etc .. )
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* If the device is found on the board, then the function returns a pointer to
+* its device entry; otherwise the function returns NULL.
+*
+*
+*******************************************************************************/
+static MV_DEV_CS_INFO* boardGetDevEntry(MV_32 devNum, MV_BOARD_DEV_CLASS devClass)
+{
+ MV_U32 foundIndex=0,devIndex;
+ MV_U32 boardId= mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("boardGetDevEntry: Board unknown.\n");
+ return NULL;
+
+ }
+
+ for (devIndex = START_DEV_CS; devIndex < BOARD_INFO(boardId)->numBoardDeviceIf; devIndex++)
+ {
+ /* TBR */
+ /*if (BOARD_INFO(boardId)->pDevCsInfo[devIndex].deviceCS == MV_BOOTDEVICE_INDEX)
+ continue;*/
+
+ if (BOARD_INFO(boardId)->pDevCsInfo[devIndex].devClass == devClass)
+ {
+ if (foundIndex == devNum)
+ {
+ return &(BOARD_INFO(boardId)->pDevCsInfo[devIndex]);
+ }
+ foundIndex++;
+ }
+ }
+
+ /* device not found */
+ return NULL;
+}
+
+/* Get device CS number */
+
+MV_U32 boardGetDevCSNum(MV_32 devNum, MV_BOARD_DEV_CLASS devClass)
+{
+ MV_DEV_CS_INFO* devEntry;
+ MV_U32 boardId= mvBoardIdGet();
+
+ if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+ {
+ mvOsPrintf("Board unknown.\n");
+ return 0xFFFFFFFF;
+
+ }
+
+
+ devEntry = boardGetDevEntry(devNum,devClass);
+ if (devEntry != NULL)
+ return devEntry->deviceCS;
+
+ return 0xFFFFFFFF;
+
+}
+
+/*******************************************************************************
+* mvBoardRtcTwsiAddrTypeGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*
+*
+*******************************************************************************/
+MV_U8 mvBoardRtcTwsiAddrTypeGet()
+{
+ int i;
+ MV_U32 boardId= mvBoardIdGet();
+
+ for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+ if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_TWSI_RTC)
+ return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddrType;
+ return (MV_ERROR);
+}
+
+/*******************************************************************************
+* mvBoardRtcTwsiAddrGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*
+*
+*******************************************************************************/
+MV_U8 mvBoardRtcTwsiAddrGet()
+{
+ int i;
+ MV_U32 boardId= mvBoardIdGet();
+
+ for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+ if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_TWSI_RTC)
+ return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddr;
+ return (0xFF);
+}
+
+/*******************************************************************************
+* mvBoardA2DTwsiAddrTypeGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*
+*
+*******************************************************************************/
+MV_U8 mvBoardA2DTwsiAddrTypeGet()
+{
+ int i;
+ MV_U32 boardId= mvBoardIdGet();
+
+ for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+ if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_TWSI_AUDIO_DEC)
+ return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddrType;
+ return (MV_ERROR);
+}
+
+/*******************************************************************************
+* mvBoardA2DTwsiAddrGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*
+*
+*******************************************************************************/
+MV_U8 mvBoardA2DTwsiAddrGet()
+{
+ int i;
+ MV_U32 boardId= mvBoardIdGet();
+
+ for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+ if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_TWSI_AUDIO_DEC)
+ return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddr;
+ return (0xFF);
+}
+
+/*******************************************************************************
+* mvBoardTwsiExpAddrTypeGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*
+*
+*******************************************************************************/
+MV_U8 mvBoardTwsiExpAddrTypeGet(MV_U32 index)
+{
+ int i;
+ MV_U32 indexFound = 0;
+ MV_U32 boardId= mvBoardIdGet();
+
+ for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+ if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_DEV_TWSI_EXP)
+ {
+ if (indexFound == index)
+ return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddrType;
+ else
+ indexFound++;
+ }
+
+ return (MV_ERROR);
+}
+
+/*******************************************************************************
+* mvBoardTwsiExpAddrGet -
+*
+* DESCRIPTION:
+* Return the TWSI slave address of the index-th TWSI I/O expander.
+*
+* INPUT:
+* index - I/O expander index.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* TWSI slave address, or 0xFF if no such expander is defined.
+*
+*******************************************************************************/
+MV_U8 mvBoardTwsiExpAddrGet(MV_U32 index)
+{
+ int i;
+ MV_U32 indexFound = 0;
+ MV_U32 boardId= mvBoardIdGet();
+
+ for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+ if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_DEV_TWSI_EXP)
+ {
+ if (indexFound == index)
+ return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddr;
+ else
+ indexFound++;
+ }
+
+ return (0xFF);
+}
+
+
+/*******************************************************************************
+* mvBoardTwsiSatRAddrTypeGet -
+*
+* DESCRIPTION:
+* Return the TWSI address type of the index-th S@R (Sample at Reset) device.
+*
+* INPUT:
+* index - S@R device index.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* TWSI address type, or MV_ERROR if no such device is defined.
+*
+*******************************************************************************/
+MV_U8 mvBoardTwsiSatRAddrTypeGet(MV_U32 index)
+{
+ int i;
+ MV_U32 indexFound = 0;
+ MV_U32 boardId= mvBoardIdGet();
+
+ for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+ if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_DEV_TWSI_SATR)
+ {
+ if (indexFound == index)
+ return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddrType;
+ else
+ indexFound++;
+ }
+
+ return (MV_ERROR);
+}
+
+/*******************************************************************************
+* mvBoardTwsiSatRAddrGet -
+*
+* DESCRIPTION:
+* Return the TWSI slave address of the index-th S@R (Sample at Reset) device.
+*
+* INPUT:
+* index - S@R device index.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* TWSI slave address, or 0xFF if no such device is defined.
+*
+*******************************************************************************/
+MV_U8 mvBoardTwsiSatRAddrGet(MV_U32 index)
+{
+ int i;
+ MV_U32 indexFound = 0;
+ MV_U32 boardId= mvBoardIdGet();
+
+ for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+ if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_DEV_TWSI_SATR)
+ {
+ if (indexFound == index)
+ return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddr;
+ else
+ indexFound++;
+ }
+
+ return (0xFF);
+}
+
+/*******************************************************************************
+* mvBoardNandWidthGet -
+*
+* DESCRIPTION: Get the width of the first NAND device in bytes.
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN: 1, 2, 4 or MV_ERROR
+*
+*
+*******************************************************************************/
+MV_32 mvBoardNandWidthGet(void)
+{
+ MV_U32 devNum;
+ MV_U32 devWidth;
+ MV_U32 boardId= mvBoardIdGet();
+
+ for (devNum = START_DEV_CS; devNum < BOARD_INFO(boardId)->numBoardDeviceIf; devNum++)
+ {
+ devWidth = mvBoardGetDeviceWidth(devNum, BOARD_DEV_NAND_FLASH);
+ if (devWidth != MV_ERROR)
+ return (devWidth / 8);
+ }
+
+ /* NAND wasn't found */
+ return MV_ERROR;
+}
+
+MV_U32 gBoardId = -1;
+
+/*******************************************************************************
+* mvBoardIdGet - Get Board model
+*
+* DESCRIPTION:
+* This function returns board ID.
+* Board ID is 32bit word constructed of board model (16bit) and
+* board revision (16bit) in the following way: 0xMMMMRRRR.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32bit board ID number, '-1' if board is undefined.
+*
+*******************************************************************************/
+MV_U32 mvBoardIdGet(MV_VOID)
+{
+ MV_U32 tmpBoardId = -1;
+
+ if(gBoardId == -1)
+ {
+ #if defined(DB_88F6281A)
+ tmpBoardId = DB_88F6281A_BP_ID;
+ #elif defined(RD_88F6281A)
+ tmpBoardId = RD_88F6281A_ID;
+ #elif defined(DB_88F6192A)
+ tmpBoardId = DB_88F6192A_BP_ID;
+ #elif defined(DB_88F6190A)
+ tmpBoardId = DB_88F6190A_BP_ID;
+ #elif defined(RD_88F6192A)
+ tmpBoardId = RD_88F6192A_ID;
+ #elif defined(RD_88F6190A)
+ tmpBoardId = RD_88F6190A_ID;
+ #elif defined(DB_88F6180A)
+ tmpBoardId = DB_88F6180A_BP_ID;
+ #elif defined(RD_88F6281A_PCAC)
+ tmpBoardId = RD_88F6281A_PCAC_ID;
+ #elif defined(RD_88F6281A_SHEEVA_PLUG)
+ tmpBoardId = SHEEVA_PLUG_ID;
+ #elif defined(DB_CUSTOMER)
+ tmpBoardId = DB_CUSTOMER_ID;
+ #endif
+ gBoardId = tmpBoardId;
+ }
+
+ return gBoardId;
+}
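+
+/* Illustrative sketch (not part of the original Marvell sources): it shows how
+ * the 0xMMMMRRRR layout documented above splits into model and revision; the
+ * mvBoardModelGet()/mvBoardRevGet() helpers declared in mvBoardEnvLib.h are
+ * assumed to expose exactly these two halves. */
+#if 0
+static MV_U16 exampleBoardModel(MV_VOID)
+{
+	return (MV_U16)(mvBoardIdGet() >> 16);		/* 0xMMMM - board model */
+}
+
+static MV_U16 exampleBoardRev(MV_VOID)
+{
+	return (MV_U16)(mvBoardIdGet() & 0xFFFF);	/* 0xRRRR - board revision */
+}
+#endif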
+
+
+/*******************************************************************************
+* mvBoarModuleTypeGet - Get the type of the module connected to an MPP group
+*
+* DESCRIPTION:
+* Read the module ID from the TWSI I/O expander associated with the given
+* MPP group.
+*
+* INPUT:
+* devClass - MV_BOARD_MPP_GROUP_CLASS enum
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* module ID - MV_BOARD_MODULE_ID_CLASS enum, or MV_ERROR if the TWSI read fails
+*
+*******************************************************************************/
+MV_BOARD_MODULE_ID_CLASS mvBoarModuleTypeGet(MV_BOARD_MPP_GROUP_CLASS devClass)
+{
+ MV_TWSI_SLAVE twsiSlave;
+ MV_TWSI_ADDR slave;
+ MV_U8 data;
+
+ /* TWSI init */
+ slave.type = ADDR7_BIT;
+ slave.address = 0;
+ mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+ /* Read MPP module ID */
+ DB(mvOsPrintf("Board: Read MPP module ID\n"));
+ twsiSlave.slaveAddr.address = mvBoardTwsiExpAddrGet(devClass);
+ twsiSlave.slaveAddr.type = mvBoardTwsiExpAddrTypeGet(devClass);
+ twsiSlave.validOffset = MV_TRUE;
+	/* Offset is the first byte sent after the address; it indicates the register
+	   number to be read in the next operation */
+ twsiSlave.offset = 0;
+ twsiSlave.moreThen256 = MV_FALSE;
+
+
+
+ if( MV_OK != mvTwsiRead (0, &twsiSlave, &data, 1) )
+ {
+ DB(mvOsPrintf("Board: Read MPP module ID fail\n"));
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Board: Read MPP module ID succeded\n"));
+
+ return data;
+}
+
+/*******************************************************************************
+* mvBoarTwsiSatRGet -
+*
+* DESCRIPTION:
+* Read one register of a TWSI S@R (Sample at Reset) device.
+* INPUT:
+* device num - one of three devices
+* reg num - 0 or 1
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* reg value
+*
+*******************************************************************************/
+MV_U8 mvBoarTwsiSatRGet(MV_U8 devNum, MV_U8 regNum)
+{
+ MV_TWSI_SLAVE twsiSlave;
+ MV_TWSI_ADDR slave;
+ MV_U8 data;
+
+ /* TWSI init */
+ slave.type = ADDR7_BIT;
+ slave.address = 0;
+ mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+	/* Read S@R register */
+	DB(mvOsPrintf("Board: S@R device read\n"));
+ twsiSlave.slaveAddr.address = mvBoardTwsiSatRAddrGet(devNum);
+ twsiSlave.slaveAddr.type = mvBoardTwsiSatRAddrTypeGet(devNum);
+ twsiSlave.validOffset = MV_TRUE;
+ /* Use offset as command */
+ twsiSlave.offset = regNum;
+ twsiSlave.moreThen256 = MV_FALSE;
+
+ if( MV_OK != mvTwsiRead (0, &twsiSlave, &data, 1) )
+ {
+ DB(mvOsPrintf("Board: Read S@R fail\n"));
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Board: Read S@R succeded\n"));
+
+ return data;
+}
+
+/*******************************************************************************
+* mvBoarTwsiSatRSet -
+*
+* DESCRIPTION:
+* Write one register of a TWSI S@R (Sample at Reset) device.
+* INPUT:
+* devNum - one of three devices
+* regNum - 0 or 1
+* regVal - value
+*
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_OK on success, MV_ERROR otherwise
+*
+*******************************************************************************/
+MV_STATUS mvBoarTwsiSatRSet(MV_U8 devNum, MV_U8 regNum, MV_U8 regVal)
+{
+ MV_TWSI_SLAVE twsiSlave;
+ MV_TWSI_ADDR slave;
+
+ /* TWSI init */
+ slave.type = ADDR7_BIT;
+ slave.address = 0;
+ mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+	/* Write S@R register */
+ twsiSlave.slaveAddr.address = mvBoardTwsiSatRAddrGet(devNum);
+ twsiSlave.slaveAddr.type = mvBoardTwsiSatRAddrTypeGet(devNum);
+ twsiSlave.validOffset = MV_TRUE;
+ DB(mvOsPrintf("Board: Write S@R device addr %x, type %x, data %x\n", twsiSlave.slaveAddr.address,\
+ twsiSlave.slaveAddr.type, regVal));
+ /* Use offset as command */
+ twsiSlave.offset = regNum;
+ twsiSlave.moreThen256 = MV_FALSE;
+ if( MV_OK != mvTwsiWrite (0, &twsiSlave, &regVal, 1) )
+ {
+ DB(mvOsPrintf("Board: Write S@R fail\n"));
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Board: Write S@R succeded\n"));
+
+ return MV_OK;
+}
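+
+/* Illustrative sketch (not part of the original Marvell sources): a typical
+ * read-modify-write of a S@R register combines the two helpers above; the
+ * device/register numbers and the bit mask are placeholders chosen by the
+ * caller. */
+#if 0
+static MV_STATUS exampleSatRUpdate(MV_U8 devNum, MV_U8 regNum, MV_U8 mask, MV_U8 bits)
+{
+	MV_U8 val = mvBoarTwsiSatRGet(devNum, regNum);	/* read current value   */
+	val = (val & ~mask) | (bits & mask);		/* update selected bits */
+	return mvBoarTwsiSatRSet(devNum, regNum, val);	/* write it back        */
+}
+#endif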
+
+/*******************************************************************************
+* mvBoardSlicGpioPinGet -
+*
+* DESCRIPTION:
+* Return the GPIO pin connected to the given SLIC device.
+*
+* INPUT:
+* slicNum - SLIC device number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* GPIO pin number, or MV_ERROR if the board defines no SLIC GPIO pin.
+*
+*******************************************************************************/
+MV_32 mvBoardSlicGpioPinGet(MV_U32 slicNum)
+{
+ MV_U32 boardId;
+ boardId = mvBoardIdGet();
+
+ switch (boardId)
+ {
+ case DB_88F6281A_BP_ID:
+ case RD_88F6281A_ID:
+ default:
+ return MV_ERROR;
+ break;
+
+ }
+}
+
+/*******************************************************************************
+* mvBoardFanPowerControl - Turn on/off the fan power control on the RD-6281A
+*
+* DESCRIPTION:
+* Turn the fan power on or off via the board TWSI I/O expander.
+*
+* INPUT:
+* mode - MV_TRUE = on ; MV_FALSE = off
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_STATUS - MV_OK on success, MV_ERROR otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvBoardFanPowerControl(MV_BOOL mode)
+{
+
+ MV_U8 val = 1, twsiVal;
+ MV_TWSI_SLAVE twsiSlave;
+ MV_TWSI_ADDR slave;
+
+ if(mvBoardIdGet() != RD_88F6281A_ID)
+ return MV_ERROR;
+
+ /* TWSI init */
+ slave.type = ADDR7_BIT;
+ slave.address = 0;
+ mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+	/* Update the TWSI I/O expander output register */
+ DB(mvOsPrintf("Board: twsi exp set\n"));
+ twsiSlave.slaveAddr.address = mvBoardTwsiExpAddrGet(1);
+ twsiSlave.slaveAddr.type = ADDR7_BIT;
+ twsiSlave.validOffset = MV_TRUE;
+	/* Offset is the first byte sent after the address; it indicates the register
+	   number to be read in the next operation */
+ twsiSlave.offset = 3;
+ twsiSlave.moreThen256 = MV_FALSE;
+ if(mode == MV_TRUE)
+ val = 0x1;
+ else
+ val = 0;
+ mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+ val = (twsiVal & 0xfe) | val;
+
+ if( MV_OK != mvTwsiWrite (0, &twsiSlave, &val, 1) )
+ {
+ DB(mvOsPrintf("Board: twsi exp out val fail\n"));
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Board: twsi exp out val succeded\n"));
+
+ /* Change twsi exp to output */
+ twsiSlave.offset = 7;
+ mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+ val = (twsiVal & 0xfe);
+ if( MV_OK != mvTwsiWrite (0, &twsiSlave, &val, 1) )
+ {
+ DB(mvOsPrintf("Board: twsi exp change to out fail\n"));
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Board: twsi exp change to out succeded\n"));
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvBoardHDDPowerControl - Turn on/off the HDD power control on the RD-6281A
+*
+* DESCRIPTION:
+* Turn the HDD power on or off via the board TWSI I/O expander.
+*
+* INPUT:
+* mode - MV_TRUE = on ; MV_FALSE = off
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_STATUS - MV_OK on success, MV_ERROR otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvBoardHDDPowerControl(MV_BOOL mode)
+{
+
+ MV_U8 val = 1, twsiVal;
+ MV_TWSI_SLAVE twsiSlave;
+ MV_TWSI_ADDR slave;
+
+ if(mvBoardIdGet() != RD_88F6281A_ID)
+ return MV_ERROR;
+
+ /* TWSI init */
+ slave.type = ADDR7_BIT;
+ slave.address = 0;
+ mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+	/* Update the TWSI I/O expander output register */
+ DB(mvOsPrintf("Board: twsi exp set\n"));
+ twsiSlave.slaveAddr.address = mvBoardTwsiExpAddrGet(1);
+ twsiSlave.slaveAddr.type = ADDR7_BIT;
+ twsiSlave.validOffset = MV_TRUE;
+	/* Offset is the first byte sent after the address; it indicates the register
+	   number to be read in the next operation */
+ twsiSlave.offset = 3;
+ twsiSlave.moreThen256 = MV_FALSE;
+ if(mode == MV_TRUE)
+ val = 0x2;
+ else
+ val = 0;
+ mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+ val = (twsiVal & 0xfd) | val;
+ if( MV_OK != mvTwsiWrite (0, &twsiSlave, &val, 1) )
+ {
+ DB(mvOsPrintf("Board: twsi exp out val fail\n"));
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Board: twsi exp out val succeded\n"));
+
+ /* Change twsi exp to output */
+ twsiSlave.offset = 7;
+ mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+ val = (twsiVal & 0xfd);
+ if( MV_OK != mvTwsiWrite (0, &twsiSlave, &val, 1) )
+ {
+ DB(mvOsPrintf("Board: twsi exp change to out fail\n"));
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Board: twsi exp change to out succeded\n"));
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvBoardSDioWPControl - Turn on/off the SDIO WP on the RD-6281A
+*
+* DESCRIPTION:
+* Turn the SDIO write-protect signal on or off via the board TWSI I/O expander.
+*
+* INPUT:
+* mode - MV_TRUE = on ; MV_FALSE = off
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_STATUS - MV_OK on success, MV_ERROR otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvBoardSDioWPControl(MV_BOOL mode)
+{
+
+ MV_U8 val = 1, twsiVal;
+ MV_TWSI_SLAVE twsiSlave;
+ MV_TWSI_ADDR slave;
+
+ if(mvBoardIdGet() != RD_88F6281A_ID)
+ return MV_ERROR;
+
+ /* TWSI init */
+ slave.type = ADDR7_BIT;
+ slave.address = 0;
+ mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+	/* Update the TWSI I/O expander output register */
+ DB(mvOsPrintf("Board: twsi exp set\n"));
+ twsiSlave.slaveAddr.address = mvBoardTwsiExpAddrGet(0);
+ twsiSlave.slaveAddr.type = ADDR7_BIT;
+ twsiSlave.validOffset = MV_TRUE;
+	/* Offset is the first byte sent after the address; it indicates the register
+	   number to be read in the next operation */
+ twsiSlave.offset = 3;
+ twsiSlave.moreThen256 = MV_FALSE;
+ if(mode == MV_TRUE)
+ val = 0x10;
+ else
+ val = 0;
+ mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+ val = (twsiVal & 0xef) | val;
+ if( MV_OK != mvTwsiWrite (0, &twsiSlave, &val, 1) )
+ {
+ DB(mvOsPrintf("Board: twsi exp out val fail\n"));
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Board: twsi exp out val succeded\n"));
+
+ /* Change twsi exp to output */
+ twsiSlave.offset = 7;
+ mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+ val = (twsiVal & 0xef);
+ if( MV_OK != mvTwsiWrite (0, &twsiSlave, &val, 1) )
+ {
+ DB(mvOsPrintf("Board: twsi exp change to out fail\n"));
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Board: twsi exp change to out succeded\n"));
+ return MV_OK;
+}
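+
+/* Illustrative sketch (not part of the original Marvell sources): the three
+ * RD-88F6281A helpers above share the same pattern - read-modify-write the I/O
+ * expander output register (offset 3) and then clear the matching bit in its
+ * direction register (offset 7) so the pin is driven as an output. A typical
+ * board bring-up sequence might look like this: */
+#if 0
+static MV_VOID exampleRd6281APowerUp(MV_VOID)
+{
+	if (mvBoardFanPowerControl(MV_TRUE) != MV_OK)
+		mvOsPrintf("fan power enable failed\n");
+	if (mvBoardHDDPowerControl(MV_TRUE) != MV_OK)
+		mvOsPrintf("HDD power enable failed\n");
+}
+#endif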
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.h
new file mode 100644
index 000000000..dead63369
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.h
@@ -0,0 +1,376 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __INCmvBoardEnvLibh
+#define __INCmvBoardEnvLibh
+
+/* defines */
+/* The below constant macros defines the board I2C EEPROM data offsets */
+
+
+
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "mvSysHwConfig.h"
+#include "boardEnv/mvBoardEnvSpec.h"
+
+
+/* DUART stuff for Tclk detection only */
+#define DUART_BAUD_RATE 115200
+#define MAX_CLOCK_MARGINE	5000000	/* Maximum detected clock margin */
+
+/* Voice devices assembly modes */
+#define DAISY_CHAIN_MODE 1
+#define DUAL_CHIP_SELECT_MODE 0
+#define INTERRUPT_TO_MPP 1
+#define INTERRUPT_TO_TDM 0
+
+
+#define BOARD_ETH_PORT_NUM MV_ETH_MAX_PORTS
+#define BOARD_ETH_SWITCH_PORT_NUM 5
+
+#define MV_BOARD_MAX_USB_IF 1
+#define MV_BOARD_MAX_MPP 7
+#define MV_BOARD_NAME_LEN 0x20
+
+typedef struct _boardData
+{
+ MV_U32 magic;
+ MV_U16 boardId;
+ MV_U8 boardVer;
+ MV_U8 boardRev;
+ MV_U32 reserved1;
+ MV_U32 reserved2;
+
+}BOARD_DATA;
+
+typedef enum _devBoardMppGroupClass
+{
+ MV_BOARD_MPP_GROUP_1,
+ MV_BOARD_MPP_GROUP_2,
+ MV_BOARD_MAX_MPP_GROUP
+}MV_BOARD_MPP_GROUP_CLASS;
+
+typedef enum _devBoardMppTypeClass
+{
+ MV_BOARD_AUTO,
+ MV_BOARD_TDM,
+ MV_BOARD_AUDIO,
+ MV_BOARD_RGMII,
+ MV_BOARD_GMII,
+ MV_BOARD_TS,
+ MV_BOARD_MII,
+ MV_BOARD_OTHER
+}MV_BOARD_MPP_TYPE_CLASS;
+
+typedef enum _devBoardModuleIdClass
+{
+ MV_BOARD_MODULE_TDM_ID = 1,
+ MV_BOARD_MODULE_AUDIO_ID,
+ MV_BOARD_MODULE_RGMII_ID,
+ MV_BOARD_MODULE_GMII_ID,
+ MV_BOARD_MODULE_TS_ID,
+ MV_BOARD_MODULE_MII_ID,
+ MV_BOARD_MODULE_TDM_5CHAN_ID,
+ MV_BOARD_MODULE_OTHER_ID
+}MV_BOARD_MODULE_ID_CLASS;
+
+typedef struct _boardMppTypeInfo
+{
+ MV_BOARD_MPP_TYPE_CLASS boardMppGroup1;
+ MV_BOARD_MPP_TYPE_CLASS boardMppGroup2;
+
+}MV_BOARD_MPP_TYPE_INFO;
+
+
+typedef enum _devBoardClass
+{
+ BOARD_DEV_NOR_FLASH,
+ BOARD_DEV_NAND_FLASH,
+ BOARD_DEV_SEVEN_SEG,
+ BOARD_DEV_FPGA,
+ BOARD_DEV_SRAM,
+ BOARD_DEV_SPI_FLASH,
+ BOARD_DEV_OTHER,
+}MV_BOARD_DEV_CLASS;
+
+typedef enum _devTwsiBoardClass
+{
+ BOARD_TWSI_RTC,
+ BOARD_DEV_TWSI_EXP,
+ BOARD_DEV_TWSI_SATR,
+ BOARD_TWSI_AUDIO_DEC,
+ BOARD_TWSI_OTHER
+}MV_BOARD_TWSI_CLASS;
+
+typedef enum _devGppBoardClass
+{
+ BOARD_GPP_RTC,
+ BOARD_GPP_MV_SWITCH,
+ BOARD_GPP_USB_VBUS,
+ BOARD_GPP_USB_VBUS_EN,
+ BOARD_GPP_USB_OC,
+ BOARD_GPP_USB_HOST_DEVICE,
+ BOARD_GPP_REF_CLCK,
+ BOARD_GPP_VOIP_SLIC,
+ BOARD_GPP_LIFELINE,
+ BOARD_GPP_BUTTON,
+ BOARD_GPP_TS_BUTTON_C,
+ BOARD_GPP_TS_BUTTON_U,
+ BOARD_GPP_TS_BUTTON_D,
+ BOARD_GPP_TS_BUTTON_L,
+ BOARD_GPP_TS_BUTTON_R,
+ BOARD_GPP_POWER_BUTTON,
+ BOARD_GPP_RESTOR_BUTTON,
+ BOARD_GPP_WPS_BUTTON,
+ BOARD_GPP_HDD0_POWER,
+ BOARD_GPP_HDD1_POWER,
+ BOARD_GPP_FAN_POWER,
+ BOARD_GPP_RESET,
+ BOARD_GPP_POWER_ON_LED,
+ BOARD_GPP_HDD_POWER,
+ BOARD_GPP_SDIO_POWER,
+ BOARD_GPP_SDIO_DETECT,
+ BOARD_GPP_SDIO_WP,
+ BOARD_GPP_SWITCH_PHY_INT,
+ BOARD_GPP_TSU_DIRCTION,
+ BOARD_GPP_OTHER
+}MV_BOARD_GPP_CLASS;
+
+
+typedef struct _devCsInfo
+{
+ MV_U8 deviceCS;
+ MV_U32 params;
+ MV_U32 devClass; /* MV_BOARD_DEV_CLASS */
+ MV_U8 devWidth;
+
+}MV_DEV_CS_INFO;
+
+
+#define MV_BOARD_PHY_FORCE_10MB 0x0
+#define MV_BOARD_PHY_FORCE_100MB 0x1
+#define MV_BOARD_PHY_FORCE_1000MB 0x2
+#define MV_BOARD_PHY_SPEED_AUTO 0x3
+
+typedef struct _boardSwitchInfo
+{
+ MV_32 linkStatusIrq;
+ MV_32 qdPort[BOARD_ETH_SWITCH_PORT_NUM];
+ MV_32 qdCpuPort;
+ MV_32 smiScanMode; /* 1 for SMI_MANUAL_MODE, 0 otherwise */
+ MV_32 switchOnPort;
+
+}MV_BOARD_SWITCH_INFO;
+
+typedef struct _boardLedInfo
+{
+ MV_U8 activeLedsNumber;
+ MV_U8 ledsPolarity; /* '0' or '1' to turn on led */
+ MV_U8* gppPinNum; /* Pointer to GPP values */
+
+}MV_BOARD_LED_INFO;
+
+typedef struct _boardGppInfo
+{
+ MV_BOARD_GPP_CLASS devClass;
+ MV_U8 gppPinNum;
+
+}MV_BOARD_GPP_INFO;
+
+
+typedef struct _boardTwsiInfo
+{
+ MV_BOARD_TWSI_CLASS devClass;
+ MV_U8 twsiDevAddr;
+ MV_U8 twsiDevAddrType;
+
+}MV_BOARD_TWSI_INFO;
+
+
+typedef enum _boardMacSpeed
+{
+ BOARD_MAC_SPEED_10M,
+ BOARD_MAC_SPEED_100M,
+ BOARD_MAC_SPEED_1000M,
+ BOARD_MAC_SPEED_AUTO,
+
+}MV_BOARD_MAC_SPEED;
+
+typedef struct _boardMacInfo
+{
+ MV_BOARD_MAC_SPEED boardMacSpeed;
+ MV_U8 boardEthSmiAddr;
+
+}MV_BOARD_MAC_INFO;
+
+typedef struct _boardMppInfo
+{
+ MV_U32 mppGroup[MV_BOARD_MAX_MPP];
+
+}MV_BOARD_MPP_INFO;
+
+typedef struct _boardInfo
+{
+ char boardName[MV_BOARD_NAME_LEN];
+ MV_U8 numBoardMppTypeValue;
+ MV_BOARD_MPP_TYPE_INFO* pBoardMppTypeValue;
+ MV_U8 numBoardMppConfigValue;
+ MV_BOARD_MPP_INFO* pBoardMppConfigValue;
+ MV_U32 intsGppMaskLow;
+ MV_U32 intsGppMaskHigh;
+ MV_U8 numBoardDeviceIf;
+ MV_DEV_CS_INFO* pDevCsInfo;
+ MV_U8 numBoardTwsiDev;
+ MV_BOARD_TWSI_INFO* pBoardTwsiDev;
+ MV_U8 numBoardMacInfo;
+ MV_BOARD_MAC_INFO* pBoardMacInfo;
+ MV_U8 numBoardGppInfo;
+ MV_BOARD_GPP_INFO* pBoardGppInfo;
+ MV_U8 activeLedsNumber;
+ MV_U8* pLedGppPin;
+ MV_U8 ledsPolarity; /* '0' or '1' to turn on led */
+ /* GPP values */
+ MV_U32 gppOutEnValLow;
+ MV_U32 gppOutEnValHigh;
+ MV_U32 gppOutValLow;
+ MV_U32 gppOutValHigh;
+ MV_U32 gppPolarityValLow;
+ MV_U32 gppPolarityValHigh;
+
+ /* Switch Configuration */
+ MV_BOARD_SWITCH_INFO* pSwitchInfo;
+}MV_BOARD_INFO;
+
+
+
+MV_VOID mvBoardEnvInit(MV_VOID);
+MV_U32 mvBoardIdGet(MV_VOID);
+MV_U16 mvBoardModelGet(MV_VOID);
+MV_U16 mvBoardRevGet(MV_VOID);
+MV_STATUS mvBoardNameGet(char *pNameBuff);
+MV_32 mvBoardPhyAddrGet(MV_U32 ethPortNum);
+MV_BOARD_MAC_SPEED mvBoardMacSpeedGet(MV_U32 ethPortNum);
+MV_32 mvBoardLinkStatusIrqGet(MV_U32 ethPortNum);
+MV_32 mvBoardSwitchPortGet(MV_U32 ethPortNum, MV_U8 boardPortNum);
+MV_32 mvBoardSwitchCpuPortGet(MV_U32 ethPortNum);
+MV_32 mvBoardIsSwitchConnected(MV_U32 ethPortNum);
+MV_32 mvBoardSmiScanModeGet(MV_U32 ethPortNum);
+MV_BOOL mvBoardIsPortInSgmii(MV_U32 ethPortNum);
+MV_BOOL mvBoardIsPortInGmii(MV_VOID);
+MV_U32 mvBoardTclkGet(MV_VOID);
+MV_U32 mvBoardSysClkGet(MV_VOID);
+MV_U32 mvBoardDebugLedNumGet(MV_U32 boardId);
+MV_VOID mvBoardDebugLed(MV_U32 hexNum);
+MV_32 mvBoardMppGet(MV_U32 mppGroupNum);
+
+MV_U8 mvBoardRtcTwsiAddrTypeGet(MV_VOID);
+MV_U8 mvBoardRtcTwsiAddrGet(MV_VOID);
+
+MV_U8 mvBoardA2DTwsiAddrTypeGet(MV_VOID);
+MV_U8 mvBoardA2DTwsiAddrGet(MV_VOID);
+
+MV_U8 mvBoardTwsiExpAddrGet(MV_U32 index);
+MV_U8 mvBoardTwsiSatRAddrTypeGet(MV_U32 index);
+MV_U8 mvBoardTwsiSatRAddrGet(MV_U32 index);
+MV_U8 mvBoardTwsiExpAddrTypeGet(MV_U32 index);
+MV_BOARD_MODULE_ID_CLASS mvBoarModuleTypeGet(MV_BOARD_MPP_GROUP_CLASS devClass);
+MV_BOARD_MPP_TYPE_CLASS mvBoardMppGroupTypeGet(MV_BOARD_MPP_GROUP_CLASS mppGroupClass);
+MV_VOID mvBoardMppGroupTypeSet(MV_BOARD_MPP_GROUP_CLASS mppGroupClass,
+ MV_BOARD_MPP_TYPE_CLASS mppGroupType);
+MV_VOID mvBoardMppGroupIdUpdate(MV_VOID);
+MV_VOID mvBoardMppMuxSet(MV_VOID);
+MV_VOID mvBoardTdmMppSet(MV_32 chType);
+MV_VOID mvBoardVoiceConnModeGet(MV_32* connMode, MV_32* irqMode);
+
+MV_VOID mvBoardMppModuleTypePrint(MV_VOID);
+MV_VOID mvBoardReset(MV_VOID);
+MV_U8 mvBoarTwsiSatRGet(MV_U8 devNum, MV_U8 regNum);
+MV_STATUS mvBoarTwsiSatRSet(MV_U8 devNum, MV_U8 regNum, MV_U8 regVal);
+MV_BOOL mvBoardSpecInitGet(MV_U32* regOff, MV_U32* data);
+/* Board devices API management */
+MV_32 mvBoardGetDevicesNumber(MV_BOARD_DEV_CLASS devClass);
+MV_32 mvBoardGetDeviceBaseAddr(MV_32 devNum, MV_BOARD_DEV_CLASS devClass);
+MV_32 mvBoardGetDeviceBusWidth(MV_32 devNum, MV_BOARD_DEV_CLASS devClass);
+MV_32 mvBoardGetDeviceWidth(MV_32 devNum, MV_BOARD_DEV_CLASS devClass);
+MV_32 mvBoardGetDeviceWinSize(MV_32 devNum, MV_BOARD_DEV_CLASS devClass);
+MV_U32 boardGetDevCSNum(MV_32 devNum, MV_BOARD_DEV_CLASS devClass);
+
+/* Gpio Pin Connections API */
+MV_32 mvBoardUSBVbusGpioPinGet(int devId);
+MV_32 mvBoardUSBVbusEnGpioPinGet(int devId);
+MV_U32 mvBoardPexBridgeIntPinGet(MV_U32 devNum, MV_U32 intPin);
+
+MV_32 mvBoardResetGpioPinGet(MV_VOID);
+MV_32 mvBoardRTCGpioPinGet(MV_VOID);
+MV_32 mvBoardGpioIntMaskLowGet(MV_VOID);
+MV_32 mvBoardGpioIntMaskHighGet(MV_VOID);
+MV_32 mvBoardSlicGpioPinGet(MV_U32 slicNum);
+
+MV_32 mvBoardSDIOGpioPinGet(MV_VOID);
+MV_STATUS mvBoardSDioWPControl(MV_BOOL mode);
+MV_32 mvBoarGpioPinNumGet(MV_BOARD_GPP_CLASS class, MV_U32 index);
+
+MV_32 mvBoardNandWidthGet(void);
+MV_STATUS mvBoardFanPowerControl(MV_BOOL mode);
+MV_STATUS mvBoardHDDPowerControl(MV_BOOL mode);
+#endif /* __INCmvBoardEnvLibh */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.c
new file mode 100644
index 000000000..e256c4f70
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.c
@@ -0,0 +1,848 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "mvCommon.h"
+#include "mvBoardEnvLib.h"
+#include "mvBoardEnvSpec.h"
+#include "twsi/mvTwsi.h"
+
+#define DB_88F6281A_BOARD_PCI_IF_NUM 0x0
+#define DB_88F6281A_BOARD_TWSI_DEF_NUM 0x7
+#define DB_88F6281A_BOARD_MAC_INFO_NUM 0x2
+#define DB_88F6281A_BOARD_GPP_INFO_NUM 0x3
+#define DB_88F6281A_BOARD_MPP_CONFIG_NUM 0x1
+#define DB_88F6281A_BOARD_MPP_GROUP_TYPE_NUM 0x1
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+ #define DB_88F6281A_BOARD_DEVICE_CONFIG_NUM 0x1
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+ #define DB_88F6281A_BOARD_DEVICE_CONFIG_NUM 0x2
+#else
+ #define DB_88F6281A_BOARD_DEVICE_CONFIG_NUM 0x1
+#endif
+#define DB_88F6281A_BOARD_DEBUG_LED_NUM 0x0
+
+
+MV_BOARD_TWSI_INFO db88f6281AInfoBoardTwsiDev[] =
+ /* {{MV_BOARD_DEV_CLASS devClass, MV_U8 twsiDevAddr, MV_U8 twsiDevAddrType}} */
+ {
+ {BOARD_DEV_TWSI_EXP, 0x20, ADDR7_BIT},
+ {BOARD_DEV_TWSI_EXP, 0x21, ADDR7_BIT},
+ {BOARD_DEV_TWSI_EXP, 0x27, ADDR7_BIT},
+ {BOARD_DEV_TWSI_SATR, 0x4C, ADDR7_BIT},
+ {BOARD_DEV_TWSI_SATR, 0x4D, ADDR7_BIT},
+ {BOARD_DEV_TWSI_SATR, 0x4E, ADDR7_BIT},
+ {BOARD_TWSI_AUDIO_DEC, 0x4A, ADDR7_BIT}
+ };
+
+MV_BOARD_MAC_INFO db88f6281AInfoBoardMacInfo[] =
+ /* {{MV_BOARD_MAC_SPEED boardMacSpeed, MV_U8 boardEthSmiAddr}} */
+ {
+ {BOARD_MAC_SPEED_AUTO, 0x8},
+ {BOARD_MAC_SPEED_AUTO, 0x9}
+ };
+
+MV_BOARD_MPP_TYPE_INFO db88f6281AInfoBoardMppTypeInfo[] =
+ /* {{MV_BOARD_MPP_TYPE_CLASS boardMppGroup1,
+ MV_BOARD_MPP_TYPE_CLASS boardMppGroup2}} */
+ {{MV_BOARD_AUTO, MV_BOARD_AUTO}
+ };
+
+MV_BOARD_GPP_INFO db88f6281AInfoBoardGppInfo[] =
+ /* {{MV_BOARD_GPP_CLASS devClass, MV_U8 gppPinNum}} */
+ {
+ {BOARD_GPP_TSU_DIRCTION, 33}
+	/* Muxed with TDM/Audio module via I/O expander
+ {BOARD_GPP_SDIO_DETECT, 38},
+ {BOARD_GPP_USB_VBUS, 49}*/
+ };
+
+MV_DEV_CS_INFO db88f6281AInfoBoardDeCsInfo[] =
+ /*{deviceCS, params, devType, devWidth}*/
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+ {{0, N_A, BOARD_DEV_NAND_FLASH, 8}}; /* NAND DEV */
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+ {
+ {0, N_A, BOARD_DEV_NAND_FLASH, 8}, /* NAND DEV */
+ {1, N_A, BOARD_DEV_SPI_FLASH, 8}, /* SPI DEV */
+ };
+#else
+ {{1, N_A, BOARD_DEV_SPI_FLASH, 8}}; /* SPI DEV */
+#endif
+
+MV_BOARD_MPP_INFO db88f6281AInfoBoardMppConfigValue[] =
+ {{{
+ DB_88F6281A_MPP0_7,
+ DB_88F6281A_MPP8_15,
+ DB_88F6281A_MPP16_23,
+ DB_88F6281A_MPP24_31,
+ DB_88F6281A_MPP32_39,
+ DB_88F6281A_MPP40_47,
+ DB_88F6281A_MPP48_55
+ }}};
+
+
+MV_BOARD_INFO db88f6281AInfo = {
+ "DB-88F6281A-BP", /* boardName[MAX_BOARD_NAME_LEN] */
+ DB_88F6281A_BOARD_MPP_GROUP_TYPE_NUM, /* numBoardMppGroupType */
+ db88f6281AInfoBoardMppTypeInfo,
+ DB_88F6281A_BOARD_MPP_CONFIG_NUM, /* numBoardMppConfig */
+ db88f6281AInfoBoardMppConfigValue,
+ 0, /* intsGppMaskLow */
+ 0, /* intsGppMaskHigh */
+ DB_88F6281A_BOARD_DEVICE_CONFIG_NUM, /* numBoardDevIf */
+ db88f6281AInfoBoardDeCsInfo,
+ DB_88F6281A_BOARD_TWSI_DEF_NUM, /* numBoardTwsiDev */
+ db88f6281AInfoBoardTwsiDev,
+ DB_88F6281A_BOARD_MAC_INFO_NUM, /* numBoardMacInfo */
+ db88f6281AInfoBoardMacInfo,
+ DB_88F6281A_BOARD_GPP_INFO_NUM, /* numBoardGppInfo */
+ db88f6281AInfoBoardGppInfo,
+ DB_88F6281A_BOARD_DEBUG_LED_NUM, /* activeLedsNumber */
+ NULL,
+ 0, /* ledsPolarity */
+ DB_88F6281A_OE_LOW, /* gppOutEnLow */
+ DB_88F6281A_OE_HIGH, /* gppOutEnHigh */
+ DB_88F6281A_OE_VAL_LOW, /* gppOutValLow */
+ DB_88F6281A_OE_VAL_HIGH, /* gppOutValHigh */
+ 0, /* gppPolarityValLow */
+ BIT6, /* gppPolarityValHigh */
+ NULL /* pSwitchInfo */
+};
+
+
+#define RD_88F6281A_BOARD_PCI_IF_NUM 0x0
+#define RD_88F6281A_BOARD_TWSI_DEF_NUM 0x2
+#define RD_88F6281A_BOARD_MAC_INFO_NUM 0x2
+#define RD_88F6281A_BOARD_GPP_INFO_NUM 0x5
+#define RD_88F6281A_BOARD_MPP_GROUP_TYPE_NUM 0x1
+#define RD_88F6281A_BOARD_MPP_CONFIG_NUM 0x1
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+ #define RD_88F6281A_BOARD_DEVICE_CONFIG_NUM 0x1
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+ #define RD_88F6281A_BOARD_DEVICE_CONFIG_NUM 0x2
+#else
+ #define RD_88F6281A_BOARD_DEVICE_CONFIG_NUM 0x1
+#endif
+#define RD_88F6281A_BOARD_DEBUG_LED_NUM 0x0
+
+MV_BOARD_MAC_INFO rd88f6281AInfoBoardMacInfo[] =
+ /* {{MV_BOARD_MAC_SPEED boardMacSpeed, MV_U8 boardEthSmiAddr}} */
+ {{BOARD_MAC_SPEED_1000M, 0xa},
+ {BOARD_MAC_SPEED_AUTO, 0xb}
+ };
+
+MV_BOARD_SWITCH_INFO rd88f6281AInfoBoardSwitchInfo[] =
+ /* MV_32 linkStatusIrq, {MV_32 qdPort0, MV_32 qdPort1, MV_32 qdPort2, MV_32 qdPort3, MV_32 qdPort4},
+ MV_32 qdCpuPort, MV_32 smiScanMode, MV_32 switchOnPort} */
+ {{38, {0, 1, 2, 3, -1}, 5, 2, 0},
+ {-1, {-1}, -1, -1, -1}};
+
+MV_BOARD_TWSI_INFO rd88f6281AInfoBoardTwsiDev[] =
+ /* {{MV_BOARD_DEV_CLASS devClass, MV_U8 twsiDevAddr, MV_U8 twsiDevAddrType}} */
+ {
+	{BOARD_DEV_TWSI_EXP, 0xFF, ADDR7_BIT}, /* dummy entry to align with module indexes */
+ {BOARD_DEV_TWSI_EXP, 0x27, ADDR7_BIT}
+ };
+
+MV_BOARD_MPP_TYPE_INFO rd88f6281AInfoBoardMppTypeInfo[] =
+ {{MV_BOARD_RGMII, MV_BOARD_TDM}
+ };
+
+MV_DEV_CS_INFO rd88f6281AInfoBoardDeCsInfo[] =
+ /*{deviceCS, params, devType, devWidth}*/
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+ {{0, N_A, BOARD_DEV_NAND_FLASH, 8}}; /* NAND DEV */
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+ {
+ {0, N_A, BOARD_DEV_NAND_FLASH, 8}, /* NAND DEV */
+ {1, N_A, BOARD_DEV_SPI_FLASH, 8}, /* SPI DEV */
+ };
+#else
+ {{1, N_A, BOARD_DEV_SPI_FLASH, 8}}; /* SPI DEV */
+#endif
+
+MV_BOARD_GPP_INFO rd88f6281AInfoBoardGppInfo[] =
+ /* {{MV_BOARD_GPP_CLASS devClass, MV_U8 gppPinNum}} */
+ {{BOARD_GPP_SDIO_DETECT, 28},
+ {BOARD_GPP_USB_OC, 29},
+ {BOARD_GPP_WPS_BUTTON, 35},
+ {BOARD_GPP_MV_SWITCH, 38},
+ {BOARD_GPP_USB_VBUS, 49}
+ };
+
+MV_BOARD_MPP_INFO rd88f6281AInfoBoardMppConfigValue[] =
+ {{{
+ RD_88F6281A_MPP0_7,
+ RD_88F6281A_MPP8_15,
+ RD_88F6281A_MPP16_23,
+ RD_88F6281A_MPP24_31,
+ RD_88F6281A_MPP32_39,
+ RD_88F6281A_MPP40_47,
+ RD_88F6281A_MPP48_55
+ }}};
+
+MV_BOARD_INFO rd88f6281AInfo = {
+ "RD-88F6281A", /* boardName[MAX_BOARD_NAME_LEN] */
+ RD_88F6281A_BOARD_MPP_GROUP_TYPE_NUM, /* numBoardMppGroupType */
+ rd88f6281AInfoBoardMppTypeInfo,
+ RD_88F6281A_BOARD_MPP_CONFIG_NUM, /* numBoardMppConfig */
+ rd88f6281AInfoBoardMppConfigValue,
+ 0, /* intsGppMaskLow */
+ (1 << 3), /* intsGppMaskHigh */
+ RD_88F6281A_BOARD_DEVICE_CONFIG_NUM, /* numBoardDevIf */
+ rd88f6281AInfoBoardDeCsInfo,
+ RD_88F6281A_BOARD_TWSI_DEF_NUM, /* numBoardTwsiDev */
+ rd88f6281AInfoBoardTwsiDev,
+ RD_88F6281A_BOARD_MAC_INFO_NUM, /* numBoardMacInfo */
+ rd88f6281AInfoBoardMacInfo,
+ RD_88F6281A_BOARD_GPP_INFO_NUM, /* numBoardGppInfo */
+ rd88f6281AInfoBoardGppInfo,
+ RD_88F6281A_BOARD_DEBUG_LED_NUM, /* activeLedsNumber */
+ NULL,
+ 0, /* ledsPolarity */
+ RD_88F6281A_OE_LOW, /* gppOutEnLow */
+ RD_88F6281A_OE_HIGH, /* gppOutEnHigh */
+ RD_88F6281A_OE_VAL_LOW, /* gppOutValLow */
+ RD_88F6281A_OE_VAL_HIGH, /* gppOutValHigh */
+ 0, /* gppPolarityValLow */
+ BIT6, /* gppPolarityValHigh */
+ rd88f6281AInfoBoardSwitchInfo /* pSwitchInfo */
+};
+
+
+#define DB_88F6192A_BOARD_PCI_IF_NUM 0x0
+#define DB_88F6192A_BOARD_TWSI_DEF_NUM 0x7
+#define DB_88F6192A_BOARD_MAC_INFO_NUM 0x2
+#define DB_88F6192A_BOARD_GPP_INFO_NUM 0x3
+#define DB_88F6192A_BOARD_MPP_GROUP_TYPE_NUM 0x1
+#define DB_88F6192A_BOARD_MPP_CONFIG_NUM 0x1
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+ #define DB_88F6192A_BOARD_DEVICE_CONFIG_NUM 0x1
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+ #define DB_88F6192A_BOARD_DEVICE_CONFIG_NUM 0x2
+#else
+ #define DB_88F6192A_BOARD_DEVICE_CONFIG_NUM 0x1
+#endif
+#define DB_88F6192A_BOARD_DEBUG_LED_NUM 0x0
+
+MV_BOARD_TWSI_INFO db88f6192AInfoBoardTwsiDev[] =
+ /* {{MV_BOARD_DEV_CLASS devClass, MV_U8 twsiDevAddr, MV_U8 twsiDevAddrType}} */
+ {
+ {BOARD_DEV_TWSI_EXP, 0x20, ADDR7_BIT},
+ {BOARD_DEV_TWSI_EXP, 0x21, ADDR7_BIT},
+ {BOARD_DEV_TWSI_EXP, 0x27, ADDR7_BIT},
+ {BOARD_DEV_TWSI_SATR, 0x4C, ADDR7_BIT},
+ {BOARD_DEV_TWSI_SATR, 0x4D, ADDR7_BIT},
+ {BOARD_DEV_TWSI_SATR, 0x4E, ADDR7_BIT},
+ {BOARD_TWSI_AUDIO_DEC, 0x4A, ADDR7_BIT}
+ };
+
+MV_BOARD_MAC_INFO db88f6192AInfoBoardMacInfo[] =
+ /* {{MV_BOARD_MAC_SPEED boardMacSpeed, MV_U8 boardEthSmiAddr}} */
+ {
+ {BOARD_MAC_SPEED_AUTO, 0x8},
+ {BOARD_MAC_SPEED_AUTO, 0x9}
+ };
+
+MV_BOARD_MPP_TYPE_INFO db88f6192AInfoBoardMppTypeInfo[] =
+ /* {{MV_BOARD_MPP_TYPE_CLASS boardMppGroup1,
+ MV_BOARD_MPP_TYPE_CLASS boardMppGroup2}} */
+ {{MV_BOARD_AUTO, MV_BOARD_OTHER}
+ };
+
+MV_DEV_CS_INFO db88f6192AInfoBoardDeCsInfo[] =
+ /*{deviceCS, params, devType, devWidth}*/
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+ {{0, N_A, BOARD_DEV_NAND_FLASH, 8}}; /* NAND DEV */
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+ {
+ {0, N_A, BOARD_DEV_NAND_FLASH, 8}, /* NAND DEV */
+ {1, N_A, BOARD_DEV_SPI_FLASH, 8}, /* SPI DEV */
+ };
+#else
+ {{1, N_A, BOARD_DEV_SPI_FLASH, 8}}; /* SPI DEV */
+#endif
+
+MV_BOARD_GPP_INFO db88f6192AInfoBoardGppInfo[] =
+ /* {{MV_BOARD_GPP_CLASS devClass, MV_U8 gppPinNum}} */
+ {
+ {BOARD_GPP_SDIO_WP, 20},
+ {BOARD_GPP_USB_VBUS, 22},
+ {BOARD_GPP_SDIO_DETECT, 23},
+ };
+
+MV_BOARD_MPP_INFO db88f6192AInfoBoardMppConfigValue[] =
+ {{{
+ DB_88F6192A_MPP0_7,
+ DB_88F6192A_MPP8_15,
+ DB_88F6192A_MPP16_23,
+ DB_88F6192A_MPP24_31,
+ DB_88F6192A_MPP32_35
+ }}};
+
+MV_BOARD_INFO db88f6192AInfo = {
+ "DB-88F6192A-BP", /* boardName[MAX_BOARD_NAME_LEN] */
+ DB_88F6192A_BOARD_MPP_GROUP_TYPE_NUM, /* numBoardMppGroupType */
+ db88f6192AInfoBoardMppTypeInfo,
+ DB_88F6192A_BOARD_MPP_CONFIG_NUM, /* numBoardMppConfig */
+ db88f6192AInfoBoardMppConfigValue,
+ 0, /* intsGppMaskLow */
+ (1 << 3), /* intsGppMaskHigh */
+ DB_88F6192A_BOARD_DEVICE_CONFIG_NUM, /* numBoardDevIf */
+ db88f6192AInfoBoardDeCsInfo,
+ DB_88F6192A_BOARD_TWSI_DEF_NUM, /* numBoardTwsiDev */
+ db88f6192AInfoBoardTwsiDev,
+ DB_88F6192A_BOARD_MAC_INFO_NUM, /* numBoardMacInfo */
+ db88f6192AInfoBoardMacInfo,
+ DB_88F6192A_BOARD_GPP_INFO_NUM, /* numBoardGppInfo */
+ db88f6192AInfoBoardGppInfo,
+ DB_88F6192A_BOARD_DEBUG_LED_NUM, /* activeLedsNumber */
+ NULL,
+ 0, /* ledsPolarity */
+ DB_88F6192A_OE_LOW, /* gppOutEnLow */
+ DB_88F6192A_OE_HIGH, /* gppOutEnHigh */
+ DB_88F6192A_OE_VAL_LOW, /* gppOutValLow */
+ DB_88F6192A_OE_VAL_HIGH, /* gppOutValHigh */
+ 0, /* gppPolarityValLow */
+ 0, /* gppPolarityValHigh */
+ NULL /* pSwitchInfo */
+};
+
+#define DB_88F6190A_BOARD_MAC_INFO_NUM 0x1
+
+MV_BOARD_INFO db88f6190AInfo = {
+ "DB-88F6190A-BP", /* boardName[MAX_BOARD_NAME_LEN] */
+ DB_88F6192A_BOARD_MPP_GROUP_TYPE_NUM, /* numBoardMppGroupType */
+ db88f6192AInfoBoardMppTypeInfo,
+ DB_88F6192A_BOARD_MPP_CONFIG_NUM, /* numBoardMppConfig */
+ db88f6192AInfoBoardMppConfigValue,
+ 0, /* intsGppMaskLow */
+ (1 << 3), /* intsGppMaskHigh */
+ DB_88F6192A_BOARD_DEVICE_CONFIG_NUM, /* numBoardDevIf */
+ db88f6192AInfoBoardDeCsInfo,
+ DB_88F6192A_BOARD_TWSI_DEF_NUM, /* numBoardTwsiDev */
+ db88f6192AInfoBoardTwsiDev,
+ DB_88F6190A_BOARD_MAC_INFO_NUM, /* numBoardMacInfo */
+ db88f6192AInfoBoardMacInfo,
+ DB_88F6192A_BOARD_GPP_INFO_NUM, /* numBoardGppInfo */
+ db88f6192AInfoBoardGppInfo,
+ DB_88F6192A_BOARD_DEBUG_LED_NUM, /* activeLedsNumber */
+ NULL,
+ 0, /* ledsPolarity */
+ DB_88F6192A_OE_LOW, /* gppOutEnLow */
+ DB_88F6192A_OE_HIGH, /* gppOutEnHigh */
+ DB_88F6192A_OE_VAL_LOW, /* gppOutValLow */
+ DB_88F6192A_OE_VAL_HIGH, /* gppOutValHigh */
+ 0, /* gppPolarityValLow */
+ 0, /* gppPolarityValHigh */
+ NULL /* pSwitchInfo */
+};
+
+#define RD_88F6192A_BOARD_PCI_IF_NUM 0x0
+#define RD_88F6192A_BOARD_TWSI_DEF_NUM 0x0
+#define RD_88F6192A_BOARD_MAC_INFO_NUM 0x1
+#define RD_88F6192A_BOARD_GPP_INFO_NUM 0xE
+#define RD_88F6192A_BOARD_MPP_GROUP_TYPE_NUM 0x1
+#define RD_88F6192A_BOARD_MPP_CONFIG_NUM 0x1
+#define RD_88F6192A_BOARD_DEVICE_CONFIG_NUM 0x1
+#define RD_88F6192A_BOARD_DEBUG_LED_NUM 0x3
+
+MV_U8 rd88f6192AInfoBoardDebugLedIf[] =
+ {17, 28, 29};
+
+MV_BOARD_MAC_INFO rd88f6192AInfoBoardMacInfo[] =
+ /* {{MV_BOARD_MAC_SPEED boardMacSpeed, MV_U8 boardEthSmiAddr}} */
+ {{BOARD_MAC_SPEED_AUTO, 0x8}
+ };
+
+MV_BOARD_MPP_TYPE_INFO rd88f6192AInfoBoardMppTypeInfo[] =
+ /* {{MV_BOARD_MPP_TYPE_CLASS boardMppGroup1,
+ MV_BOARD_MPP_TYPE_CLASS boardMppGroup2}} */
+ {{MV_BOARD_OTHER, MV_BOARD_OTHER}
+ };
+
+MV_DEV_CS_INFO rd88f6192AInfoBoardDeCsInfo[] =
+ /*{deviceCS, params, devType, devWidth}*/
+ {{1, N_A, BOARD_DEV_SPI_FLASH, 8}}; /* SPI DEV */
+
+MV_BOARD_GPP_INFO rd88f6192AInfoBoardGppInfo[] =
+ /* {{MV_BOARD_GPP_CLASS devClass, MV_U8 gppPinNum}} */
+ {
+ {BOARD_GPP_USB_VBUS_EN, 10},
+ {BOARD_GPP_USB_HOST_DEVICE, 11},
+ {BOARD_GPP_RESET, 14},
+ {BOARD_GPP_POWER_ON_LED, 15},
+ {BOARD_GPP_HDD_POWER, 16},
+ {BOARD_GPP_WPS_BUTTON, 24},
+ {BOARD_GPP_TS_BUTTON_C, 25},
+ {BOARD_GPP_USB_VBUS, 26},
+ {BOARD_GPP_USB_OC, 27},
+ {BOARD_GPP_TS_BUTTON_U, 30},
+ {BOARD_GPP_TS_BUTTON_R, 31},
+ {BOARD_GPP_TS_BUTTON_L, 32},
+ {BOARD_GPP_TS_BUTTON_D, 34},
+ {BOARD_GPP_FAN_POWER, 35}
+ };
+
+MV_BOARD_MPP_INFO rd88f6192AInfoBoardMppConfigValue[] =
+ {{{
+ RD_88F6192A_MPP0_7,
+ RD_88F6192A_MPP8_15,
+ RD_88F6192A_MPP16_23,
+ RD_88F6192A_MPP24_31,
+ RD_88F6192A_MPP32_35
+ }}};
+
+MV_BOARD_INFO rd88f6192AInfo = {
+ "RD-88F6192A-NAS", /* boardName[MAX_BOARD_NAME_LEN] */
+ RD_88F6192A_BOARD_MPP_GROUP_TYPE_NUM, /* numBoardMppGroupType */
+ rd88f6192AInfoBoardMppTypeInfo,
+ RD_88F6192A_BOARD_MPP_CONFIG_NUM, /* numBoardMppConfig */
+ rd88f6192AInfoBoardMppConfigValue,
+ 0, /* intsGppMaskLow */
+ (1 << 3), /* intsGppMaskHigh */
+ RD_88F6192A_BOARD_DEVICE_CONFIG_NUM, /* numBoardDevIf */
+ rd88f6192AInfoBoardDeCsInfo,
+ RD_88F6192A_BOARD_TWSI_DEF_NUM, /* numBoardTwsiDev */
+ NULL,
+ RD_88F6192A_BOARD_MAC_INFO_NUM, /* numBoardMacInfo */
+ rd88f6192AInfoBoardMacInfo,
+ RD_88F6192A_BOARD_GPP_INFO_NUM, /* numBoardGppInfo */
+ rd88f6192AInfoBoardGppInfo,
+ RD_88F6192A_BOARD_DEBUG_LED_NUM, /* activeLedsNumber */
+ rd88f6192AInfoBoardDebugLedIf,
+ 0, /* ledsPolarity */
+ RD_88F6192A_OE_LOW, /* gppOutEnLow */
+ RD_88F6192A_OE_HIGH, /* gppOutEnHigh */
+ RD_88F6192A_OE_VAL_LOW, /* gppOutValLow */
+ RD_88F6192A_OE_VAL_HIGH, /* gppOutValHigh */
+ 0, /* gppPolarityValLow */
+ 0, /* gppPolarityValHigh */
+ NULL /* pSwitchInfo */
+};
+
+MV_BOARD_INFO rd88f6190AInfo = {
+ "RD-88F6190A-NAS", /* boardName[MAX_BOARD_NAME_LEN] */
+ RD_88F6192A_BOARD_MPP_GROUP_TYPE_NUM, /* numBoardMppGroupType */
+ rd88f6192AInfoBoardMppTypeInfo,
+ RD_88F6192A_BOARD_MPP_CONFIG_NUM, /* numBoardMppConfig */
+ rd88f6192AInfoBoardMppConfigValue,
+ 0, /* intsGppMaskLow */
+ (1 << 3), /* intsGppMaskHigh */
+ RD_88F6192A_BOARD_DEVICE_CONFIG_NUM, /* numBoardDevIf */
+ rd88f6192AInfoBoardDeCsInfo,
+ RD_88F6192A_BOARD_TWSI_DEF_NUM, /* numBoardTwsiDev */
+ NULL,
+ RD_88F6192A_BOARD_MAC_INFO_NUM, /* numBoardMacInfo */
+ rd88f6192AInfoBoardMacInfo,
+ RD_88F6192A_BOARD_GPP_INFO_NUM, /* numBoardGppInfo */
+ rd88f6192AInfoBoardGppInfo,
+ RD_88F6192A_BOARD_DEBUG_LED_NUM, /* activeLedsNumber */
+ rd88f6192AInfoBoardDebugLedIf,
+ 0, /* ledsPolarity */
+ RD_88F6192A_OE_LOW, /* gppOutEnLow */
+ RD_88F6192A_OE_HIGH, /* gppOutEnHigh */
+ RD_88F6192A_OE_VAL_LOW, /* gppOutValLow */
+ RD_88F6192A_OE_VAL_HIGH, /* gppOutValHigh */
+ 0, /* gppPolarityValLow */
+ 0, /* gppPolarityValHigh */
+ NULL /* pSwitchInfo */
+};
+
+#define DB_88F6180A_BOARD_PCI_IF_NUM 0x0
+#define DB_88F6180A_BOARD_TWSI_DEF_NUM 0x5
+#define DB_88F6180A_BOARD_MAC_INFO_NUM 0x1
+#define DB_88F6180A_BOARD_GPP_INFO_NUM 0x0
+#define DB_88F6180A_BOARD_MPP_GROUP_TYPE_NUM 0x2
+#define DB_88F6180A_BOARD_MPP_CONFIG_NUM 0x1
+#define DB_88F6180A_BOARD_DEVICE_CONFIG_NUM 0x1
+#define DB_88F6180A_BOARD_DEBUG_LED_NUM 0x0
+
+MV_BOARD_TWSI_INFO db88f6180AInfoBoardTwsiDev[] =
+ /* {{MV_BOARD_DEV_CLASS devClass, MV_U8 twsiDevAddr, MV_U8 twsiDevAddrType}} */
+ {
+ {BOARD_DEV_TWSI_EXP, 0x20, ADDR7_BIT},
+ {BOARD_DEV_TWSI_EXP, 0x21, ADDR7_BIT},
+ {BOARD_DEV_TWSI_EXP, 0x27, ADDR7_BIT},
+ {BOARD_DEV_TWSI_SATR, 0x4C, ADDR7_BIT},
+ {BOARD_TWSI_AUDIO_DEC, 0x4A, ADDR7_BIT}
+ };
+
+MV_BOARD_MAC_INFO db88f6180AInfoBoardMacInfo[] =
+ /* {{MV_BOARD_MAC_SPEED boardMacSpeed, MV_U8 boardEthSmiAddr}} */
+ {{BOARD_MAC_SPEED_AUTO, 0x8}
+ };
+
+MV_BOARD_GPP_INFO db88f6180AInfoBoardGppInfo[] =
+ /* {{MV_BOARD_GPP_CLASS devClass, MV_U8 gppPinNum}} */
+ {
+	/* Muxed with TDM/Audio module via I/O expander
+ {BOARD_GPP_USB_VBUS, 6} */
+ };
+
+MV_BOARD_MPP_TYPE_INFO db88f6180AInfoBoardMppTypeInfo[] =
+ /* {{MV_BOARD_MPP_TYPE_CLASS boardMppGroup1,
+ MV_BOARD_MPP_TYPE_CLASS boardMppGroup2}} */
+ {{MV_BOARD_OTHER, MV_BOARD_AUTO}
+ };
+
+MV_DEV_CS_INFO db88f6180AInfoBoardDeCsInfo[] =
+ /*{deviceCS, params, devType, devWidth}*/
+#if defined(MV_NAND_BOOT)
+ {{0, N_A, BOARD_DEV_NAND_FLASH, 8}}; /* NAND DEV */
+#else
+ {{1, N_A, BOARD_DEV_SPI_FLASH, 8}}; /* SPI DEV */
+#endif
+
+MV_BOARD_MPP_INFO db88f6180AInfoBoardMppConfigValue[] =
+ {{{
+ DB_88F6180A_MPP0_7,
+ DB_88F6180A_MPP8_15,
+ DB_88F6180A_MPP16_23,
+ DB_88F6180A_MPP24_31,
+ DB_88F6180A_MPP32_39,
+ DB_88F6180A_MPP40_44
+ }}};
+
+MV_BOARD_INFO db88f6180AInfo = {
+ "DB-88F6180A-BP", /* boardName[MAX_BOARD_NAME_LEN] */
+ DB_88F6180A_BOARD_MPP_GROUP_TYPE_NUM, /* numBoardMppGroupType */
+ db88f6180AInfoBoardMppTypeInfo,
+ DB_88F6180A_BOARD_MPP_CONFIG_NUM, /* numBoardMppConfig */
+ db88f6180AInfoBoardMppConfigValue,
+ 0, /* intsGppMaskLow */
+ 0, /* intsGppMaskHigh */
+ DB_88F6180A_BOARD_DEVICE_CONFIG_NUM, /* numBoardDevIf */
+ db88f6180AInfoBoardDeCsInfo,
+ DB_88F6180A_BOARD_TWSI_DEF_NUM, /* numBoardTwsiDev */
+ db88f6180AInfoBoardTwsiDev,
+ DB_88F6180A_BOARD_MAC_INFO_NUM, /* numBoardMacInfo */
+ db88f6180AInfoBoardMacInfo,
+ DB_88F6180A_BOARD_GPP_INFO_NUM, /* numBoardGppInfo */
+ NULL,
+ DB_88F6180A_BOARD_DEBUG_LED_NUM, /* activeLedsNumber */
+ NULL,
+ 0, /* ledsPolarity */
+ DB_88F6180A_OE_LOW, /* gppOutEnLow */
+ DB_88F6180A_OE_HIGH, /* gppOutEnHigh */
+ DB_88F6180A_OE_VAL_LOW, /* gppOutValLow */
+ DB_88F6180A_OE_VAL_HIGH, /* gppOutValHigh */
+ 0, /* gppPolarityValLow */
+ 0, /* gppPolarityValHigh */
+ NULL /* pSwitchInfo */
+};
+
+
+#define RD_88F6281A_PCAC_BOARD_PCI_IF_NUM 0x0
+#define RD_88F6281A_PCAC_BOARD_TWSI_DEF_NUM 0x1
+#define RD_88F6281A_PCAC_BOARD_MAC_INFO_NUM 0x1
+#define RD_88F6281A_PCAC_BOARD_GPP_INFO_NUM 0x0
+#define RD_88F6281A_PCAC_BOARD_MPP_GROUP_TYPE_NUM 0x1
+#define RD_88F6281A_PCAC_BOARD_MPP_CONFIG_NUM 0x1
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+ #define RD_88F6281A_PCAC_BOARD_DEVICE_CONFIG_NUM 0x1
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+ #define RD_88F6281A_PCAC_BOARD_DEVICE_CONFIG_NUM 0x2
+#else
+ #define RD_88F6281A_PCAC_BOARD_DEVICE_CONFIG_NUM 0x1
+#endif
+#define RD_88F6281A_PCAC_BOARD_DEBUG_LED_NUM 0x4
+
+MV_U8 rd88f6281APcacInfoBoardDebugLedIf[] =
+ {38, 39, 40, 41};
+
+MV_BOARD_MAC_INFO rd88f6281APcacInfoBoardMacInfo[] =
+ /* {{MV_BOARD_MAC_SPEED boardMacSpeed, MV_U8 boardEthSmiAddr}} */
+ {{BOARD_MAC_SPEED_AUTO, 0x8}
+ };
+
+MV_BOARD_TWSI_INFO rd88f6281APcacInfoBoardTwsiDev[] =
+ /* {{MV_BOARD_DEV_CLASS devClass, MV_U8 twsiDevAddr, MV_U8 twsiDevAddrType}} */
+ {
+ {BOARD_TWSI_OTHER, 0xa7, ADDR7_BIT}
+ };
+
+MV_BOARD_MPP_TYPE_INFO rd88f6281APcacInfoBoardMppTypeInfo[] =
+ {{MV_BOARD_OTHER, MV_BOARD_OTHER}
+ };
+
+MV_DEV_CS_INFO rd88f6281APcacInfoBoardDeCsInfo[] =
+ /*{deviceCS, params, devType, devWidth}*/
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+ {{0, N_A, BOARD_DEV_NAND_FLASH, 8}}; /* NAND DEV */
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+ {
+ {0, N_A, BOARD_DEV_NAND_FLASH, 8}, /* NAND DEV */
+ {1, N_A, BOARD_DEV_SPI_FLASH, 8}, /* SPI DEV */
+ };
+#else
+ {{1, N_A, BOARD_DEV_SPI_FLASH, 8}}; /* SPI DEV */
+#endif
+
+MV_BOARD_MPP_INFO rd88f6281APcacInfoBoardMppConfigValue[] =
+ {{{
+ RD_88F6281A_PCAC_MPP0_7,
+ RD_88F6281A_PCAC_MPP8_15,
+ RD_88F6281A_PCAC_MPP16_23,
+ RD_88F6281A_PCAC_MPP24_31,
+ RD_88F6281A_PCAC_MPP32_39,
+ RD_88F6281A_PCAC_MPP40_47,
+ RD_88F6281A_PCAC_MPP48_55
+ }}};
+
+MV_BOARD_INFO rd88f6281APcacInfo = {
+ "RD-88F6281A-PCAC", /* boardName[MAX_BOARD_NAME_LEN] */
+ RD_88F6281A_PCAC_BOARD_MPP_GROUP_TYPE_NUM, /* numBoardMppGroupType */
+ rd88f6281APcacInfoBoardMppTypeInfo,
+ RD_88F6281A_PCAC_BOARD_MPP_CONFIG_NUM, /* numBoardMppConfig */
+ rd88f6281APcacInfoBoardMppConfigValue,
+ 0, /* intsGppMaskLow */
+ (1 << 3), /* intsGppMaskHigh */
+ RD_88F6281A_PCAC_BOARD_DEVICE_CONFIG_NUM, /* numBoardDevIf */
+ rd88f6281APcacInfoBoardDeCsInfo,
+ RD_88F6281A_PCAC_BOARD_TWSI_DEF_NUM, /* numBoardTwsiDev */
+ rd88f6281APcacInfoBoardTwsiDev,
+ RD_88F6281A_PCAC_BOARD_MAC_INFO_NUM, /* numBoardMacInfo */
+ rd88f6281APcacInfoBoardMacInfo,
+ RD_88F6281A_PCAC_BOARD_GPP_INFO_NUM, /* numBoardGppInfo */
+ 0,
+ RD_88F6281A_PCAC_BOARD_DEBUG_LED_NUM, /* activeLedsNumber */
+ NULL,
+ 0, /* ledsPolarity */
+ RD_88F6281A_PCAC_OE_LOW, /* gppOutEnLow */
+ RD_88F6281A_PCAC_OE_HIGH, /* gppOutEnHigh */
+ RD_88F6281A_PCAC_OE_VAL_LOW, /* gppOutValLow */
+ RD_88F6281A_PCAC_OE_VAL_HIGH, /* gppOutValHigh */
+ 0, /* gppPolarityValLow */
+ 0, /* gppPolarityValHigh */
+ NULL /* pSwitchInfo */
+};
+
+
+/* 6281 Sheeva Plug*/
+
+#define SHEEVA_PLUG_BOARD_PCI_IF_NUM 0x0
+#define SHEEVA_PLUG_BOARD_TWSI_DEF_NUM 0x0
+#define SHEEVA_PLUG_BOARD_MAC_INFO_NUM 0x1
+#define SHEEVA_PLUG_BOARD_GPP_INFO_NUM 0x0
+#define SHEEVA_PLUG_BOARD_MPP_GROUP_TYPE_NUN 0x1
+#define SHEEVA_PLUG_BOARD_MPP_CONFIG_NUM 0x1
+#define SHEEVA_PLUG_BOARD_DEVICE_CONFIG_NUM 0x1
+#define SHEEVA_PLUG_BOARD_DEBUG_LED_NUM 0x1
+
+MV_U8 sheevaPlugInfoBoardDebugLedIf[] =
+ {49};
+
+MV_BOARD_MAC_INFO sheevaPlugInfoBoardMacInfo[] =
+ /* {{MV_BOARD_MAC_SPEED boardMacSpeed, MV_U8 boardEthSmiAddr}} */
+ {{BOARD_MAC_SPEED_AUTO, 0x0}};
+
+MV_BOARD_TWSI_INFO sheevaPlugInfoBoardTwsiDev[] =
+ /* {{MV_BOARD_DEV_CLASS devClass, MV_U8 twsiDevAddr, MV_U8 twsiDevAddrType}} */
+ {{BOARD_TWSI_OTHER, 0x0, ADDR7_BIT}};
+
+MV_BOARD_MPP_TYPE_INFO sheevaPlugInfoBoardMppTypeInfo[] =
+ {{MV_BOARD_OTHER, MV_BOARD_OTHER}
+ };
+
+MV_DEV_CS_INFO sheevaPlugInfoBoardDeCsInfo[] =
+ /*{deviceCS, params, devType, devWidth}*/
+ {{0, N_A, BOARD_DEV_NAND_FLASH, 8}}; /* NAND DEV */
+
+MV_BOARD_MPP_INFO sheevaPlugInfoBoardMppConfigValue[] =
+ {{{
+ RD_SHEEVA_PLUG_MPP0_7,
+ RD_SHEEVA_PLUG_MPP8_15,
+ RD_SHEEVA_PLUG_MPP16_23,
+ RD_SHEEVA_PLUG_MPP24_31,
+ RD_SHEEVA_PLUG_MPP32_39,
+ RD_SHEEVA_PLUG_MPP40_47,
+ RD_SHEEVA_PLUG_MPP48_55
+ }}};
+
+MV_BOARD_INFO sheevaPlugInfo = {
+ "SHEEVA PLUG", /* boardName[MAX_BOARD_NAME_LEN] */
+ SHEEVA_PLUG_BOARD_MPP_GROUP_TYPE_NUN, /* numBoardMppGroupType */
+ sheevaPlugInfoBoardMppTypeInfo,
+ SHEEVA_PLUG_BOARD_MPP_CONFIG_NUM, /* numBoardMppConfig */
+ sheevaPlugInfoBoardMppConfigValue,
+ 0, /* intsGppMaskLow */
+ 0, /* intsGppMaskHigh */
+ SHEEVA_PLUG_BOARD_DEVICE_CONFIG_NUM, /* numBoardDevIf */
+ sheevaPlugInfoBoardDeCsInfo,
+ SHEEVA_PLUG_BOARD_TWSI_DEF_NUM, /* numBoardTwsiDev */
+ sheevaPlugInfoBoardTwsiDev,
+ SHEEVA_PLUG_BOARD_MAC_INFO_NUM, /* numBoardMacInfo */
+ sheevaPlugInfoBoardMacInfo,
+ SHEEVA_PLUG_BOARD_GPP_INFO_NUM, /* numBoardGppInfo */
+ 0,
+ SHEEVA_PLUG_BOARD_DEBUG_LED_NUM, /* activeLedsNumber */
+ sheevaPlugInfoBoardDebugLedIf,
+ 0, /* ledsPolarity */
+ RD_SHEEVA_PLUG_OE_LOW, /* gppOutEnLow */
+ RD_SHEEVA_PLUG_OE_HIGH, /* gppOutEnHigh */
+ RD_SHEEVA_PLUG_OE_VAL_LOW, /* gppOutValLow */
+ RD_SHEEVA_PLUG_OE_VAL_HIGH, /* gppOutValHigh */
+ 0, /* gppPolarityValLow */
+ 0, /* gppPolarityValHigh */
+ NULL /* pSwitchInfo */
+};
+
+/* Customer specific board place holder*/
+
+#define DB_CUSTOMER_BOARD_PCI_IF_NUM 0x0
+#define DB_CUSTOMER_BOARD_TWSI_DEF_NUM 0x0
+#define DB_CUSTOMER_BOARD_MAC_INFO_NUM 0x0
+#define DB_CUSTOMER_BOARD_GPP_INFO_NUM 0x0
+#define DB_CUSTOMER_BOARD_MPP_GROUP_TYPE_NUN 0x0
+#define DB_CUSTOMER_BOARD_MPP_CONFIG_NUM 0x0
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+ #define DB_CUSTOMER_BOARD_DEVICE_CONFIG_NUM 0x0
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+ #define DB_CUSTOMER_BOARD_DEVICE_CONFIG_NUM 0x0
+#else
+ #define DB_CUSTOMER_BOARD_DEVICE_CONFIG_NUM 0x0
+#endif
+#define DB_CUSTOMER_BOARD_DEBUG_LED_NUM 0x0
+
+MV_U8 dbCustomerInfoBoardDebugLedIf[] =
+ {0};
+
+MV_BOARD_MAC_INFO dbCustomerInfoBoardMacInfo[] =
+ /* {{MV_BOARD_MAC_SPEED boardMacSpeed, MV_U8 boardEthSmiAddr}} */
+ {{BOARD_MAC_SPEED_AUTO, 0x0}};
+
+MV_BOARD_TWSI_INFO dbCustomerInfoBoardTwsiDev[] =
+ /* {{MV_BOARD_DEV_CLASS devClass, MV_U8 twsiDevAddr, MV_U8 twsiDevAddrType}} */
+ {{BOARD_TWSI_OTHER, 0x0, ADDR7_BIT}};
+
+MV_BOARD_MPP_TYPE_INFO dbCustomerInfoBoardMppTypeInfo[] =
+ {{MV_BOARD_OTHER, MV_BOARD_OTHER}
+ };
+
+MV_DEV_CS_INFO dbCustomerInfoBoardDeCsInfo[] =
+ /*{deviceCS, params, devType, devWidth}*/
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+ {{0, N_A, BOARD_DEV_NAND_FLASH, 8}}; /* NAND DEV */
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+ {
+ {0, N_A, BOARD_DEV_NAND_FLASH, 8}, /* NAND DEV */
+ {2, N_A, BOARD_DEV_SPI_FLASH, 8}, /* SPI DEV */
+ };
+#else
+ {{2, N_A, BOARD_DEV_SPI_FLASH, 8}}; /* SPI DEV */
+#endif
+
+MV_BOARD_MPP_INFO dbCustomerInfoBoardMppConfigValue[] =
+ {{{
+ DB_CUSTOMER_MPP0_7,
+ DB_CUSTOMER_MPP8_15,
+ DB_CUSTOMER_MPP16_23,
+ DB_CUSTOMER_MPP24_31,
+ DB_CUSTOMER_MPP32_39,
+ DB_CUSTOMER_MPP40_47,
+ DB_CUSTOMER_MPP48_55
+ }}};
+
+MV_BOARD_INFO dbCustomerInfo = {
+ "DB-CUSTOMER", /* boardName[MAX_BOARD_NAME_LEN] */
+ DB_CUSTOMER_BOARD_MPP_GROUP_TYPE_NUN, /* numBoardMppGroupType */
+ dbCustomerInfoBoardMppTypeInfo,
+ DB_CUSTOMER_BOARD_MPP_CONFIG_NUM, /* numBoardMppConfig */
+ dbCustomerInfoBoardMppConfigValue,
+ 0, /* intsGppMaskLow */
+ 0, /* intsGppMaskHigh */
+ DB_CUSTOMER_BOARD_DEVICE_CONFIG_NUM, /* numBoardDevIf */
+ dbCustomerInfoBoardDeCsInfo,
+ DB_CUSTOMER_BOARD_TWSI_DEF_NUM, /* numBoardTwsiDev */
+ dbCustomerInfoBoardTwsiDev,
+ DB_CUSTOMER_BOARD_MAC_INFO_NUM, /* numBoardMacInfo */
+ dbCustomerInfoBoardMacInfo,
+ DB_CUSTOMER_BOARD_GPP_INFO_NUM, /* numBoardGppInfo */
+ 0,
+ DB_CUSTOMER_BOARD_DEBUG_LED_NUM, /* activeLedsNumber */
+ NULL,
+ 0, /* ledsPolarity */
+ DB_CUSTOMER_OE_LOW, /* gppOutEnLow */
+ DB_CUSTOMER_OE_HIGH, /* gppOutEnHigh */
+ DB_CUSTOMER_OE_VAL_LOW, /* gppOutValLow */
+ DB_CUSTOMER_OE_VAL_HIGH, /* gppOutValHigh */
+ 0, /* gppPolarityValLow */
+ 0, /* gppPolarityValHigh */
+ NULL /* pSwitchInfo */
+};
+
+MV_BOARD_INFO* boardInfoTbl[] = {
+ &db88f6281AInfo,
+ &rd88f6281AInfo,
+ &db88f6192AInfo,
+ &rd88f6192AInfo,
+ &db88f6180AInfo,
+ &db88f6190AInfo,
+ &rd88f6190AInfo,
+ &rd88f6281APcacInfo,
+ &dbCustomerInfo,
+ &sheevaPlugInfo
+ };
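+
+/* Illustrative sketch (not part of the original Marvell sources): the table
+ * above is assumed to be indexed by (boardId - BOARD_ID_BASE); this is how the
+ * BOARD_INFO(boardId) accessor used throughout mvBoardEnvLib.c is expected to
+ * resolve a board ID to its MV_BOARD_INFO descriptor. */
+#if 0
+#define EXAMPLE_BOARD_INFO(boardId)	boardInfoTbl[(boardId) - BOARD_ID_BASE]
+#endif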
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.h
new file mode 100644
index 000000000..0372eee5d
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.h
@@ -0,0 +1,262 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvBoardEnvSpech
+#define __INCmvBoardEnvSpech
+
+#include "mvSysHwConfig.h"
+
+
+/* For future use */
+#define BD_ID_DATA_START_OFFS 0x0
+#define BD_DETECT_SEQ_OFFS 0x0
+#define BD_SYS_NUM_OFFS 0x4
+#define BD_NAME_OFFS 0x8
+
+/* I2C bus addresses */
+#define MV_BOARD_CTRL_I2C_ADDR 0x0 /* Controller slave addr */
+#define MV_BOARD_CTRL_I2C_ADDR_TYPE ADDR7_BIT
+#define MV_BOARD_DIMM0_I2C_ADDR 0x56
+#define MV_BOARD_DIMM0_I2C_ADDR_TYPE ADDR7_BIT
+#define MV_BOARD_DIMM1_I2C_ADDR 0x54
+#define MV_BOARD_DIMM1_I2C_ADDR_TYPE ADDR7_BIT
+#define MV_BOARD_EEPROM_I2C_ADDR 0x51
+#define MV_BOARD_EEPROM_I2C_ADDR_TYPE ADDR7_BIT
+#define MV_BOARD_MAIN_EEPROM_I2C_ADDR 0x50
+#define MV_BOARD_MAIN_EEPROM_I2C_ADDR_TYPE ADDR7_BIT
+#define MV_BOARD_MUX_I2C_ADDR_ENTRY 0x2
+#define MV_BOARD_DIMM_I2C_CHANNEL 0x0
+
+#define BOOT_FLASH_INDEX 0
+#define MAIN_FLASH_INDEX 1
+
+#define BOARD_ETH_START_PORT_NUM 0
+
+/* Supported clocks */
+#define MV_BOARD_TCLK_100MHZ 100000000
+#define MV_BOARD_TCLK_125MHZ 125000000
+#define MV_BOARD_TCLK_133MHZ 133333333
+#define MV_BOARD_TCLK_150MHZ 150000000
+#define MV_BOARD_TCLK_166MHZ 166666667
+#define MV_BOARD_TCLK_200MHZ 200000000
+
+#define MV_BOARD_SYSCLK_100MHZ 100000000
+#define MV_BOARD_SYSCLK_125MHZ 125000000
+#define MV_BOARD_SYSCLK_133MHZ 133333333
+#define MV_BOARD_SYSCLK_150MHZ 150000000
+#define MV_BOARD_SYSCLK_166MHZ 166666667
+#define MV_BOARD_SYSCLK_200MHZ 200000000
+#define MV_BOARD_SYSCLK_233MHZ 233333333
+#define MV_BOARD_SYSCLK_250MHZ 250000000
+#define MV_BOARD_SYSCLK_267MHZ 266666667
+#define MV_BOARD_SYSCLK_300MHZ 300000000
+#define MV_BOARD_SYSCLK_333MHZ 333333334
+#define MV_BOARD_SYSCLK_400MHZ 400000000
+
+#define MV_BOARD_REFCLK_25MHZ 25000000
+
+/* Board specific */
+/* =============================== */
+
+/* boards ID numbers */
+
+#define BOARD_ID_BASE 0x0
+
+/* New board ID numbers */
+#define DB_88F6281A_BP_ID (BOARD_ID_BASE)
+#define DB_88F6281_BP_MLL_ID 1680
+#define RD_88F6281A_ID (BOARD_ID_BASE+0x1)
+#define RD_88F6281_MLL_ID 1682
+#define DB_88F6192A_BP_ID (BOARD_ID_BASE+0x2)
+#define RD_88F6192A_ID (BOARD_ID_BASE+0x3)
+#define RD_88F6192_MLL_ID 1681
+#define DB_88F6180A_BP_ID (BOARD_ID_BASE+0x4)
+#define DB_88F6190A_BP_ID (BOARD_ID_BASE+0x5)
+#define RD_88F6190A_ID (BOARD_ID_BASE+0x6)
+#define RD_88F6281A_PCAC_ID (BOARD_ID_BASE+0x7)
+#define DB_CUSTOMER_ID (BOARD_ID_BASE+0x8)
+#define SHEEVA_PLUG_ID (BOARD_ID_BASE+0x9)
+#define MV_MAX_BOARD_ID (SHEEVA_PLUG_ID + 1)
+
+/* DB-88F6281A-BP */
+#if defined(MV_NAND)
+ #define DB_88F6281A_MPP0_7 0x21111111
+#else
+ #define DB_88F6281A_MPP0_7 0x21112220
+#endif
+#define DB_88F6281A_MPP8_15 0x11113311
+#define DB_88F6281A_MPP16_23 0x00551111
+#define DB_88F6281A_MPP24_31 0x00000000
+#define DB_88F6281A_MPP32_39 0x00000000
+#define DB_88F6281A_MPP40_47 0x00000000
+#define DB_88F6281A_MPP48_55 0x00000000
+#define DB_88F6281A_OE_LOW 0x0
+#if defined(MV_TDM_5CHANNELS)
+ #define DB_88F6281A_OE_HIGH (BIT6)
+#else
+#define DB_88F6281A_OE_HIGH 0x0
+#endif
+#define DB_88F6281A_OE_VAL_LOW 0x0
+#define DB_88F6281A_OE_VAL_HIGH 0x0
+
+/* RD-88F6281A */
+#if defined(MV_NAND)
+ #define RD_88F6281A_MPP0_7 0x21111111
+#else
+ #define RD_88F6281A_MPP0_7 0x21112220
+#endif
+#define RD_88F6281A_MPP8_15 0x11113311
+#define RD_88F6281A_MPP16_23 0x33331111
+#define RD_88F6281A_MPP24_31 0x33003333
+#define RD_88F6281A_MPP32_39 0x20440533
+#define RD_88F6281A_MPP40_47 0x22202222
+#define RD_88F6281A_MPP48_55 0x00000002
+#define RD_88F6281A_OE_LOW (BIT28 | BIT29)
+#define RD_88F6281A_OE_HIGH (BIT3 | BIT6 | BIT17)
+#define RD_88F6281A_OE_VAL_LOW 0x0
+#define RD_88F6281A_OE_VAL_HIGH 0x0
+
+/* DB-88F6192A-BP */
+#if defined(MV_NAND)
+ #define DB_88F6192A_MPP0_7 0x21111111
+#else
+ #define DB_88F6192A_MPP0_7 0x21112220
+#endif
+#define DB_88F6192A_MPP8_15 0x11113311
+#define DB_88F6192A_MPP16_23 0x00501111
+#define DB_88F6192A_MPP24_31 0x00000000
+#define DB_88F6192A_MPP32_35 0x00000000
+#define DB_88F6192A_OE_LOW (BIT22 | BIT23)
+#define DB_88F6192A_OE_HIGH 0x0
+#define DB_88F6192A_OE_VAL_LOW 0x0
+#define DB_88F6192A_OE_VAL_HIGH 0x0
+
+/* RD-88F6192A */
+#define RD_88F6192A_MPP0_7 0x01222222
+#define RD_88F6192A_MPP8_15 0x00000011
+#define RD_88F6192A_MPP16_23 0x05550000
+#define RD_88F6192A_MPP24_31 0x0
+#define RD_88F6192A_MPP32_35 0x0
+#define RD_88F6192A_OE_LOW (BIT11 | BIT14 | BIT24 | BIT25 | BIT26 | BIT27 | BIT30 | BIT31)
+#define RD_88F6192A_OE_HIGH (BIT0 | BIT2)
+#define RD_88F6192A_OE_VAL_LOW 0x18400
+#define RD_88F6192A_OE_VAL_HIGH 0x8
+
+/* DB-88F6180A-BP */
+#if defined(MV_NAND)
+ #define DB_88F6180A_MPP0_7 0x21111111
+#else
+ #define DB_88F6180A_MPP0_7 0x01112222
+#endif
+#define DB_88F6180A_MPP8_15 0x11113311
+#define DB_88F6180A_MPP16_23 0x00001111
+#define DB_88F6180A_MPP24_31 0x0
+#define DB_88F6180A_MPP32_39 0x4444c000
+#define DB_88F6180A_MPP40_44 0x00044444
+#define DB_88F6180A_OE_LOW 0x0
+#define DB_88F6180A_OE_HIGH 0x0
+#define DB_88F6180A_OE_VAL_LOW 0x0
+#define DB_88F6180A_OE_VAL_HIGH 0x0
+
+/* RD-88F6281A_PCAC */
+#define RD_88F6281A_PCAC_MPP0_7 0x21111111
+#define RD_88F6281A_PCAC_MPP8_15 0x00003311
+#define RD_88F6281A_PCAC_MPP16_23 0x00001100
+#define RD_88F6281A_PCAC_MPP24_31 0x00000000
+#define RD_88F6281A_PCAC_MPP32_39 0x00000000
+#define RD_88F6281A_PCAC_MPP40_47 0x00000000
+#define RD_88F6281A_PCAC_MPP48_55 0x00000000
+#define RD_88F6281A_PCAC_OE_LOW 0x0
+#define RD_88F6281A_PCAC_OE_HIGH 0x0
+#define RD_88F6281A_PCAC_OE_VAL_LOW 0x0
+#define RD_88F6281A_PCAC_OE_VAL_HIGH 0x0
+
+/* SHEEVA PLUG */
+#define RD_SHEEVA_PLUG_MPP0_7 0x01111111
+#define RD_SHEEVA_PLUG_MPP8_15 0x11113322
+#define RD_SHEEVA_PLUG_MPP16_23 0x00001111
+#define RD_SHEEVA_PLUG_MPP24_31 0x00100000
+#define RD_SHEEVA_PLUG_MPP32_39 0x00000000
+#define RD_SHEEVA_PLUG_MPP40_47 0x00000000
+#define RD_SHEEVA_PLUG_MPP48_55 0x00000000
+#define RD_SHEEVA_PLUG_OE_LOW 0x0
+#define RD_SHEEVA_PLUG_OE_HIGH 0x0
+#define RD_SHEEVA_PLUG_OE_VAL_LOW (BIT29)
+#define RD_SHEEVA_PLUG_OE_VAL_HIGH ((~(BIT17 | BIT16 | BIT15)) | BIT14)
+
+/* DB-CUSTOMER */
+#define DB_CUSTOMER_MPP0_7 0x21111111
+#define DB_CUSTOMER_MPP8_15 0x00003311
+#define DB_CUSTOMER_MPP16_23 0x00001100
+#define DB_CUSTOMER_MPP24_31 0x00000000
+#define DB_CUSTOMER_MPP32_39 0x00000000
+#define DB_CUSTOMER_MPP40_47 0x00000000
+#define DB_CUSTOMER_MPP48_55 0x00000000
+#define DB_CUSTOMER_OE_LOW 0x0
+#define DB_CUSTOMER_OE_HIGH (~((BIT6) | (BIT7) | (BIT8) | (BIT9)))
+#define DB_CUSTOMER_OE_VAL_LOW 0x0
+#define DB_CUSTOMER_OE_VAL_HIGH 0x0
+
+#endif /* __INCmvBoardEnvSpech */
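As a side note (not part of the patch): each MPPx_y value above packs eight 4-bit multi-purpose-pin function selectors into one 32-bit MPP control register, lowest nibble first. A small illustrative decoder, assuming that layout:

/* Return the 4-bit function selector of pin 'pin' from its packed MPP
 * group value, e.g. mppFuncGet(DB_88F6281A_MPP8_15, 10) yields 0x3. */
static MV_U32 mppFuncGet(MV_U32 mppGroupVal, MV_U32 pin)
{
	return (mppGroupVal >> ((pin % 8) * 4)) & 0xF;
}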
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.c
new file mode 100644
index 000000000..fed0fa114
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.c
@@ -0,0 +1,320 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#include "cpu/mvCpu.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvRegs.h"
+#include "ctrlEnv/sys/mvCpuIfRegs.h"
+
+/* defines */
+#ifdef MV_DEBUG
+ #define DB(x) x
+#else
+ #define DB(x)
+#endif
+
+/* locals */
+
+/*******************************************************************************
+* mvCpuPclkGet - Get the CPU pClk (pipe clock)
+*
+* DESCRIPTION:
+*       This routine extracts the CPU core clock.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       32-bit clock frequency in MHz.
+*
+*******************************************************************************/
+/* The 6180 has a different clock reset sampling scheme */
+
+static MV_U32 mvCpu6180PclkGet(MV_VOID)
+{
+ MV_U32 tmpPClkRate=0;
+ MV_CPU_ARM_CLK cpu6180_ddr_l2_CLK[] = MV_CPU6180_DDR_L2_CLCK_TBL;
+
+ tmpPClkRate = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+ tmpPClkRate = tmpPClkRate & MSAR_CPUCLCK_MASK_6180;
+ tmpPClkRate = tmpPClkRate >> MSAR_CPUCLCK_OFFS_6180;
+
+ tmpPClkRate = cpu6180_ddr_l2_CLK[tmpPClkRate].cpuClk;
+
+ return tmpPClkRate;
+}
+
+
+MV_U32 mvCpuPclkGet(MV_VOID)
+{
+#if defined(PCLCK_AUTO_DETECT)
+ MV_U32 tmpPClkRate=0;
+ MV_U32 cpuCLK[] = MV_CPU_CLCK_TBL;
+
+ if(mvCtrlModelGet() == MV_6180_DEV_ID)
+ return mvCpu6180PclkGet();
+
+ tmpPClkRate = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+ tmpPClkRate = MSAR_CPUCLCK_EXTRACT(tmpPClkRate);
+ tmpPClkRate = cpuCLK[tmpPClkRate];
+
+ return tmpPClkRate;
+#else
+	return MV_DEFAULT_PCLK;
+#endif
+}
+
+/*******************************************************************************
+* mvCpuL2ClkGet - Get the CPU L2 clock (CPU bus clock)
+*
+* DESCRIPTION:
+*       This routine extracts the CPU L2 clock.
+*
+* RETURN:
+*       32-bit clock frequency in Hertz.
+*
+*******************************************************************************/
+static MV_U32 mvCpu6180L2ClkGet(MV_VOID)
+{
+ MV_U32 L2ClkRate=0;
+ MV_CPU_ARM_CLK _cpu6180_ddr_l2_CLK[] = MV_CPU6180_DDR_L2_CLCK_TBL;
+
+ L2ClkRate = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+ L2ClkRate = L2ClkRate & MSAR_CPUCLCK_MASK_6180;
+ L2ClkRate = L2ClkRate >> MSAR_CPUCLCK_OFFS_6180;
+
+ L2ClkRate = _cpu6180_ddr_l2_CLK[L2ClkRate].l2Clk;
+
+ return L2ClkRate;
+
+}
+
+MV_U32 mvCpuL2ClkGet(MV_VOID)
+{
+#ifdef L2CLK_AUTO_DETECT
+ MV_U32 L2ClkRate, tmp, pClkRate, indexL2Rtio;
+ MV_U32 L2Rtio[][2] = MV_L2_CLCK_RTIO_TBL;
+
+ if(mvCtrlModelGet() == MV_6180_DEV_ID)
+ return mvCpu6180L2ClkGet();
+
+ pClkRate = mvCpuPclkGet();
+
+ tmp = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+ indexL2Rtio = MSAR_L2CLCK_EXTRACT(tmp);
+
+ L2ClkRate = ((pClkRate * L2Rtio[indexL2Rtio][1]) / L2Rtio[indexL2Rtio][0]);
+
+ return L2ClkRate;
+#else
+ return MV_BOARD_DEFAULT_L2CLK;
+#endif
+}
+
+
+/*******************************************************************************
+* mvCpuNameGet - Get CPU name
+*
+* DESCRIPTION:
+* This function returns a string describing the CPU model and revision.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+*       pNameBuff - Buffer to contain the CPU name string. Minimum size 32 chars.
+*
+* RETURN:
+* None.
+*******************************************************************************/
+MV_VOID mvCpuNameGet(char *pNameBuff)
+{
+ MV_U32 cpuModel;
+
+ cpuModel = mvOsCpuPartGet();
+
+	/* The CPU model is indicated in the Processor Version Register (PVR) */
+ switch(cpuModel)
+ {
+ case CPU_PART_MRVL131:
+ mvOsSPrintf(pNameBuff, "%s (Rev %d)", "Marvell Feroceon",mvOsCpuRevGet());
+ break;
+ case CPU_PART_ARM926:
+ mvOsSPrintf(pNameBuff, "%s (Rev %d)", "ARM926",mvOsCpuRevGet());
+ break;
+ case CPU_PART_ARM946:
+ mvOsSPrintf(pNameBuff, "%s (Rev %d)", "ARM946",mvOsCpuRevGet());
+ break;
+ default:
+ mvOsSPrintf(pNameBuff,"??? (0x%04x) (Rev %d)",cpuModel,mvOsCpuRevGet());
+ break;
+ } /* switch */
+
+ return;
+}
+
+
+#define MV_PROC_STR_SIZE 50
+
+static void mvCpuIfGetL2EccMode(MV_8 *buf)
+{
+ MV_U32 regVal = MV_REG_READ(CPU_L2_CONFIG_REG);
+ if (regVal & BIT2)
+ mvOsSPrintf(buf, "L2 ECC Enabled");
+ else
+ mvOsSPrintf(buf, "L2 ECC Disabled");
+}
+
+static void mvCpuIfGetL2Mode(MV_8 *buf)
+{
+ MV_U32 regVal = 0;
+ __asm volatile ("mrc p15, 1, %0, c15, c1, 0" : "=r" (regVal)); /* Read Marvell extra features register */
+ if (regVal & BIT22)
+ mvOsSPrintf(buf, "L2 Enabled");
+ else
+ mvOsSPrintf(buf, "L2 Disabled");
+}
+
+static void mvCpuIfGetL2PrefetchMode(MV_8 *buf)
+{
+ MV_U32 regVal = 0;
+ __asm volatile ("mrc p15, 1, %0, c15, c1, 0" : "=r" (regVal)); /* Read Marvell extra features register */
+ if (regVal & BIT24)
+ mvOsSPrintf(buf, "L2 Prefetch Disabled");
+ else
+ mvOsSPrintf(buf, "L2 Prefetch Enabled");
+}
+
+static void mvCpuIfGetWriteAllocMode(MV_8 *buf)
+{
+ MV_U32 regVal = 0;
+ __asm volatile ("mrc p15, 1, %0, c15, c1, 0" : "=r" (regVal)); /* Read Marvell extra features register */
+ if (regVal & BIT28)
+ mvOsSPrintf(buf, "Write Allocate Enabled");
+ else
+ mvOsSPrintf(buf, "Write Allocate Disabled");
+}
+
+static void mvCpuIfGetCpuStreamMode(MV_8 *buf)
+{
+ MV_U32 regVal = 0;
+ __asm volatile ("mrc p15, 1, %0, c15, c1, 0" : "=r" (regVal)); /* Read Marvell extra features register */
+ if (regVal & BIT29)
+ mvOsSPrintf(buf, "CPU Streaming Enabled");
+ else
+ mvOsSPrintf(buf, "CPU Streaming Disabled");
+}
+
+static void mvCpuIfPrintCpuRegs(void)
+{
+ MV_U32 regVal = 0;
+
+ __asm volatile ("mrc p15, 1, %0, c15, c1, 0" : "=r" (regVal)); /* Read Marvell extra features register */
+ mvOsPrintf("Extra Feature Reg = 0x%x\n",regVal);
+
+ __asm volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r" (regVal)); /* Read Control register */
+ mvOsPrintf("Control Reg = 0x%x\n",regVal);
+
+ __asm volatile ("mrc p15, 0, %0, c0, c0, 0" : "=r" (regVal)); /* Read ID Code register */
+ mvOsPrintf("ID Code Reg = 0x%x\n",regVal);
+
+ __asm volatile ("mrc p15, 0, %0, c0, c0, 1" : "=r" (regVal)); /* Read Cache Type register */
+ mvOsPrintf("Cache Type Reg = 0x%x\n",regVal);
+
+}
+
+MV_U32 mvCpuIfPrintSystemConfig(MV_8 *buffer, MV_U32 index)
+{
+ MV_U32 count = 0;
+
+ MV_8 L2_ECC_str[MV_PROC_STR_SIZE];
+ MV_8 L2_En_str[MV_PROC_STR_SIZE];
+ MV_8 L2_Prefetch_str[MV_PROC_STR_SIZE];
+ MV_8 Write_Alloc_str[MV_PROC_STR_SIZE];
+ MV_8 Cpu_Stream_str[MV_PROC_STR_SIZE];
+
+ mvCpuIfGetL2Mode(L2_En_str);
+ mvCpuIfGetL2EccMode(L2_ECC_str);
+ mvCpuIfGetL2PrefetchMode(L2_Prefetch_str);
+ mvCpuIfGetWriteAllocMode(Write_Alloc_str);
+ mvCpuIfGetCpuStreamMode(Cpu_Stream_str);
+ mvCpuIfPrintCpuRegs();
+
+ count += mvOsSPrintf(buffer + count + index, "%s\n", L2_En_str);
+ count += mvOsSPrintf(buffer + count + index, "%s\n", L2_ECC_str);
+ count += mvOsSPrintf(buffer + count + index, "%s\n", L2_Prefetch_str);
+ count += mvOsSPrintf(buffer + count + index, "%s\n", Write_Alloc_str);
+ count += mvOsSPrintf(buffer + count + index, "%s\n", Cpu_Stream_str);
+ return count;
+}
+
+MV_U32 whoAmI(MV_VOID)
+{
+ return 0;
+}
+
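As a usage sketch (illustrative only, not part of the patch), the helpers above can be combined to report the CPU identity and clocks during board bring-up; the function name cpuInfoShow is hypothetical:

MV_VOID cpuInfoShow(MV_VOID)
{
	char cpuName[32];		/* mvCpuNameGet() expects at least 32 chars */

	mvCpuNameGet(cpuName);
	mvOsPrintf("CPU:   %s\n", cpuName);
	mvOsPrintf("PClk:  %u\n", mvCpuPclkGet());
	mvOsPrintf("L2Clk: %u\n", mvCpuL2ClkGet());
}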
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.h
new file mode 100644
index 000000000..7f58b03f1
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.h
@@ -0,0 +1,99 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvCpuh
+#define __INCmvCpuh
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+
+/* defines */
+#define CPU_PART_MRVL131 0x131
+#define CPU_PART_ARM926 0x926
+#define CPU_PART_ARM946 0x946
+#define MV_CPU_ARM_CLK_ELM_SIZE 12
+#define MV_CPU_ARM_CLK_RATIO_OFF 8
+#define MV_CPU_ARM_CLK_DDR_OFF 4
+
+#ifndef MV_ASMLANGUAGE
+typedef struct _mvCpuArmClk
+{
+ MV_U32 cpuClk; /* CPU clock in MHz */
+ MV_U32 ddrClk; /* DDR clock in MHz */
+    MV_U32 l2Clk;	/* L2 clock in MHz */
+
+}MV_CPU_ARM_CLK;
+
+MV_U32 mvCpuPclkGet(MV_VOID);
+MV_VOID mvCpuNameGet(char *pNameBuff);
+MV_U32 mvCpuL2ClkGet(MV_VOID);
+MV_U32 mvCpuIfPrintSystemConfig(MV_8 *buffer, MV_U32 index);
+MV_U32 whoAmI(MV_VOID);
+
+#endif /* MV_ASMLANGUAGE */
+
+
+#endif /* __INCmvCpuh */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.c
new file mode 100644
index 000000000..fbe7c566d
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.c
@@ -0,0 +1,296 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvCtrlEnvAddrDec.c - Marvell controller address decode library
+*
+* DESCRIPTION:
+*
+* DEPENDENCIES:
+* None.
+*
+*******************************************************************************/
+
+/* includes */
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+#include "ctrlEnv/sys/mvAhbToMbusRegs.h"
+#include "ddr2/mvDramIfRegs.h"
+#include "pex/mvPexRegs.h"
+
+#define MV_DEBUG
+
+/* defines */
+#ifdef MV_DEBUG
+ #define DB(x) x
+#else
+ #define DB(x)
+#endif
+
+/* Default Attributes array */
+MV_TARGET_ATTRIB mvTargetDefaultsArray[] = TARGETS_DEF_ARRAY;
+extern MV_TARGET *sampleAtResetTargetArray;
+/* DRAM / AHBToMbus / PEX shared register */
+
+#define CTRL_DEC_BASE_OFFS 16
+#define CTRL_DEC_BASE_MASK (0xffff << CTRL_DEC_BASE_OFFS)
+#define CTRL_DEC_BASE_ALIGNMENT 0x10000
+
+#define CTRL_DEC_SIZE_OFFS 16
+#define CTRL_DEC_SIZE_MASK (0xffff << CTRL_DEC_SIZE_OFFS)
+#define CTRL_DEC_SIZE_ALIGNMENT 0x10000
+
+#define CTRL_DEC_WIN_EN BIT0
+
+
+
+/*******************************************************************************
+* mvCtrlAddrDecToReg - Get address decode register format values
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_STATUS mvCtrlAddrDecToReg(MV_ADDR_WIN *pAddrDecWin, MV_DEC_REGS *pAddrDecRegs)
+{
+
+ MV_U32 baseToReg=0 , sizeToReg=0;
+
+ /* BaseLow[31:16] => base register [31:16] */
+ baseToReg = pAddrDecWin->baseLow & CTRL_DEC_BASE_MASK;
+
+ /* Write to address decode Base Address Register */
+ pAddrDecRegs->baseReg &= ~CTRL_DEC_BASE_MASK;
+ pAddrDecRegs->baseReg |= baseToReg;
+
+ /* Get size register value according to window size */
+ sizeToReg = ctrlSizeToReg(pAddrDecWin->size, CTRL_DEC_SIZE_ALIGNMENT);
+
+ /* Size parameter validity check. */
+ if (-1 == sizeToReg)
+ {
+ return MV_BAD_PARAM;
+ }
+
+ /* set size */
+ pAddrDecRegs->sizeReg &= ~CTRL_DEC_SIZE_MASK;
+ pAddrDecRegs->sizeReg |= (sizeToReg << CTRL_DEC_SIZE_OFFS);
+
+
+ return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvCtrlRegToAddrDec - Extract address decode struct from registers.
+*
+* DESCRIPTION:
+*       This function extracts an address decode struct from the address
+*       decode registers given as parameters.
+*
+* INPUT:
+* pAddrDecRegs - Address decode register struct.
+*
+* OUTPUT:
+* pAddrDecWin - Target window data structure.
+*
+* RETURN:
+* MV_BAD_PARAM if address decode registers data is invalid.
+*
+*******************************************************************************/
+MV_STATUS mvCtrlRegToAddrDec(MV_DEC_REGS *pAddrDecRegs, MV_ADDR_WIN *pAddrDecWin)
+{
+ MV_U32 sizeRegVal;
+
+ sizeRegVal = (pAddrDecRegs->sizeReg & CTRL_DEC_SIZE_MASK) >>
+ CTRL_DEC_SIZE_OFFS;
+
+ pAddrDecWin->size = ctrlRegToSize(sizeRegVal, CTRL_DEC_SIZE_ALIGNMENT);
+
+
+ /* Extract base address */
+ /* Base register [31:16] ==> baseLow[31:16] */
+ pAddrDecWin->baseLow = pAddrDecRegs->baseReg & CTRL_DEC_BASE_MASK;
+
+ pAddrDecWin->baseHigh = 0;
+
+ return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvCtrlAttribGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+
+MV_STATUS mvCtrlAttribGet(MV_TARGET target,
+ MV_TARGET_ATTRIB *targetAttrib)
+{
+
+ targetAttrib->attrib = mvTargetDefaultsArray[MV_CHANGE_BOOT_CS(target)].attrib;
+ targetAttrib->targetId = mvTargetDefaultsArray[MV_CHANGE_BOOT_CS(target)].targetId;
+
+ return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvCtrlGetAttrib -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_TARGET mvCtrlTargetGet(MV_TARGET_ATTRIB *targetAttrib)
+{
+ MV_TARGET target;
+ MV_TARGET x;
+ for (target = SDRAM_CS0; target < MAX_TARGETS ; target ++)
+ {
+ x = MV_CHANGE_BOOT_CS(target);
+ if ((mvTargetDefaultsArray[x].attrib == targetAttrib->attrib) &&
+ (mvTargetDefaultsArray[MV_CHANGE_BOOT_CS(target)].targetId == targetAttrib->targetId))
+ {
+ /* found it */
+ break;
+ }
+ }
+
+ return target;
+}
+
+MV_STATUS mvCtrlAddrDecToParams(MV_DEC_WIN *pAddrDecWin,
+ MV_DEC_WIN_PARAMS *pWinParam)
+{
+ MV_U32 baseToReg=0, sizeToReg=0;
+
+ /* BaseLow[31:16] => base register [31:16] */
+ baseToReg = pAddrDecWin->addrWin.baseLow & CTRL_DEC_BASE_MASK;
+
+ /* Write to address decode Base Address Register */
+ pWinParam->baseAddr &= ~CTRL_DEC_BASE_MASK;
+ pWinParam->baseAddr |= baseToReg;
+
+ /* Get size register value according to window size */
+ sizeToReg = ctrlSizeToReg(pAddrDecWin->addrWin.size, CTRL_DEC_SIZE_ALIGNMENT);
+
+ /* Size parameter validity check. */
+ if (-1 == sizeToReg)
+ {
+ mvOsPrintf("mvCtrlAddrDecToParams: ERR. ctrlSizeToReg failed.\n");
+ return MV_BAD_PARAM;
+ }
+ pWinParam->size = sizeToReg;
+
+ pWinParam->attrib = mvTargetDefaultsArray[MV_CHANGE_BOOT_CS(pAddrDecWin->target)].attrib;
+ pWinParam->targetId = mvTargetDefaultsArray[MV_CHANGE_BOOT_CS(pAddrDecWin->target)].targetId;
+
+ return MV_OK;
+}
+
+MV_STATUS mvCtrlParamsToAddrDec(MV_DEC_WIN_PARAMS *pWinParam,
+ MV_DEC_WIN *pAddrDecWin)
+{
+ MV_TARGET_ATTRIB targetAttrib;
+
+ pAddrDecWin->addrWin.baseLow = pWinParam->baseAddr;
+
+ /* Upper 32bit address base is supported under PCI High Address remap */
+ pAddrDecWin->addrWin.baseHigh = 0;
+
+ /* Prepare sizeReg to ctrlRegToSize function */
+ pAddrDecWin->addrWin.size = ctrlRegToSize(pWinParam->size, CTRL_DEC_SIZE_ALIGNMENT);
+
+ if (-1 == pAddrDecWin->addrWin.size)
+ {
+ DB(mvOsPrintf("mvCtrlParamsToAddrDec: ERR. ctrlRegToSize failed.\n"));
+ return MV_BAD_PARAM;
+ }
+ targetAttrib.targetId = pWinParam->targetId;
+ targetAttrib.attrib = pWinParam->attrib;
+
+ pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+ return MV_OK;
+}
+
+
+
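For illustration only (not part of the patch): the two conversion routines above are symmetric for windows whose base and size are multiples of the 64 KB alignment, so a round trip through the register format preserves the window:

MV_ADDR_WIN win, check;
MV_DEC_REGS regs;

win.baseLow  = 0x90000000;	/* 64 KB aligned base */
win.baseHigh = 0;
win.size     = 0x00100000;	/* 1 MB, a multiple of the 64 KB alignment */
regs.baseReg = regs.baseRegHigh = regs.sizeReg = 0;

if ((mvCtrlAddrDecToReg(&win, &regs) == MV_OK) &&
    (mvCtrlRegToAddrDec(&regs, &check) == MV_OK))
{
	/* check.baseLow == 0x90000000 and check.size == 0x00100000 here. */
}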
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.h
new file mode 100644
index 000000000..946737f58
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.h
@@ -0,0 +1,203 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvCtrlEnvAddrDech
+#define __INCmvCtrlEnvAddrDech
+
+/* includes */
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvRegs.h"
+
+
+/* defines */
+/* DUnit attributes */
+#define ATMWCR_WIN_DUNIT_CS0_OFFS 0
+#define ATMWCR_WIN_DUNIT_CS0_MASK BIT0
+#define ATMWCR_WIN_DUNIT_CS0_REQ (0 << ATMWCR_WIN_DUNIT_CS0_OFFS)
+
+#define ATMWCR_WIN_DUNIT_CS1_OFFS 1
+#define ATMWCR_WIN_DUNIT_CS1_MASK BIT1
+#define ATMWCR_WIN_DUNIT_CS1_REQ (0 << ATMWCR_WIN_DUNIT_CS1_OFFS)
+
+#define ATMWCR_WIN_DUNIT_CS2_OFFS 2
+#define ATMWCR_WIN_DUNIT_CS2_MASK BIT2
+#define ATMWCR_WIN_DUNIT_CS2_REQ (0 << ATMWCR_WIN_DUNIT_CS2_OFFS)
+
+#define ATMWCR_WIN_DUNIT_CS3_OFFS 3
+#define ATMWCR_WIN_DUNIT_CS3_MASK BIT3
+#define ATMWCR_WIN_DUNIT_CS3_REQ (0 << ATMWCR_WIN_DUNIT_CS3_OFFS)
+
+/* RUnit (Device) attributes */
+#define ATMWCR_WIN_RUNIT_DEVCS0_OFFS 0
+#define ATMWCR_WIN_RUNIT_DEVCS0_MASK BIT0
+#define ATMWCR_WIN_RUNIT_DEVCS0_REQ (0 << ATMWCR_WIN_RUNIT_DEVCS0_OFFS)
+
+#define ATMWCR_WIN_RUNIT_DEVCS1_OFFS 1
+#define ATMWCR_WIN_RUNIT_DEVCS1_MASK BIT1
+#define ATMWCR_WIN_RUNIT_DEVCS1_REQ (0 << ATMWCR_WIN_RUNIT_DEVCS1_OFFS)
+
+#define ATMWCR_WIN_RUNIT_DEVCS2_OFFS 2
+#define ATMWCR_WIN_RUNIT_DEVCS2_MASK BIT2
+#define ATMWCR_WIN_RUNIT_DEVCS2_REQ (0 << ATMWCR_WIN_RUNIT_DEVCS2_OFFS)
+
+#define ATMWCR_WIN_RUNIT_BOOTCS_OFFS 4
+#define ATMWCR_WIN_RUNIT_BOOTCS_MASK BIT4
+#define ATMWCR_WIN_RUNIT_BOOTCS_REQ (0 << ATMWCR_WIN_RUNIT_BOOTCS_OFFS)
+
+/* LMaster (PCI) attributes */
+#define ATMWCR_WIN_LUNIT_BYTE_SWP_OFFS 0
+#define ATMWCR_WIN_LUNIT_BYTE_SWP_MASK BIT0
+#define ATMWCR_WIN_LUNIT_BYTE_SWP (0 << ATMWCR_WIN_LUNIT_BYTE_SWP_OFFS)
+#define ATMWCR_WIN_LUNIT_BYTE_NO_SWP (1 << ATMWCR_WIN_LUNIT_BYTE_SWP_OFFS)
+
+
+#define ATMWCR_WIN_LUNIT_WORD_SWP_OFFS 1
+#define ATMWCR_WIN_LUNIT_WORD_SWP_MASK BIT1
+#define ATMWCR_WIN_LUNIT_WORD_SWP (0 << ATMWCR_WIN_LUNIT_WORD_SWP_OFFS)
+#define ATMWCR_WIN_LUNIT_WORD_NO_SWP (1 << ATMWCR_WIN_LUNIT_WORD_SWP_OFFS)
+
+#define ATMWCR_WIN_LUNIT_NO_SNOOP BIT2
+
+#define ATMWCR_WIN_LUNIT_TYPE_OFFS 3
+#define ATMWCR_WIN_LUNIT_TYPE_MASK BIT3
+#define ATMWCR_WIN_LUNIT_TYPE_IO (0 << ATMWCR_WIN_LUNIT_TYPE_OFFS)
+#define ATMWCR_WIN_LUNIT_TYPE_MEM (1 << ATMWCR_WIN_LUNIT_TYPE_OFFS)
+
+#define ATMWCR_WIN_LUNIT_FORCE64_OFFS 4
+#define ATMWCR_WIN_LUNIT_FORCE64_MASK BIT4
+#define ATMWCR_WIN_LUNIT_FORCE64 (0 << ATMWCR_WIN_LUNIT_FORCE64_OFFS)
+
+#define ATMWCR_WIN_LUNIT_ORDERING_OFFS 6
+#define ATMWCR_WIN_LUNIT_ORDERING_MASK BIT6
+#define ATMWCR_WIN_LUNIT_ORDERING		(1 << ATMWCR_WIN_LUNIT_ORDERING_OFFS)
+
+/* PEX Attributes */
+#define ATMWCR_WIN_PEX_TYPE_OFFS 3
+#define ATMWCR_WIN_PEX_TYPE_MASK BIT3
+#define ATMWCR_WIN_PEX_TYPE_IO (0 << ATMWCR_WIN_PEX_TYPE_OFFS)
+#define ATMWCR_WIN_PEX_TYPE_MEM (1 << ATMWCR_WIN_PEX_TYPE_OFFS)
+
+/* typedefs */
+
+/* Unsupported attributes for address decode: */
+/* 2) PCI0/1_REQ64n control */
+
+typedef struct _mvDecRegs
+{
+ MV_U32 baseReg;
+ MV_U32 baseRegHigh;
+ MV_U32 sizeReg;
+
+}MV_DEC_REGS;
+
+typedef struct _mvTargetAttrib
+{
+ MV_U8 attrib; /* chip select attributes */
+ MV_TARGET_ID targetId; /* Target Id of this MV_TARGET */
+
+}MV_TARGET_ATTRIB;
+
+
+/* This structure describes address decode window */
+typedef struct _mvDecWin
+{
+ MV_TARGET target; /* Target for addr decode window */
+ MV_ADDR_WIN addrWin; /* Address window of target */
+ MV_BOOL enable; /* Window enable/disable */
+}MV_DEC_WIN;
+
+typedef struct _mvDecWinParams
+{
+ MV_TARGET_ID targetId; /* Target ID field */
+ MV_U8 attrib; /* Attribute field */
+ MV_U32 baseAddr; /* Base address in register format */
+ MV_U32 size; /* Size in register format */
+}MV_DEC_WIN_PARAMS;
+
+
+/* mvCtrlEnvAddrDec API list */
+
+MV_STATUS mvCtrlAddrDecToReg(MV_ADDR_WIN *pAddrDecWin,
+ MV_DEC_REGS *pAddrDecRegs);
+
+MV_STATUS mvCtrlRegToAddrDec(MV_DEC_REGS *pAddrDecRegs,
+ MV_ADDR_WIN *pAddrDecWin);
+
+MV_STATUS mvCtrlAttribGet(MV_TARGET target,
+ MV_TARGET_ATTRIB *targetAttrib);
+
+MV_TARGET mvCtrlTargetGet(MV_TARGET_ATTRIB *targetAttrib);
+
+
+MV_STATUS mvCtrlAddrDecToParams(MV_DEC_WIN *pAddrDecWin,
+ MV_DEC_WIN_PARAMS *pWinParam);
+
+MV_STATUS mvCtrlParamsToAddrDec(MV_DEC_WIN_PARAMS *pWinParam,
+ MV_DEC_WIN *pAddrDecWin);
+
+
+
+
+#endif /* __INCmvCtrlEnvAddrDech */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAsm.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAsm.h
new file mode 100644
index 000000000..6f6367a2f
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAsm.h
@@ -0,0 +1,98 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvCtrlEnvAsmh
+#define __INCmvCtrlEnvAsmh
+#include "pex/mvPexRegs.h"
+
+#define CHIP_BOND_REG 0x10034
+#define PCKG_OPT_MASK_AS #3
+#define PXCCARI_REVID_MASK_AS #PXCCARI_REVID_MASK
+
+/* Read device ID into toReg bits 15:0 from 0xd0000000 */
+/* defines */
+#define MV_DV_CTRL_MODEL_GET_ASM(toReg, tmpReg) \
+ MV_DV_REG_READ_ASM(toReg, tmpReg, CHIP_BOND_REG);\
+ and toReg, toReg, PCKG_OPT_MASK_AS /* Mask for package ID */
+
+/* Read device ID into toReg bits 15:0 from 0xf1000000*/
+#define MV_CTRL_MODEL_GET_ASM(toReg, tmpReg) \
+ MV_REG_READ_ASM(toReg, tmpReg, CHIP_BOND_REG);\
+ and toReg, toReg, PCKG_OPT_MASK_AS /* Mask for package ID */
+
+/* Read Revision into toReg bits 7:0 from 0xd0000000 */
+#define MV_DV_CTRL_REV_GET_ASM(toReg, tmpReg) \
+ /* Read device revision */ \
+ MV_DV_REG_READ_ASM(toReg, tmpReg, PEX_CFG_DIRECT_ACCESS(0,PEX_CLASS_CODE_AND_REVISION_ID));\
+	and	toReg, toReg, PXCCARI_REVID_MASK_AS	/* Mask for revision ID */
+
+/* Read Revision into toReg bits 7:0 from 0xf1000000 */
+#define MV_CTRL_REV_GET_ASM(toReg, tmpReg) \
+ /* Read device revision */ \
+ MV_REG_READ_ASM(toReg, tmpReg, PEX_CFG_DIRECT_ACCESS(0,PEX_CLASS_CODE_AND_REVISION_ID));\
+	and	toReg, toReg, PXCCARI_REVID_MASK_AS	/* Mask for revision ID */
+
+
+#endif /* __INCmvCtrlEnvAsmh */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.c
new file mode 100644
index 000000000..adf451d3e
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.c
@@ -0,0 +1,1825 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/* includes */
+#include "mvCommon.h"
+#include "mvCtrlEnvLib.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+#if defined(MV_INCLUDE_PEX)
+#include "pex/mvPex.h"
+#include "ctrlEnv/sys/mvSysPex.h"
+#endif
+
+#if defined(MV_INCLUDE_GIG_ETH)
+#include "ctrlEnv/sys/mvSysGbe.h"
+#endif
+
+#if defined(MV_INCLUDE_XOR)
+#include "ctrlEnv/sys/mvSysXor.h"
+#endif
+
+#if defined(MV_INCLUDE_SATA)
+#include "ctrlEnv/sys/mvSysSata.h"
+#endif
+
+#if defined(MV_INCLUDE_USB)
+#include "ctrlEnv/sys/mvSysUsb.h"
+#endif
+
+#if defined(MV_INCLUDE_AUDIO)
+#include "ctrlEnv/sys/mvSysAudio.h"
+#endif
+
+#if defined(MV_INCLUDE_CESA)
+#include "ctrlEnv/sys/mvSysCesa.h"
+#endif
+
+#if defined(MV_INCLUDE_TS)
+#include "ctrlEnv/sys/mvSysTs.h"
+#endif
+
+/* defines */
+#ifdef MV_DEBUG
+ #define DB(x) x
+#else
+ #define DB(x)
+#endif
+
+/*******************************************************************************
+* mvCtrlEnvInit - Initialize Marvell controller environment.
+*
+* DESCRIPTION:
+*       This function gets environment information and initializes the
+*       controller's internal/external environment. For example:
+*       1) MPP settings according to board MPP macros.
+*       NOTE: It is the user's responsibility to shut down all DMA channels
+*       in the device and to disable controller sub-unit interrupts during
+*       the boot process.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_STATUS mvCtrlEnvInit(MV_VOID)
+{
+ MV_U32 mppGroup;
+ MV_U32 devId;
+ MV_U32 boardId;
+ MV_U32 i;
+ MV_U32 maxMppGrp = 1;
+ MV_U32 mppVal = 0;
+ MV_U32 bootVal = 0;
+ MV_U32 mppGroupType = 0;
+ MV_U32 mppGroup1[][3] = MPP_GROUP_1_TYPE;
+ MV_U32 mppGroup2[][3] = MPP_GROUP_2_TYPE;
+
+ devId = mvCtrlModelGet();
+ boardId= mvBoardIdGet();
+
+ switch(devId){
+ case MV_6281_DEV_ID:
+ maxMppGrp = MV_6281_MPP_MAX_GROUP;
+ break;
+ case MV_6192_DEV_ID:
+ maxMppGrp = MV_6192_MPP_MAX_GROUP;
+ break;
+ case MV_6190_DEV_ID:
+ maxMppGrp = MV_6190_MPP_MAX_GROUP;
+ break;
+ case MV_6180_DEV_ID:
+ maxMppGrp = MV_6180_MPP_MAX_GROUP;
+ break;
+ }
+
+ /* MPP Init */
+	/* We split MPP init into 3 phases:
+	 * 1. Init mpp[19:0] from the board info; mpp[23:20] will be overwritten
+	 *    in phase 2.
+	 * 2. Detect the MPP group type and set the mpp[35:20] values accordingly.
+	 * 3. Detect the MPP group type and set the mpp[49:36] values accordingly.
+	 */
+ /* Mpp phase 1 mpp[19:0] */
+ /* Read MPP group from board level and assign to MPP register */
+ for (mppGroup = 0; mppGroup < 3; mppGroup++)
+ {
+ mppVal = mvBoardMppGet(mppGroup);
+ if (mppGroup == 0)
+ {
+ bootVal = MV_REG_READ(mvCtrlMppRegGet(mppGroup));
+ if (mvCtrlIsBootFromSPI())
+ {
+ mppVal &= ~0xffff;
+ bootVal &= 0xffff;
+ mppVal |= bootVal;
+ }
+ else if (mvCtrlIsBootFromSPIUseNAND())
+ {
+ mppVal &= ~0xf0000000;
+ bootVal &= 0xf0000000;
+ mppVal |= bootVal;
+ }
+ else if (mvCtrlIsBootFromNAND())
+ {
+ mppVal &= ~0xffffff;
+ bootVal &= 0xffffff;
+ mppVal |= bootVal;
+ }
+ }
+
+ if (mppGroup == 2)
+ {
+ bootVal = MV_REG_READ(mvCtrlMppRegGet(mppGroup));
+ if (mvCtrlIsBootFromNAND())
+ {
+ mppVal &= ~0xff00;
+ bootVal &= 0xff00;
+ mppVal |= bootVal;
+ }
+ }
+
+ MV_REG_WRITE(mvCtrlMppRegGet(mppGroup), mppVal);
+ }
+
+ /* Identify MPPs group */
+ mvBoardMppGroupIdUpdate();
+
+	/* Update MPP mux - relevant only on Marvell DB boards */
+ if ((boardId == DB_88F6281A_BP_ID) ||
+ (boardId == DB_88F6180A_BP_ID))
+ mvBoardMppMuxSet();
+
+ mppGroupType = mvBoardMppGroupTypeGet(MV_BOARD_MPP_GROUP_1);
+
+ /* Mpp phase 2 */
+ /* Read MPP group from board level and assign to MPP register */
+ if (devId != MV_6180_DEV_ID)
+ {
+ i = 0;
+ for (mppGroup = 2; mppGroup < 5; mppGroup++)
+ {
+ if ((mppGroupType == MV_BOARD_OTHER) ||
+ (boardId == RD_88F6281A_ID) ||
+ (boardId == RD_88F6192A_ID) ||
+ (boardId == RD_88F6190A_ID) ||
+ (boardId == RD_88F6281A_PCAC_ID) ||
+ (boardId == SHEEVA_PLUG_ID))
+ mppVal = mvBoardMppGet(mppGroup);
+ else
+ {
+ mppVal = mppGroup1[mppGroupType][i];
+ i++;
+ }
+
+ /* Group 2 is shared mpp[23:16] */
+ if (mppGroup == 2)
+ {
+ bootVal = MV_REG_READ(mvCtrlMppRegGet(mppGroup));
+ mppVal &= ~0xffff;
+ bootVal &= 0xffff;
+ mppVal |= bootVal;
+ }
+
+ MV_REG_WRITE(mvCtrlMppRegGet(mppGroup), mppVal);
+ }
+ }
+
+ if ((devId == MV_6192_DEV_ID) || (devId == MV_6190_DEV_ID))
+ return MV_OK;
+
+ /* Mpp phase 3 */
+ mppGroupType = mvBoardMppGroupTypeGet(MV_BOARD_MPP_GROUP_2);
+ /* Read MPP group from board level and assign to MPP register */
+ i = 0;
+ for (mppGroup = 4; mppGroup < 7; mppGroup++)
+ {
+ if ((mppGroupType == MV_BOARD_OTHER) ||
+ (boardId == RD_88F6281A_ID) ||
+ (boardId == RD_88F6281A_PCAC_ID) ||
+ (boardId == SHEEVA_PLUG_ID))
+ mppVal = mvBoardMppGet(mppGroup);
+ else
+ {
+ mppVal = mppGroup2[mppGroupType][i];
+ i++;
+ }
+
+ /* Group 4 is shared mpp[35:32] */
+ if (mppGroup == 4)
+ {
+ bootVal = MV_REG_READ(mvCtrlMppRegGet(mppGroup));
+ mppVal &= ~0xffff;
+ bootVal &= 0xffff;
+ mppVal |= bootVal;
+ }
+
+ MV_REG_WRITE(mvCtrlMppRegGet(mppGroup), mppVal);
+ }
+ /* Update SSCG configuration register*/
+ if(mvBoardIdGet() == DB_88F6281A_BP_ID || mvBoardIdGet() == DB_88F6192A_BP_ID ||
+ mvBoardIdGet() == DB_88F6190A_BP_ID || mvBoardIdGet() == DB_88F6180A_BP_ID)
+ MV_REG_WRITE(0x100d8, 0x53);
+
+ return MV_OK;
+}
+
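+/* Illustrative sketch (not used by the driver): the boot-source handling in
+ * mvCtrlEnvInit() above repeats one pattern - keep the MPP bits that the boot
+ * ROM already programmed (read back from the register) and take the remaining
+ * bits from the board table. The helper below is a hypothetical example of
+ * that mask-and-merge step; the mask value is an arbitrary example.
+ */
+#if 0 /* example only, not compiled */
+static MV_U32 mvCtrlMppMergeExample(MV_U32 boardVal, MV_U32 curRegVal, MV_U32 keepMask)
+{
+ /* e.g. keepMask = 0xffff preserves MPP[3:0] of the group (SPI boot pins) */
+ return (boardVal & ~keepMask) | (curRegVal & keepMask);
+}
+#endif
+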
+/*******************************************************************************
+* mvCtrlMppRegGet - return reg address of mpp group
+*
+* DESCRIPTION:
+*
+* INPUT:
+* mppGroup - MPP group.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_U32 - Register address.
+*
+*******************************************************************************/
+MV_U32 mvCtrlMppRegGet(MV_U32 mppGroup)
+{
+ MV_U32 ret;
+
+ switch(mppGroup){
+ case (0): ret = MPP_CONTROL_REG0;
+ break;
+ case (1): ret = MPP_CONTROL_REG1;
+ break;
+ case (2): ret = MPP_CONTROL_REG2;
+ break;
+ case (3): ret = MPP_CONTROL_REG3;
+ break;
+ case (4): ret = MPP_CONTROL_REG4;
+ break;
+ case (5): ret = MPP_CONTROL_REG5;
+ break;
+ case (6): ret = MPP_CONTROL_REG6;
+ break;
+ default: ret = MPP_CONTROL_REG0;
+ break;
+ }
+ return ret;
+}
+#if defined(MV_INCLUDE_PEX)
+/*******************************************************************************
+* mvCtrlPexMaxIfGet - Get Marvell controller number of PEX interfaces.
+*
+* DESCRIPTION:
+* This function returns Marvell controller number of PEX interfaces.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Marvell controller number of PEX interfaces. If controller
+* ID is undefined the function returns '0'.
+*
+*******************************************************************************/
+MV_U32 mvCtrlPexMaxIfGet(MV_VOID)
+{
+
+ return MV_PEX_MAX_IF;
+}
+#endif
+
+#if defined(MV_INCLUDE_GIG_ETH)
+/*******************************************************************************
+* mvCtrlEthMaxPortGet - Get Marvell controller number of ethernet ports.
+*
+* DESCRIPTION:
+* This function returns the Marvell controller number of ethernet ports.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Marvell controller number of ethernet ports.
+*
+*******************************************************************************/
+MV_U32 mvCtrlEthMaxPortGet(MV_VOID)
+{
+ MV_U32 devId;
+
+ devId = mvCtrlModelGet();
+
+ switch(devId){
+ case MV_6281_DEV_ID:
+ return MV_6281_ETH_MAX_PORTS;
+ break;
+ case MV_6192_DEV_ID:
+ return MV_6192_ETH_MAX_PORTS;
+ break;
+ case MV_6190_DEV_ID:
+ return MV_6190_ETH_MAX_PORTS;
+ break;
+ case MV_6180_DEV_ID:
+ return MV_6180_ETH_MAX_PORTS;
+ break;
+ }
+ return 0;
+
+}
+#endif
+
+#if defined(MV_INCLUDE_XOR)
+/*******************************************************************************
+* mvCtrlXorMaxChanGet - Get Marvell controller number of XOR channels.
+*
+* DESCRIPTION:
+* This function returns Marvell controller number of XOR channels.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Marvell controller number of XOR channels.
+*
+*******************************************************************************/
+MV_U32 mvCtrlXorMaxChanGet(MV_VOID)
+{
+ return MV_XOR_MAX_CHAN;
+}
+#endif
+
+#if defined(MV_INCLUDE_USB)
+/*******************************************************************************
+* mvCtrlUsbMaxGet - Get number of Marvell USB controllers
+*
+* DESCRIPTION:
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* returns number of Marvell USB controllers.
+*
+*******************************************************************************/
+MV_U32 mvCtrlUsbMaxGet(void)
+{
+ return MV_USB_MAX_PORTS;
+}
+#endif
+
+
+#if defined(MV_INCLUDE_NAND)
+/*******************************************************************************
+* mvCtrlNandSupport - Return if this controller has integrated NAND flash support
+*
+* DESCRIPTION:
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE if NAND is supported and MV_FALSE otherwise
+*
+*******************************************************************************/
+MV_U32 mvCtrlNandSupport(MV_VOID)
+{
+ MV_U32 devId;
+
+ devId = mvCtrlModelGet();
+
+ switch(devId){
+ case MV_6281_DEV_ID:
+ return MV_6281_NAND;
+ break;
+ case MV_6192_DEV_ID:
+ return MV_6192_NAND;
+ break;
+ case MV_6190_DEV_ID:
+ return MV_6190_NAND;
+ break;
+ case MV_6180_DEV_ID:
+ return MV_6180_NAND;
+ break;
+ }
+ return 0;
+
+}
+#endif
+
+#if defined(MV_INCLUDE_SDIO)
+/*******************************************************************************
+* mvCtrlSdioSupport - Return if this controller has integrated SDIO support
+*
+* DESCRIPTION:
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE if SDIO is supported and MV_FALSE otherwise
+*
+*******************************************************************************/
+MV_U32 mvCtrlSdioSupport(MV_VOID)
+{
+ MV_U32 devId;
+
+ devId = mvCtrlModelGet();
+
+ switch(devId){
+ case MV_6281_DEV_ID:
+ return MV_6281_SDIO;
+ break;
+ case MV_6192_DEV_ID:
+ return MV_6192_SDIO;
+ break;
+ case MV_6190_DEV_ID:
+ return MV_6190_SDIO;
+ break;
+ case MV_6180_DEV_ID:
+ return MV_6180_SDIO;
+ break;
+ }
+ return 0;
+
+}
+#endif
+
+#if defined(MV_INCLUDE_TS)
+/*******************************************************************************
+* mvCtrlTsSupport - Return if this controller has integrated TS (transport stream) support
+*
+* DESCRIPTION:
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE if TS is supported and MV_FALSE otherwise
+*
+*******************************************************************************/
+MV_U32 mvCtrlTsSupport(MV_VOID)
+{
+ MV_U32 devId;
+
+ devId = mvCtrlModelGet();
+
+ switch(devId){
+ case MV_6281_DEV_ID:
+ return MV_6281_TS;
+ break;
+ case MV_6192_DEV_ID:
+ return MV_6192_TS;
+ break;
+ case MV_6190_DEV_ID:
+ return MV_6190_TS;
+ break;
+ case MV_6180_DEV_ID:
+ return MV_6180_TS;
+ break;
+ }
+ return 0;
+}
+#endif
+
+#if defined(MV_INCLUDE_AUDIO)
+/*******************************************************************************
+* mvCtrlAudioSupport - Return if this controller has integrated AUDIO support
+*
+* DESCRIPTION:
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE if AUDIO is supported and MV_FALSE otherwise
+*
+*******************************************************************************/
+MV_U32 mvCtrlAudioSupport(MV_VOID)
+{
+ MV_U32 devId;
+
+ devId = mvCtrlModelGet();
+
+ switch(devId){
+ case MV_6281_DEV_ID:
+ return MV_6281_AUDIO;
+ break;
+ case MV_6192_DEV_ID:
+ return MV_6192_AUDIO;
+ break;
+ case MV_6190_DEV_ID:
+ return MV_6190_AUDIO;
+ break;
+ case MV_6180_DEV_ID:
+ return MV_6180_AUDIO;
+ break;
+ }
+ return 0;
+
+}
+#endif
+
+#if defined(MV_INCLUDE_TDM)
+/*******************************************************************************
+* mvCtrlTdmSupport - Return if this controller has integrated TDM support
+*
+* DESCRIPTION:
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE if TDM is supported and MV_FALSE otherwise
+*
+*******************************************************************************/
+MV_U32 mvCtrlTdmSupport(MV_VOID)
+{
+ MV_U32 devId;
+
+ devId = mvCtrlModelGet();
+
+ switch(devId){
+ case MV_6281_DEV_ID:
+ return MV_6281_TDM;
+ break;
+ case MV_6192_DEV_ID:
+ return MV_6192_TDM;
+ break;
+ case MV_6190_DEV_ID:
+ return MV_6190_TDM;
+ break;
+ case MV_6180_DEV_ID:
+ return MV_6180_TDM;
+ break;
+ }
+ return 0;
+
+}
+#endif
+
+/*******************************************************************************
+* mvCtrlModelGet - Get Marvell controller device model (Id)
+*
+* DESCRIPTION:
+* This function returns a 16-bit value describing the device model (ID) as defined
+* in PCI Device and Vendor ID configuration register offset 0x0.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 16-bit value describing the Marvell controller ID
+*
+*******************************************************************************/
+MV_U16 mvCtrlModelGet(MV_VOID)
+{
+ MV_U32 devId;
+
+ devId = MV_REG_READ(CHIP_BOND_REG);
+ devId &= PCKG_OPT_MASK;
+
+ switch(devId){
+ case 2:
+ return MV_6281_DEV_ID;
+ break;
+ case 1:
+ if (((MV_REG_READ(PEX_CFG_DIRECT_ACCESS(0,PEX_DEVICE_AND_VENDOR_ID))& 0xffff0000) >> 16)
+ == MV_6190_DEV_ID)
+ return MV_6190_DEV_ID;
+ else
+ return MV_6192_DEV_ID;
+ break;
+ case 0:
+ return MV_6180_DEV_ID;
+ break;
+ }
+
+ return 0;
+}
+/*******************************************************************************
+* mvCtrlRevGet - Get Marvell controller device revision number
+*
+* DESCRIPTION:
+* This function returns an 8-bit value describing the device revision as defined
+* in PCI Express Class Code and Revision ID Register.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 8-bit value describing the Marvell controller revision number
+*
+*******************************************************************************/
+MV_U8 mvCtrlRevGet(MV_VOID)
+{
+ MV_U8 revNum;
+#if defined(MV_INCLUDE_CLK_PWR_CNTRL)
+ /* Check pex power state */
+ MV_U32 pexPower;
+ pexPower = mvCtrlPwrClckGet(PEX_UNIT_ID,0);
+ if (pexPower == MV_FALSE)
+ mvCtrlPwrClckSet(PEX_UNIT_ID, 0, MV_TRUE);
+#endif
+ revNum = (MV_U8)MV_REG_READ(PEX_CFG_DIRECT_ACCESS(0,PCI_CLASS_CODE_AND_REVISION_ID));
+#if defined(MV_INCLUDE_CLK_PWR_CNTRL)
+ /* Return to power off state */
+ if (pexPower == MV_FALSE)
+ mvCtrlPwrClckSet(PEX_UNIT_ID, 0, MV_FALSE);
+#endif
+ return ((revNum & PCCRIR_REVID_MASK) >> PCCRIR_REVID_OFFS);
+}
+
+/*******************************************************************************
+* mvCtrlNameGet - Get Marvell controller name
+*
+* DESCRIPTION:
+* This function returns a string describing the device model and revision.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* pNameBuff - Buffer to contain device name string. Minimum size 30 chars.
+*
+* RETURN:
+*
+* MV_ERROR if the information cannot be read.
+*******************************************************************************/
+MV_STATUS mvCtrlNameGet(char *pNameBuff)
+{
+ mvOsSPrintf (pNameBuff, "%s%x Rev %d", SOC_NAME_PREFIX,
+ mvCtrlModelGet(), mvCtrlRevGet());
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvCtrlModelRevGet - Get Controller Model (Device ID) and Revision
+*
+* DESCRIPTION:
+* This function returns a 32-bit value describing both the Device ID, as
+* defined in the PCI Express Device and Vendor ID Register, and the device
+* revision, as defined in the PCI Express Class Code and Revision ID Register.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32bit describing both controller device ID and revision number
+*
+*******************************************************************************/
+MV_U32 mvCtrlModelRevGet(MV_VOID)
+{
+ return ((mvCtrlModelGet() << 16) | mvCtrlRevGet());
+}
+
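+/* Illustrative sketch (not used by the driver): the combined value packs the
+ * 16-bit model ID in the upper half-word and the 8-bit revision in the low
+ * byte, so a caller can split it back as shown below.
+ */
+#if 0 /* example only, not compiled */
+static MV_VOID mvCtrlModelRevExample(MV_VOID)
+{
+ MV_U32 modelRev = mvCtrlModelRevGet();
+ MV_U16 model = (MV_U16)(modelRev >> 16); /* e.g. MV_6281_DEV_ID */
+ MV_U8 rev = (MV_U8)(modelRev & 0xff); /* silicon revision */
+
+ mvOsPrintf("model 0x%x rev %d\n", model, rev);
+}
+#endif
+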
+/*******************************************************************************
+* mvCtrlModelRevNameGet - Get Marvell controller name
+*
+* DESCRIPTION:
+* This function returns a string describing the device model and revision.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* pNameBuff - Buffer to contain device name string. Minimum size 30 chars.
+*
+* RETURN:
+*
+* MV_ERROR if the information cannot be read.
+*******************************************************************************/
+
+MV_STATUS mvCtrlModelRevNameGet(char *pNameBuff)
+{
+
+ switch (mvCtrlModelRevGet())
+ {
+ case MV_6281_A0_ID:
+ mvOsSPrintf (pNameBuff, "%s",MV_6281_A0_NAME);
+ break;
+ case MV_6192_A0_ID:
+ mvOsSPrintf (pNameBuff, "%s",MV_6192_A0_NAME);
+ break;
+ case MV_6180_A0_ID:
+ mvOsSPrintf (pNameBuff, "%s",MV_6180_A0_NAME);
+ break;
+ case MV_6190_A0_ID:
+ mvOsSPrintf (pNameBuff, "%s",MV_6190_A0_NAME);
+ break;
+ case MV_6281_A1_ID:
+ mvOsSPrintf (pNameBuff, "%s",MV_6281_A1_NAME);
+ break;
+ case MV_6192_A1_ID:
+ mvOsSPrintf (pNameBuff, "%s",MV_6192_A1_NAME);
+ break;
+ case MV_6180_A1_ID:
+ mvOsSPrintf (pNameBuff, "%s",MV_6180_A1_NAME);
+ break;
+ case MV_6190_A1_ID:
+ mvOsSPrintf (pNameBuff, "%s",MV_6190_A1_NAME);
+ break;
+ default:
+ mvCtrlNameGet(pNameBuff);
+ break;
+ }
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* ctrlWinOverlapTest - Test address windows for overlapping.
+*
+* DESCRIPTION:
+* This function checks whether the two given address windows overlap.
+*
+* INPUT:
+* pAddrWin1 - Address window 1.
+* pAddrWin2 - Address window 2.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*
+* MV_TRUE if the address windows overlap, MV_FALSE otherwise.
+*******************************************************************************/
+MV_STATUS ctrlWinOverlapTest(MV_ADDR_WIN *pAddrWin1, MV_ADDR_WIN *pAddrWin2)
+{
+ MV_U32 winBase1, winBase2;
+ MV_U32 winTop1, winTop2;
+
+ /* Check for address overflow beyond 4 GB */
+ if (((0xffffffff - pAddrWin1->baseLow) < pAddrWin1->size-1)||
+ ((0xffffffff - pAddrWin2->baseLow) < pAddrWin2->size-1))
+ {
+ return MV_TRUE;
+ }
+
+ winBase1 = pAddrWin1->baseLow;
+ winBase2 = pAddrWin2->baseLow;
+ winTop1 = winBase1 + pAddrWin1->size-1;
+ winTop2 = winBase2 + pAddrWin2->size-1;
+
+
+ if (((winBase1 <= winTop2 ) && ( winTop2 <= winTop1)) ||
+ ((winBase1 <= winBase2) && (winBase2 <= winTop1)))
+ {
+ return MV_TRUE;
+ }
+ else
+ {
+ return MV_FALSE;
+ }
+}
+
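+/* Illustrative sketch (not used by the driver): the two windows below are
+ * arbitrary examples. Only baseLow and size are set, since those are the
+ * only MV_ADDR_WIN fields the test reads. Window 1 covers 0x0..0xFFFFF and
+ * window 2 covers 0x80000..0x17FFFF, so they share 0x80000..0xFFFFF and the
+ * test returns MV_TRUE.
+ */
+#if 0 /* example only, not compiled */
+static MV_VOID ctrlWinOverlapExample(MV_VOID)
+{
+ MV_ADDR_WIN win1, win2;
+
+ win1.baseLow = 0x00000000; win1.size = 0x100000;
+ win2.baseLow = 0x00080000; win2.size = 0x100000;
+
+ if (ctrlWinOverlapTest(&win1, &win2) == MV_TRUE)
+ mvOsPrintf("windows overlap\n");
+}
+#endif
+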
+/*******************************************************************************
+* ctrlWinWithinWinTest - Test whether one address window lies within another.
+*
+* DESCRIPTION:
+* This function checks whether the win1 boundaries lie within the
+* win2 boundaries.
+*
+* INPUT:
+* pAddrWin1 - Address window 1.
+* pAddrWin2 - Address window 2.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*
+* MV_TRUE if win1 is inside win2, MV_FALSE otherwise.
+*******************************************************************************/
+MV_STATUS ctrlWinWithinWinTest(MV_ADDR_WIN *pAddrWin1, MV_ADDR_WIN *pAddrWin2)
+{
+ MV_U32 winBase1, winBase2;
+ MV_U32 winTop1, winTop2;
+
+ winBase1 = pAddrWin1->baseLow;
+ winBase2 = pAddrWin2->baseLow;
+ winTop1 = winBase1 + pAddrWin1->size -1;
+ winTop2 = winBase2 + pAddrWin2->size -1;
+
+ if (((winBase1 >= winBase2 ) && ( winBase1 <= winTop2)) ||
+ ((winTop1 >= winBase2) && (winTop1 <= winTop2)))
+ {
+ return MV_TRUE;
+ }
+ else
+ {
+ return MV_FALSE;
+ }
+}
+
+static const char* cntrlName[] = TARGETS_NAME_ARRAY;
+
+/*******************************************************************************
+* mvCtrlTargetNameGet - Get Marvell controller target name
+*
+* DESCRIPTION:
+* This function converts the target enumeration to a string.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Target name (const MV_8 *)
+*******************************************************************************/
+const MV_8* mvCtrlTargetNameGet( MV_TARGET target )
+{
+
+ if (target >= MAX_TARGETS)
+ {
+ return "target unknown";
+ }
+
+ return cntrlName[target];
+}
+
+/*******************************************************************************
+* mvCtrlAddrDecShow - Print the Controller units address decode map.
+*
+* DESCRIPTION:
+* This function prints the Controller units' address decode map.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID mvCtrlAddrDecShow(MV_VOID)
+{
+ mvCpuIfAddDecShow();
+ mvAhbToMbusAddDecShow();
+#if defined(MV_INCLUDE_PEX)
+ mvPexAddrDecShow();
+#endif
+#if defined(MV_INCLUDE_USB)
+ mvUsbAddrDecShow();
+#endif
+#if defined(MV_INCLUDE_GIG_ETH)
+ mvEthAddrDecShow();
+#endif
+#if defined(MV_INCLUDE_XOR)
+ mvXorAddrDecShow();
+#endif
+#if defined(MV_INCLUDE_SATA)
+ mvSataAddrDecShow();
+#endif
+#if defined(MV_INCLUDE_AUDIO)
+ mvAudioAddrDecShow();
+#endif
+#if defined(MV_INCLUDE_TS)
+ mvTsuAddrDecShow();
+#endif
+}
+
+/*******************************************************************************
+* ctrlSizeToReg - Extract size value for register assignment.
+*
+* DESCRIPTION:
+* The address decode size parameter must be programmed from LSB to MSB as
+* a sequence of 1's followed by a sequence of 0's. The number of 1's
+* specifies the size of the window in 64 KB granularity (e.g. a
+* value of 0x00ff specifies 256x64K = 16 MB).
+* This function extracts the size register value from the size parameter
+* according to the given alignment parameter. For example, for size
+* 0x1000000 (16MB) and alignment 0x10000 (64KB) the function returns 0x00FF.
+*
+* INPUT:
+* size - Size.
+* alignment - Size alignment. Note that alignment must be power of 2!
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32-bit size register value corresponding to the size parameter.
+* A return value of -1 indicates that the size or alignment parameter is invalid.
+*******************************************************************************/
+MV_U32 ctrlSizeToReg(MV_U32 size, MV_U32 alignment)
+{
+ MV_U32 retVal;
+
+ /* Check size parameter alignment */
+ if ((0 == size) || (MV_IS_NOT_ALIGN(size, alignment)))
+ {
+ DB(mvOsPrintf("ctrlSizeToReg: ERR. Size is zero or not aligned.\n"));
+ return -1;
+ }
+
+ /* Take the "alignment" portion out of the size parameter */
+ alignment--; /* Now the alignment is a sequence of '1's (e.g. 0xffff) */
+ /* and size is 0x1000000 (16MB) for example */
+ while(alignment & 1) /* Check that the alignment LSB is set */
+ {
+ size = (size >> 1); /* If LSB is set, move 'size' one bit to the right */
+ alignment = (alignment >> 1);
+ }
+
+ /* If a '1' remains in the alignment after its first '0' was met, */
+ /* then the alignment is invalid (not a power of 2) */
+ if (alignment)
+ {
+ DB(mvOsPrintf("ctrlSizeToReg: ERR. Alignment parameter 0x%x invalid.\n",
+ (MV_U32)alignment));
+ return -1;
+ }
+
+ /* Now the size is shifted right according to the alignment: 0x0100 */
+ size--; /* Now the size is a sequence of '1's: 0x00ff */
+
+ retVal = size ;
+
+ /* Check that LSB to MSB is sequence of 1's followed by sequence of 0's */
+ while(size & 1) /* Check that LSB is set */
+ {
+ size = (size >> 1); /* If LSB is set, move one bit to the right */
+ }
+
+ if (size) /* Sequence of 1's is over. Check that there are no other 1's */
+ {
+ DB(mvOsPrintf("ctrlSizeToReg: ERR. Size parameter 0x%x invalid.\n",
+ size));
+ return -1;
+ }
+
+ return retVal;
+
+}
+
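+/* Illustrative sketch (not used by the driver): ctrlSizeToReg() encodes a
+ * window size in the sequence-of-1's register format described above. The
+ * sizes and alignments below are arbitrary examples.
+ */
+#if 0 /* example only, not compiled */
+static MV_VOID ctrlSizeToRegExample(MV_VOID)
+{
+ /* 16 MB window in 64 KB units: 0x1000000 / 0x10000 = 0x100 units,
+ * so the register encoding is 0x100 - 1 = 0x00FF. */
+ MV_U32 regVal = ctrlSizeToReg(0x1000000, 0x10000);
+
+ /* A size that is not a power-of-2 multiple of the alignment
+ * (e.g. 0xA1000 with 0x1000 alignment) cannot be encoded: -1. */
+ if (ctrlSizeToReg(0xA1000, 0x1000) == (MV_U32)-1)
+ mvOsPrintf("0xA1000 is not encodable, 16MB encodes to 0x%x\n", regVal);
+}
+#endif
+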
+/*******************************************************************************
+* ctrlRegToSize - Extract size value from register value.
+*
+* DESCRIPTION:
+* This function extracts a size value from the register size parameter
+* according to the given alignment parameter. For example, for register size
+* value 0xff and alignment 0x10000 the function returns 0x01000000.
+*
+* INPUT:
+* regSize - Size as in register format. See ctrlSizeToReg.
+* alignment - Size alignment. Note that alignment must be power of 2!
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32-bit value describing the size.
+* A return value of -1 indicates that the size or alignment parameter is invalid.
+*******************************************************************************/
+MV_U32 ctrlRegToSize(MV_U32 regSize, MV_U32 alignment)
+{
+ MV_U32 temp;
+
+ /* Check that LSB to MSB is sequence of 1's followed by sequence of 0's */
+ temp = regSize; /* Now the size is a sequence of '1's: 0x00ff */
+
+ while(temp & 1) /* Check that LSB is set */
+ {
+ temp = (temp >> 1); /* If LSB is set, move one bit to the right */
+ }
+
+ if (temp) /* Sequence of 1's is over. Check that there are no other 1's */
+ {
+ DB(mvOsPrintf("ctrlRegToSize: ERR. Size parameter 0x%x invalid.\n",
+ regSize));
+ return -1;
+ }
+
+
+ /* Check that the alignment is a power of two */
+ temp = alignment - 1; /* Now the alignment is a sequence of '1's (0xffff) */
+
+ while(temp & 1) /* Check that the alignment LSB is set */
+ {
+ temp = (temp >> 1); /* If LSB is set, move 'temp' one bit to the right */
+ }
+
+ /* If a '1' remains in 'temp' after its first '0' was met, */
+ /* then the alignment is invalid (not a power of 2) */
+ if (temp)
+ {
+ DB(mvOsPrintf("ctrlSizeToReg: ERR. Alignment parameter 0x%x invalid.\n",
+ alignment));
+ return -1;
+ }
+
+ regSize++; /* Now the size is 0x0100 */
+
+ /* Add in the "alignment" portion to the register size parameter */
+ alignment--; /* Now the alignment is a sequence of '1's (e.g. 0xffff) */
+
+ while(alignment & 1) /* Check that the alignment LSB is set */
+ {
+ regSize = (regSize << 1); /* LSB is set, move 'size' one bit left */
+ alignment = (alignment >> 1);
+ }
+
+ return regSize;
+}
+
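+/* Illustrative sketch (not used by the driver): ctrlRegToSize() is the
+ * inverse of ctrlSizeToReg(). The values below are arbitrary examples.
+ */
+#if 0 /* example only, not compiled */
+static MV_VOID ctrlRegToSizeExample(MV_VOID)
+{
+ /* Register value 0xFF with 64 KB alignment decodes to
+ * (0xFF + 1) * 0x10000 = 0x1000000 (16 MB). */
+ MV_U32 size = ctrlRegToSize(0xFF, 0x10000);
+
+ mvOsPrintf("decoded window size = 0x%x\n", size);
+}
+#endif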
+
+/*******************************************************************************
+* ctrlSizeRegRoundUp - Round up given size
+*
+* DESCRIPTION:
+* This function rounds up a given size to a size that fits the
+* restrictions of the size format for the given alignment parameter.
+* For example, for size parameter 0xa1000 and alignment 0x1000 the
+* function returns 0x100000.
+*
+* INPUT:
+* size - Size.
+* alignment - Size alignment. Note that alignment must be power of 2!
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32-bit size value that can be encoded in the size register.
+*******************************************************************************/
+MV_U32 ctrlSizeRegRoundUp(MV_U32 size, MV_U32 alignment)
+{
+ MV_U32 msbBit = 0;
+ MV_U32 retSize;
+
+ /* Check if the size parameter already complies with the restrictions */
+ if (!(-1 == ctrlSizeToReg(size, alignment)))
+ {
+ return size;
+ }
+
+ while(size)
+ {
+ size = (size >> 1);
+ msbBit++;
+ }
+
+ retSize = (1 << msbBit);
+
+ if (retSize < alignment)
+ {
+ return alignment;
+ }
+ else
+ {
+ return retSize;
+ }
+}
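+
+/* Illustrative sketch (not used by the driver): ctrlSizeRegRoundUp() rounds an
+ * arbitrary size up to the next power-of-2 multiple of the alignment so that
+ * the result can be encoded by ctrlSizeToReg(). The values below are
+ * arbitrary examples.
+ */
+#if 0 /* example only, not compiled */
+static MV_VOID ctrlSizeRegRoundUpExample(MV_VOID)
+{
+ /* 0xA1000 is not a power-of-2 multiple of 0x1000, so it is rounded up
+ * to 0x100000; 0x100000 / 0x1000 = 0x100 units -> register value 0x00FF. */
+ MV_U32 size = ctrlSizeRegRoundUp(0xA1000, 0x1000);
+ MV_U32 regVal = ctrlSizeToReg(size, 0x1000);
+
+ mvOsPrintf("rounded size = 0x%x, regVal = 0x%x\n", size, regVal);
+}
+#endif
+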
+/*******************************************************************************
+* mvCtrlSysRstLengthCounterGet - Return the number of milliseconds the reset
+* button was pressed and clear the counter
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN: number of milliseconds the reset button was pressed
+*******************************************************************************/
+MV_U32 mvCtrlSysRstLengthCounterGet(MV_VOID)
+{
+ static volatile MV_U32 Count = 0;
+
+ if(!Count) {
+ Count = (MV_REG_READ(SYSRST_LENGTH_COUNTER_REG) & SLCR_COUNT_MASK);
+ Count = (Count / (MV_BOARD_REFCLK_25MHZ / 1000));
+ /* clear counter for next boot */
+ MV_REG_BIT_SET(SYSRST_LENGTH_COUNTER_REG, SLCR_CLR_MASK);
+ }
+
+ DB(mvOsPrintf("mvCtrlSysRstLengthCounterGet: Reset button was pressed for %u milliseconds\n", Count));
+
+ return Count;
+}
+
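+/* Worked example (illustrative only, values are arbitrary): with the 25 MHz
+ * reference clock the counter advances 25,000 ticks per millisecond, so a raw
+ * count of 125,000,000 converts to 125000000 / (25000000 / 1000) = 5000 ms,
+ * i.e. the reset button was held for about 5 seconds.
+ */
+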
+MV_BOOL mvCtrlIsBootFromSPI(MV_VOID)
+{
+ MV_U32 satr = 0;
+ satr = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+ if(mvCtrlModelGet() == MV_6180_DEV_ID)
+ {
+ if (MSAR_BOOT_MODE_6180(satr) == MSAR_BOOT_SPI_WITH_BOOTROM_6180)
+ return MV_TRUE;
+ else
+ return MV_FALSE;
+ }
+ satr = satr & MSAR_BOOT_MODE_MASK;
+ if (satr == MSAR_BOOT_SPI_WITH_BOOTROM)
+ return MV_TRUE;
+ else
+ return MV_FALSE;
+}
+
+MV_BOOL mvCtrlIsBootFromSPIUseNAND(MV_VOID)
+{
+ MV_U32 satr = 0;
+ if(mvCtrlModelGet() == MV_6180_DEV_ID)
+ return MV_FALSE;
+ satr = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+ satr = satr & MSAR_BOOT_MODE_MASK;
+
+ if (satr == MSAR_BOOT_SPI_USE_NAND_WITH_BOOTROM)
+ return MV_TRUE;
+ else
+ return MV_FALSE;
+}
+
+MV_BOOL mvCtrlIsBootFromNAND(MV_VOID)
+{
+ MV_U32 satr = 0;
+ satr = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+ if(mvCtrlModelGet() == MV_6180_DEV_ID)
+ {
+ if (MSAR_BOOT_MODE_6180(satr) == MSAR_BOOT_NAND_WITH_BOOTROM_6180)
+ return MV_TRUE;
+ else
+ return MV_FALSE;
+ }
+ satr = satr & MSAR_BOOT_MODE_MASK;
+ if ((satr == MSAR_BOOT_NAND_WITH_BOOTROM))
+ return MV_TRUE;
+ else
+ return MV_FALSE;
+}
+
+#if defined(MV_INCLUDE_CLK_PWR_CNTRL)
+/*******************************************************************************
+* mvCtrlPwrSaveOn - Set Power save mode
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*******************************************************************************/
+MV_VOID mvCtrlPwrSaveOn(MV_VOID)
+{
+ unsigned long old,temp;
+ /* Disable int */
+ __asm__ __volatile__("mrs %0, cpsr\n"
+ "orr %1, %0, #0xc0\n"
+ "msr cpsr_c, %1"
+ : "=r" (old), "=r" (temp)
+ :
+ : "memory");
+
+ /* Set SoC in power save */
+ MV_REG_BIT_SET(POWER_MNG_CTRL_REG, BIT11);
+ /* Wait for int */
+ __asm__ __volatile__("mcr p15, 0, r0, c7, c0, 4");
+
+ /* Enable int */
+ __asm__ __volatile__("msr cpsr_c, %0"
+ :
+ : "r" (old)
+ : "memory");
+}
+
+
+
+/*******************************************************************************
+* mvCtrlPwrSaveOff - Go out of power save mode
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*******************************************************************************/
+MV_VOID mvCtrlPwrSaveOff(MV_VOID)
+{
+ unsigned long old,temp;
+ /* Disable int */
+ __asm__ __volatile__("mrs %0, cpsr\n"
+ "orr %1, %0, #0xc0\n"
+ "msr cpsr_c, %1"
+ : "=r" (old), "=r" (temp)
+ :
+ : "memory");
+
+ /* Take SoC out of power save */
+ MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, BIT11);
+ /* Wait for int */
+ __asm__ __volatile__("mcr p15, 0, r0, c7, c0, 4");
+
+ /* Enable int */
+ __asm__ __volatile__("msr cpsr_c, %0"
+ :
+ : "r" (old)
+ : "memory");
+}
+
+/*******************************************************************************
+* mvCtrlPwrClckSet - Set Power State for specific Unit
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*******************************************************************************/
+MV_VOID mvCtrlPwrClckSet(MV_UNIT_ID unitId, MV_U32 index, MV_BOOL enable)
+{
+ switch (unitId)
+ {
+#if defined(MV_INCLUDE_PEX)
+ case PEX_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_PEXSTOPCLOCK_MASK);
+ }
+ else
+ {
+ MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_PEXSTOPCLOCK_MASK);
+ }
+ break;
+#endif
+#if defined(MV_INCLUDE_GIG_ETH)
+ case ETH_GIG_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_GESTOPCLOCK_MASK(index));
+ }
+ else
+ {
+ MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_GESTOPCLOCK_MASK(index));
+ }
+ break;
+#endif
+#if defined(MV_INCLUDE_INTEG_SATA)
+ case SATA_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_SATASTOPCLOCK_MASK(index));
+ }
+ else
+ {
+ MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_SATASTOPCLOCK_MASK(index));
+ }
+ break;
+#endif
+#if defined(MV_INCLUDE_CESA)
+ case CESA_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_SESTOPCLOCK_MASK);
+ }
+ else
+ {
+ MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_SESTOPCLOCK_MASK);
+ }
+ break;
+#endif
+#if defined(MV_INCLUDE_USB)
+ case USB_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_USBSTOPCLOCK_MASK);
+ }
+ else
+ {
+ MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_USBSTOPCLOCK_MASK);
+ }
+ break;
+#endif
+#if defined(MV_INCLUDE_AUDIO)
+ case AUDIO_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_AUDIOSTOPCLOCK_MASK);
+ }
+ else
+ {
+ MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_AUDIOSTOPCLOCK_MASK);
+ }
+ break;
+#endif
+#if defined(MV_INCLUDE_TS)
+ case TS_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_TSSTOPCLOCK_MASK);
+ }
+ else
+ {
+ MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_TSSTOPCLOCK_MASK);
+ }
+ break;
+#endif
+#if defined(MV_INCLUDE_SDIO)
+ case SDIO_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_SDIOSTOPCLOCK_MASK);
+ }
+ else
+ {
+ MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_SDIOSTOPCLOCK_MASK);
+ }
+ break;
+#endif
+#if defined(MV_INCLUDE_TDM)
+ case TDM_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_TDMSTOPCLOCK_MASK);
+ }
+ else
+ {
+ MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_TDMSTOPCLOCK_MASK);
+ }
+ break;
+#endif
+
+ default:
+
+ break;
+
+ }
+}
+
+/*******************************************************************************
+* mvCtrlPwrClckGet - Get Power State of specific Unit
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+******************************************************************************/
+MV_BOOL mvCtrlPwrClckGet(MV_UNIT_ID unitId, MV_U32 index)
+{
+ MV_U32 reg = MV_REG_READ(POWER_MNG_CTRL_REG);
+ MV_BOOL state = MV_TRUE;
+
+ switch (unitId)
+ {
+#if defined(MV_INCLUDE_PEX)
+ case PEX_UNIT_ID:
+ if ((reg & PMC_PEXSTOPCLOCK_MASK) == PMC_PEXSTOPCLOCK_STOP)
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+
+ break;
+#endif
+#if defined(MV_INCLUDE_GIG_ETH)
+ case ETH_GIG_UNIT_ID:
+ if ((reg & PMC_GESTOPCLOCK_MASK(index)) == PMC_GESTOPCLOCK_STOP(index))
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+ break;
+#endif
+#if defined(MV_INCLUDE_SATA)
+ case SATA_UNIT_ID:
+ if ((reg & PMC_SATASTOPCLOCK_MASK(index)) == PMC_SATASTOPCLOCK_STOP(index))
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+ break;
+#endif
+#if defined(MV_INCLUDE_CESA)
+ case CESA_UNIT_ID:
+ if ((reg & PMC_SESTOPCLOCK_MASK) == PMC_SESTOPCLOCK_STOP)
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+ break;
+#endif
+#if defined(MV_INCLUDE_USB)
+ case USB_UNIT_ID:
+ if ((reg & PMC_USBSTOPCLOCK_MASK) == PMC_USBSTOPCLOCK_STOP)
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+ break;
+#endif
+#if defined(MV_INCLUDE_AUDIO)
+ case AUDIO_UNIT_ID:
+ if ((reg & PMC_AUDIOSTOPCLOCK_MASK) == PMC_AUDIOSTOPCLOCK_STOP)
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+ break;
+#endif
+#if defined(MV_INCLUDE_TS)
+ case TS_UNIT_ID:
+ if ((reg & PMC_TSSTOPCLOCK_MASK) == PMC_TSSTOPCLOCK_STOP)
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+ break;
+#endif
+#if defined(MV_INCLUDE_SDIO)
+ case SDIO_UNIT_ID:
+ if ((reg & PMC_SDIOSTOPCLOCK_MASK)== PMC_SDIOSTOPCLOCK_STOP)
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+ break;
+#endif
+#if defined(MV_INCLUDE_TDM)
+ case TDM_UNIT_ID:
+ if ((reg & PMC_TDMSTOPCLOCK_MASK) == PMC_TDMSTOPCLOCK_STOP)
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+ break;
+#endif
+
+ default:
+ state = MV_TRUE;
+ break;
+ }
+
+
+ return state;
+}
+/*******************************************************************************
+* mvCtrlPwrMemSet - Set Power State for memory on specific Unit
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*******************************************************************************/
+MV_VOID mvCtrlPwrMemSet(MV_UNIT_ID unitId, MV_U32 index, MV_BOOL enable)
+{
+ switch (unitId)
+ {
+#if defined(MV_INCLUDE_PEX)
+ case PEX_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_SET(POWER_MNG_MEM_CTRL_REG, PMC_PEXSTOPMEM_MASK);
+ }
+ else
+ {
+ MV_REG_BIT_RESET(POWER_MNG_MEM_CTRL_REG, PMC_PEXSTOPMEM_MASK);
+ }
+ break;
+#endif
+#if defined(MV_INCLUDE_GIG_ETH)
+ case ETH_GIG_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_SET(POWER_MNG_MEM_CTRL_REG, PMC_GESTOPMEM_MASK(index));
+ }
+ else
+ {
+ MV_REG_BIT_RESET(POWER_MNG_MEM_CTRL_REG, PMC_GESTOPMEM_MASK(index));
+ }
+ break;
+#endif
+#if defined(MV_INCLUDE_INTEG_SATA)
+ case SATA_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_SET(POWER_MNG_MEM_CTRL_REG, PMC_SATASTOPMEM_MASK(index));
+ }
+ else
+ {
+ MV_REG_BIT_RESET(POWER_MNG_MEM_CTRL_REG, PMC_SATASTOPMEM_MASK(index));
+ }
+ break;
+#endif
+#if defined(MV_INCLUDE_CESA)
+ case CESA_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_SET(POWER_MNG_MEM_CTRL_REG, PMC_SESTOPMEM_MASK);
+ }
+ else
+ {
+ MV_REG_BIT_RESET(POWER_MNG_MEM_CTRL_REG, PMC_SESTOPMEM_MASK);
+ }
+ break;
+#endif
+#if defined(MV_INCLUDE_USB)
+ case USB_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_SET(POWER_MNG_MEM_CTRL_REG, PMC_USBSTOPMEM_MASK);
+ }
+ else
+ {
+ MV_REG_BIT_RESET(POWER_MNG_MEM_CTRL_REG, PMC_USBSTOPMEM_MASK);
+ }
+ break;
+#endif
+#if defined(MV_INCLUDE_AUDIO)
+ case AUDIO_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_SET(POWER_MNG_MEM_CTRL_REG, PMC_AUDIOSTOPMEM_MASK);
+ }
+ else
+ {
+ MV_REG_BIT_RESET(POWER_MNG_MEM_CTRL_REG, PMC_AUDIOSTOPMEM_MASK);
+ }
+ break;
+#endif
+#if defined(MV_INCLUDE_XOR)
+ case XOR_UNIT_ID:
+ if (enable == MV_FALSE)
+ {
+ MV_REG_BIT_SET(POWER_MNG_MEM_CTRL_REG, PMC_XORSTOPMEM_MASK(index));
+ }
+ else
+ {
+ MV_REG_BIT_RESET(POWER_MNG_MEM_CTRL_REG, PMC_XORSTOPMEM_MASK(index));
+ }
+ break;
+#endif
+ default:
+
+ break;
+
+ }
+}
+
+/*******************************************************************************
+* mvCtrlPwrMemGet - Get Power State of memory on specific Unit
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+******************************************************************************/
+MV_BOOL mvCtrlPwrMemGet(MV_UNIT_ID unitId, MV_U32 index)
+{
+ MV_U32 reg = MV_REG_READ(POWER_MNG_MEM_CTRL_REG);
+ MV_BOOL state = MV_TRUE;
+
+ switch (unitId)
+ {
+#if defined(MV_INCLUDE_PEX)
+ case PEX_UNIT_ID:
+ if ((reg & PMC_PEXSTOPMEM_MASK) == PMC_PEXSTOPMEM_STOP)
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+
+ break;
+#endif
+#if defined(MV_INCLUDE_GIG_ETH)
+ case ETH_GIG_UNIT_ID:
+ if ((reg & PMC_GESTOPMEM_MASK(index)) == PMC_GESTOPMEM_STOP(index))
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+ break;
+#endif
+#if defined(MV_INCLUDE_SATA)
+ case SATA_UNIT_ID:
+ if ((reg & PMC_SATASTOPMEM_MASK(index)) == PMC_SATASTOPMEM_STOP(index))
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+ break;
+#endif
+#if defined(MV_INCLUDE_CESA)
+ case CESA_UNIT_ID:
+ if ((reg & PMC_SESTOPMEM_MASK) == PMC_SESTOPMEM_STOP)
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+ break;
+#endif
+#if defined(MV_INCLUDE_USB)
+ case USB_UNIT_ID:
+ if ((reg & PMC_USBSTOPMEM_MASK) == PMC_USBSTOPMEM_STOP)
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+ break;
+#endif
+#if defined(MV_INCLUDE_AUDIO)
+ case AUDIO_UNIT_ID:
+ if ((reg & PMC_AUDIOSTOPMEM_MASK) == PMC_AUDIOSTOPMEM_STOP)
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+ break;
+#endif
+#if defined(MV_INCLUDE_XOR)
+ case XOR_UNIT_ID:
+ if ((reg & PMC_XORSTOPMEM_MASK(index)) == PMC_XORSTOPMEM_STOP(index))
+ {
+ state = MV_FALSE;
+ }
+ else state = MV_TRUE;
+ break;
+#endif
+
+ default:
+ state = MV_TRUE;
+ break;
+ }
+
+
+ return state;
+}
+#else
+MV_VOID mvCtrlPwrClckSet(MV_UNIT_ID unitId, MV_U32 index, MV_BOOL enable) {return;}
+MV_BOOL mvCtrlPwrClckGet(MV_UNIT_ID unitId, MV_U32 index) {return MV_TRUE;}
+#endif /* #if defined(MV_INCLUDE_CLK_PWR_CNTRL) */
+
+
+/*******************************************************************************
+* mvMPPConfigToSPI - Change MPP[3:0] configuration to SPI mode
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+******************************************************************************/
+MV_VOID mvMPPConfigToSPI(MV_VOID)
+{
+ MV_U32 mppVal = 0;
+ MV_U32 bootVal = 0;
+
+ if(!mvCtrlIsBootFromSPIUseNAND())
+ return;
+ mppVal = 0x00002220; /* Set MPP [3:1] to SPI mode */
+ bootVal = MV_REG_READ(mvCtrlMppRegGet(0));
+ bootVal &= 0xffff000f;
+ mppVal |= bootVal;
+
+ MV_REG_WRITE(mvCtrlMppRegGet(0), mppVal);
+}
+
+
+/*******************************************************************************
+* mvMPPConfigToDefault - Change MPP[7:0] configuration to default configuration
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+******************************************************************************/
+MV_VOID mvMPPConfigToDefault(MV_VOID)
+{
+ MV_U32 mppVal = 0;
+ MV_U32 bootVal = 0;
+
+ if(!mvCtrlIsBootFromSPIUseNAND())
+ return;
+ mppVal = mvBoardMppGet(0);
+ bootVal = MV_REG_READ(mvCtrlMppRegGet(0));
+ mppVal &= ~0xffff000f;
+ bootVal &= 0xffff000f;
+ mppVal |= bootVal;
+
+ MV_REG_WRITE(mvCtrlMppRegGet(0), mppVal);
+}
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.h
new file mode 100644
index 000000000..6e2e8137a
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.h
@@ -0,0 +1,185 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvCtrlEnvLibh
+#define __INCmvCtrlEnvLibh
+
+/* includes */
+#include "mvSysHwConfig.h"
+#include "mvCommon.h"
+#include "mvTypes.h"
+#include "mvOs.h"
+#include "boardEnv/mvBoardEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "ctrlEnv/mvCtrlEnvRegs.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+
+/* typedefs */
+
+/* This enumerator describes the possible HW cache coherency policies the */
+/* controller supports. */
+typedef enum _mvCachePolicy
+{
+ NO_COHERENCY, /* No HW cache coherency support */
+ WT_COHERENCY, /* HW cache coherency supported in Write Through policy */
+ WB_COHERENCY /* HW cache coherency supported in Write Back policy */
+}MV_CACHE_POLICY;
+
+
+/* The swapping refers to 64-bit words (as this is the controller's */
+/* internal data path width). This enumerator describes the possible */
+/* data swap types. Below is an example for the data 0x0011223344556677 */
+typedef enum _mvSwapType
+{
+ MV_BYTE_SWAP, /* Byte Swap 77 66 55 44 33 22 11 00 */
+ MV_NO_SWAP, /* No swapping 00 11 22 33 44 55 66 77 */
+ MV_BYTE_WORD_SWAP, /* Both byte and word swap 33 22 11 00 77 66 55 44 */
+ MV_WORD_SWAP, /* Word swap 44 55 66 77 00 11 22 33 */
+ SWAP_TYPE_MAX /* Delimiter for this enumerator */
+}MV_SWAP_TYPE;
+
+/* This enumerator describes the access rights for the access protection */
+/* windows that can be found in the IDMA, XOR, Ethernet and MPSC units. */
+/* Note that the permission enumerator corresponds to its register format. */
+/* For example, read-only permission is presented as "1" in the register field. */
+typedef enum _mvAccessRights
+{
+ NO_ACCESS_ALLOWED = 0, /* No access allowed */
+ READ_ONLY = 1, /* Read only permission */
+ ACC_RESERVED = 2, /* Reserved access right */
+ FULL_ACCESS = 3, /* Read and Write permission */
+ MAX_ACC_RIGHTS
+}MV_ACCESS_RIGHTS;
+
+
+/* mvCtrlEnvLib.h API list */
+
+MV_STATUS mvCtrlEnvInit(MV_VOID);
+MV_U32 mvCtrlMppRegGet(MV_U32 mppGroup);
+
+#if defined(MV_INCLUDE_PEX)
+MV_U32 mvCtrlPexMaxIfGet(MV_VOID);
+#else
+#define mvCtrlPexMaxIfGet() (0)
+#endif
+
+#define mvCtrlPciIfMaxIfGet() (0)
+
+#if defined(MV_INCLUDE_GIG_ETH)
+MV_U32 mvCtrlEthMaxPortGet(MV_VOID);
+#endif
+#if defined(MV_INCLUDE_XOR)
+MV_U32 mvCtrlXorMaxChanGet(MV_VOID);
+#endif
+#if defined(MV_INCLUDE_USB)
+MV_U32 mvCtrlUsbMaxGet(MV_VOID);
+#endif
+#if defined(MV_INCLUDE_NAND)
+MV_U32 mvCtrlNandSupport(MV_VOID);
+#endif
+#if defined(MV_INCLUDE_SDIO)
+MV_U32 mvCtrlSdioSupport(MV_VOID);
+#endif
+#if defined(MV_INCLUDE_TS)
+MV_U32 mvCtrlTsSupport(MV_VOID);
+#endif
+#if defined(MV_INCLUDE_AUDIO)
+MV_U32 mvCtrlAudioSupport(MV_VOID);
+#endif
+#if defined(MV_INCLUDE_TDM)
+MV_U32 mvCtrlTdmSupport(MV_VOID);
+#endif
+
+MV_U16 mvCtrlModelGet(MV_VOID);
+MV_U8 mvCtrlRevGet(MV_VOID);
+MV_STATUS mvCtrlNameGet(char *pNameBuff);
+MV_U32 mvCtrlModelRevGet(MV_VOID);
+MV_STATUS mvCtrlModelRevNameGet(char *pNameBuff);
+MV_VOID mvCtrlAddrDecShow(MV_VOID);
+const MV_8* mvCtrlTargetNameGet(MV_TARGET target);
+MV_U32 ctrlSizeToReg(MV_U32 size, MV_U32 alignment);
+MV_U32 ctrlRegToSize(MV_U32 regSize, MV_U32 alignment);
+MV_U32 ctrlSizeRegRoundUp(MV_U32 size, MV_U32 alignment);
+MV_U32 mvCtrlSysRstLengthCounterGet(MV_VOID);
+MV_STATUS ctrlWinOverlapTest(MV_ADDR_WIN *pAddrWin1, MV_ADDR_WIN *pAddrWin2);
+MV_STATUS ctrlWinWithinWinTest(MV_ADDR_WIN *pAddrWin1, MV_ADDR_WIN *pAddrWin2);
+
+MV_VOID mvCtrlPwrClckSet(MV_UNIT_ID unitId, MV_U32 index, MV_BOOL enable);
+MV_BOOL mvCtrlPwrClckGet(MV_UNIT_ID unitId, MV_U32 index);
+MV_VOID mvCtrlPwrMemSet(MV_UNIT_ID unitId, MV_U32 index, MV_BOOL enable);
+MV_BOOL mvCtrlIsBootFromSPI(MV_VOID);
+MV_BOOL mvCtrlIsBootFromSPIUseNAND(MV_VOID);
+MV_BOOL mvCtrlIsBootFromNAND(MV_VOID);
+#if defined(MV_INCLUDE_CLK_PWR_CNTRL)
+MV_VOID mvCtrlPwrSaveOn(MV_VOID);
+MV_VOID mvCtrlPwrSaveOff(MV_VOID);
+#endif
+MV_BOOL mvCtrlPwrMemGet(MV_UNIT_ID unitId, MV_U32 index);
+MV_VOID mvMPPConfigToSPI(MV_VOID);
+MV_VOID mvMPPConfigToDefault(MV_VOID);
+
+
+#endif /* __INCmvCtrlEnvLibh */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvRegs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvRegs.h
new file mode 100644
index 000000000..ae3f141a0
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvRegs.h
@@ -0,0 +1,419 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvCtrlEnvRegsh
+#define __INCmvCtrlEnvRegsh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* CV Support */
+#define PEX0_MEM0 PEX0_MEM
+#define PCI0_MEM0 PEX0_MEM
+
+/* Controller revision info */
+#define PCI_CLASS_CODE_AND_REVISION_ID 0x008
+#define PCCRIR_REVID_OFFS 0 /* Revision ID */
+#define PCCRIR_REVID_MASK (0xff << PCCRIR_REVID_OFFS)
+
+/* Controller environment register offsets */
+
+/* Power Management Control */
+#define POWER_MNG_MEM_CTRL_REG 0x20118
+
+#define PMC_GESTOPMEM_OFFS(port) ((port)? 13 : 0)
+#define PMC_GESTOPMEM_MASK(port) (1 << PMC_GESTOPMEM_OFFS(port))
+#define PMC_GESTOPMEM_EN(port) (0 << PMC_GESTOPMEM_OFFS(port))
+#define PMC_GESTOPMEM_STOP(port) (1 << PMC_GESTOPMEM_OFFS(port))
+
+#define PMC_PEXSTOPMEM_OFFS 1
+#define PMC_PEXSTOPMEM_MASK (1 << PMC_PEXSTOPMEM_OFFS)
+#define PMC_PEXSTOPMEM_EN (0 << PMC_PEXSTOPMEM_OFFS)
+#define PMC_PEXSTOPMEM_STOP (1 << PMC_PEXSTOPMEM_OFFS)
+
+#define PMC_USBSTOPMEM_OFFS 2
+#define PMC_USBSTOPMEM_MASK (1 << PMC_USBSTOPMEM_OFFS)
+#define PMC_USBSTOPMEM_EN (0 << PMC_USBSTOPMEM_OFFS)
+#define PMC_USBSTOPMEM_STOP (1 << PMC_USBSTOPMEM_OFFS)
+
+#define PMC_DUNITSTOPMEM_OFFS 3
+#define PMC_DUNITSTOPMEM_MASK (1 << PMC_DUNITSTOPMEM_OFFS)
+#define PMC_DUNITSTOPMEM_EN (0 << PMC_DUNITSTOPMEM_OFFS)
+#define PMC_DUNITSTOPMEM_STOP (1 << PMC_DUNITSTOPMEM_OFFS)
+
+#define PMC_RUNITSTOPMEM_OFFS 4
+#define PMC_RUNITSTOPMEM_MASK (1 << PMC_RUNITSTOPMEM_OFFS)
+#define PMC_RUNITSTOPMEM_EN (0 << PMC_RUNITSTOPMEM_OFFS)
+#define PMC_RUNITSTOPMEM_STOP (1 << PMC_RUNITSTOPMEM_OFFS)
+
+#define PMC_XORSTOPMEM_OFFS(port) (5+(port*2))
+#define PMC_XORSTOPMEM_MASK(port) (1 << PMC_XORSTOPMEM_OFFS(port))
+#define PMC_XORSTOPMEM_EN(port) (0 << PMC_XORSTOPMEM_OFFS(port))
+#define PMC_XORSTOPMEM_STOP(port) (1 << PMC_XORSTOPMEM_OFFS(port))
+
+#define PMC_SATASTOPMEM_OFFS(port) (6+(port*5))
+#define PMC_SATASTOPMEM_MASK(port) (1 << PMC_SATASTOPMEM_OFFS(port))
+#define PMC_SATASTOPMEM_EN(port) (0 << PMC_SATASTOPMEM_OFFS(port))
+#define PMC_SATASTOPMEM_STOP(port) (1 << PMC_SATASTOPMEM_OFFS(port))
+
+#define PMC_SESTOPMEM_OFFS 8
+#define PMC_SESTOPMEM_MASK (1 << PMC_SESTOPMEM_OFFS)
+#define PMC_SESTOPMEM_EN (0 << PMC_SESTOPMEM_OFFS)
+#define PMC_SESTOPMEM_STOP (1 << PMC_SESTOPMEM_OFFS)
+
+#define PMC_AUDIOSTOPMEM_OFFS 9
+#define PMC_AUDIOSTOPMEM_MASK (1 << PMC_AUDIOSTOPMEM_OFFS)
+#define PMC_AUDIOSTOPMEM_EN (0 << PMC_AUDIOSTOPMEM_OFFS)
+#define PMC_AUDIOSTOPMEM_STOP (1 << PMC_AUDIOSTOPMEM_OFFS)
+
+#define POWER_MNG_CTRL_REG 0x2011C
+
+#define PMC_GESTOPCLOCK_OFFS(port) ((port)? 19 : 0)
+#define PMC_GESTOPCLOCK_MASK(port) (1 << PMC_GESTOPCLOCK_OFFS(port))
+#define PMC_GESTOPCLOCK_EN(port) (1 << PMC_GESTOPCLOCK_OFFS(port))
+#define PMC_GESTOPCLOCK_STOP(port) (0 << PMC_GESTOPCLOCK_OFFS(port))
+
+#define PMC_PEXPHYSTOPCLOCK_OFFS 1
+#define PMC_PEXPHYSTOPCLOCK_MASK (1 << PMC_PEXPHYSTOPCLOCK_OFFS)
+#define PMC_PEXPHYSTOPCLOCK_EN (1 << PMC_PEXPHYSTOPCLOCK_OFFS)
+#define PMC_PEXPHYSTOPCLOCK_STOP (0 << PMC_PEXPHYSTOPCLOCK_OFFS)
+
+#define PMC_PEXSTOPCLOCK_OFFS 2
+#define PMC_PEXSTOPCLOCK_MASK (1 << PMC_PEXSTOPCLOCK_OFFS)
+#define PMC_PEXSTOPCLOCK_EN (1 << PMC_PEXSTOPCLOCK_OFFS)
+#define PMC_PEXSTOPCLOCK_STOP (0 << PMC_PEXSTOPCLOCK_OFFS)
+
+#define PMC_USBSTOPCLOCK_OFFS 3
+#define PMC_USBSTOPCLOCK_MASK (1 << PMC_USBSTOPCLOCK_OFFS)
+#define PMC_USBSTOPCLOCK_EN (1 << PMC_USBSTOPCLOCK_OFFS)
+#define PMC_USBSTOPCLOCK_STOP (0 << PMC_USBSTOPCLOCK_OFFS)
+
+#define PMC_SDIOSTOPCLOCK_OFFS 4
+#define PMC_SDIOSTOPCLOCK_MASK (1 << PMC_SDIOSTOPCLOCK_OFFS)
+#define PMC_SDIOSTOPCLOCK_EN (1 << PMC_SDIOSTOPCLOCK_OFFS)
+#define PMC_SDIOSTOPCLOCK_STOP (0 << PMC_SDIOSTOPCLOCK_OFFS)
+
+#define PMC_TSSTOPCLOCK_OFFS 5
+#define PMC_TSSTOPCLOCK_MASK (1 << PMC_TSSTOPCLOCK_OFFS)
+#define PMC_TSSTOPCLOCK_EN (1 << PMC_TSSTOPCLOCK_OFFS)
+#define PMC_TSSTOPCLOCK_STOP (0 << PMC_TSSTOPCLOCK_OFFS)
+
+#define PMC_AUDIOSTOPCLOCK_OFFS 9
+#define PMC_AUDIOSTOPCLOCK_MASK (1 << PMC_AUDIOSTOPCLOCK_OFFS)
+#define PMC_AUDIOSTOPCLOCK_EN (1 << PMC_AUDIOSTOPCLOCK_OFFS)
+#define PMC_AUDIOSTOPCLOCK_STOP (0 << PMC_AUDIOSTOPCLOCK_OFFS)
+
+#define PMC_POWERSAVE_OFFS 11
+#define PMC_POWERSAVE_MASK (1 << PMC_POWERSAVE_OFFS)
+#define PMC_POWERSAVE_EN (1 << PMC_POWERSAVE_OFFS)
+#define PMC_POWERSAVE_STOP (0 << PMC_POWERSAVE_OFFS)
+
+
+
+
+#define PMC_SATASTOPCLOCK_OFFS(port) (14+(port))
+#define PMC_SATASTOPCLOCK_MASK(port) (1 << PMC_SATASTOPCLOCK_OFFS(port))
+#define PMC_SATASTOPCLOCK_EN(port) (1 << PMC_SATASTOPCLOCK_OFFS(port))
+#define PMC_SATASTOPCLOCK_STOP(port) (0 << PMC_SATASTOPCLOCK_OFFS(port))
+
+#define PMC_SESTOPCLOCK_OFFS 17
+#define PMC_SESTOPCLOCK_MASK (1 << PMC_SESTOPCLOCK_OFFS)
+#define PMC_SESTOPCLOCK_EN (1 << PMC_SESTOPCLOCK_OFFS)
+#define PMC_SESTOPCLOCK_STOP (0 << PMC_SESTOPCLOCK_OFFS)
+
+#define PMC_TDMSTOPCLOCK_OFFS 20
+#define PMC_TDMSTOPCLOCK_MASK (1 << PMC_TDMSTOPCLOCK_OFFS)
+#define PMC_TDMSTOPCLOCK_EN (1 << PMC_TDMSTOPCLOCK_OFFS)
+#define PMC_TDMSTOPCLOCK_STOP (0 << PMC_TDMSTOPCLOCK_OFFS)
+
+
+/* Controller environment register offsets */
+#define MPP_CONTROL_REG0 0x10000
+#define MPP_CONTROL_REG1 0x10004
+#define MPP_CONTROL_REG2 0x10008
+#define MPP_CONTROL_REG3 0x1000C
+#define MPP_CONTROL_REG4 0x10010
+#define MPP_CONTROL_REG5 0x10014
+#define MPP_CONTROL_REG6 0x10018
+#define MPP_SAMPLE_AT_RESET 0x10030
+#define CHIP_BOND_REG 0x10034
+#define SYSRST_LENGTH_COUNTER_REG 0x10050
+#define SLCR_COUNT_OFFS 0
+#define SLCR_COUNT_MASK (0x1FFFFFFF << SLCR_COUNT_OFFS)
+#define SLCR_CLR_OFFS 31
+#define SLCR_CLR_MASK (1 << SLCR_CLR_OFFS)
+#define PCKG_OPT_MASK 0x3
+#define MPP_OUTPUT_DRIVE_REG 0x100E0
+#define MPP_RGMII0_OUTPUT_DRIVE_OFFS 7
+#define MPP_3_3_RGMII0_OUTPUT_DRIVE (0x0 << MPP_RGMII0_OUTPUT_DRIVE_OFFS)
+#define MPP_1_8_RGMII0_OUTPUT_DRIVE (0x1 << MPP_RGMII0_OUTPUT_DRIVE_OFFS)
+#define MPP_RGMII1_OUTPUT_DRIVE_OFFS 15
+#define MPP_3_3_RGMII1_OUTPUT_DRIVE (0x0 << MPP_RGMII1_OUTPUT_DRIVE_OFFS)
+#define MPP_1_8_RGMII1_OUTPUT_DRIVE (0x1 << MPP_RGMII1_OUTPUT_DRIVE_OFFS)
+
+#define MSAR_BOOT_MODE_OFFS 12
+#define MSAR_BOOT_MODE_MASK (0x7 << MSAR_BOOT_MODE_OFFS)
+#define MSAR_BOOT_NAND_WITH_BOOTROM (0x5 << MSAR_BOOT_MODE_OFFS)
+#define MSAR_BOOT_SPI_WITH_BOOTROM (0x4 << MSAR_BOOT_MODE_OFFS)
+#define MSAR_BOOT_SPI_USE_NAND_WITH_BOOTROM (0x2 << MSAR_BOOT_MODE_OFFS)
+
+#define MSAR_BOOT_MODE_6180(X) (((X & 0x3000) >> 12) | \
+ ((X & 0x2) << 1))
+#define MSAR_BOOT_SPI_WITH_BOOTROM_6180 0x1
+#define MSAR_BOOT_NAND_WITH_BOOTROM_6180 0x5
+
+#define MSAR_TCLCK_OFFS 21
+#define MSAR_TCLCK_MASK (0x1 << MSAR_TCLCK_OFFS)
+#define MSAR_TCLCK_166 (0x1 << MSAR_TCLCK_OFFS)
+#define MSAR_TCLCK_200 (0x0 << MSAR_TCLCK_OFFS)
+
+
+#define MSAR_CPUCLCK_EXTRACT(X) (((X & 0x2) >> 1) | ((X & 0x400000) >> 21) | \
+ ((X & 0x18) >> 1))
+
+#define MSAR_CPUCLCK_OFFS_6180 2
+#define MSAR_CPUCLCK_MASK_6180 (0x7 << MSAR_CPUCLCK_OFFS_6180)
+
+#define MSAR_DDRCLCK_RTIO_OFFS 5
+#define MSAR_DDRCLCK_RTIO_MASK (0xF << MSAR_DDRCLCK_RTIO_OFFS)
+
+#define MSAR_L2CLCK_EXTRACT(X) (((X & 0x600) >> 9) | ((X & 0x80000) >> 17))
+
+#ifndef MV_ASMLANGUAGE
+/* CPU clock for 6281,6192; 0 -> Reserved */
+#define MV_CPU_CLCK_TBL { 0, 0, 0, 0, \
+ 600000000, 0, 800000000, 1000000000, \
+ 0, 1200000000, 0, 0, \
+ 1500000000, 0, 0, 0}
+
+/* DDR clock RATIO for 6281,6192 {0,0}->Reserved */
+#define MV_DDR_CLCK_RTIO_TBL {\
+ {0, 0}, {0, 0}, {2, 1}, {0, 0}, \
+ {3, 1}, {0, 0}, {4, 1}, {9, 2}, \
+ {5, 1}, {6, 1}, {0, 0}, {0, 0}, \
+ {0, 0}, {0, 0}, {0, 0}, {0, 0} \
+}
+
+/* L2 clock RATIO for 6281,6192 {1,1}->Reserved */
+#define MV_L2_CLCK_RTIO_TBL {\
+ {0, 0}, {2, 1}, {0, 0}, {3, 1}, \
+ {0, 0}, {0, 0}, {0, 0}, {0, 0} \
+}
+
+/* 6180 has different clock reset sampling */
+/* ARM CPU, DDR, L2 clock for 6180 {0,0,0}->Reserved */
+#define MV_CPU6180_DDR_L2_CLCK_TBL { \
+ {0, 0, 0 },\
+ {0, 0, 0 },\
+ {0, 0, 0 },\
+ {0, 0, 0 },\
+ {0, 0, 0 },\
+ {600000000, 200000000, 300000000 },\
+ {800000000, 200000000, 400000000 },\
+ {0, 0, 0 }\
+}
+
+
+
+/* These macros help units to identify a target Mbus Arbiter group */
+#define MV_TARGET_IS_DRAM(target) \
+ ((target >= SDRAM_CS0) && (target <= SDRAM_CS3))
+
+#define MV_TARGET_IS_PEX0(target) \
+ ((target >= PEX0_MEM) && (target <= PEX0_IO))
+
+#define MV_TARGET_IS_PEX1(target) 0
+
+#define MV_TARGET_IS_PEX(target) (MV_TARGET_IS_PEX0(target) || MV_TARGET_IS_PEX1(target))
+
+#define MV_TARGET_IS_DEVICE(target) \
+ ((target >= DEVICE_CS0) && (target <= DEVICE_CS3))
+
+#define MV_PCI_DRAM_BAR_TO_DRAM_TARGET(bar) 0
+
+#define MV_TARGET_IS_AS_BOOT(target) ((target) == (sampleAtResetTargetArray[ \
+ (mvCtrlModelGet() == MV_6180_DEV_ID)? MSAR_BOOT_MODE_6180 \
+ (MV_REG_READ(MPP_SAMPLE_AT_RESET)):((MV_REG_READ(MPP_SAMPLE_AT_RESET)\
+ & MSAR_BOOT_MODE_MASK) >> MSAR_BOOT_MODE_OFFS)]))
+
+
+#define MV_CHANGE_BOOT_CS(target) (((target) == DEV_BOOCS)?\
+ sampleAtResetTargetArray[(mvCtrlModelGet() == MV_6180_DEV_ID)? \
+ MSAR_BOOT_MODE_6180(MV_REG_READ(MPP_SAMPLE_AT_RESET)): \
+ ((MV_REG_READ(MPP_SAMPLE_AT_RESET) & MSAR_BOOT_MODE_MASK)\
+ >> MSAR_BOOT_MODE_OFFS)]:(target))
+
+#define TCLK_TO_COUNTER_RATIO 1 /* counters running in Tclk */
+
+#define BOOT_TARGETS_NAME_ARRAY { \
+ TBL_TERM, \
+ TBL_TERM, \
+ BOOT_ROM_CS, \
+ TBL_TERM, \
+ BOOT_ROM_CS, \
+ BOOT_ROM_CS, \
+ TBL_TERM, \
+ TBL_TERM \
+}
+
+#define BOOT_TARGETS_NAME_ARRAY_6180 { \
+ TBL_TERM, \
+ BOOT_ROM_CS, \
+ TBL_TERM, \
+ TBL_TERM, \
+ TBL_TERM, \
+ BOOT_ROM_CS, \
+ TBL_TERM, \
+ TBL_TERM \
+}
+
+
+/* For backward compatibility */
+#define DEVICE_CS0 NFLASH_CS
+#define DEVICE_CS1 SPI_CS
+#define DEVICE_CS2 BOOT_ROM_CS
+#define DEVICE_CS3 DEV_BOOCS
+#define MV_BOOTDEVICE_INDEX 0
+
+#define START_DEV_CS DEV_CS0
+#define DEV_TO_TARGET(dev) ((dev) + DEVICE_CS0)
+
+#define PCI_IF0_MEM0 PEX0_MEM
+#define PCI_IF0_IO PEX0_IO
+
+
+/* This enumerator defines the Marvell controller target ID */
+typedef enum _mvTargetId
+{
+ DRAM_TARGET_ID = 0 , /* Port 0 -> DRAM interface */
+ DEV_TARGET_ID = 1, /* Port 1 -> Nand/SPI */
+ PEX0_TARGET_ID = 4 , /* Port 4 -> PCI Express0 */
+ CRYPT_TARGET_ID = 3 , /* Port 3 --> Crypto Engine */
+ SAGE_TARGET_ID = 12 , /* Port 12 -> SAGE Unit */
+ MAX_TARGETS_ID
+}MV_TARGET_ID;
+
+
+/* This enumerator describes the possible controller peripheral targets. */
+/* Controller peripherals are designated memory/IO address spaces that the */
+/* controller can access. They are also referred to as "targets". */
+typedef enum _mvTarget
+{
+ TBL_TERM = -1, /* not a valid target; used as the targets list terminator */
+ SDRAM_CS0, /* SDRAM chip select 0 */
+ SDRAM_CS1, /* SDRAM chip select 1 */
+ SDRAM_CS2, /* SDRAM chip select 2 */
+ SDRAM_CS3, /* SDRAM chip select 3 */
+ PEX0_MEM, /* PCI Express 0 Memory */
+ PEX0_IO, /* PCI Express 0 IO */
+ INTER_REGS, /* Internal registers */
+ NFLASH_CS, /* NFLASH_CS */
+ SPI_CS, /* SPI_CS */
+ BOOT_ROM_CS, /* BOOT_ROM_CS */
+ DEV_BOOCS, /* DEV_BOOCS */
+ CRYPT_ENG, /* Crypto Engine */
+#ifdef MV_INCLUDE_SAGE
+ SAGE_UNIT, /* SAGE Unit */
+#endif
+ MAX_TARGETS
+
+}MV_TARGET;
+
+#define TARGETS_DEF_ARRAY { \
+ {0x0E, DRAM_TARGET_ID }, /* SDRAM_CS0 */ \
+ {0x0D, DRAM_TARGET_ID }, /* SDRAM_CS1 */ \
+ {0x0B, DRAM_TARGET_ID }, /* SDRAM_CS2 */ \
+ {0x07, DRAM_TARGET_ID }, /* SDRAM_CS3 */ \
+ {0xE8, PEX0_TARGET_ID }, /* PEX0_MEM */ \
+ {0xE0, PEX0_TARGET_ID }, /* PEX0_IO */ \
+ {0xFF, 0xFF }, /* INTER_REGS */ \
+ {0x2F, DEV_TARGET_ID }, /* NFLASH_CS */ \
+ {0x1E, DEV_TARGET_ID }, /* SPI_CS */ \
+ {0x1D, DEV_TARGET_ID }, /* BOOT_ROM_CS */ \
+ {0x1E, DEV_TARGET_ID }, /* DEV_BOOCS */ \
+ {0x01, CRYPT_TARGET_ID}, /* CRYPT_ENG */ \
+ {0x00, SAGE_TARGET_ID } \
+}
+
+
+#define TARGETS_NAME_ARRAY { \
+ "SDRAM_CS0", /* SDRAM_CS0 */ \
+ "SDRAM_CS1", /* SDRAM_CS1 */ \
+ "SDRAM_CS2", /* SDRAM_CS2 */ \
+ "SDRAM_CS3", /* SDRAM_CS3 */ \
+ "PEX0_MEM", /* PEX0_MEM */ \
+ "PEX0_IO", /* PEX0_IO */ \
+ "INTER_REGS", /* INTER_REGS */ \
+ "NFLASH_CS", /* NFLASH_CS */ \
+ "SPI_CS", /* SPI_CS */ \
+ "BOOT_ROM_CS", /* BOOT_ROM_CS */ \
+ "DEV_BOOTCS", /* DEV_BOOCS */ \
+ "CRYPT_ENG", /* CRYPT_ENG */ \
+ "SAGE_UNIT" /* SAGE_UNIT */ \
+}
+#endif /* MV_ASMLANGUAGE */
+
+
+#endif
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvSpec.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvSpec.h
new file mode 100644
index 000000000..e41d80a42
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvSpec.h
@@ -0,0 +1,257 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvCtrlEnvSpech
+#define __INCmvCtrlEnvSpech
+
+#include "mvDeviceId.h"
+#include "mvSysHwConfig.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define MV_ARM_SOC
+#define SOC_NAME_PREFIX "MV88F"
+
+
+/* units base and port numbers */
+#ifdef MV_ASMLANGUAGE
+#define XOR_UNIT_BASE(unit) 0x60800
+#else
+#define MV_XOR_REG_BASE 0x60000
+#define XOR_UNIT_BASE(unit) ((unit)? 0x60900:0x60800)
+#endif
+
+#define TDM_REG_BASE 0xD0000
+#define USB_REG_BASE(dev) 0x50000
+#define AUDIO_REG_BASE 0xA0000
+#define SATA_REG_BASE 0x80000
+#define MV_CESA_REG_BASE 0x3D000
+#define MV_CESA_TDMA_REG_BASE 0x30000
+#define MV_SDIO_REG_BASE 0x90000
+#define MV_ETH_REG_BASE(port) (((port) == 0) ? 0x72000 : 0x76000)
+#define MV_UART_CHAN_BASE(chanNum) (0x12000 + (chanNum * 0x100))
+#define DRAM_BASE 0x0
+#define CNTMR_BASE 0x20300
+#define TWSI_SLAVE_BASE(chanNum) 0x11000
+#define PEX_IF_BASE(pexIf) 0x40000
+#define MPP_REG_BASE 0x10000
+#define TSU_GLOBAL_REG_BASE 0xB4000
+#define MAX_AHB_TO_MBUS_REG_BASE 0x20000
+
+#define INTER_REGS_SIZE _1M
+/* This define describes the TWSI interrupt bit and location */
+#define TWSI_CPU_MAIN_INT_CAUSE_REG 0x20200
+#define TWSI0_CPU_MAIN_INT_BIT (1<<29)
+#define TWSI_SPEED 100000
+
+#define MV_GPP_MAX_GROUP 2
+#define MV_CNTMR_MAX_COUNTER 2
+#define MV_UART_MAX_CHAN 2
+#define MV_XOR_MAX_UNIT 2
+#define MV_XOR_MAX_CHAN 4 /* total channels for all units together*/
+#define MV_XOR_MAX_CHAN_PER_UNIT 2 /* channels per unit */
+#define MV_SATA_MAX_CHAN 2
+
+#define MV_6281_MPP_MAX_MODULE 2
+#define MV_6192_MPP_MAX_MODULE 1
+#define MV_6190_MPP_MAX_MODULE 1
+#define MV_6180_MPP_MAX_MODULE 2
+#define MV_6281_MPP_MAX_GROUP 7
+#define MV_6192_MPP_MAX_GROUP 4
+#define MV_6190_MPP_MAX_GROUP 4
+#define MV_6180_MPP_MAX_GROUP 3
+
+#define MV_DRAM_MAX_CS 4
+
+/* This define describes the maximum number of supported PCI/PCI-X interfaces */
+#define MV_PCI_MAX_IF 0
+#define MV_PCI_START_IF 0
+
+/* This define describes the maximum number of supported PEX Interfaces */
+#define MV_INCLUDE_PEX0
+#define MV_DISABLE_PEX_DEVICE_BAR
+#define MV_PEX_MAX_IF 1
+#define MV_PEX_START_IF MV_PCI_MAX_IF
+
+/* This define describes the maximum number of supported PCI Interfaces */
+#define MV_PCI_IF_MAX_IF (MV_PEX_MAX_IF+MV_PCI_MAX_IF)
+
+#define MV_ETH_MAX_PORTS 2
+#define MV_6281_ETH_MAX_PORTS 2
+#define MV_6192_ETH_MAX_PORTS 2
+#define MV_6190_ETH_MAX_PORTS 1
+#define MV_6180_ETH_MAX_PORTS 1
+
+#define MV_IDMA_MAX_CHAN 0
+
+#define MV_USB_MAX_PORTS 1
+
+#define MV_USB_VERSION 1
+
+
+#define MV_6281_NAND 1
+#define MV_6192_NAND 1
+#define MV_6190_NAND 1
+#define MV_6180_NAND 0
+
+#define MV_6281_SDIO 1
+#define MV_6192_SDIO 1
+#define MV_6190_SDIO 1
+#define MV_6180_SDIO 1
+
+#define MV_6281_TS 1
+#define MV_6192_TS 1
+#define MV_6190_TS 0
+#define MV_6180_TS 0
+
+#define MV_6281_AUDIO 1
+#define MV_6192_AUDIO 1
+#define MV_6190_AUDIO 0
+#define MV_6180_AUDIO 1
+
+#define MV_6281_TDM 1
+#define MV_6192_TDM 1
+#define MV_6190_TDM 0
+#define MV_6180_TDM 0
+
+#define MV_DEVICE_MAX_CS 4
+
+/* Others */
+#define PEX_HOST_BUS_NUM(pciIf) (pciIf)
+#define PEX_HOST_DEV_NUM(pciIf) 0
+
+#define PCI_IO(pciIf) (PEX0_IO)
+#define PCI_MEM(pciIf, memNum) (PEX0_MEM0)
+/* CESA version #2: One channel, 2KB SRAM, TDMA */
+#if defined(MV_CESA_CHAIN_MODE_SUPPORT)
+ #define MV_CESA_VERSION 3
+#else
+#define MV_CESA_VERSION 2
+#endif
+#define MV_CESA_SRAM_SIZE 2*1024
+/* Ethernet unit version and queue configuration */
+#define MV_ETH_VERSION 4
+#define MV_ETH_MAX_RXQ 8
+#define MV_ETH_MAX_TXQ 8
+#define MV_ETH_PORT_SGMII { MV_FALSE, MV_FALSE }
+/* This define describes the supported USB controller version */
+#define MV_USB_VERSION 1
+
+#define MV_INCLUDE_SDRAM_CS0
+#define MV_INCLUDE_SDRAM_CS1
+#define MV_INCLUDE_SDRAM_CS2
+#define MV_INCLUDE_SDRAM_CS3
+
+#define MV_INCLUDE_DEVICE_CS0
+#define MV_INCLUDE_DEVICE_CS1
+#define MV_INCLUDE_DEVICE_CS2
+#define MV_INCLUDE_DEVICE_CS3
+
+#define MPP_GROUP_1_TYPE {\
+ {0, 0, 0}, /* Reserved for AUTO */ \
+ {0x22220000, 0x22222222, 0x2222}, /* TDM */ \
+ {0x44440000, 0x00044444, 0x0000}, /* AUDIO */ \
+ {0x33330000, 0x33003333, 0x0033}, /* RGMII */ \
+ {0x33330000, 0x03333333, 0x0033}, /* GMII */ \
+ {0x11110000, 0x11111111, 0x0001}, /* TS */ \
+ {0x33330000, 0x33333333, 0x3333} /* MII */ \
+}
+
+#define MPP_GROUP_2_TYPE {\
+ {0, 0, 0}, /* Reserved for AUTO */ \
+ {0x22220000, 0x22222222, 0x22}, /* TDM */ \
+ {0x44440000, 0x00044444, 0x0}, /* AUDIO */ \
+ {0, 0, 0}, /* N_A */ \
+ {0, 0, 0}, /* N_A */ \
+ {0x11110000, 0x11111111, 0x01} /* TS */ \
+}
+
+#ifndef MV_ASMLANGUAGE
+
+/* This enumerator defines the Marvell Units ID */
+typedef enum _mvUnitId
+{
+ DRAM_UNIT_ID,
+ PEX_UNIT_ID,
+ ETH_GIG_UNIT_ID,
+ USB_UNIT_ID,
+ IDMA_UNIT_ID,
+ XOR_UNIT_ID,
+ SATA_UNIT_ID,
+ TDM_UNIT_ID,
+ UART_UNIT_ID,
+ CESA_UNIT_ID,
+ SPI_UNIT_ID,
+ AUDIO_UNIT_ID,
+ SDIO_UNIT_ID,
+ TS_UNIT_ID,
+ MAX_UNITS_ID
+
+}MV_UNIT_ID;
+
+#endif
+
+#endif /* __INCmvCtrlEnvSpech */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.c
new file mode 100644
index 000000000..d21bb074d
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.c
@@ -0,0 +1,1048 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/* includes */
+#include "ctrlEnv/sys/mvAhbToMbus.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+#undef MV_DEBUG
+/* defines */
+#ifdef MV_DEBUG
+ #define DB(x) x
+#else
+ #define DB(x)
+#endif
+
+/* typedefs */
+
+
+/* CPU address remap register offsets are not consecutive. This struct */
+/* describes the address remap register offsets. */
+typedef struct _ahbToMbusRemapRegOffs
+{
+ MV_U32 lowRegOffs; /* Low 32-bit remap register offset */
+ MV_U32 highRegOffs; /* High 32 bit remap register offset */
+}AHB_TO_MBUS_REMAP_REG_OFFS;
+
+/* locals */
+static MV_STATUS ahbToMbusRemapRegOffsGet (MV_U32 winNum,
+ AHB_TO_MBUS_REMAP_REG_OFFS *pRemapRegs);
+
+/*******************************************************************************
+* mvAhbToMbusInit - Initialize the AHB-to-MBus address map
+*
+* DESCRIPTION:
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_OK always.
+*
+*******************************************************************************/
+MV_STATUS mvAhbToMbusInit(void)
+{
+ return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvAhbToMbusWinSet - Set CPU-to-peripheral winNum address window
+*
+* DESCRIPTION:
+* This function sets a CPU-to-peripheral address window, also known as an
+* address decode window.
+* A new address decode window is programmed for the specified winNum.
+* If the address decode window parameter structure enables the window,
+* the routine also enables the winNum window, allowing the CPU to access
+* the target through it.
+*
+* INPUT:
+* winNum - Windows number.
+* pAddrDecWin - CPU winNum window data structure.
+*
+* OUTPUT:
+* N/A
+*
+* RETURN:
+* MV_OK if the CPU winNum window was set correctly, MV_ERROR if the
+* address window overlaps another active CPU winNum window or a 36-bit
+* base address is requested while the CPU does not support it.
+* The function returns MV_NOT_SUPPORTED if winNum is unsupported.
+*
+*******************************************************************************/
+MV_STATUS mvAhbToMbusWinSet(MV_U32 winNum, MV_AHB_TO_MBUS_DEC_WIN *pAddrDecWin)
+{
+ MV_TARGET_ATTRIB targetAttribs;
+ MV_DEC_REGS decRegs;
+
+ /* Parameter checking */
+ if (winNum >= MAX_AHB_TO_MBUS_WINS)
+ {
+ mvOsPrintf("mvAhbToMbusWinSet: ERR. Invalid winNum %d\n", winNum);
+ return MV_NOT_SUPPORTED;
+ }
+
+
+ /* read base register*/
+ if (winNum != MV_AHB_TO_MBUS_INTREG_WIN)
+ {
+ decRegs.baseReg = MV_REG_READ(AHB_TO_MBUS_WIN_BASE_REG(winNum));
+ }
+ else
+ {
+ decRegs.baseReg = MV_REG_READ(AHB_TO_MBUS_WIN_INTEREG_REG);
+ }
+
+ /* check if address is aligned to the size */
+ if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+ {
+ mvOsPrintf("mvAhbToMbusWinSet:Error setting AHB to MBUS window %d to "\
+ "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+ winNum,
+ mvCtrlTargetNameGet(pAddrDecWin->target),
+ pAddrDecWin->addrWin.baseLow,
+ pAddrDecWin->addrWin.size);
+ return MV_ERROR;
+ }
+
+ /* read control register*/
+ if (winNum != MV_AHB_TO_MBUS_INTREG_WIN)
+ {
+ decRegs.sizeReg = MV_REG_READ(AHB_TO_MBUS_WIN_CTRL_REG(winNum));
+ }
+
+ if (MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+ {
+ mvOsPrintf("mvAhbToMbusWinSet:mvCtrlAddrDecToReg Failed\n");
+ return MV_ERROR;
+ }
+
+ /* Enable/Disable */
+ if (MV_TRUE == pAddrDecWin->enable)
+ {
+ decRegs.sizeReg |= ATMWCR_WIN_ENABLE;
+ }
+ else
+ {
+ decRegs.sizeReg &= ~ATMWCR_WIN_ENABLE;
+ }
+
+ mvCtrlAttribGet(pAddrDecWin->target,&targetAttribs);
+
+ /* set attributes */
+ decRegs.sizeReg &= ~ATMWCR_WIN_ATTR_MASK;
+ decRegs.sizeReg |= targetAttribs.attrib << ATMWCR_WIN_ATTR_OFFS;
+ /* set target ID */
+ decRegs.sizeReg &= ~ATMWCR_WIN_TARGET_MASK;
+ decRegs.sizeReg |= targetAttribs.targetId << ATMWCR_WIN_TARGET_OFFS;
+
+#if !defined(MV_RUN_FROM_FLASH)
+ /* To be on the safe side we disable the window before writing the */
+ /* new values. */
+ if (winNum != MV_AHB_TO_MBUS_INTREG_WIN)
+ {
+ mvAhbToMbusWinEnable(winNum,MV_FALSE);
+ }
+#endif
+
+ /* 3) Write to address decode Base Address Register */
+ if (winNum != MV_AHB_TO_MBUS_INTREG_WIN)
+ {
+ MV_REG_WRITE(AHB_TO_MBUS_WIN_BASE_REG(winNum), decRegs.baseReg);
+ }
+ else
+ {
+ MV_REG_WRITE(AHB_TO_MBUS_WIN_INTEREG_REG, decRegs.baseReg);
+ }
+
+
+ /* The internal register space has no size */
+ /* register. Do not perform the size register assignment for this target */
+ if (winNum != MV_AHB_TO_MBUS_INTREG_WIN)
+ {
+ /* Write to address decode Size Register */
+ MV_REG_WRITE(AHB_TO_MBUS_WIN_CTRL_REG(winNum), decRegs.sizeReg);
+ }
+
+ return MV_OK;
+}
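+/* Illustrative sketch (not part of the original file): a caller could open a
+ * 1MB CPU window towards the crypto engine roughly as follows. The window
+ * number (4) and base address (0xF0000000) are hypothetical example values,
+ * not defaults taken from this HAL. */
+#if 0
+static MV_STATUS exampleOpenCryptWin(void)
+{
+ MV_AHB_TO_MBUS_DEC_WIN win;
+
+ win.target           = CRYPT_ENG;  /* MBus target to be accessed        */
+ win.addrWin.baseLow  = 0xF0000000; /* CPU base address, size aligned    */
+ win.addrWin.baseHigh = 0;
+ win.addrWin.size     = 0x100000;   /* 1MB window                        */
+ win.enable           = MV_TRUE;    /* enable the window once programmed */
+
+ return mvAhbToMbusWinSet(4, &win);
+}
+#endif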
+
+/*******************************************************************************
+* mvAhbToMbusWinGet - Get CPU-to-peripheral winNum address window
+*
+* DESCRIPTION:
+* Get the CPU peripheral winNum address window.
+*
+* INPUT:
+* winNum - Peripheral winNum enumerator
+*
+* OUTPUT:
+* pAddrDecWin - CPU winNum window information data structure.
+*
+* RETURN:
+* MV_OK if winNum exist, MV_ERROR otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvAhbToMbusWinGet(MV_U32 winNum, MV_AHB_TO_MBUS_DEC_WIN *pAddrDecWin)
+{
+ MV_DEC_REGS decRegs;
+ MV_TARGET_ATTRIB targetAttrib;
+
+
+ /* Parameter checking */
+ if (winNum >= MAX_AHB_TO_MBUS_WINS)
+ {
+ mvOsPrintf("mvAhbToMbusWinGet: ERR. Invalid winNum %d\n", winNum);
+ return MV_NOT_SUPPORTED;
+ }
+
+
+ /* The internal register space has no size register */
+ if (winNum != MV_AHB_TO_MBUS_INTREG_WIN)
+ {
+ decRegs.sizeReg = MV_REG_READ(AHB_TO_MBUS_WIN_CTRL_REG(winNum));
+ }
+ else
+ {
+ decRegs.sizeReg = 0;
+ }
+
+
+ /* Read base and size */
+ if (winNum != MV_AHB_TO_MBUS_INTREG_WIN)
+ {
+ decRegs.baseReg = MV_REG_READ(AHB_TO_MBUS_WIN_BASE_REG(winNum));
+ }
+ else
+ {
+ decRegs.baseReg = MV_REG_READ(AHB_TO_MBUS_WIN_INTEREG_REG);
+ }
+
+
+
+ if (MV_OK != mvCtrlRegToAddrDec(&decRegs,&(pAddrDecWin->addrWin)))
+ {
+ mvOsPrintf("mvAhbToMbusWinGet: mvCtrlRegToAddrDec Failed \n");
+ return MV_ERROR;
+ }
+
+ if (winNum == MV_AHB_TO_MBUS_INTREG_WIN)
+ {
+ pAddrDecWin->addrWin.size = INTER_REGS_SIZE;
+ pAddrDecWin->target = INTER_REGS;
+ pAddrDecWin->enable = MV_TRUE;
+
+ return MV_OK;
+ }
+
+
+ if (decRegs.sizeReg & ATMWCR_WIN_ENABLE)
+ {
+ pAddrDecWin->enable = MV_TRUE;
+ }
+ else
+ {
+ pAddrDecWin->enable = MV_FALSE;
+
+ }
+
+
+
+ if (-1 == pAddrDecWin->addrWin.size)
+ {
+ return MV_ERROR;
+ }
+
+ /* attrib and targetId */
+ targetAttrib.attrib = (decRegs.sizeReg & ATMWCR_WIN_ATTR_MASK) >>
+ ATMWCR_WIN_ATTR_OFFS;
+ targetAttrib.targetId = (decRegs.sizeReg & ATMWCR_WIN_TARGET_MASK) >>
+ ATMWCR_WIN_TARGET_OFFS;
+
+ pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvAhbToMbusWinTargetGet - Get Window number associated with target
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_U32 mvAhbToMbusWinTargetGet(MV_TARGET target)
+{
+ MV_AHB_TO_MBUS_DEC_WIN decWin;
+ MV_U32 winNum;
+
+ /* Check parameters */
+ if (target >= MAX_TARGETS)
+ {
+ mvOsPrintf("mvAhbToMbusWinTargetGet: target %d is Illigal\n", target);
+ return 0xffffffff;
+ }
+
+ if (INTER_REGS == target)
+ {
+ return MV_AHB_TO_MBUS_INTREG_WIN;
+ }
+
+ for (winNum = 0; winNum < MAX_AHB_TO_MBUS_WINS ; winNum++)
+ {
+ if (winNum == MV_AHB_TO_MBUS_INTREG_WIN)
+ continue;
+
+ if (mvAhbToMbusWinGet(winNum,&decWin) != MV_OK)
+ {
+ mvOsPrintf("mvAhbToMbusWinTargetGet: mvAhbToMbusWinGet fail\n");
+ return 0xffffffff;
+
+ }
+
+ if (decWin.enable == MV_TRUE)
+ {
+ if (decWin.target == target)
+ {
+ return winNum;
+ }
+
+ }
+
+ }
+
+ return 0xFFFFFFFF;
+
+
+}
+
+/*******************************************************************************
+* mvAhbToMbusWinAvailGet - Get First Available window number.
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_U32 mvAhbToMbusWinAvailGet(MV_VOID)
+{
+ MV_AHB_TO_MBUS_DEC_WIN decWin;
+ MV_U32 winNum;
+
+ for (winNum = 0; winNum < MAX_AHB_TO_MBUS_WINS ; winNum++)
+ {
+ if (winNum == MV_AHB_TO_MBUS_INTREG_WIN)
+ continue;
+
+ if (mvAhbToMbusWinGet(winNum,&decWin) != MV_OK)
+ {
+ mvOsPrintf("mvAhbToMbusWinTargetGet: mvAhbToMbusWinGet fail\n");
+ return 0xffffffff;
+
+ }
+
+ if (decWin.enable == MV_FALSE)
+ {
+ return winNum;
+ }
+
+ }
+
+ return 0xFFFFFFFF;
+}
+
+
+/*******************************************************************************
+* mvAhbToMbusWinEnable - Enable/disable a CPU address decode window
+*
+* DESCRIPTION:
+* This function enables/disables a CPU address decode window.
+* If the parameter 'enable' == MV_TRUE the routine will enable the
+* window, thus enabling CPU accesses (before enabling the window it is
+* tested for overlapping). Otherwise, the window will be disabled.
+*
+* INPUT:
+* winNum - Peripheral winNum enumerator.
+* enable - Enable/disable parameter.
+*
+* OUTPUT:
+* N/A
+*
+* RETURN:
+* MV_ERROR if the protection window number was wrong, or the window
+* overlaps another winNum window.
+*
+*******************************************************************************/
+MV_STATUS mvAhbToMbusWinEnable(MV_U32 winNum, MV_BOOL enable)
+{
+
+ /* Parameter checking */
+ if (winNum >= MAX_AHB_TO_MBUS_WINS)
+ {
+ mvOsPrintf("mvAhbToMbusWinEnable: ERR. Invalid winNum %d\n", winNum);
+ return MV_NOT_SUPPORTED;
+ }
+
+ /* The internal registers BAR can't be disabled or enabled */
+ if (winNum == MV_AHB_TO_MBUS_INTREG_WIN)
+ {
+ return (enable ? MV_OK : MV_ERROR);
+ }
+
+ if (enable == MV_TRUE)
+ {
+ /* enable the window */
+ MV_REG_BIT_SET(AHB_TO_MBUS_WIN_CTRL_REG(winNum), ATMWCR_WIN_ENABLE);
+ }
+ else
+ { /* Disable address decode winNum window */
+ MV_REG_BIT_RESET(AHB_TO_MBUS_WIN_CTRL_REG(winNum), ATMWCR_WIN_ENABLE);
+ }
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvAhbToMbusWinRemap - Set CPU remap register for address windows.
+*
+* DESCRIPTION:
+* After a CPU address hits one of the PCI address decode windows there is an
+* option to remap the address to a different one. For example, the CPU
+* executes a read from PCI winNum window address 0x1200.0000. This
+* can be modified so that the address on the PCI bus would be 0x1400.0000,
+* using the PCI address remap mechanism.
+*
+* INPUT:
+* winNum - Peripheral winNum enumerator. Must be a PCI winNum.
+* pAddrDecWin - CPU winNum window information data structure.
+* Note that caller has to fill in the base field only. The
+* size field is ignored.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* The effective base address on success, or 0xffffffff if winNum has no
+* remap registers.
+*
+*******************************************************************************/
+MV_U32 mvAhbToMbusWinRemap(MV_U32 winNum, MV_ADDR_WIN *pAddrWin)
+{
+ MV_U32 baseAddr;
+ AHB_TO_MBUS_REMAP_REG_OFFS remapRegOffs;
+
+ MV_U32 effectiveBaseAddress=0,
+ baseAddrValue=0,windowSizeValue=0;
+
+
+ /* Get registers offsets of given winNum */
+ if (MV_NO_SUCH == ahbToMbusRemapRegOffsGet(winNum, &remapRegOffs))
+ {
+ return 0xffffffff;
+ }
+
+ /* 1) Set address remap low */
+ baseAddr = pAddrWin->baseLow;
+
+ /* Check base address aligment */
+ /*
+ if (MV_IS_NOT_ALIGN(baseAddr, ATMWRLR_REMAP_LOW_ALIGNMENT))
+ {
+ mvOsPrintf("mvAhbToMbusPciRemap: Warning. Target base 0x%x unaligned\n",
+ baseAddr);
+ return MV_ERROR;
+ }
+ */
+
+ /* BaseLow[31:16] => base register [31:16] */
+ baseAddr = baseAddr & ATMWRLR_REMAP_LOW_MASK;
+
+ MV_REG_WRITE(remapRegOffs.lowRegOffs, baseAddr);
+
+ MV_REG_WRITE(remapRegOffs.highRegOffs, pAddrWin->baseHigh);
+
+
+ baseAddrValue = MV_REG_READ(AHB_TO_MBUS_WIN_BASE_REG(winNum));
+ windowSizeValue = MV_REG_READ(AHB_TO_MBUS_WIN_CTRL_REG(winNum));
+
+ baseAddrValue &= ATMWBR_BASE_MASK;
+ windowSizeValue &=ATMWCR_WIN_SIZE_MASK;
+
+ /* Start calculating the effective Base Address */
+ effectiveBaseAddress = baseAddrValue ;
+
+ /* The effective base address will be combined from the chopped (if any)
+ remap value (according to the size value and remap mechanism) and the
+ window's base address */
+ effectiveBaseAddress |= (((windowSizeValue) | 0xffff) & pAddrWin->baseLow);
+ /* If the effectiveBaseAddress exceeds the window boundaries, return an
+ invalid value. */
+
+ if (effectiveBaseAddress > (baseAddrValue + (windowSizeValue | 0xffff)))
+ {
+ mvOsPrintf("mvAhbToMbusPciRemap: Error\n");
+ return 0xffffffff;
+ }
+
+ return effectiveBaseAddress;
+
+
+}
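+/* Illustrative sketch (not part of the original file): after a window towards
+ * PEX0 memory has been opened, a caller could redirect it to a different bus
+ * address, e.g. the 0x1400.0000 value from the description above:
+ *
+ *   MV_ADDR_WIN remap;
+ *   remap.baseLow  = 0x14000000;
+ *   remap.baseHigh = 0;
+ *   mvAhbToMbusWinRemap(mvAhbToMbusWinTargetGet(PEX0_MEM), &remap);
+ *
+ * The returned value is the effective base address actually seen on the bus,
+ * or 0xffffffff when the window has no remap registers. */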
+/*******************************************************************************
+* mvAhbToMbusWinTargetSwap - Swap AhbToMbus windows between targets
+*
+* DESCRIPTION:
+*
+* INPUT:
+* target1 - CPU Interface target 1
+* target2 - CPU Interface target 2
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_ERROR if the targets are illegal, or if one of the targets is not
+* associated with a valid window.
+* MV_OK otherwise.
+*
+*******************************************************************************/
+
+
+MV_STATUS mvAhbToMbusWinTargetSwap(MV_TARGET target1,MV_TARGET target2)
+{
+ MV_U32 winNum1,winNum2;
+ MV_AHB_TO_MBUS_DEC_WIN winDec1,winDec2,winDecTemp;
+ AHB_TO_MBUS_REMAP_REG_OFFS remapRegs1,remapRegs2;
+ MV_U32 remapBaseLow1=0,remapBaseLow2=0;
+ MV_U32 remapBaseHigh1=0,remapBaseHigh2=0;
+
+
+ /* Check parameters */
+ if (target1 >= MAX_TARGETS)
+ {
+ mvOsPrintf("mvAhbToMbusWinTargetSwap: target %d is Illigal\n", target1);
+ return MV_ERROR;
+ }
+
+ if (target2 >= MAX_TARGETS)
+ {
+ mvOsPrintf("mvAhbToMbusWinTargetSwap: target %d is Illigal\n", target1);
+ return MV_ERROR;
+ }
+
+
+ /* get window associated with this target */
+ winNum1 = mvAhbToMbusWinTargetGet(target1);
+
+ if (winNum1 == 0xffffffff)
+ {
+ mvOsPrintf("mvAhbToMbusWinTargetSwap: target %d has illigal win %d\n",
+ target1,winNum1);
+ return MV_ERROR;
+
+ }
+
+ /* get window associated with this target */
+ winNum2 = mvAhbToMbusWinTargetGet(target2);
+
+ if (winNum2 == 0xffffffff)
+ {
+ mvOsPrintf("mvAhbToMbusWinTargetSwap: target %d has illigal win %d\n",
+ target2,winNum2);
+ return MV_ERROR;
+
+ }
+
+ /* now Get original values of both Windows */
+ if (MV_OK != mvAhbToMbusWinGet(winNum1,&winDec1))
+ {
+ mvOsPrintf("mvAhbToMbusWinTargetSwap: mvAhbToMbusWinGet failed win %d\n",
+ winNum1);
+ return MV_ERROR;
+
+ }
+ if (MV_OK != mvAhbToMbusWinGet(winNum2,&winDec2))
+ {
+ mvOsPrintf("mvAhbToMbusWinTargetSwap: mvAhbToMbusWinGet failed win %d\n",
+ winNum2);
+ return MV_ERROR;
+
+ }
+
+
+ /* disable both windows */
+ if (MV_OK != mvAhbToMbusWinEnable(winNum1,MV_FALSE))
+ {
+ mvOsPrintf("mvAhbToMbusWinTargetSwap: failed to enable window %d\n",
+ winNum1);
+ return MV_ERROR;
+
+ }
+ if (MV_OK != mvAhbToMbusWinEnable(winNum2,MV_FALSE))
+ {
+ mvOsPrintf("mvAhbToMbusWinTargetSwap: failed to enable windo %d\n",
+ winNum2);
+ return MV_ERROR;
+
+ }
+
+
+ /* now swap targets */
+
+ /* first save winDec2 values */
+ winDecTemp.addrWin.baseHigh = winDec2.addrWin.baseHigh;
+ winDecTemp.addrWin.baseLow = winDec2.addrWin.baseLow;
+ winDecTemp.addrWin.size = winDec2.addrWin.size;
+ winDecTemp.enable = winDec2.enable;
+ winDecTemp.target = winDec2.target;
+
+ /* winDec2 = winDec1 */
+ winDec2.addrWin.baseHigh = winDec1.addrWin.baseHigh;
+ winDec2.addrWin.baseLow = winDec1.addrWin.baseLow;
+ winDec2.addrWin.size = winDec1.addrWin.size;
+ winDec2.enable = winDec1.enable;
+ winDec2.target = winDec1.target;
+
+
+ /* winDec1 = winDecTemp */
+ winDec1.addrWin.baseHigh = winDecTemp.addrWin.baseHigh;
+ winDec1.addrWin.baseLow = winDecTemp.addrWin.baseLow;
+ winDec1.addrWin.size = winDecTemp.addrWin.size;
+ winDec1.enable = winDecTemp.enable;
+ winDec1.target = winDecTemp.target;
+
+
+ /* now set the new values */
+
+
+ mvAhbToMbusWinSet(winNum1,&winDec1);
+ mvAhbToMbusWinSet(winNum2,&winDec2);
+
+
+
+
+
+ /* Now handle the remap windows, if any exist */
+
+
+ /* Check whether one or both windows also have a remap window
+ after the swap */
+
+ /* If a window had a remap value different from its base value
+ before the swap, then after the swap the remap value will be
+ equal to the base value, unless both windows have remap windows */
+
+ /* first get old values */
+ if (MV_NO_SUCH != ahbToMbusRemapRegOffsGet(winNum1,&remapRegs1))
+ {
+ remapBaseLow1 = MV_REG_READ(remapRegs1.lowRegOffs);
+ remapBaseHigh1 = MV_REG_READ(remapRegs1.highRegOffs);
+
+ }
+ if (MV_NO_SUCH != ahbToMbusRemapRegOffsGet(winNum2,&remapRegs2))
+ {
+ remapBaseLow2 = MV_REG_READ(remapRegs2.lowRegOffs);
+ remapBaseHigh2 = MV_REG_READ(remapRegs2.highRegOffs);
+
+
+ }
+
+ /* now do the swap */
+ if (MV_NO_SUCH != ahbToMbusRemapRegOffsGet(winNum1,&remapRegs1))
+ {
+ if (MV_NO_SUCH != ahbToMbusRemapRegOffsGet(winNum2,&remapRegs2))
+ {
+ /* Both windows have remap registers, so swap them */
+
+ MV_REG_WRITE(remapRegs2.highRegOffs,remapBaseHigh1);
+ MV_REG_WRITE(remapRegs2.lowRegOffs,remapBaseLow1);
+
+ MV_REG_WRITE(remapRegs1.highRegOffs,remapBaseHigh2);
+ MV_REG_WRITE(remapRegs1.lowRegOffs,remapBaseLow2);
+
+
+
+ }
+ else
+ {
+ /* remap == base */
+ MV_REG_WRITE(remapRegs1.highRegOffs,winDec1.addrWin.baseHigh);
+ MV_REG_WRITE(remapRegs1.lowRegOffs,winDec1.addrWin.baseLow);
+
+ }
+
+ }
+ else if (MV_NO_SUCH != ahbToMbusRemapRegOffsGet(winNum2,&remapRegs2))
+ {
+ /* remap == base */
+ MV_REG_WRITE(remapRegs2.highRegOffs,winDec2.addrWin.baseHigh);
+ MV_REG_WRITE(remapRegs2.lowRegOffs,winDec2.addrWin.baseLow);
+
+ }
+
+
+
+ return MV_OK;
+
+
+}
+
+
+
+#if defined(MV_88F1181)
+
+/*******************************************************************************
+* mvMbusArbSet - Set the CPU master crossbar (MBus) arbitration.
+*
+* DESCRIPTION:
+* This function sets CPU Mbus Arbiter
+*
+* INPUT:
+* pPizzaArbArray - A priority structure describing 16 "pizza slices". At
+* each clock cycle, the crossbar arbiter samples all
+* requests and gives the bus to the next agent according
+* to the "pizza".
+*
+* OUTPUT:
+* N/A
+*
+* RETURN:
+* MV_ERROR if the parameters to the function are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvMbusArbSet(MV_MBUS_ARB_TARGET *pPizzaArbArray)
+{
+ MV_U32 sliceNum;
+ MV_U32 xbarCtrl = 0;
+ MV_MBUS_ARB_TARGET xbarTarget;
+
+ /* 1) Set crossbar control low register */
+ for (sliceNum = 0; sliceNum < MRLR_SLICE_NUM; sliceNum++)
+ {
+ xbarTarget = pPizzaArbArray[sliceNum];
+
+ /* sliceNum parameter check */
+ if (xbarTarget > MAX_MBUS_ARB_TARGETS)
+ {
+ mvOsPrintf("mvAhbToMbusXbarCtrlSet: ERR. Can't set Target %d\n",
+ xbarTarget);
+ return MV_ERROR;
+ }
+ xbarCtrl |= (xbarTarget << MRLR_LOW_ARB_OFFS(sliceNum));
+ }
+ /* Write to crossbar control low register */
+ MV_REG_WRITE(MBUS_ARBITER_LOW_REG, xbarCtrl);
+
+ xbarCtrl = 0;
+
+ /* 2) Set crossbar control high register */
+ for (sliceNum = MRLR_SLICE_NUM;
+ sliceNum < MRLR_SLICE_NUM+MRHR_SLICE_NUM;
+ sliceNum++)
+ {
+
+ xbarTarget = pPizzaArbArray[sliceNum];
+
+ /* sliceNum parameter check */
+ if (xbarTarget > MAX_MBUS_ARB_TARGETS)
+ {
+ mvOsPrintf("mvAhbToMbusXbarCtrlSet: ERR. Can't set Target %d\n",
+ xbarTarget);
+ return MV_ERROR;
+ }
+ xbarCtrl |= (xbarTarget << MRHR_HIGH_ARB_OFFS(sliceNum));
+ }
+ /* Write to crossbar control high register */
+ MV_REG_WRITE(MBUS_ARBITER_HIGH_REG, xbarCtrl);
+
+ return MV_OK;
+}
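+/* Illustrative sketch (not part of the original file): a caller could build a
+ * "pizza" that gives DRAM every other slice and the ARM the rest, roughly:
+ *
+ *   MV_MBUS_ARB_TARGET pizza[MRLR_SLICE_NUM + MRHR_SLICE_NUM];
+ *   MV_U32 i;
+ *
+ *   for (i = 0; i < MRLR_SLICE_NUM + MRHR_SLICE_NUM; i++)
+ *       pizza[i] = (i & 1) ? ARM_MBUS_ARB_TARGET : DRAM_MBUS_ARB_TARGET;
+ *   mvMbusArbSet(pizza);
+ *
+ * The exact slice count and a sensible priority mix are board decisions;
+ * the values above are only an example. */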
+
+/*******************************************************************************
+* mvMbusArbCtrlSet - Set MBus Arbiter control register
+*
+* DESCRIPTION:
+*
+* INPUT:
+* ctrl - pointer to MV_MBUS_ARB_CTRL register
+*
+* OUTPUT:
+* N/A
+*
+* RETURN:
+* MV_ERROR if the parameters to the function are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvMbusArbCtrlSet(MV_MBUS_ARB_CTRL *ctrl)
+{
+
+ if (ctrl->highPrio == MV_FALSE)
+ {
+ MV_REG_BIT_RESET(MBUS_ARBITER_CTRL_REG, MACR_ARB_ARM_TOP);
+ }
+ else
+ {
+ MV_REG_BIT_SET(MBUS_ARBITER_CTRL_REG, MACR_ARB_ARM_TOP);
+ }
+
+ if (ctrl->fixedRoundRobin == MV_FALSE)
+ {
+ MV_REG_BIT_RESET(MBUS_ARBITER_CTRL_REG, MACR_ARB_TARGET_FIXED);
+ }
+ else
+ {
+ MV_REG_BIT_SET(MBUS_ARBITER_CTRL_REG, MACR_ARB_TARGET_FIXED);
+ }
+
+ if (ctrl->starvEn == MV_FALSE)
+ {
+ MV_REG_BIT_RESET(MBUS_ARBITER_CTRL_REG, MACR_ARB_REQ_CTRL_EN);
+ }
+ else
+ {
+ MV_REG_BIT_SET(MBUS_ARBITER_CTRL_REG, MACR_ARB_REQ_CTRL_EN);
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvMbusArbCtrlGet - Get MBus Arbiter control register
+*
+* DESCRIPTION:
+*
+* INPUT:
+* ctrl - pointer to MV_MBUS_ARB_CTRL register
+*
+* OUTPUT:
+* ctrl - pointer to MV_MBUS_ARB_CTRL register
+*
+* RETURN:
+* MV_ERROR if the parameters to the function are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvMbusArbCtrlGet(MV_MBUS_ARB_CTRL *ctrl)
+{
+
+ MV_U32 ctrlReg = MV_REG_READ(MBUS_ARBITER_CTRL_REG);
+
+ if (ctrlReg & MACR_ARB_ARM_TOP)
+ {
+ ctrl->highPrio = MV_TRUE;
+ }
+ else
+ {
+ ctrl->highPrio = MV_FALSE;
+ }
+
+ if (ctrlReg & MACR_ARB_TARGET_FIXED)
+ {
+ ctrl->fixedRoundRobin = MV_TRUE;
+ }
+ else
+ {
+ ctrl->fixedRoundRobin = MV_FALSE;
+ }
+
+ if (ctrlReg & MACR_ARB_REQ_CTRL_EN)
+ {
+ ctrl->starvEn = MV_TRUE;
+ }
+ else
+ {
+ ctrl->starvEn = MV_FALSE;
+ }
+
+
+ return MV_OK;
+}
+
+#endif /* #if defined(MV_88F1181) */
+
+
+
+/*******************************************************************************
+* ahbToMbusRemapRegOffsGet - Get CPU address remap register offsets
+*
+* DESCRIPTION:
+* CPU-to-PCI address remap register offsets are not consecutive.
+* This function returns the PCI address remap register offsets.
+*
+* INPUT:
+* winNum - Address decode window number. See MV_U32 enumerator.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_NO_SUCH if winNum has no remap registers, MV_OK otherwise.
+*
+*******************************************************************************/
+static MV_STATUS ahbToMbusRemapRegOffsGet(MV_U32 winNum,
+ AHB_TO_MBUS_REMAP_REG_OFFS *pRemapRegs)
+{
+ switch (winNum)
+ {
+ case 0:
+ case 1:
+ pRemapRegs->lowRegOffs = AHB_TO_MBUS_WIN_REMAP_LOW_REG(winNum);
+ pRemapRegs->highRegOffs = AHB_TO_MBUS_WIN_REMAP_HIGH_REG(winNum);
+ break;
+ case 2:
+ case 3:
+ if((mvCtrlModelGet() == MV_5281_DEV_ID) ||
+ (mvCtrlModelGet() == MV_1281_DEV_ID) ||
+ (mvCtrlModelGet() == MV_6183_DEV_ID) ||
+ (mvCtrlModelGet() == MV_6183L_DEV_ID))
+ {
+ pRemapRegs->lowRegOffs = AHB_TO_MBUS_WIN_REMAP_LOW_REG(winNum);
+ pRemapRegs->highRegOffs = AHB_TO_MBUS_WIN_REMAP_HIGH_REG(winNum);
+ break;
+ }
+ else
+ {
+ pRemapRegs->lowRegOffs = 0;
+ pRemapRegs->highRegOffs = 0;
+
+ DB(mvOsPrintf("ahbToMbusRemapRegOffsGet: ERR. Invalid winNum %d\n",
+ winNum));
+ return MV_NO_SUCH;
+ }
+ default:
+ {
+ pRemapRegs->lowRegOffs = 0;
+ pRemapRegs->highRegOffs = 0;
+
+ DB(mvOsPrintf("ahbToMbusRemapRegOffsGet: ERR. Invalid winNum %d\n",
+ winNum));
+ return MV_NO_SUCH;
+ }
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvAhbToMbusAddDecShow - Print the AHB to MBus bridge address decode map.
+*
+* DESCRIPTION:
+* This function prints the CPU address decode map.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID mvAhbToMbusAddDecShow(MV_VOID)
+{
+ MV_AHB_TO_MBUS_DEC_WIN win;
+ MV_U32 winNum;
+ mvOsOutput( "\n" );
+ mvOsOutput( "AHB To MBUS Bridge:\n" );
+ mvOsOutput( "-------------------\n" );
+
+ for( winNum = 0; winNum < MAX_AHB_TO_MBUS_WINS; winNum++ )
+ {
+ memset( &win, 0, sizeof(MV_AHB_TO_MBUS_DEC_WIN) );
+
+ mvOsOutput( "win%d - ", winNum );
+
+ if( mvAhbToMbusWinGet( winNum, &win ) == MV_OK )
+ {
+ if( win.enable )
+ {
+ mvOsOutput( "%s base %08x, ",
+ mvCtrlTargetNameGet(win.target), win.addrWin.baseLow );
+ mvOsOutput( "...." );
+ mvSizePrint( win.addrWin.size );
+
+ mvOsOutput( "\n" );
+
+ }
+ else
+ mvOsOutput( "disable\n" );
+ }
+ }
+
+}
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.h
new file mode 100644
index 000000000..1b352a1f6
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.h
@@ -0,0 +1,130 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvAhbToMbush
+#define __INCmvAhbToMbush
+
+/* includes */
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/sys/mvAhbToMbusRegs.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+/* defines */
+
+#if defined(MV_88F1181)
+/* This enumerator defines the Marvell controller possible MBUS arbiter */
+/* target ports. It is used to define the crossbar priority scheme (pizza) */
+typedef enum _mvMBusArbTargetId
+{
+ DRAM_MBUS_ARB_TARGET = 0, /* Port 0 -> DRAM interface */
+ TWSI_MBUS_ARB_TARGET = 1, /* Port 1 -> TWSI */
+ ARM_MBUS_ARB_TARGET = 2, /* Port 2 -> ARM */
+ PEX1_MBUS_ARB_TARGET = 3, /* Port 3 -> PCI Express 1 */
+ PEX0_MBUS_ARB_TARGET = 4, /* Port 4 -> PCI Express0 */
+ MAX_MBUS_ARB_TARGETS
+}MV_MBUS_ARB_TARGET;
+
+typedef struct _mvMBusArbCtrl
+{
+ MV_BOOL starvEn;
+ MV_BOOL highPrio;
+ MV_BOOL fixedRoundRobin;
+
+}MV_MBUS_ARB_CTRL;
+
+#endif /* #if defined(MV_88F1181) */
+
+typedef struct _mvAhbtoMbusDecWin
+{
+ MV_TARGET target;
+ MV_ADDR_WIN addrWin; /* An address window*/
+ MV_BOOL enable; /* Address decode window is enabled/disabled */
+
+}MV_AHB_TO_MBUS_DEC_WIN;
+
+/* mvAhbToMbus.h API list */
+
+MV_STATUS mvAhbToMbusInit(MV_VOID);
+MV_STATUS mvAhbToMbusWinSet(MV_U32 winNum, MV_AHB_TO_MBUS_DEC_WIN *pAddrDecWin);
+MV_STATUS mvAhbToMbusWinGet(MV_U32 winNum, MV_AHB_TO_MBUS_DEC_WIN *pAddrDecWin);
+MV_STATUS mvAhbToMbusWinEnable(MV_U32 winNum,MV_BOOL enable);
+MV_U32 mvAhbToMbusWinRemap(MV_U32 winNum, MV_ADDR_WIN *pAddrDecWin);
+MV_U32 mvAhbToMbusWinTargetGet(MV_TARGET target);
+MV_U32 mvAhbToMbusWinAvailGet(MV_VOID);
+MV_STATUS mvAhbToMbusWinTargetSwap(MV_TARGET target1,MV_TARGET target2);
+
+#if defined(MV_88F1181)
+
+MV_STATUS mvMbusArbSet(MV_MBUS_ARB_TARGET *pPizzaArbArray);
+MV_STATUS mvMbusArbCtrlSet(MV_MBUS_ARB_CTRL *ctrl);
+MV_STATUS mvMbusArbCtrlGet(MV_MBUS_ARB_CTRL *ctrl);
+
+#endif /* #if defined(MV_88F1181) */
+
+
+MV_VOID mvAhbToMbusAddDecShow(MV_VOID);
+
+
+#endif /* __INCmvAhbToMbush */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbusRegs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbusRegs.h
new file mode 100644
index 000000000..97dc63189
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbusRegs.h
@@ -0,0 +1,143 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvAhbToMbusRegsh
+#define __INCmvAhbToMbusRegsh
+
+/******************************/
+/* ARM Address Map Registers */
+/******************************/
+
+#define MAX_AHB_TO_MBUS_WINS 9
+#define MV_AHB_TO_MBUS_INTREG_WIN 8
+
+
+#define AHB_TO_MBUS_WIN_CTRL_REG(winNum) (0x20000 + (winNum)*0x10)
+#define AHB_TO_MBUS_WIN_BASE_REG(winNum) (0x20004 + (winNum)*0x10)
+#define AHB_TO_MBUS_WIN_REMAP_LOW_REG(winNum) (0x20008 + (winNum)*0x10)
+#define AHB_TO_MBUS_WIN_REMAP_HIGH_REG(winNum) (0x2000C + (winNum)*0x10)
+#define AHB_TO_MBUS_WIN_INTEREG_REG 0x20080
+
+/* Window Control Register */
+/* AHB_TO_MBUS_WIN_CTRL_REG (ATMWCR)*/
+#define ATMWCR_WIN_ENABLE BIT0 /* Window Enable */
+
+#define ATMWCR_WIN_TARGET_OFFS 4 /* The target interface associated
+ with this window*/
+#define ATMWCR_WIN_TARGET_MASK (0xf << ATMWCR_WIN_TARGET_OFFS)
+
+#define ATMWCR_WIN_ATTR_OFFS 8 /* The target interface attributes
+ Associated with this window */
+#define ATMWCR_WIN_ATTR_MASK (0xff << ATMWCR_WIN_ATTR_OFFS)
+
+
+/*
+Used with the Base register to set the address window size and location.
+Must be programmed from LSB to MSB as a sequence of 1's followed
+by a sequence of 0's. The number of 1's specifies the size of the window
+in 64 KB granularity (e.g. a value of 0x00FF specifies 256 * 64 KB = 16 MB).
+
+NOTE: A value of 0x0 specifies 64KB size.
+*/
+#define ATMWCR_WIN_SIZE_OFFS 16 /* Window Size */
+#define ATMWCR_WIN_SIZE_MASK (0xffff << ATMWCR_WIN_SIZE_OFFS)
+#define ATMWCR_WIN_SIZE_ALIGNMENT 0x10000
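+
+/* Illustrative helper (a sketch, not part of the original header): computes
+ * the size-field bits for a power-of-two window size given in bytes, e.g.
+ * EXAMPLE_ATMWCR_WIN_SIZE(0x1000000) yields 0x00FF << 16 for a 16 MB window. */
+#define EXAMPLE_ATMWCR_WIN_SIZE(size) \
+ ((((size) / ATMWCR_WIN_SIZE_ALIGNMENT) - 1) << ATMWCR_WIN_SIZE_OFFS)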
+
+/* Window Base Register */
+/* AHB_TO_MBUS_WIN_BASE_REG (ATMWBR) */
+
+/*
+Used with the size field to set the address window size and location.
+Corresponds to transaction address[31:16]
+*/
+#define ATMWBR_BASE_OFFS 16 /* Base Address */
+#define ATMWBR_BASE_MASK (0xffff << ATMWBR_BASE_OFFS)
+#define ATMWBR_BASE_ALIGNMENT 0x10000
+
+/* Window Remap Low Register */
+/* AHB_TO_MBUS_WIN_REMAP_LOW_REG (ATMWRLR) */
+
+/*
+Used with the size field to specify the address bits [31:0] driven to
+the target interface:
+target_addr[31:16] = (addr[31:16] & size[15:0]) | (remap[31:16] & ~size[15:0])
+*/
+#define ATMWRLR_REMAP_LOW_OFFS 16 /* Remap Address */
+#define ATMWRLR_REMAP_LOW_MASK (0xffff << ATMWRLR_REMAP_LOW_OFFS)
+#define ATMWRLR_REMAP_LOW_ALIGNMENT 0x10000
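+
+/* Worked example (illustrative, not part of the original header): with a
+ * window base of 0x90000000, a size field of 0x000F (a 1 MB window) and a
+ * remap-low value of 0xF0000000, a CPU access to 0x90012345 is driven as
+ * target_addr[31:16] = (0x9001 & 0x000F) | (0xF000 & ~0x000F) = 0xF001,
+ * i.e. the access appears on the target at 0xF0012345. */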
+
+/* Window Remap High Register */
+/* AHB_TO_MBUS_WIN_REMAP_HIGH_REG (ATMWRHR) */
+
+/*
+Specifies address bits[63:32] to be driven to the target interface.
+target_addr[63:32] = RemapHigh[31:0]
+*/
+#define ATMWRHR_REMAP_HIGH_OFFS 0 /* Remap Address */
+#define ATMWRHR_REMAP_HIGH_MASK (0xffffffff << ATMWRHR_REMAP_HIGH_OFFS)
+
+
+#endif /* __INCmvAhbToMbusRegsh */
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.c
new file mode 100644
index 000000000..872dc6e9d
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.c
@@ -0,0 +1,1036 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/* includes */
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "ctrlEnv/sys/mvAhbToMbusRegs.h"
+#include "cpu/mvCpu.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "mvSysHwConfig.h"
+#include "mvSysDram.h"
+
+/*#define MV_DEBUG*/
+/* defines */
+
+#ifdef MV_DEBUG
+ #define DB(x) x
+#else
+ #define DB(x)
+#endif
+
+/* locals */
+/* static functions */
+static MV_BOOL cpuTargetWinOverlap(MV_TARGET target, MV_ADDR_WIN *pAddrWin);
+
+MV_TARGET * sampleAtResetTargetArray;
+MV_TARGET sampleAtResetTargetArrayP[] = BOOT_TARGETS_NAME_ARRAY;
+MV_TARGET sampleAtResetTargetArray6180P[] = BOOT_TARGETS_NAME_ARRAY_6180;
+/*******************************************************************************
+* mvCpuIfInit - Initialize Controller CPU interface
+*
+* DESCRIPTION:
+* This function initialize Controller CPU interface:
+* 1. Set CPU interface configuration registers.
+* 2. Set CPU master Pizza arbiter control according to static
+* configuration described in configuration file.
+* 3. Opens CPU address decode windows. DRAM windows are assumed to be
+* already set (auto detection).
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_OK on success, MV_ERROR otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvCpuIfInit(MV_CPU_DEC_WIN *cpuAddrWinMap)
+{
+ MV_U32 regVal;
+ MV_TARGET target;
+ MV_ADDR_WIN addrWin;
+
+ if (cpuAddrWinMap == NULL)
+ {
+ DB(mvOsPrintf("mvCpuIfInit:ERR. cpuAddrWinMap == NULL\n"));
+ return MV_ERROR;
+ }
+
+ /*Initialize the boot target array according to device type*/
+ if(mvCtrlModelGet() == MV_6180_DEV_ID)
+ sampleAtResetTargetArray = sampleAtResetTargetArray6180P;
+ else
+ sampleAtResetTargetArray = sampleAtResetTargetArrayP;
+
+ /* Set ARM Configuration register */
+ regVal = MV_REG_READ(CPU_CONFIG_REG);
+ regVal &= ~CPU_CONFIG_DEFAULT_MASK;
+ regVal |= CPU_CONFIG_DEFAULT;
+ MV_REG_WRITE(CPU_CONFIG_REG,regVal);
+
+ /* First disable all CPU target windows */
+ for (target = 0; cpuAddrWinMap[target].enable != TBL_TERM; target++)
+ {
+ if ((MV_TARGET_IS_DRAM(target))||(target == INTER_REGS))
+ {
+ continue;
+ }
+
+#if defined(MV_MEM_OVER_PCI_WA) || defined(MV_UART_OVER_PCI_WA)
+ /* If the target is PEX or PCI and memory is over PEX or PCI, we don't touch this CPU window */
+ if (MV_TARGET_IS_PCI(target))
+ {
+ continue;
+ }
+#endif
+
+#if defined(MV_MEM_OVER_PEX_WA) || defined(MV_UART_OVER_PEX_WA)
+ /* If the target is PEX or PCI and memory is over PEX or PCI, we don't touch this CPU window */
+ if (MV_TARGET_IS_PEX(target))
+ {
+ continue;
+ }
+#endif
+#if defined(MV_RUN_FROM_FLASH)
+ /* Don't disable the boot device. */
+ if (target == DEV_BOOCS)
+ {
+ continue;
+ }
+#endif /* MV_RUN_FROM_FLASH */
+ mvCpuIfTargetWinEnable(MV_CHANGE_BOOT_CS(target),MV_FALSE);
+ }
+
+#if defined(MV_RUN_FROM_FLASH)
+ /* Resize the bootcs windows before other windows, because this */
+ /* window is enabled and will cause an overlap if not resized. */
+ target = DEV_BOOCS;
+
+ if (MV_OK != mvCpuIfTargetWinSet(target, &cpuAddrWinMap[target]))
+ {
+ DB(mvOsPrintf("mvCpuIfInit:ERR. mvCpuIfTargetWinSet fail\n"));
+ return MV_ERROR;
+ }
+
+ addrWin.baseLow = cpuAddrWinMap[target].addrWin.baseLow;
+ addrWin.baseHigh = cpuAddrWinMap[target].addrWin.baseHigh;
+ if (0xffffffff == mvAhbToMbusWinRemap(cpuAddrWinMap[target].winNum ,&addrWin))
+ {
+ DB(mvOsPrintf("mvCpuIfInit:WARN. mvAhbToMbusWinRemap can't remap winNum=%d\n",
+ cpuAddrWinMap[target].winNum));
+ }
+
+#endif /* MV_RUN_FROM_FLASH */
+
+ /* Go through all targets in user table until table terminator */
+ for (target = 0; cpuAddrWinMap[target].enable != TBL_TERM; target++)
+ {
+
+#if defined(MV_RUN_FROM_FLASH)
+ if (target == DEV_BOOCS)
+ {
+ continue;
+ }
+#endif /* MV_RUN_FROM_FLASH */
+
+ /* If DRAM auto sizing is used, do not initialize DRAM target windows, */
+ /* assuming this has already been done earlier. */
+#ifdef MV_DRAM_AUTO_SIZE
+ if (MV_TARGET_IS_DRAM(target))
+ {
+ continue;
+ }
+#endif
+
+#if defined(MV_MEM_OVER_PCI_WA) || defined(MV_UART_OVER_PCI_WA)
+ /* If the target is PEX or PCI and memory is over PEX or PCI, we don't touch this CPU window */
+ if (MV_TARGET_IS_PCI(target))
+ {
+ continue;
+ }
+#endif
+
+#if defined(MV_MEM_OVER_PEX_WA) || defined(MV_UART_OVER_PEX_WA)
+ /* If the target is PEX or PCI and memory is over PEX or PCI, we don't touch this CPU window */
+ if (MV_TARGET_IS_PEX(target))
+ {
+ continue;
+ }
+#endif
+ /* If the target attribute is the same as the boot device attribute */
+ /* then it stays disabled */
+ if (MV_TARGET_IS_AS_BOOT(target))
+ {
+ continue;
+ }
+
+ if((0 == cpuAddrWinMap[target].addrWin.size) ||
+ (DIS == cpuAddrWinMap[target].enable))
+
+ {
+ if (MV_OK != mvCpuIfTargetWinEnable(target, MV_FALSE))
+ {
+ DB(mvOsPrintf("mvCpuIfInit:ERR. mvCpuIfTargetWinEnable fail\n"));
+ return MV_ERROR;
+ }
+
+ }
+ else
+ {
+ if (MV_OK != mvCpuIfTargetWinSet(target, &cpuAddrWinMap[target]))
+ {
+ DB(mvOsPrintf("mvCpuIfInit:ERR. mvCpuIfTargetWinSet fail\n"));
+ return MV_ERROR;
+ }
+
+ addrWin.baseLow = cpuAddrWinMap[target].addrWin.baseLow;
+ addrWin.baseHigh = cpuAddrWinMap[target].addrWin.baseHigh;
+ if (0xffffffff == mvAhbToMbusWinRemap(cpuAddrWinMap[target].winNum ,&addrWin))
+ {
+ DB(mvOsPrintf("mvCpuIfInit:WARN. mvAhbToMbusWinRemap can't remap winNum=%d\n",
+ cpuAddrWinMap[target].winNum));
+ }
+
+
+ }
+ }
+
+ return MV_OK;
+
+
+}
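+
+/*
+ * Illustrative usage sketch (not part of the original Marvell sources):
+ * a board support file typically provides a TBL_TERM-terminated
+ * MV_CPU_DEC_WIN table and calls mvCpuIfInit() once during early init.
+ * The table name below is hypothetical.
+ *
+ *   extern MV_CPU_DEC_WIN boardAddrWinMap[];
+ *
+ *   if (mvCpuIfInit(boardAddrWinMap) != MV_OK)
+ *       mvOsPrintf("CPU interface init failed\n");
+ */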
+
+
+/*******************************************************************************
+* mvCpuIfTargetWinSet - Set CPU-to-peripheral target address window
+*
+* DESCRIPTION:
+* This function sets a peripheral target (e.g. SDRAM bank0, PCI0_MEM0)
+* address window, also known as address decode window.
+* A new address decode window is set for specified target address window.
+* If address decode window parameter structure enables the window,
+* the routine will also enable the target window, allowing CPU to access
+* the target window.
+*
+* INPUT:
+* target - Peripheral target enumerator.
+* pAddrDecWin - CPU target window data structure.
+*
+* OUTPUT:
+* N/A
+*
+* RETURN:
+* MV_OK if the CPU target window was set correctly, MV_ERROR if the
+* address window overlaps with another active CPU target window or a
+* 36bit base address is assigned while the CPU does not support that.
+* The function returns MV_NOT_SUPPORTED, if the target is unsupported.
+*
+*******************************************************************************/
+MV_STATUS mvCpuIfTargetWinSet(MV_TARGET target, MV_CPU_DEC_WIN *pAddrDecWin)
+{
+ MV_AHB_TO_MBUS_DEC_WIN decWin;
+ MV_U32 existingWinNum;
+ MV_DRAM_DEC_WIN addrDecWin;
+
+ target = MV_CHANGE_BOOT_CS(target);
+
+ /* Check parameters */
+ if (target >= MAX_TARGETS)
+ {
+ mvOsPrintf("mvCpuIfTargetWinSet: target %d is Illegal\n", target);
+ return MV_ERROR;
+ }
+
+ /* 2) Check if the requested window overlaps with current windows */
+ if (MV_TRUE == cpuTargetWinOverlap(target, &pAddrDecWin->addrWin))
+ {
+ mvOsPrintf("mvCpuIfTargetWinSet: ERR. Target %d overlap\n", target);
+ return MV_BAD_PARAM;
+ }
+
+ if (MV_TARGET_IS_DRAM(target))
+ {
+ /* copy relevant data to MV_DRAM_DEC_WIN structure */
+ addrDecWin.addrWin.baseHigh = pAddrDecWin->addrWin.baseHigh;
+ addrDecWin.addrWin.baseLow = pAddrDecWin->addrWin.baseLow;
+ addrDecWin.addrWin.size = pAddrDecWin->addrWin.size;
+ addrDecWin.enable = pAddrDecWin->enable;
+
+
+ if (mvDramIfWinSet(target,&addrDecWin) != MV_OK)
+ {
+ mvOsPrintf("mvCpuIfTargetWinSet: mvDramIfWinSet Failed\n");
+ return MV_ERROR;
+ }
+
+ }
+ else
+ {
+ /* copy relevant data to MV_AHB_TO_MBUS_DEC_WIN structure */
+ decWin.addrWin.baseLow = pAddrDecWin->addrWin.baseLow;
+ decWin.addrWin.baseHigh = pAddrDecWin->addrWin.baseHigh;
+ decWin.addrWin.size = pAddrDecWin->addrWin.size;
+ decWin.enable = pAddrDecWin->enable;
+ decWin.target = target;
+
+ existingWinNum = mvAhbToMbusWinTargetGet(target);
+
+ /* check if there is already another Window configured
+ for this target */
+ if ((existingWinNum < MAX_AHB_TO_MBUS_WINS )&&
+ (existingWinNum != pAddrDecWin->winNum))
+ {
+ /* if we want to enable the new window number
+    passed by the user, then the old one should
+    be disabled */
+ if (MV_TRUE == pAddrDecWin->enable)
+ {
+ /* be sure it is disabled */
+ mvAhbToMbusWinEnable(existingWinNum , MV_FALSE);
+ }
+ }
+
+ if (mvAhbToMbusWinSet(pAddrDecWin->winNum,&decWin) != MV_OK)
+ {
+ mvOsPrintf("mvCpuIfTargetWinSet: mvAhbToMbusWinSet Failed\n");
+ return MV_ERROR;
+ }
+
+ }
+
+ return MV_OK;
+}
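+
+/*
+ * Illustrative usage sketch (an assumption, not taken from the original
+ * file): opening a decode window for a device target. The target name,
+ * window number and addresses are placeholders; real values come from
+ * the board configuration.
+ *
+ *   MV_CPU_DEC_WIN win;
+ *
+ *   win.addrWin.baseLow  = 0xF4000000;
+ *   win.addrWin.baseHigh = 0;
+ *   win.addrWin.size     = 0x1000000;
+ *   win.winNum           = 3;
+ *   win.enable           = MV_TRUE;
+ *
+ *   if (mvCpuIfTargetWinSet(DEVICE_CS1, &win) != MV_OK)
+ *       mvOsPrintf("failed to open the DEVICE_CS1 window\n");
+ */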
+
+/*******************************************************************************
+* mvCpuIfTargetWinGet - Get CPU-to-peripheral target address window
+*
+* DESCRIPTION:
+* Get the CPU peripheral target address window.
+*
+* INPUT:
+* target - Peripheral target enumerator
+*
+* OUTPUT:
+* pAddrDecWin - CPU target window information data structure.
+*
+* RETURN:
+* MV_OK if target exist, MV_ERROR otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvCpuIfTargetWinGet(MV_TARGET target, MV_CPU_DEC_WIN *pAddrDecWin)
+{
+
+ MV_U32 winNum=0xffffffff;
+ MV_AHB_TO_MBUS_DEC_WIN decWin;
+ MV_DRAM_DEC_WIN addrDecWin;
+
+ target = MV_CHANGE_BOOT_CS(target);
+
+ /* Check parameters */
+ if (target >= MAX_TARGETS)
+ {
+ mvOsPrintf("mvCpuIfTargetWinGet: target %d is Illegal\n", target);
+ return MV_ERROR;
+ }
+
+ if (MV_TARGET_IS_DRAM(target))
+ {
+ if (mvDramIfWinGet(target,&addrDecWin) != MV_OK)
+ {
+ mvOsPrintf("mvCpuIfTargetWinGet: Failed to get window target %d\n",
+ target);
+ return MV_ERROR;
+ }
+
+ /* copy relevant data to MV_CPU_DEC_WIN structure */
+ pAddrDecWin->addrWin.baseLow = addrDecWin.addrWin.baseLow;
+ pAddrDecWin->addrWin.baseHigh = addrDecWin.addrWin.baseHigh;
+ pAddrDecWin->addrWin.size = addrDecWin.addrWin.size;
+ pAddrDecWin->enable = addrDecWin.enable;
+ pAddrDecWin->winNum = 0xffffffff;
+
+ }
+ else
+ {
+ /* get the Window number associated with this target */
+
+ winNum = mvAhbToMbusWinTargetGet(target);
+ if (winNum >= MAX_AHB_TO_MBUS_WINS)
+ {
+ return MV_NO_SUCH;
+
+ }
+
+ if (mvAhbToMbusWinGet(winNum , &decWin) != MV_OK)
+ {
+ mvOsPrintf("%s: mvAhbToMbusWinGet Failed at winNum = %d\n",
+ __FUNCTION__, winNum);
+ return MV_ERROR;
+
+ }
+
+ /* copy relevant data to MV_CPU_DEC_WIN structure */
+ pAddrDecWin->addrWin.baseLow = decWin.addrWin.baseLow;
+ pAddrDecWin->addrWin.baseHigh = decWin.addrWin.baseHigh;
+ pAddrDecWin->addrWin.size = decWin.addrWin.size;
+ pAddrDecWin->enable = decWin.enable;
+ pAddrDecWin->winNum = winNum;
+
+ }
+
+
+
+
+ return MV_OK;
+}
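+
+/*
+ * Illustrative usage sketch (assumption): reading a window back and
+ * printing its base when it is enabled.
+ *
+ *   MV_CPU_DEC_WIN win;
+ *
+ *   if ((mvCpuIfTargetWinGet(SDRAM_CS0, &win) == MV_OK) && win.enable)
+ *       mvOsPrintf("SDRAM_CS0 base 0x%08x size 0x%x\n",
+ *                  win.addrWin.baseLow, win.addrWin.size);
+ */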
+
+
+/*******************************************************************************
+* mvCpuIfTargetWinEnable - Enable/disable a CPU address decode window
+*
+* DESCRIPTION:
+* This function enables/disables a CPU address decode window.
+* If parameter 'enable' == MV_TRUE the routine will enable the
+* window, thus enabling CPU accesses (before enabling the window it is
+* tested for overlapping). Otherwise, the window will be disabled.
+*
+* INPUT:
+* target - Peripheral target enumerator.
+* enable - Enable/disable parameter.
+*
+* OUTPUT:
+* N/A
+*
+* RETURN:
+* MV_ERROR if protection window number was wrong, or the window
+* overlaps another target window.
+*
+*******************************************************************************/
+MV_STATUS mvCpuIfTargetWinEnable(MV_TARGET target,MV_BOOL enable)
+{
+ MV_U32 winNum, temp;
+ MV_CPU_DEC_WIN addrDecWin;
+
+ target = MV_CHANGE_BOOT_CS(target);
+
+ /* Check parameters */
+ if (target >= MAX_TARGETS)
+ {
+ mvOsPrintf("mvCpuIfTargetWinEnable: target %d is Illegal\n", target);
+ return MV_ERROR;
+ }
+
+ /* get the window and check if it exist */
+ temp = mvCpuIfTargetWinGet(target, &addrDecWin);
+ if (MV_NO_SUCH == temp)
+ {
+ return (enable? MV_ERROR: MV_OK);
+ }
+ else if( MV_OK != temp)
+ {
+ mvOsPrintf("%s: ERR. Getting target %d failed.\n",__FUNCTION__, target);
+ return MV_ERROR;
+ }
+
+
+ /* check overlap */
+
+ if (MV_TRUE == enable)
+ {
+ if (MV_TRUE == cpuTargetWinOverlap(target, &addrDecWin.addrWin))
+ {
+ DB(mvOsPrintf("%s: ERR. Target %d overlap\n",__FUNCTION__, target));
+ return MV_ERROR;
+ }
+
+ }
+
+
+ if (MV_TARGET_IS_DRAM(target))
+ {
+ if (mvDramIfWinEnable(target , enable) != MV_OK)
+ {
+ mvOsPrintf("mvCpuIfTargetWinEnable: mvDramIfWinEnable Failed\n");
+ return MV_ERROR;
+
+ }
+
+ }
+ else
+ {
+ /* get the Window number associated with this target */
+
+ winNum = mvAhbToMbusWinTargetGet(target);
+
+ if (winNum >= MAX_AHB_TO_MBUS_WINS)
+ {
+ return (enable? MV_ERROR: MV_OK);
+ }
+
+ if (mvAhbToMbusWinEnable(winNum , enable) != MV_OK)
+ {
+ mvOsPrintf("mvCpuIfTargetWinEnable: Failed to enable window = %d\n",
+ winNum);
+ return MV_ERROR;
+
+ }
+
+ }
+
+ return MV_OK;
+}
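+
+/*
+ * Illustrative usage sketch (assumption): temporarily disabling a target
+ * window around a reconfiguration step. DEVICE_CS1 is a placeholder.
+ *
+ *   if (mvCpuIfTargetWinEnable(DEVICE_CS1, MV_FALSE) != MV_OK)
+ *       mvOsPrintf("could not disable the DEVICE_CS1 window\n");
+ *   ... reconfigure the window here ...
+ *   if (mvCpuIfTargetWinEnable(DEVICE_CS1, MV_TRUE) != MV_OK)
+ *       mvOsPrintf("could not re-enable the DEVICE_CS1 window\n");
+ */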
+
+
+/*******************************************************************************
+* mvCpuIfTargetWinSizeGet - Get CPU target address window size
+*
+* DESCRIPTION:
+* Get the size of CPU-to-peripheral target window.
+*
+* INPUT:
+* target - Peripheral target enumerator
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32bit size. The function returns '0' if the window is closed or in
+* case of an error.
+*
+*******************************************************************************/
+MV_U32 mvCpuIfTargetWinSizeGet(MV_TARGET target)
+{
+ MV_CPU_DEC_WIN addrDecWin;
+
+ target = MV_CHANGE_BOOT_CS(target);
+
+ /* Check parameters */
+ if (target >= MAX_TARGETS)
+ {
+ mvOsPrintf("mvCpuIfTargetWinSizeGet: target %d is Illegal\n", target);
+ return 0;
+ }
+
+ /* Get the winNum window */
+ if (MV_OK != mvCpuIfTargetWinGet(target, &addrDecWin))
+ {
+ mvOsPrintf("mvCpuIfTargetWinSizeGet:ERR. Getting target %d failed.\n",
+ target);
+ return 0;
+ }
+
+ /* Check if window is enabled */
+ if (addrDecWin.enable == MV_TRUE)
+ {
+ return (addrDecWin.addrWin.size);
+ }
+ else
+ {
+ return 0; /* Window disabled. return 0 */
+ }
+}
+
+/*******************************************************************************
+* mvCpuIfTargetWinBaseLowGet - Get CPU target address window base low
+*
+* DESCRIPTION:
+* CPU-to-peripheral target address window base is constructed of
+* two parts: Low and high.
+* This function gets the CPU peripheral target low base address.
+*
+* INPUT:
+* target - Peripheral target enumerator
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32bit low base address.
+*
+*******************************************************************************/
+MV_U32 mvCpuIfTargetWinBaseLowGet(MV_TARGET target)
+{
+ MV_CPU_DEC_WIN addrDecWin;
+
+ target = MV_CHANGE_BOOT_CS(target);
+
+ /* Check parameters */
+ if (target >= MAX_TARGETS)
+ {
+ mvOsPrintf("mvCpuIfTargetWinBaseLowGet: target %d is Illegal\n", target);
+ return 0xffffffff;
+ }
+
+ /* Get the target window */
+ if (MV_OK != mvCpuIfTargetWinGet(target, &addrDecWin))
+ {
+ mvOsPrintf("mvCpuIfTargetWinBaseLowGet:ERR. Getting target %d failed.\n",
+ target);
+ return 0xffffffff;
+ }
+
+ if (MV_FALSE == addrDecWin.enable)
+ {
+ return 0xffffffff;
+ }
+ return (addrDecWin.addrWin.baseLow);
+}
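+
+/*
+ * Illustrative usage sketch (assumption): combining the size and base
+ * getters to describe a window. PEX0_MEM is used here as a typical PEX
+ * memory target name; treat it as a placeholder. Both return values are
+ * checked against the "closed window / error" cases documented above.
+ *
+ *   MV_U32 size = mvCpuIfTargetWinSizeGet(PEX0_MEM);
+ *   MV_U32 base = mvCpuIfTargetWinBaseLowGet(PEX0_MEM);
+ *
+ *   if ((size != 0) && (base != 0xffffffff))
+ *       mvOsPrintf("PEX0_MEM window: base 0x%08x size 0x%x\n", base, size);
+ */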
+
+/*******************************************************************************
+* mvCpuIfTargetWinBaseHighGet - Get CPU target address window base high
+*
+* DESCRIPTION:
+* CPU-to-peripheral target address window base is constructed of
+* two parts: Low and high.
+* This function gets the CPU peripheral target high base address.
+*
+* INPUT:
+* target - Peripheral target enumerator
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32bit high base address.
+*
+*******************************************************************************/
+MV_U32 mvCpuIfTargetWinBaseHighGet(MV_TARGET target)
+{
+ MV_CPU_DEC_WIN addrDecWin;
+
+ target = MV_CHANGE_BOOT_CS(target);
+
+ /* Check parameters */
+ if (target >= MAX_TARGETS)
+ {
+ mvOsPrintf("mvCpuIfTargetWinBaseHighGet: target %d is Illegal\n", target);
+ return 0xffffffff;
+ }
+
+ /* Get the target window */
+ if (MV_OK != mvCpuIfTargetWinGet(target, &addrDecWin))
+ {
+ mvOsPrintf("mvCpuIfTargetWinBaseHighGet:ERR. Getting target %d failed.\n",
+ target);
+ return 0xffffffff;
+ }
+
+ if (MV_FALSE == addrDecWin.enable)
+ {
+ return 0;
+ }
+
+ return (addrDecWin.addrWin.baseHigh);
+}
+
+#if defined(MV_INCLUDE_PEX)
+/*******************************************************************************
+* mvCpuIfPexRemap - Set CPU remap register for address windows.
+*
+* DESCRIPTION:
+*
+* INPUT:
+* pexTarget - Peripheral target enumerator. Must be a PEX target.
+* pAddrDecWin - CPU target window information data structure.
+* Note that caller has to fill in the base field only. The
+* size field is ignored.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 0xFFFFFFFF if the target is not a PEX one or has no window assigned,
+* otherwise the value returned by mvAhbToMbusWinRemap().
+*
+*******************************************************************************/
+MV_U32 mvCpuIfPexRemap(MV_TARGET pexTarget, MV_ADDR_WIN *pAddrDecWin)
+{
+ MV_U32 winNum;
+
+ /* Check parameters */
+
+ if (mvCtrlPexMaxIfGet() > 1)
+ {
+ if ((!MV_TARGET_IS_PEX1(pexTarget))&&(!MV_TARGET_IS_PEX0(pexTarget)))
+ {
+ mvOsPrintf("mvCpuIfPexRemap: target %d is Illegal\n",pexTarget);
+ return 0xffffffff;
+ }
+
+ }
+ else
+ {
+ if (!MV_TARGET_IS_PEX0(pexTarget))
+ {
+ mvOsPrintf("mvCpuIfPexRemap: target %d is Illegal\n",pexTarget);
+ return 0xffffffff;
+ }
+
+ }
+
+ /* get the Window number associated with this target */
+ winNum = mvAhbToMbusWinTargetGet(pexTarget);
+
+ if (winNum >= MAX_AHB_TO_MBUS_WINS)
+ {
+ mvOsPrintf("mvCpuIfPexRemap: mvAhbToMbusWinTargetGet Failed\n");
+ return 0xffffffff;
+
+ }
+
+ return mvAhbToMbusWinRemap(winNum , pAddrDecWin);
+}
+
+#endif
+
+#if defined(MV_INCLUDE_PCI)
+/*******************************************************************************
+* mvCpuIfPciRemap - Set CPU remap register for address windows.
+*
+* DESCRIPTION:
+*
+* INPUT:
+* pciTarget - Peripheral target enumerator. Must be a PCI target.
+* pAddrDecWin - CPU target window information data structure.
+* Note that caller has to fill in the base field only. The
+* size field is ignored.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 0xFFFFFFFF if the target is not a PCI one or has no window assigned,
+* otherwise the value returned by mvAhbToMbusWinRemap().
+*
+*******************************************************************************/
+MV_U32 mvCpuIfPciRemap(MV_TARGET pciTarget, MV_ADDR_WIN *pAddrDecWin)
+{
+ MV_U32 winNum;
+
+ /* Check parameters */
+ if (!MV_TARGET_IS_PCI(pciTarget))
+ {
+ mvOsPrintf("mvCpuIfPciRemap: target %d is Illegal\n",pciTarget);
+ return 0xffffffff;
+ }
+
+ /* get the Window number associated with this target */
+ winNum = mvAhbToMbusWinTargetGet(pciTarget);
+
+ if (winNum >= MAX_AHB_TO_MBUS_WINS)
+ {
+ mvOsPrintf("mvCpuIfPciRemap: mvAhbToMbusWinTargetGet Failed\n");
+ return 0xffffffff;
+
+ }
+
+ return mvAhbToMbusWinRemap(winNum , pAddrDecWin);
+}
+#endif /* MV_INCLUDE_PCI */
+
+
+/*******************************************************************************
+* mvCpuIfPciIfRemap - Set CPU remap register for address windows.
+*
+* DESCRIPTION:
+*
+* INPUT:
+* pciIfTarget - Peripheral target enumerator. Must be a PCI or PEX target.
+* pAddrDecWin - CPU target window information data structure.
+* Note that caller has to fill in the base field only. The
+* size field is ignored.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 0xFFFFFFFF on error, 0 if the target is neither PEX nor PCI,
+* otherwise the value returned by the matching remap routine.
+*
+*******************************************************************************/
+MV_U32 mvCpuIfPciIfRemap(MV_TARGET pciIfTarget, MV_ADDR_WIN *pAddrDecWin)
+{
+#if defined(MV_INCLUDE_PEX)
+ if (MV_TARGET_IS_PEX(pciIfTarget))
+ {
+ return mvCpuIfPexRemap(pciIfTarget,pAddrDecWin);
+ }
+#endif
+#if defined(MV_INCLUDE_PCI)
+
+ if (MV_TARGET_IS_PCI(pciIfTarget))
+ {
+ return mvCpuIfPciRemap(pciIfTarget,pAddrDecWin);
+ }
+#endif
+ return 0;
+}
+
+
+
+/*******************************************************************************
+* mvCpuIfTargetOfBaseAddressGet - Get the target according to base address
+*
+* DESCRIPTION:
+*
+* INPUT:
+* baseAddress - base address to be checked
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* The target number that baseAddress belongs to, or MAX_TARGETS if it is
+* not found.
+*
+*******************************************************************************/
+
+MV_TARGET mvCpuIfTargetOfBaseAddressGet(MV_U32 baseAddress)
+{
+ MV_CPU_DEC_WIN win;
+ MV_U32 target;
+
+ for( target = 0; target < MAX_TARGETS; target++ )
+ {
+ if( mvCpuIfTargetWinGet( target, &win ) == MV_OK )
+ {
+ if( win.enable )
+ {
+ if ((baseAddress >= win.addrWin.baseLow) &&
+ (baseAddress < win.addrWin.baseLow + win.addrWin.size)) break;
+ }
+ }
+ else return MAX_TARGETS;
+
+ }
+
+ return target;
+}
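+
+/*
+ * Illustrative usage sketch (assumption): mapping a physical address back
+ * to the target that decodes it. The address is a placeholder.
+ *
+ *   MV_TARGET t = mvCpuIfTargetOfBaseAddressGet(0xF4000000);
+ *
+ *   if (t == MAX_TARGETS)
+ *       mvOsPrintf("no CPU window decodes this address\n");
+ *   else
+ *       mvOsPrintf("address decoded by %s\n", mvCtrlTargetNameGet(t));
+ */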
+/*******************************************************************************
+* cpuTargetWinOverlap - Detect CPU address decode windows overlapping
+*
+* DESCRIPTION:
+* Unpredictable behaviour is expected in case CPU address decode
+* windows overlap.
+* This function detects CPU address decode window overlapping of a
+* specified target. The function does not check the target itself for
+* overlapping. The function also skips disabled address decode windows.
+*
+* INPUT:
+* target - Peripheral target enumerator.
+* pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE if the given address window overlaps current address
+* decode map, MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_BOOL cpuTargetWinOverlap(MV_TARGET target, MV_ADDR_WIN *pAddrWin)
+{
+ MV_U32 targetNum;
+ MV_CPU_DEC_WIN addrDecWin;
+ MV_STATUS status;
+
+
+ for(targetNum = 0; targetNum < MAX_TARGETS; targetNum++)
+ {
+#if defined(MV_RUN_FROM_FLASH)
+ if(MV_TARGET_IS_AS_BOOT(target))
+ {
+ if (MV_CHANGE_BOOT_CS(targetNum) == target)
+ continue;
+ }
+#endif /* MV_RUN_FROM_FLASH */
+
+ /* don't check our target or illegal targets */
+ if (targetNum == target)
+ {
+ continue;
+ }
+
+ /* Get window parameters */
+ status = mvCpuIfTargetWinGet(targetNum, &addrDecWin);
+ if(MV_NO_SUCH == status)
+ {
+ continue;
+ }
+ if(MV_OK != status)
+ {
+ DB(mvOsPrintf("cpuTargetWinOverlap: ERR. TargetWinGet failed\n"));
+ return MV_TRUE;
+ }
+
+ /* Do not check disabled windows */
+ if (MV_FALSE == addrDecWin.enable)
+ {
+ continue;
+ }
+
+ if(MV_TRUE == ctrlWinOverlapTest(pAddrWin, &addrDecWin.addrWin))
+ {
+ DB(mvOsPrintf(
+ "cpuTargetWinOverlap: Required target %d overlap current %d\n",
+ target, targetNum));
+ return MV_TRUE;
+ }
+ }
+
+ return MV_FALSE;
+
+}
+
+/*******************************************************************************
+* mvCpuIfAddDecShow - Print the CPU address decode map.
+*
+* DESCRIPTION:
+* This function prints the CPU address decode map.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID mvCpuIfAddDecShow(MV_VOID)
+{
+ MV_CPU_DEC_WIN win;
+ MV_U32 target;
+ mvOsOutput( "\n" );
+ mvOsOutput( "CPU Interface\n" );
+ mvOsOutput( "-------------\n" );
+
+ for( target = 0; target < MAX_TARGETS; target++ )
+ {
+
+ memset( &win, 0, sizeof(MV_CPU_DEC_WIN) );
+
+ mvOsOutput( "%s ",mvCtrlTargetNameGet(target));
+ mvOsOutput( "...." );
+
+ if( mvCpuIfTargetWinGet( target, &win ) == MV_OK )
+ {
+ if( win.enable )
+ {
+ mvOsOutput( "base %08x, ", win.addrWin.baseLow );
+ mvSizePrint( win.addrWin.size );
+ mvOsOutput( "\n" );
+
+ }
+ else
+ mvOsOutput( "disable\n" );
+ }
+ else if( mvCpuIfTargetWinGet( target, &win ) == MV_NO_SUCH )
+ {
+ mvOsOutput( "no such\n" );
+ }
+ }
+}
+
+/*******************************************************************************
+* mvCpuIfEnablePex - Enable PCI Express.
+*
+* DESCRIPTION:
+* This function enables PCI Express.
+*
+* INPUT:
+* pexIf - PEX interface number.
+* pexType - MV_PEX_ROOT_COMPLEX - root complex device
+* MV_PEX_END_POINT - end point device
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+#if defined(MV_INCLUDE_PEX)
+MV_VOID mvCpuIfEnablePex(MV_U32 pexIf, MV_PEX_TYPE pexType)
+{
+ /* Set PEX mode in case S@R does not exist */
+ if( pexType == MV_PEX_END_POINT)
+ {
+ MV_REG_BIT_RESET(PEX_CTRL_REG(pexIf),PXCR_DEV_TYPE_CTRL_MASK);
+ /* Change pex mode in capability reg */
+ MV_REG_BIT_RESET(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_CAPABILITY_REG), BIT22);
+ MV_REG_BIT_SET(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_CAPABILITY_REG), BIT20);
+
+ }
+ else
+ {
+ MV_REG_BIT_SET(PEX_CTRL_REG(pexIf),PXCR_DEV_TYPE_CTRL_MASK);
+ }
+
+ /* CPU config register Pex enable */
+ MV_REG_BIT_SET(CPU_CTRL_STAT_REG,CCSR_PCI_ACCESS_MASK);
+}
+#endif
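+
+/*
+ * Illustrative usage sketch (assumption): a board init path would enable
+ * the first PEX interface as Root Complex before scanning the bus.
+ *
+ *   #if defined(MV_INCLUDE_PEX)
+ *       mvCpuIfEnablePex(0, MV_PEX_ROOT_COMPLEX);
+ *   #endif
+ */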
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.h
new file mode 100644
index 000000000..224ed07f5
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.h
@@ -0,0 +1,120 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvCpuIfh
+#define __INCmvCpuIfh
+
+/* includes */
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/sys/mvCpuIfRegs.h"
+#include "ctrlEnv/sys/mvAhbToMbus.h"
+#include "ddr2/mvDramIf.h"
+#include "ctrlEnv/sys/mvSysDram.h"
+#if defined(MV_INCLUDE_PEX)
+#include "pex/mvPex.h"
+#endif
+
+/* defines */
+
+/* typedefs */
+/* This structure describes CPU interface address decode window */
+typedef struct _mvCpuIfDecWin
+{
+ MV_ADDR_WIN addrWin; /* An address window*/
+ MV_U32 winNum; /* Window Number in the AHB To Mbus bridge */
+ MV_BOOL enable; /* Address decode window is enabled/disabled */
+
+}MV_CPU_DEC_WIN;
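+
+/*
+ * Illustrative sketch (assumption, not part of the original header): the
+ * board address-window map passed to mvCpuIfInit() is an array of
+ * MV_CPU_DEC_WIN entries indexed by MV_TARGET, whose last entry carries
+ * the TBL_TERM terminator in its 'enable' field, e.g.:
+ *
+ *   MV_CPU_DEC_WIN boardAddrWinMap[] = {
+ *       ...one entry per MV_TARGET, filled by the board support code...
+ *       { {0, 0, 0}, 0xFFFFFFFF, TBL_TERM }
+ *   };
+ */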
+
+
+
+/* mvCpuIfLib.h API list */
+
+MV_STATUS mvCpuIfInit(MV_CPU_DEC_WIN *cpuAddrWinMap);
+MV_STATUS mvCpuIfTargetWinSet(MV_TARGET target, MV_CPU_DEC_WIN *pAddrDecWin);
+MV_STATUS mvCpuIfTargetWinGet(MV_TARGET target, MV_CPU_DEC_WIN *pAddrDecWin);
+MV_STATUS mvCpuIfTargetWinEnable(MV_TARGET target,MV_BOOL enable);
+MV_U32 mvCpuIfTargetWinSizeGet(MV_TARGET target);
+MV_U32 mvCpuIfTargetWinBaseLowGet(MV_TARGET target);
+MV_U32 mvCpuIfTargetWinBaseHighGet(MV_TARGET target);
+MV_TARGET mvCpuIfTargetOfBaseAddressGet(MV_U32 baseAddress);
+#if defined(MV_INCLUDE_PEX)
+MV_U32 mvCpuIfPexRemap(MV_TARGET pexTarget, MV_ADDR_WIN *pAddrDecWin);
+MV_VOID mvCpuIfEnablePex(MV_U32 pexIf, MV_PEX_TYPE pexType);
+#endif
+#if defined(MV_INCLUDE_PCI)
+MV_U32 mvCpuIfPciRemap(MV_TARGET pciTarget, MV_ADDR_WIN *pAddrDecWin);
+#endif
+MV_U32 mvCpuIfPciIfRemap(MV_TARGET pciTarget, MV_ADDR_WIN *pAddrDecWin);
+
+MV_VOID mvCpuIfAddDecShow(MV_VOID);
+
+#if defined(MV88F6281)
+MV_STATUS mvCpuIfBridgeReorderWAInit(void);
+#endif
+
+#endif /* __INCmvCpuIfh */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfInit.S b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfInit.S
new file mode 100644
index 000000000..b7efda02c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfInit.S
@@ -0,0 +1,163 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#define MV_ASMLANGUAGE
+#include "mvOsAsm.h"
+#include "mvDeviceId.h"
+#include "mvCtrlEnvRegs.h"
+#include "mvCpuIfRegs.h"
+#include "mvCtrlEnvAsm.h"
+
+
+/*******************************************************************************
+* mvCpuIfPreInit - Make early initialization of CPU interface.
+*
+* DESCRIPTION:
+* The function will initialize the CPU interface parameters that must
+* be initialized before any BUS activity towards the DDR interface,
+* which means it must be executed from ROM. Because of that, the function
+* is implemented in assembly code.
+* The function configures the following CPU config register parameters:
+* 1) CPU2MbusLTickDrv
+* 2) CPU2MbusLTickSample.
+* NOTE: This function must be called AFTER the internal register
+* base is modified to INTER_REGS_BASE.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+* r11 holds return function address.
+*******************************************************************************/
+#define MV88F6281_PCKG_OPT 2
+#define MV88F6192_PCKG_OPT 1
+#define MV88F6180_PCKG_OPT 0
+
+ .globl _mvCpuIfPreInit
+_mvCpuIfPreInit:
+
+ mov r11, LR /* Save link register */
+
+ /* Read device ID */
+ MV_CTRL_MODEL_GET_ASM(r4, r5);
+
+ /* goto calcConfigReg if device is 6281 */
+ ldr r5, =MV88F6281_PCKG_OPT
+ cmp r4, r5
+ beq calcConfigReg
+
+ /* goto calcConfigReg if device is 6192/6190 */
+ ldr r5, =MV88F6192_PCKG_OPT
+ cmp r4, r5
+ beq calcConfigReg
+
+ /* Else 6180 */
+ /* Get the "sample on reset" register */
+ MV_REG_READ_ASM (r4, r5, MPP_SAMPLE_AT_RESET)
+ ldr r5, =MSAR_CPUCLCK_MASK_6180
+ and r5, r4, r5
+ mov r5, r5, lsr #MSAR_CPUCLCK_OFFS_6180
+
+ ldr r4, =CPU_2_MBUSL_DDR_CLK_1x3
+ cmp r5, #CPU_2_DDR_CLK_1x3_1
+ beq setConfigReg
+
+ ldr r4, =CPU_2_MBUSL_DDR_CLK_1x4
+ cmp r5, #CPU_2_DDR_CLK_1x4_1
+ beq setConfigReg
+ b setConfigReg
+
+calcConfigReg:
+ /* Get the "sample on reset" register */
+ MV_REG_READ_ASM (r4, r5, MPP_SAMPLE_AT_RESET)
+ ldr r5, =MSAR_DDRCLCK_RTIO_MASK
+ and r5, r4, r5
+ mov r5, r5, lsr #MSAR_DDRCLCK_RTIO_OFFS
+
+ ldr r4, =CPU_2_MBUSL_DDR_CLK_1x3
+ cmp r5, #CPU_2_DDR_CLK_1x3
+ beq setConfigReg
+
+ ldr r4, =CPU_2_MBUSL_DDR_CLK_1x4
+ cmp r5, #CPU_2_DDR_CLK_1x4
+ beq setConfigReg
+
+ /* Else */
+ ldr r4, =0
+
+setConfigReg:
+ /* Read CPU Config register */
+ MV_REG_READ_ASM (r7, r5, CPU_CONFIG_REG)
+ ldr r5, =~(CCR_CPU_2_MBUSL_TICK_DRV_MASK | CCR_CPU_2_MBUSL_TICK_SMPL_MASK)
+ and r7, r7, r5 /* Clear register fields */
+ orr r7, r7, r4 /* Set the values according to the findings */
+ MV_REG_WRITE_ASM (r7, r5, CPU_CONFIG_REG)
+
+done:
+ mov PC, r11 /* r11 is saved link register */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfRegs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfRegs.h
new file mode 100644
index 000000000..8cfeee2db
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfRegs.h
@@ -0,0 +1,304 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvCpuIfRegsh
+#define __INCmvCpuIfRegsh
+
+/****************************************/
+/* ARM Control and Status Registers Map */
+/****************************************/
+
+#define CPU_CONFIG_REG 0x20100
+#define CPU_CTRL_STAT_REG 0x20104
+#define CPU_RSTOUTN_MASK_REG 0x20108
+#define CPU_SYS_SOFT_RST_REG 0x2010C
+#define CPU_AHB_MBUS_CAUSE_INT_REG 0x20110
+#define CPU_AHB_MBUS_MASK_INT_REG 0x20114
+#define CPU_FTDLL_CONFIG_REG 0x20120
+#define CPU_L2_CONFIG_REG 0x20128
+
+
+
+/* ARM Configuration register */
+/* CPU_CONFIG_REG (CCR) */
+
+
+/* Reset vector location */
+#define CCR_VEC_INIT_LOC_OFFS 1
+#define CCR_VEC_INIT_LOC_MASK BIT1
+/* reset at 0x00000000 */
+#define CCR_VEC_INIT_LOC_0000 (0 << CCR_VEC_INIT_LOC_OFFS)
+/* reset at 0xFFFF0000 */
+#define CCR_VEC_INIT_LOC_FF00 (1 << CCR_VEC_INIT_LOC_OFFS)
+
+
+#define CCR_AHB_ERROR_PROP_OFFS 2
+#define CCR_AHB_ERROR_PROP_MASK BIT2
+/* Errors are not propagated to AHB */
+#define CCR_AHB_ERROR_PROP_NO_INDICATE (0 << CCR_AHB_ERROR_PROP_OFFS)
+/* Errors are propagated to AHB */
+#define CCR_AHB_ERROR_PROP_INDICATE (1 << CCR_AHB_ERROR_PROP_OFFS)
+
+
+#define CCR_ENDIAN_INIT_OFFS 3
+#define CCR_ENDIAN_INIT_MASK BIT3
+#define CCR_ENDIAN_INIT_LITTLE (0 << CCR_ENDIAN_INIT_OFFS)
+#define CCR_ENDIAN_INIT_BIG (1 << CCR_ENDIAN_INIT_OFFS)
+
+
+#define CCR_INCR_EN_OFFS 4
+#define CCR_INCR_EN_MASK BIT4
+#define CCR_INCR_EN BIT4
+
+
+#define CCR_NCB_BLOCKING_OFFS 5
+#define CCR_NCB_BLOCKING_MASK (1 << CCR_NCB_BLOCKING_OFFS)
+#define CCR_NCB_BLOCKING_NON (0 << CCR_NCB_BLOCKING_OFFS)
+#define CCR_NCB_BLOCKING_EN (1 << CCR_NCB_BLOCKING_OFFS)
+
+#define CCR_CPU_2_MBUSL_TICK_DRV_OFFS 8
+#define CCR_CPU_2_MBUSL_TICK_DRV_MASK (0xF << CCR_CPU_2_MBUSL_TICK_DRV_OFFS)
+#define CCR_CPU_2_MBUSL_TICK_SMPL_OFFS 12
+#define CCR_CPU_2_MBUSL_TICK_SMPL_MASK (0xF << CCR_CPU_2_MBUSL_TICK_SMPL_OFFS)
+#define CCR_ICACH_PREF_BUF_ENABLE BIT16
+#define CCR_DCACH_PREF_BUF_ENABLE BIT17
+
+/* Ratio options for CPU to DDR for 6281/6192/6190 */
+#define CPU_2_DDR_CLK_1x3 4
+#define CPU_2_DDR_CLK_1x4 6
+
+/* Ratio options for CPU to DDR for 6281 only */
+#define CPU_2_DDR_CLK_2x9 7
+#define CPU_2_DDR_CLK_1x5 8
+#define CPU_2_DDR_CLK_1x6 9
+
+/* Ratio options for CPU to DDR for 6180 only */
+#define CPU_2_DDR_CLK_1x3_1 0x5
+#define CPU_2_DDR_CLK_1x4_1 0x6
+
+/* Default values for CPU to Mbus-L DDR Interface Tick Driver and */
+/* CPU to Mbus-L Tick Sample fields in CPU config register */
+
+#define TICK_DRV_1x1 0
+#define TICK_DRV_1x2 0
+#define TICK_DRV_1x3 1
+#define TICK_DRV_1x4 2
+#define TICK_SMPL_1x1 0
+#define TICK_SMPL_1x2 1
+#define TICK_SMPL_1x3 0
+#define TICK_SMPL_1x4 0
+
+#define CPU_2_MBUSL_DDR_CLK_1x2 \
+ ((TICK_DRV_1x2 << CCR_CPU_2_MBUSL_TICK_DRV_OFFS) | \
+ (TICK_SMPL_1x2 << CCR_CPU_2_MBUSL_TICK_SMPL_OFFS))
+#define CPU_2_MBUSL_DDR_CLK_1x3 \
+ ((TICK_DRV_1x3 << CCR_CPU_2_MBUSL_TICK_DRV_OFFS) | \
+ (TICK_SMPL_1x3 << CCR_CPU_2_MBUSL_TICK_SMPL_OFFS))
+#define CPU_2_MBUSL_DDR_CLK_1x4 \
+ ((TICK_DRV_1x4 << CCR_CPU_2_MBUSL_TICK_DRV_OFFS) | \
+ (TICK_SMPL_1x4 << CCR_CPU_2_MBUSL_TICK_SMPL_OFFS))
+
+/* ARM Control and Status register */
+/* CPU_CTRL_STAT_REG (CCSR) */
+
+
+/*
+This is used to block PCI Express/PCI from accessing Socrates/Feroceon GP
+while ARM boot is still in progress.
+*/
+
+#define CCSR_PCI_ACCESS_OFFS 0
+#define CCSR_PCI_ACCESS_MASK BIT0
+#define CCSR_PCI_ACCESS_ENABLE (0 << CCSR_PCI_ACCESS_OFFS)
+#define CCSR_PCI_ACCESS_DISBALE (1 << CCSR_PCI_ACCESS_OFFS)
+
+#define CCSR_ARM_RESET BIT1
+#define CCSR_SELF_INT BIT2
+#define CCSR_BIG_ENDIAN BIT15
+
+
+/* RSTOUTn Mask Register */
+/* CPU_RSTOUTN_MASK_REG (CRMR) */
+
+#define CRMR_PEX_RST_OUT_OFFS 0
+#define CRMR_PEX_RST_OUT_MASK BIT0
+#define CRMR_PEX_RST_OUT_ENABLE (1 << CRMR_PEX_RST_OUT_OFFS)
+#define CRMR_PEX_RST_OUT_DISABLE (0 << CRMR_PEX_RST_OUT_OFFS)
+
+#define CRMR_WD_RST_OUT_OFFS 1
+#define CRMR_WD_RST_OUT_MASK BIT1
+#define CRMR_WD_RST_OUT_ENABLE (1 << CRMR_WD_RST_OUT_OFFS)
+#define CRMR_WD_RST_OUT_DISBALE (0 << CRMR_WD_RST_OUT_OFFS)
+
+#define CRMR_SOFT_RST_OUT_OFFS 2
+#define CRMR_SOFT_RST_OUT_MASK BIT2
+#define CRMR_SOFT_RST_OUT_ENABLE (1 << CRMR_SOFT_RST_OUT_OFFS)
+#define CRMR_SOFT_RST_OUT_DISBALE (0 << CRMR_SOFT_RST_OUT_OFFS)
+
+/* System Software Reset Register */
+/* CPU_SYS_SOFT_RST_REG (CSSRR) */
+
+#define CSSRR_SYSTEM_SOFT_RST BIT0
+
+/* AHB to Mbus Bridge Interrupt Cause Register*/
+/* CPU_AHB_MBUS_CAUSE_INT_REG (CAMCIR) */
+
+#define CAMCIR_ARM_SELF_INT BIT0
+#define CAMCIR_ARM_TIMER0_INT_REQ BIT1
+#define CAMCIR_ARM_TIMER1_INT_REQ BIT2
+#define CAMCIR_ARM_WD_TIMER_INT_REQ BIT3
+
+
+/* AHB to Mbus Bridge Interrupt Mask Register*/
+/* CPU_AHB_MBUS_MASK_INT_REG (CAMMIR) */
+
+#define CAMCIR_ARM_SELF_INT_OFFS 0
+#define CAMCIR_ARM_SELF_INT_MASK BIT0
+#define CAMCIR_ARM_SELF_INT_EN (1 << CAMCIR_ARM_SELF_INT_OFFS)
+#define CAMCIR_ARM_SELF_INT_DIS (0 << CAMCIR_ARM_SELF_INT_OFFS)
+
+
+#define CAMCIR_ARM_TIMER0_INT_REQ_OFFS 1
+#define CAMCIR_ARM_TIMER0_INT_REQ_MASK BIT1
+#define CAMCIR_ARM_TIMER0_INT_REQ_EN (1 << CAMCIR_ARM_TIMER0_INT_REQ_OFFS)
+#define CAMCIR_ARM_TIMER0_INT_REQ_DIS (0 << CAMCIR_ARM_TIMER0_INT_REQ_OFFS)
+
+#define CAMCIR_ARM_TIMER1_INT_REQ_OFFS 2
+#define CAMCIR_ARM_TIMER1_INT_REQ_MASK BIT2
+#define CAMCIR_ARM_TIMER1_INT_REQ_EN (1 << CAMCIR_ARM_TIMER1_INT_REQ_OFFS)
+#define CAMCIR_ARM_TIMER1_INT_REQ_DIS (0 << CAMCIR_ARM_TIMER1_INT_REQ_OFFS)
+
+#define CAMCIR_ARM_WD_TIMER_INT_REQ_OFFS 3
+#define CAMCIR_ARM_WD_TIMER_INT_REQ_MASK BIT3
+#define CAMCIR_ARM_WD_TIMER_INT_REQ_EN (1 << CAMCIR_ARM_WD_TIMER_INT_REQ_OFFS)
+#define CAMCIR_ARM_WD_TIMER_INT_REQ_DIS (0 << CAMCIR_ARM_WD_TIMER_INT_REQ_OFFS)
+
+/* CPU FTDLL Config register (CFCR) fields */
+#define CFCR_FTDLL_ICACHE_TAG_OFFS 0
+#define CFCR_FTDLL_ICACHE_TAG_MASK (0x7F << CFCR_FTDLL_ICACHE_TAG_OFFS)
+#define CFCR_FTDLL_DCACHE_TAG_OFFS 8
+#define CFCR_FTDLL_DCACHE_TAG_MASK (0x7F << CFCR_FTDLL_DCACHE_TAG_OFFS)
+#define CFCR_FTDLL_OVERWRITE_ENABLE (1 << 15)
+/* For Orion 2 D2 only */
+#define CFCR_MRVL_CPU_ID_OFFS 16
+#define CFCR_MRVL_CPU_ID_MASK (0x1 << CFCR_MRVL_CPU_ID_OFFS)
+#define CFCR_ARM_CPU_ID (0x0 << CFCR_MRVL_CPU_ID_OFFS)
+#define CFCR_MRVL_CPU_ID (0x1 << CFCR_MRVL_CPU_ID_OFFS)
+#define CFCR_VFP_SUB_ARC_NUM_OFFS 7
+#define CFCR_VFP_SUB_ARC_NUM_MASK (0x1 << CFCR_VFP_SUB_ARC_NUM_OFFS)
+#define CFCR_VFP_SUB_ARC_NUM_1 (0x0 << CFCR_VFP_SUB_ARC_NUM_OFFS)
+#define CFCR_VFP_SUB_ARC_NUM_2 (0x1 << CFCR_VFP_SUB_ARC_NUM_OFFS)
+
+/* CPU_L2_CONFIG_REG fields */
+#ifdef MV_CPU_LE
+#define CL2CR_L2_ECC_EN_OFFS 2
+#define CL2CR_L2_WT_MODE_OFFS 4
+#else
+#define CL2CR_L2_ECC_EN_OFFS 26
+#define CL2CR_L2_WT_MODE_OFFS 28
+#endif
+
+#define CL2CR_L2_ECC_EN_MASK (1 << CL2CR_L2_ECC_EN_OFFS)
+#define CL2CR_L2_WT_MODE_MASK (1 << CL2CR_L2_WT_MODE_OFFS)
+
+/*******************************************/
+/* Main Interrupt Controller Registers Map */
+/*******************************************/
+
+#define CPU_MAIN_INT_CAUSE_REG 0x20200
+#define CPU_MAIN_IRQ_MASK_REG 0x20204
+#define CPU_MAIN_FIQ_MASK_REG 0x20208
+#define CPU_ENPOINT_MASK_REG 0x2020C
+#define CPU_MAIN_INT_CAUSE_HIGH_REG 0x20210
+#define CPU_MAIN_IRQ_MASK_HIGH_REG 0x20214
+#define CPU_MAIN_FIQ_MASK_HIGH_REG 0x20218
+#define CPU_ENPOINT_MASK_HIGH_REG 0x2021C
+
+
+/*******************************************/
+/* ARM Doorbell Registers Map */
+/*******************************************/
+
+#define CPU_HOST_TO_ARM_DRBL_REG 0x20400
+#define CPU_HOST_TO_ARM_MASK_REG 0x20404
+#define CPU_ARM_TO_HOST_DRBL_REG 0x20408
+#define CPU_ARM_TO_HOST_MASK_REG 0x2040C
+
+
+
+/* CPU control register map */
+/* Bits set in the mask mark the fields that are changed to the new value */
+#define CPU_CONFIG_DEFAULT_MASK (CCR_VEC_INIT_LOC_MASK | CCR_AHB_ERROR_PROP_MASK)
+
+#define CPU_CONFIG_DEFAULT (CCR_VEC_INIT_LOC_FF00)
+
+/* CPU Control and status defaults */
+#define CPU_CTRL_STAT_DEFAULT_MASK (CCSR_PCI_ACCESS_MASK)
+
+
+#define CPU_CTRL_STAT_DEFAULT (CCSR_PCI_ACCESS_ENABLE)
+
+#endif /* __INCmvCpuIfRegsh */
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.c
new file mode 100644
index 000000000..769475f8f
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.c
@@ -0,0 +1,324 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "mvSysAudio.h"
+
+/*******************************************************************************
+* mvAudioWinSet - Set AUDIO target address window
+*
+* DESCRIPTION:
+* This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+* address window, also known as address decode window.
+* After setting this target window, the AUDIO will be able to access the
+* target within the address window.
+*
+* INPUT:
+* winNum - AUDIO target address decode window number.
+* pAddrDecWin - AUDIO target window data structure.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_ERROR if address window overlaps with other address decode windows.
+* MV_BAD_PARAM if base address is invalid parameter or target is
+* unknown.
+*
+*******************************************************************************/
+MV_STATUS mvAudioWinSet(MV_U32 winNum, MV_AUDIO_DEC_WIN *pAddrDecWin)
+{
+ MV_TARGET_ATTRIB targetAttribs;
+ MV_DEC_REGS decRegs;
+
+ /* Parameter checking */
+ if (winNum >= MV_AUDIO_MAX_ADDR_DECODE_WIN)
+ {
+ mvOsPrintf("%s: ERR. Invalid win num %d\n",__FUNCTION__, winNum);
+ return MV_BAD_PARAM;
+ }
+
+ /* check if address is aligned to the size */
+ if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+ {
+ mvOsPrintf("mvAudioWinSet:Error setting AUDIO window %d to "\
+ "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+ winNum,
+ mvCtrlTargetNameGet(pAddrDecWin->target),
+ pAddrDecWin->addrWin.baseLow,
+ pAddrDecWin->addrWin.size);
+ return MV_ERROR;
+ }
+
+ decRegs.baseReg = 0;
+ decRegs.sizeReg = 0;
+
+ if (MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+ {
+ mvOsPrintf("%s: mvCtrlAddrDecToReg Failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ mvCtrlAttribGet(pAddrDecWin->target, &targetAttribs);
+
+ /* set attributes */
+ decRegs.sizeReg &= ~MV_AUDIO_WIN_ATTR_MASK;
+ decRegs.sizeReg |= (targetAttribs.attrib << MV_AUDIO_WIN_ATTR_OFFSET);
+
+ /* set target ID */
+ decRegs.sizeReg &= ~MV_AUDIO_WIN_TARGET_MASK;
+ decRegs.sizeReg |= (targetAttribs.targetId << MV_AUDIO_WIN_TARGET_OFFSET);
+
+ if (pAddrDecWin->enable == MV_TRUE)
+ {
+ decRegs.sizeReg |= MV_AUDIO_WIN_ENABLE_MASK;
+ }
+ else
+ {
+ decRegs.sizeReg &= ~MV_AUDIO_WIN_ENABLE_MASK;
+ }
+
+ MV_REG_WRITE( MV_AUDIO_WIN_CTRL_REG(winNum), decRegs.sizeReg);
+ MV_REG_WRITE( MV_AUDIO_WIN_BASE_REG(winNum), decRegs.baseReg);
+
+ return MV_OK;
+}
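+
+/*
+ * Illustrative usage sketch (assumption): pointing audio window 0 at DRAM
+ * bank 0, mirroring what mvAudioInit() below does for every window. The
+ * size value is a placeholder and must keep the base aligned to it.
+ *
+ *   MV_AUDIO_DEC_WIN aWin;
+ *
+ *   aWin.addrWin.baseLow  = 0;
+ *   aWin.addrWin.baseHigh = 0;
+ *   aWin.addrWin.size     = 0x10000000;
+ *   aWin.target           = SDRAM_CS0;
+ *   aWin.enable           = MV_TRUE;
+ *
+ *   if (mvAudioWinSet(0, &aWin) != MV_OK)
+ *       mvOsPrintf("audio window setup failed\n");
+ */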
+
+/*******************************************************************************
+* mvAudioWinGet - Get AUDIO peripheral target address window.
+*
+* DESCRIPTION:
+* Get AUDIO peripheral target address window.
+*
+* INPUT:
+* winNum - AUDIO target address decode window number.
+*
+* OUTPUT:
+* pAddrDecWin - AUDIO target window data structure.
+*
+* RETURN:
+* MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvAudioWinGet(MV_U32 winNum, MV_AUDIO_DEC_WIN *pAddrDecWin)
+{
+ MV_DEC_REGS decRegs;
+ MV_TARGET_ATTRIB targetAttrib;
+
+ /* Parameter checking */
+ if (winNum >= MV_AUDIO_MAX_ADDR_DECODE_WIN)
+ {
+ mvOsPrintf("%s : ERR. Invalid winNum %d\n",
+ __FUNCTION__, winNum);
+ return MV_NOT_SUPPORTED;
+ }
+
+ decRegs.baseReg = MV_REG_READ( MV_AUDIO_WIN_BASE_REG(winNum) );
+ decRegs.sizeReg = MV_REG_READ( MV_AUDIO_WIN_CTRL_REG(winNum) );
+
+ if (MV_OK != mvCtrlRegToAddrDec(&decRegs, &pAddrDecWin->addrWin) )
+ {
+ mvOsPrintf("%s: mvCtrlRegToAddrDec Failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ /* attrib and targetId */
+ targetAttrib.attrib = (decRegs.sizeReg & MV_AUDIO_WIN_ATTR_MASK) >>
+ MV_AUDIO_WIN_ATTR_OFFSET;
+ targetAttrib.targetId = (decRegs.sizeReg & MV_AUDIO_WIN_TARGET_MASK) >>
+ MV_AUDIO_WIN_TARGET_OFFSET;
+
+ pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+ /* Check if window is enabled */
+ if(decRegs.sizeReg & MV_AUDIO_WIN_ENABLE_MASK)
+ {
+ pAddrDecWin->enable = MV_TRUE;
+ }
+ else
+ {
+ pAddrDecWin->enable = MV_FALSE;
+ }
+ return MV_OK;
+}
+/*******************************************************************************
+* mvAudioAddrDecShow - Print the AUDIO address decode map.
+*
+* DESCRIPTION:
+* This function prints the AUDIO address decode map.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID mvAudioAddrDecShow(MV_VOID)
+{
+
+ MV_AUDIO_DEC_WIN win;
+ int i;
+
+ if (MV_FALSE == mvCtrlPwrClckGet(AUDIO_UNIT_ID, 0))
+ return;
+
+
+ mvOsOutput( "\n" );
+ mvOsOutput( "AUDIO:\n" );
+ mvOsOutput( "----\n" );
+
+ for( i = 0; i < MV_AUDIO_MAX_ADDR_DECODE_WIN; i++ )
+ {
+ memset( &win, 0, sizeof(MV_AUDIO_DEC_WIN) );
+
+ mvOsOutput( "win%d - ", i );
+
+ if( mvAudioWinGet( i, &win ) == MV_OK )
+ {
+ if( win.enable )
+ {
+ mvOsOutput( "%s base %08x, ",
+ mvCtrlTargetNameGet(win.target), win.addrWin.baseLow );
+ mvOsOutput( "...." );
+
+ mvSizePrint( win.addrWin.size );
+
+ mvOsOutput( "\n" );
+ }
+ else
+ mvOsOutput( "disable\n" );
+ }
+ }
+}
+
+
+/*******************************************************************************
+* mvAudioInit - Initialize the integrated AUDIO unit and its address decode windows.
+*
+* DESCRIPTION:
+* Initialize the AUDIO peripheral target address window.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvAudioInit(MV_VOID)
+{
+ int winNum;
+ MV_AUDIO_DEC_WIN audioWin;
+ MV_CPU_DEC_WIN cpuAddrDecWin;
+ MV_U32 status;
+
+ mvAudioHalInit();
+
+ /* Initiate Audio address decode */
+
+ /* First disable all address decode windows */
+ for(winNum = 0; winNum < MV_AUDIO_MAX_ADDR_DECODE_WIN; winNum++)
+ {
+ MV_U32 regVal = MV_REG_READ(MV_AUDIO_WIN_CTRL_REG(winNum));
+ regVal &= ~MV_AUDIO_WIN_ENABLE_MASK;
+ MV_REG_WRITE(MV_AUDIO_WIN_CTRL_REG(winNum), regVal);
+ }
+
+ for(winNum = 0; winNum < MV_AUDIO_MAX_ADDR_DECODE_WIN; winNum++)
+ {
+
+ /* We will set the window to DRAM_CS0 by default */
+ /* first get attributes from CPU If */
+ status = mvCpuIfTargetWinGet(SDRAM_CS0,
+ &cpuAddrDecWin);
+
+ if (MV_OK != status)
+ {
+ mvOsPrintf("%s: ERR. mvCpuIfTargetWinGet failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ if (cpuAddrDecWin.enable == MV_TRUE)
+ {
+ audioWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+ audioWin.addrWin.baseLow = cpuAddrDecWin.addrWin.baseLow;
+ audioWin.addrWin.size = cpuAddrDecWin.addrWin.size;
+ audioWin.enable = MV_TRUE;
+ audioWin.target = SDRAM_CS0;
+
+ if(MV_OK != mvAudioWinSet(winNum, &audioWin))
+ {
+ return MV_ERROR;
+ }
+ }
+ }
+
+ return MV_OK;
+}
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.h
new file mode 100644
index 000000000..f59eb9a90
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.h
@@ -0,0 +1,123 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __INCMVSysAudioH
+#define __INCMVSysAudioH
+
+#include "mvCommon.h"
+#include "audio/mvAudio.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+/***********************************/
+/* Audio Address Decoding registers*/
+/***********************************/
+
+#define MV_AUDIO_MAX_ADDR_DECODE_WIN 2
+#define MV_AUDIO_RECORD_WIN_NUM 0
+#define MV_AUDIO_PLAYBACK_WIN_NUM 1
+
+#define MV_AUDIO_WIN_CTRL_REG(win) (AUDIO_REG_BASE + 0xA04 + ((win)<<3))
+#define MV_AUDIO_WIN_BASE_REG(win) (AUDIO_REG_BASE + 0xA00 + ((win)<<3))
+
+#define MV_AUDIO_RECORD_WIN_CTRL_REG MV_AUDIO_WIN_CTRL_REG(MV_AUDIO_RECORD_WIN_NUM)
+#define MV_AUDIO_RECORD_WIN_BASE_REG MV_AUDIO_WIN_BASE_REG(MV_AUDIO_RECORD_WIN_NUM)
+#define MV_AUDIO_PLAYBACK_WIN_CTRL_REG MV_AUDIO_WIN_CTRL_REG(MV_AUDIO_PLAYBACK_WIN_NUM)
+#define MV_AUDIO_PLAYBACK_WIN_BASE_REG MV_AUDIO_WIN_BASE_REG(MV_AUDIO_PLAYBACK_WIN_NUM)
+
+
+/* BITs in Windows 0-3 Control and Base Registers */
+#define MV_AUDIO_WIN_ENABLE_BIT 0
+#define MV_AUDIO_WIN_ENABLE_MASK (1<<MV_AUDIO_WIN_ENABLE_BIT)
+
+#define MV_AUDIO_WIN_TARGET_OFFSET 4
+#define MV_AUDIO_WIN_TARGET_MASK (0xF<<MV_AUDIO_WIN_TARGET_OFFSET)
+
+#define MV_AUDIO_WIN_ATTR_OFFSET 8
+#define MV_AUDIO_WIN_ATTR_MASK (0xFF<<MV_AUDIO_WIN_ATTR_OFFSET)
+
+#define MV_AUDIO_WIN_SIZE_OFFSET 16
+#define MV_AUDIO_WIN_SIZE_MASK (0xFFFF<<MV_AUDIO_WIN_SIZE_OFFSET)
+
+#define MV_AUDIO_WIN_BASE_OFFSET 16
+#define MV_AUDIO_WIN_BASE_MASK (0xFFFF<<MV_AUDIO_WIN_BASE_OFFSET)
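+
+/*
+ * Illustrative note (not part of the original header): an enabled window
+ * control register value can be composed from the fields above, e.g.
+ *
+ *     ctrl = ((targetId << MV_AUDIO_WIN_TARGET_OFFSET) & MV_AUDIO_WIN_TARGET_MASK) |
+ *            ((attrib   << MV_AUDIO_WIN_ATTR_OFFSET)   & MV_AUDIO_WIN_ATTR_MASK)   |
+ *            ((size     << MV_AUDIO_WIN_SIZE_OFFSET)   & MV_AUDIO_WIN_SIZE_MASK)   |
+ *            MV_AUDIO_WIN_ENABLE_MASK;
+ *
+ * which mirrors the field extraction done by mvAudioWinGet() in mvSysAudio.c.
+ */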
+
+
+typedef struct _mvAudioDecWin
+{
+ MV_TARGET target;
+ MV_ADDR_WIN addrWin; /* An address window*/
+ MV_BOOL enable; /* Address decode window is enabled/disabled */
+
+} MV_AUDIO_DEC_WIN;
+
+
+MV_STATUS mvAudioInit(MV_VOID);
+MV_STATUS mvAudioWinGet(MV_U32 winNum, MV_AUDIO_DEC_WIN *pAddrDecWin);
+MV_STATUS mvAudioWinSet(MV_U32 winNum, MV_AUDIO_DEC_WIN *pAddrDecWin);
+MV_STATUS mvAudioWinInit(MV_VOID);
+MV_VOID mvAudioAddrDecShow(MV_VOID);
+
+
+#endif
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.c
new file mode 100644
index 000000000..84d0cd0bd
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.c
@@ -0,0 +1,382 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvSysCesa.h"
+
+#if (MV_CESA_VERSION >= 2)
+MV_TARGET tdmaAddrDecPrioTable[] =
+{
+#if defined(MV_INCLUDE_SDRAM_CS0)
+ SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+ SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+ SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+ SDRAM_CS3,
+#endif
+#if defined(MV_INCLUDE_PEX)
+ PEX0_MEM,
+#endif
+
+ TBL_TERM
+};
+
+/*******************************************************************************
+* mvCesaWinGet - Get TDMA target address window.
+*
+* DESCRIPTION:
+* Get TDMA target address window.
+*
+* INPUT:
+* winNum - TDMA target address decode window number.
+*
+* OUTPUT:
+* pDecWin - TDMA target window data structure.
+*
+* RETURN:
+* MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+static MV_STATUS mvCesaWinGet(MV_U32 winNum, MV_DEC_WIN *pDecWin)
+{
+ MV_DEC_WIN_PARAMS winParam;
+ MV_U32 sizeReg, baseReg;
+
+ /* Parameter checking */
+ if (winNum >= MV_CESA_TDMA_ADDR_DEC_WIN)
+ {
+ mvOsPrintf("%s : ERR. Invalid winNum %d\n",
+ __FUNCTION__, winNum);
+ return MV_NOT_SUPPORTED;
+ }
+
+ baseReg = MV_REG_READ( MV_CESA_TDMA_BASE_ADDR_REG(winNum) );
+ sizeReg = MV_REG_READ( MV_CESA_TDMA_WIN_CTRL_REG(winNum) );
+
+ /* Check if window is enabled */
+ if(sizeReg & MV_CESA_TDMA_WIN_ENABLE_MASK)
+ {
+ pDecWin->enable = MV_TRUE;
+
+ /* Extract window parameters from registers */
+ winParam.targetId = (sizeReg & MV_CESA_TDMA_WIN_TARGET_MASK) >> MV_CESA_TDMA_WIN_TARGET_OFFSET;
+ winParam.attrib = (sizeReg & MV_CESA_TDMA_WIN_ATTR_MASK) >> MV_CESA_TDMA_WIN_ATTR_OFFSET;
+ winParam.size = (sizeReg & MV_CESA_TDMA_WIN_SIZE_MASK) >> MV_CESA_TDMA_WIN_SIZE_OFFSET;
+ winParam.baseAddr = (baseReg & MV_CESA_TDMA_WIN_BASE_MASK);
+
+ /* Translate the decode window parameters to address decode struct */
+ if (MV_OK != mvCtrlParamsToAddrDec(&winParam, pDecWin))
+ {
+ mvOsPrintf("Failed to translate register parameters to CESA address" \
+ " decode window structure\n");
+ return MV_ERROR;
+ }
+ }
+ else
+ {
+ pDecWin->enable = MV_FALSE;
+ }
+ return MV_OK;
+}
+
+/*******************************************************************************
+* cesaWinOverlapDetect - Detect CESA TDMA address windows overlapping
+*
+* DESCRIPTION:
+*       Unpredictable behaviour is expected if TDMA address decode
+*       windows overlap.
+*       This function detects whether a specified TDMA address decode window
+*       overlaps any other window. The function does not check the window
+*       itself for overlapping and skips disabled address decode windows.
+*
+* INPUT:
+* winNum - address decode window number.
+* pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE - if the given address window overlap current address
+* decode map,
+* MV_FALSE - otherwise, MV_ERROR if reading invalid data
+* from registers.
+*
+*******************************************************************************/
+static MV_STATUS cesaWinOverlapDetect(MV_U32 winNum, MV_ADDR_WIN *pAddrWin)
+{
+ MV_U32 winNumIndex;
+ MV_DEC_WIN addrDecWin;
+
+ for(winNumIndex=0; winNumIndex<MV_CESA_TDMA_ADDR_DEC_WIN; winNumIndex++)
+ {
+ /* Do not check window itself */
+ if (winNumIndex == winNum)
+ {
+ continue;
+ }
+
+ /* Get window parameters */
+ if (MV_OK != mvCesaWinGet(winNumIndex, &addrDecWin))
+ {
+ mvOsPrintf("%s: ERR. TargetWinGet failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ /* Do not check disabled windows */
+ if(addrDecWin.enable == MV_FALSE)
+ {
+ continue;
+ }
+
+ if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+ {
+ return MV_TRUE;
+ }
+ }
+ return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvCesaTdmaWinSet - Set CESA TDMA target address window
+*
+* DESCRIPTION:
+* This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+* address window, also known as address decode window.
+* After setting this target window, the CESA TDMA will be able to access the
+* target within the address window.
+*
+* INPUT:
+* winNum - CESA TDMA target address decode window number.
+* pAddrDecWin - CESA TDMA target window data structure.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_ERROR - if address window overlaps with other address decode windows.
+* MV_BAD_PARAM - if base address is invalid parameter or target is
+* unknown.
+*
+*******************************************************************************/
+static MV_STATUS mvCesaTdmaWinSet(MV_U32 winNum, MV_DEC_WIN *pDecWin)
+{
+ MV_DEC_WIN_PARAMS winParams;
+ MV_U32 sizeReg, baseReg;
+
+ /* Parameter checking */
+ if (winNum >= MV_CESA_TDMA_ADDR_DEC_WIN)
+ {
+ mvOsPrintf("mvCesaTdmaWinSet: ERR. Invalid win num %d\n",winNum);
+ return MV_BAD_PARAM;
+ }
+
+	/* Check if the requested window overlaps with current windows */
+ if (MV_TRUE == cesaWinOverlapDetect(winNum, &pDecWin->addrWin))
+ {
+ mvOsPrintf("%s: ERR. Window %d overlap\n", __FUNCTION__, winNum);
+ return MV_ERROR;
+ }
+
+ /* check if address is aligned to the size */
+ if(MV_IS_NOT_ALIGN(pDecWin->addrWin.baseLow, pDecWin->addrWin.size))
+ {
+ mvOsPrintf("mvCesaTdmaWinSet: Error setting CESA TDMA window %d to "\
+ "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+ winNum,
+ mvCtrlTargetNameGet(pDecWin->target),
+ pDecWin->addrWin.baseLow,
+ pDecWin->addrWin.size);
+ return MV_ERROR;
+ }
+
+ if(MV_OK != mvCtrlAddrDecToParams(pDecWin, &winParams))
+ {
+ mvOsPrintf("%s: mvCtrlAddrDecToParams Failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ /* set Size, Attributes and TargetID */
+ sizeReg = (((winParams.targetId << MV_CESA_TDMA_WIN_TARGET_OFFSET) & MV_CESA_TDMA_WIN_TARGET_MASK) |
+ ((winParams.attrib << MV_CESA_TDMA_WIN_ATTR_OFFSET) & MV_CESA_TDMA_WIN_ATTR_MASK) |
+ ((winParams.size << MV_CESA_TDMA_WIN_SIZE_OFFSET) & MV_CESA_TDMA_WIN_SIZE_MASK));
+
+ if (pDecWin->enable == MV_TRUE)
+ {
+ sizeReg |= MV_CESA_TDMA_WIN_ENABLE_MASK;
+ }
+ else
+ {
+ sizeReg &= ~MV_CESA_TDMA_WIN_ENABLE_MASK;
+ }
+
+ /* Update Base value */
+ baseReg = (winParams.baseAddr & MV_CESA_TDMA_WIN_BASE_MASK);
+
+ MV_REG_WRITE( MV_CESA_TDMA_WIN_CTRL_REG(winNum), sizeReg);
+ MV_REG_WRITE( MV_CESA_TDMA_BASE_ADDR_REG(winNum), baseReg);
+
+ return MV_OK;
+}
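+
+/*
+ * Usage sketch (illustrative only, not part of the original file): map the
+ * first TDMA window onto a 256MB region of DRAM_CS0 from within this file
+ * (the setter is static). mvCesaTdmaAddrDecInit() below does the same for
+ * every entry of tdmaAddrDecPrioTable[], taking the geometry from the CPU
+ * interface windows instead of hard-coded values.
+ *
+ *     MV_DEC_WIN win;
+ *
+ *     win.target           = SDRAM_CS0;
+ *     win.addrWin.baseHigh = 0;
+ *     win.addrWin.baseLow  = 0x00000000;
+ *     win.addrWin.size     = 0x10000000;
+ *     win.enable           = MV_TRUE;
+ *
+ *     if (mvCesaTdmaWinSet(0, &win) != MV_OK)
+ *         mvOsPrintf("CESA TDMA window 0 setup failed\n");
+ */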
+
+
+static MV_STATUS mvCesaTdmaAddrDecInit (void)
+{
+ MV_U32 winNum;
+ MV_STATUS status;
+ MV_CPU_DEC_WIN cpuAddrDecWin;
+ MV_DEC_WIN cesaWin;
+ MV_U32 winPrioIndex = 0;
+
+ /* First disable all address decode windows */
+ for(winNum=0; winNum<MV_CESA_TDMA_ADDR_DEC_WIN; winNum++)
+ {
+ MV_REG_BIT_RESET(MV_CESA_TDMA_WIN_CTRL_REG(winNum), MV_CESA_TDMA_WIN_ENABLE_MASK);
+ }
+
+ /* Go through all windows in user table until table terminator */
+ winNum = 0;
+ while( (tdmaAddrDecPrioTable[winPrioIndex] != TBL_TERM) &&
+ (winNum < MV_CESA_TDMA_ADDR_DEC_WIN) ) {
+
+ /* first get attributes from CPU If */
+ status = mvCpuIfTargetWinGet(tdmaAddrDecPrioTable[winPrioIndex],
+ &cpuAddrDecWin);
+ if(MV_NO_SUCH == status){
+ winPrioIndex++;
+ continue;
+ }
+
+ if (MV_OK != status)
+ {
+ mvOsPrintf("cesaInit: TargetWinGet failed. winNum=%d, winIdx=%d, target=%d, status=0x%x\n",
+ winNum, winPrioIndex, tdmaAddrDecPrioTable[winPrioIndex], status);
+ return MV_ERROR;
+ }
+ if (cpuAddrDecWin.enable == MV_TRUE)
+ {
+ cesaWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+ cesaWin.addrWin.baseLow = cpuAddrDecWin.addrWin.baseLow;
+ cesaWin.addrWin.size = cpuAddrDecWin.addrWin.size;
+ cesaWin.enable = MV_TRUE;
+ cesaWin.target = tdmaAddrDecPrioTable[winPrioIndex];
+
+#if defined(MV646xx)
+ /* Get the default attributes for that target window */
+ mvCtrlDefAttribGet(cesaWin.target, &cesaWin.addrWinAttr);
+#endif /* MV646xx */
+
+ if(MV_OK != mvCesaTdmaWinSet(winNum, &cesaWin))
+ {
+ mvOsPrintf("mvCesaTdmaWinSet FAILED: winNum=%d\n",
+ winNum);
+ return MV_ERROR;
+ }
+ winNum++;
+ }
+ winPrioIndex++;
+ }
+ return MV_OK;
+}
+#endif /* MV_CESA_VERSION >= 2 */
+
+
+
+
+MV_STATUS mvCesaInit (int numOfSession, int queueDepth, char* pSramBase, void *osHandle)
+{
+ MV_U32 cesaCryptEngBase;
+ MV_CPU_DEC_WIN addrDecWin;
+
+ if(sizeof(MV_CESA_SRAM_MAP) > MV_CESA_SRAM_SIZE)
+ {
+ mvOsPrintf("mvCesaInit: Wrong SRAM map - %ld > %d\n",
+ sizeof(MV_CESA_SRAM_MAP), MV_CESA_SRAM_SIZE);
+ return MV_FAIL;
+ }
+#if 0
+ if (mvCpuIfTargetWinGet(CRYPT_ENG, &addrDecWin) == MV_OK)
+ cesaCryptEngBase = addrDecWin.addrWin.baseLow;
+ else
+ {
+ mvOsPrintf("mvCesaInit: ERR. mvCpuIfTargetWinGet failed\n");
+ return MV_ERROR;
+ }
+#else
+ cesaCryptEngBase = (MV_U32)pSramBase;
+#endif
+
+#if 0 /* Already done in the platform init */
+#if (MV_CESA_VERSION >= 2)
+ mvCesaTdmaAddrDecInit();
+#endif /* MV_CESA_VERSION >= 2 */
+#endif
+ return mvCesaHalInit(numOfSession, queueDepth, pSramBase, cesaCryptEngBase,
+ osHandle);
+
+}
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.h
new file mode 100644
index 000000000..73bcdc583
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvSysCesa_h__
+#define __mvSysCesa_h__
+
+
+#include "mvCommon.h"
+#include "cesa/mvCesa.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+/***************************** TDMA Registers *************************************/
+
+#define MV_CESA_TDMA_ADDR_DEC_WIN 4
+
+#define MV_CESA_TDMA_BASE_ADDR_REG(win) (MV_CESA_TDMA_REG_BASE + 0xa00 + (win<<3))
+
+#define MV_CESA_TDMA_WIN_CTRL_REG(win) (MV_CESA_TDMA_REG_BASE + 0xa04 + (win<<3))
+
+#define MV_CESA_TDMA_WIN_ENABLE_BIT 0
+#define MV_CESA_TDMA_WIN_ENABLE_MASK (1 << MV_CESA_TDMA_WIN_ENABLE_BIT)
+
+#define MV_CESA_TDMA_WIN_TARGET_OFFSET 4
+#define MV_CESA_TDMA_WIN_TARGET_MASK (0xf << MV_CESA_TDMA_WIN_TARGET_OFFSET)
+
+#define MV_CESA_TDMA_WIN_ATTR_OFFSET 8
+#define MV_CESA_TDMA_WIN_ATTR_MASK (0xff << MV_CESA_TDMA_WIN_ATTR_OFFSET)
+
+#define MV_CESA_TDMA_WIN_SIZE_OFFSET 16
+#define MV_CESA_TDMA_WIN_SIZE_MASK (0xFFFF << MV_CESA_TDMA_WIN_SIZE_OFFSET)
+
+#define MV_CESA_TDMA_WIN_BASE_OFFSET 16
+#define MV_CESA_TDMA_WIN_BASE_MASK (0xFFFF << MV_CESA_TDMA_WIN_BASE_OFFSET)
+
+
+MV_STATUS mvCesaInit (int numOfSession, int queueDepth, char* pSramBase, void *osHandle);
+
+#endif
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.c
new file mode 100644
index 000000000..6f76c2caa
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.c
@@ -0,0 +1,348 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/* includes */
+
+#include "ddr2/mvDramIf.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "ctrlEnv/sys/mvSysDram.h"
+
+/* #define MV_DEBUG */
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+static MV_BOOL sdramIfWinOverlap(MV_TARGET target, MV_ADDR_WIN *pAddrWin);
+
+/*******************************************************************************
+* mvDramIfWinSet - Set DRAM interface address decode window
+*
+* DESCRIPTION:
+* This function sets DRAM interface address decode window.
+*
+* INPUT:
+* target - System target. Use only SDRAM targets.
+* pAddrDecWin - SDRAM address window structure.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* MV_BAD_PARAM if parameters are invalid or window is invalid, MV_OK
+* otherwise.
+*******************************************************************************/
+MV_STATUS mvDramIfWinSet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin)
+{
+ MV_U32 baseReg=0,sizeReg=0;
+ MV_U32 baseToReg=0 , sizeToReg=0;
+
+ /* Check parameters */
+ if (!MV_TARGET_IS_DRAM(target))
+ {
+ mvOsPrintf("mvDramIfWinSet: target %d is not SDRAM\n", target);
+ return MV_BAD_PARAM;
+ }
+
+ /* Check if the requested window overlaps with current enabled windows */
+ if (MV_TRUE == sdramIfWinOverlap(target, &pAddrDecWin->addrWin))
+ {
+ mvOsPrintf("mvDramIfWinSet: ERR. Target %d overlaps\n", target);
+ return MV_BAD_PARAM;
+ }
+
+ /* check if address is aligned to the size */
+ if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+ {
+ mvOsPrintf("mvDramIfWinSet:Error setting DRAM interface window %d."\
+ "\nAddress 0x%08x is unaligned to size 0x%x.\n",
+ target,
+ pAddrDecWin->addrWin.baseLow,
+ pAddrDecWin->addrWin.size);
+ return MV_ERROR;
+ }
+
+ /* read base register*/
+ baseReg = MV_REG_READ(SDRAM_BASE_ADDR_REG(0,target));
+
+ /* read size register */
+ sizeReg = MV_REG_READ(SDRAM_SIZE_REG(0,target));
+
+ /* BaseLow[31:16] => base register [31:16] */
+ baseToReg = pAddrDecWin->addrWin.baseLow & SCBAR_BASE_MASK;
+
+ /* Write to address decode Base Address Register */
+ baseReg &= ~SCBAR_BASE_MASK;
+ baseReg |= baseToReg;
+
+ /* Translate the given window size to register format */
+ sizeToReg = ctrlSizeToReg(pAddrDecWin->addrWin.size, SCSR_SIZE_ALIGNMENT);
+
+ /* Size parameter validity check. */
+ if (-1 == sizeToReg)
+ {
+ mvOsPrintf("mvCtrlAddrDecToReg: ERR. Win %d size invalid.\n",target);
+ return MV_BAD_PARAM;
+ }
+
+ /* set size */
+ sizeReg &= ~SCSR_SIZE_MASK;
+ /* Size is located at upper 16 bits */
+ sizeReg |= (sizeToReg << SCSR_SIZE_OFFS);
+
+ /* enable/Disable */
+ if (MV_TRUE == pAddrDecWin->enable)
+ {
+ sizeReg |= SCSR_WIN_EN;
+ }
+ else
+ {
+ sizeReg &= ~SCSR_WIN_EN;
+ }
+
+ /* 3) Write to address decode Base Address Register */
+ MV_REG_WRITE(SDRAM_BASE_ADDR_REG(0,target), baseReg);
+
+ /* Write to address decode Size Register */
+ MV_REG_WRITE(SDRAM_SIZE_REG(0,target), sizeReg);
+
+ return MV_OK;
+}
+/*******************************************************************************
+* mvDramIfWinGet - Get DRAM interface address decode window
+*
+* DESCRIPTION:
+* This function gets DRAM interface address decode window.
+*
+* INPUT:
+* target - System target. Use only SDRAM targets.
+*
+* OUTPUT:
+* pAddrDecWin - SDRAM address window structure.
+*
+* RETURN:
+* MV_BAD_PARAM if parameters are invalid or window is invalid, MV_OK
+* otherwise.
+*******************************************************************************/
+MV_STATUS mvDramIfWinGet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin)
+{
+ MV_U32 baseReg,sizeReg;
+ MV_U32 sizeRegVal;
+ /* Check parameters */
+ if (!MV_TARGET_IS_DRAM(target))
+ {
+ mvOsPrintf("mvDramIfWinGet: target %d is Illigal\n", target);
+ return MV_ERROR;
+ }
+
+ /* Read base and size registers */
+ sizeReg = MV_REG_READ(SDRAM_SIZE_REG(0,target));
+ baseReg = MV_REG_READ(SDRAM_BASE_ADDR_REG(0,target));
+
+ sizeRegVal = (sizeReg & SCSR_SIZE_MASK) >> SCSR_SIZE_OFFS;
+
+ pAddrDecWin->addrWin.size = ctrlRegToSize(sizeRegVal,
+ SCSR_SIZE_ALIGNMENT);
+
+ /* Check if ctrlRegToSize returned OK */
+ if (-1 == pAddrDecWin->addrWin.size)
+ {
+ mvOsPrintf("mvDramIfWinGet: size of target %d is Illigal\n", target);
+ return MV_ERROR;
+ }
+
+ /* Extract base address */
+ /* Base register [31:16] ==> baseLow[31:16] */
+ pAddrDecWin->addrWin.baseLow = baseReg & SCBAR_BASE_MASK;
+
+ pAddrDecWin->addrWin.baseHigh = 0;
+
+
+ if (sizeReg & SCSR_WIN_EN)
+ {
+ pAddrDecWin->enable = MV_TRUE;
+ }
+ else
+ {
+ pAddrDecWin->enable = MV_FALSE;
+ }
+
+ return MV_OK;
+}
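+
+/*
+ * Usage sketch (illustrative only, not part of the original file): walk the
+ * DRAM chip selects and print every enabled window, much like the overlap
+ * check below does internally.
+ *
+ *     MV_TARGET cs;
+ *     MV_DRAM_DEC_WIN win;
+ *
+ *     for (cs = SDRAM_CS0; cs < MV_DRAM_MAX_CS; cs++)
+ *         if ((mvDramIfWinGet(cs, &win) == MV_OK) && win.enable)
+ *             mvOsPrintf("CS%d: base 0x%08x, size 0x%x\n",
+ *                        cs - SDRAM_CS0, win.addrWin.baseLow, win.addrWin.size);
+ */
+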
+/*******************************************************************************
+* mvDramIfWinEnable - Enable/Disable SDRAM address decode window
+*
+* DESCRIPTION:
+*       This function enables/disables an SDRAM address decode window.
+*
+* INPUT:
+* target - System target. Use only SDRAM targets.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_ERROR in case function parameters are invalid, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvDramIfWinEnable(MV_TARGET target, MV_BOOL enable)
+{
+ MV_DRAM_DEC_WIN addrDecWin;
+
+ /* Check parameters */
+ if (!MV_TARGET_IS_DRAM(target))
+ {
+ mvOsPrintf("mvDramIfWinEnable: target %d is Illigal\n", target);
+ return MV_ERROR;
+ }
+
+ if (enable == MV_TRUE)
+ { /* First check for overlap with other enabled windows */
+ if (MV_OK != mvDramIfWinGet(target, &addrDecWin))
+ {
+ mvOsPrintf("mvDramIfWinEnable:ERR. Getting target %d failed.\n",
+ target);
+ return MV_ERROR;
+ }
+ /* Check for overlapping */
+ if (MV_FALSE == sdramIfWinOverlap(target, &(addrDecWin.addrWin)))
+ {
+ /* No Overlap. Enable address decode winNum window */
+ MV_REG_BIT_SET(SDRAM_SIZE_REG(0,target), SCSR_WIN_EN);
+ }
+ else
+ { /* Overlap detected */
+ mvOsPrintf("mvDramIfWinEnable: ERR. Target %d overlap detect\n",
+ target);
+ return MV_ERROR;
+ }
+ }
+ else
+ { /* Disable address decode winNum window */
+ MV_REG_BIT_RESET(SDRAM_SIZE_REG(0, target), SCSR_WIN_EN);
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* sdramIfWinOverlap - Check if an address window overlaps an SDRAM address window
+*
+* DESCRIPTION:
+*       This function scans each SDRAM address decode window to test whether it
+*       overlaps the given address window.
+*
+* INPUT:
+*       target      - SDRAM target that the function skips when checking.
+* pAddrDecWin - The tested address window for overlapping with
+* SDRAM windows.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE if the given address window overlaps any enabled address
+* decode map, MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_BOOL sdramIfWinOverlap(MV_TARGET target, MV_ADDR_WIN *pAddrWin)
+{
+ MV_TARGET targetNum;
+ MV_DRAM_DEC_WIN addrDecWin;
+
+ for(targetNum = SDRAM_CS0; targetNum < MV_DRAM_MAX_CS ; targetNum++)
+ {
+ /* don't check our winNum or illegal targets */
+ if (targetNum == target)
+ {
+ continue;
+ }
+
+ /* Get window parameters */
+ if (MV_OK != mvDramIfWinGet(targetNum, &addrDecWin))
+ {
+ mvOsPrintf("sdramIfWinOverlap: ERR. TargetWinGet failed\n");
+ return MV_ERROR;
+ }
+
+ /* Do not check disabled windows */
+ if (MV_FALSE == addrDecWin.enable)
+ {
+ continue;
+ }
+
+ if(MV_TRUE == ctrlWinOverlapTest(pAddrWin, &addrDecWin.addrWin))
+ {
+ mvOsPrintf(
+ "sdramIfWinOverlap: Required target %d overlap winNum %d\n",
+ target, targetNum);
+ return MV_TRUE;
+ }
+ }
+
+ return MV_FALSE;
+}
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.h
new file mode 100644
index 000000000..7bd9c9de2
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.h
@@ -0,0 +1,80 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __sysDram
+#define __sysDram
+
+/* This structure describes CPU interface address decode window */
+typedef struct _mvDramIfDecWin
+{
+ MV_ADDR_WIN addrWin; /* An address window*/
+ MV_BOOL enable; /* Address decode window is enabled/disabled */
+}MV_DRAM_DEC_WIN;
+
+MV_STATUS mvDramIfWinSet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin);
+MV_STATUS mvDramIfWinGet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin);
+MV_STATUS mvDramIfWinEnable(MV_TARGET target, MV_BOOL enable);
+
+#endif
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.c
new file mode 100644
index 000000000..7f6e4a59d
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.c
@@ -0,0 +1,658 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#include "ctrlEnv/sys/mvSysGbe.h"
+
+
+
+typedef struct _mvEthDecWin
+{
+ MV_TARGET target;
+ MV_ADDR_WIN addrWin; /* An address window*/
+ MV_BOOL enable; /* Address decode window is enabled/disabled */
+
+}MV_ETH_DEC_WIN;
+
+MV_TARGET ethAddrDecPrioTap[] =
+{
+#if defined(MV_INCLUDE_SDRAM_CS0)
+ SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+ SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+ SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+ SDRAM_CS3,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS0)
+ DEVICE_CS0,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS1)
+ DEVICE_CS1,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS2)
+ DEVICE_CS2,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS3)
+ DEVICE_CS3,
+#endif
+#if defined(MV_INCLUDE_PEX)
+ PEX0_IO,
+#endif
+ TBL_TERM
+};
+
+static MV_STATUS ethWinOverlapDetect(int port, MV_U32 winNum, MV_ADDR_WIN *pAddrWin);
+static MV_STATUS mvEthWinSet(int port, MV_U32 winNum, MV_ETH_DEC_WIN *pAddrDecWin);
+static MV_STATUS mvEthWinGet(int port, MV_U32 winNum, MV_ETH_DEC_WIN *pAddrDecWin);
+
+
+/*******************************************************************************
+* mvEthWinInit - Initialize ETH address decode windows
+*
+* DESCRIPTION:
+*       This function initializes the ETH window decode unit. It sets the
+*       default address decode windows of the unit.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_ERROR if setting fail.
+*******************************************************************************/
+/* Configure EthDrv memory map registers. */
+MV_STATUS mvEthWinInit (int port)
+{
+ MV_U32 winNum, status, winPrioIndex=0, i, regVal=0;
+ MV_ETH_DEC_WIN ethWin;
+ MV_CPU_DEC_WIN cpuAddrDecWin;
+ static MV_U32 accessProtReg = 0;
+
+#if (MV_ETH_VERSION <= 1)
+ static MV_BOOL isFirst = MV_TRUE;
+
+ if(isFirst == MV_FALSE)
+ {
+ MV_REG_WRITE(ETH_ACCESS_PROTECT_REG(port), accessProtReg);
+ return MV_OK;
+ }
+ isFirst = MV_FALSE;
+#endif /* MV_ETH_VERSION <= 1 */
+
+ /* Initiate Ethernet address decode */
+
+ /* First disable all address decode windows */
+ for(winNum=0; winNum<ETH_MAX_DECODE_WIN; winNum++)
+ {
+ regVal |= MV_BIT_MASK(winNum);
+ }
+ MV_REG_WRITE(ETH_BASE_ADDR_ENABLE_REG(port), regVal);
+
+ /* Go through all windows in user table until table terminator */
+ for (winNum=0; ((ethAddrDecPrioTap[winPrioIndex] != TBL_TERM) &&
+ (winNum < ETH_MAX_DECODE_WIN)); )
+ {
+ /* first get attributes from CPU If */
+ status = mvCpuIfTargetWinGet(ethAddrDecPrioTap[winPrioIndex],
+ &cpuAddrDecWin);
+
+ if(MV_NO_SUCH == status)
+ {
+ winPrioIndex++;
+ continue;
+ }
+ if (MV_OK != status)
+ {
+ mvOsPrintf("mvEthWinInit: ERR. mvCpuIfTargetWinGet failed\n");
+ return MV_ERROR;
+ }
+
+ if (cpuAddrDecWin.enable == MV_TRUE)
+ {
+ ethWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+ ethWin.addrWin.baseLow = cpuAddrDecWin.addrWin.baseLow;
+ ethWin.addrWin.size = cpuAddrDecWin.addrWin.size;
+ ethWin.enable = MV_TRUE;
+ ethWin.target = ethAddrDecPrioTap[winPrioIndex];
+
+ if(MV_OK != mvEthWinSet(port, winNum, &ethWin))
+ {
+ mvOsPrintf("mvEthWinInit: ERR. mvEthWinSet failed winNum=%d\n",
+ winNum);
+ return MV_ERROR;
+ }
+ winNum++;
+ }
+ winPrioIndex ++;
+ }
+
+ /* set full access to all windows. */
+ for(i=0; i<winNum; i++)
+ {
+ accessProtReg |= (FULL_ACCESS << (i*2));
+ }
+ MV_REG_WRITE(ETH_ACCESS_PROTECT_REG(port), accessProtReg);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthWinSet - Set ETH target address window
+*
+* DESCRIPTION:
+* This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+* address window, also known as address decode window.
+* After setting this target window, the ETH will be able to access the
+* target within the address window.
+*
+* INPUT:
+* winNum - ETH to target address decode window number.
+* pAddrDecWin - ETH target window data structure.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_ERROR if address window overlaps with other address decode windows.
+* MV_BAD_PARAM if base address is invalid parameter or target is
+* unknown.
+*
+*******************************************************************************/
+MV_STATUS mvEthWinSet(int port, MV_U32 winNum, MV_ETH_DEC_WIN *pAddrDecWin)
+{
+ MV_TARGET_ATTRIB targetAttribs;
+ MV_DEC_REGS decRegs;
+
+ /* Parameter checking */
+ if (winNum >= ETH_MAX_DECODE_WIN)
+ {
+ mvOsPrintf("mvEthWinSet: ERR. Invalid win num %d\n",winNum);
+ return MV_BAD_PARAM;
+ }
+
+	/* Check if the requested window overlaps with current windows */
+ if (MV_TRUE == ethWinOverlapDetect(port, winNum, &pAddrDecWin->addrWin))
+ {
+ mvOsPrintf("mvEthWinSet: ERR. Window %d overlap\n", winNum);
+ return MV_ERROR;
+ }
+
+ /* check if address is aligned to the size */
+ if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+ {
+ mvOsPrintf("mvEthWinSet: Error setting Ethernet window %d to "\
+ "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+ winNum,
+ mvCtrlTargetNameGet(pAddrDecWin->target),
+ pAddrDecWin->addrWin.baseLow,
+ pAddrDecWin->addrWin.size);
+ return MV_ERROR;
+ }
+
+
+ decRegs.baseReg = MV_REG_READ(ETH_WIN_BASE_REG(port, winNum));
+ decRegs.sizeReg = MV_REG_READ(ETH_WIN_SIZE_REG(port, winNum));
+
+ if (MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+ {
+ mvOsPrintf("mvEthWinSet:mvCtrlAddrDecToReg Failed\n");
+ return MV_ERROR;
+ }
+
+ mvCtrlAttribGet(pAddrDecWin->target,&targetAttribs);
+
+ /* set attributes */
+ decRegs.baseReg &= ~ETH_WIN_ATTR_MASK;
+ decRegs.baseReg |= targetAttribs.attrib << ETH_WIN_ATTR_OFFS;
+ /* set target ID */
+ decRegs.baseReg &= ~ETH_WIN_TARGET_MASK;
+ decRegs.baseReg |= targetAttribs.targetId << ETH_WIN_TARGET_OFFS;
+
+	/* To be on the safe side, disable the window before writing the new
+	   values */
+ mvEthWinEnable(port, winNum, MV_FALSE);
+ MV_REG_WRITE(ETH_WIN_BASE_REG(port, winNum), decRegs.baseReg);
+
+ /* Write to address decode Size Register */
+ MV_REG_WRITE(ETH_WIN_SIZE_REG(port, winNum), decRegs.sizeReg);
+
+ /* Enable address decode target window */
+ if (pAddrDecWin->enable == MV_TRUE)
+ {
+ mvEthWinEnable(port, winNum, MV_TRUE);
+ }
+
+ return MV_OK;
+}
+
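+/*
+ * Usage sketch (illustrative only, not part of the original file): open a
+ * 256MB window from port 0 to SDRAM_CS0, the same way mvEthWinInit() derives
+ * windows from the CPU interface settings.
+ *
+ *     MV_ETH_DEC_WIN win;
+ *
+ *     win.target           = SDRAM_CS0;
+ *     win.addrWin.baseHigh = 0;
+ *     win.addrWin.baseLow  = 0x00000000;
+ *     win.addrWin.size     = 0x10000000;
+ *     win.enable           = MV_TRUE;
+ *
+ *     if (mvEthWinSet(0, 0, &win) != MV_OK)
+ *         mvOsPrintf("eth window 0 setup failed\n");
+ */
+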
+/*******************************************************************************
+* mvEthWinGet - Get ETH peripheral target address window.
+*
+* DESCRIPTION:
+* Get ETH peripheral target address window.
+*
+* INPUT:
+* winNum - ETH to target address decode window number.
+*
+* OUTPUT:
+* pAddrDecWin - ETH target window data structure.
+*
+* RETURN:
+* MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvEthWinGet(int port, MV_U32 winNum, MV_ETH_DEC_WIN *pAddrDecWin)
+{
+ MV_DEC_REGS decRegs;
+ MV_TARGET_ATTRIB targetAttrib;
+
+ /* Parameter checking */
+ if (winNum >= ETH_MAX_DECODE_WIN)
+ {
+ mvOsPrintf("mvEthWinGet: ERR. Invalid winNum %d\n", winNum);
+ return MV_NOT_SUPPORTED;
+ }
+
+ decRegs.baseReg = MV_REG_READ(ETH_WIN_BASE_REG(port, winNum));
+ decRegs.sizeReg = MV_REG_READ(ETH_WIN_SIZE_REG(port, winNum));
+
+ if (MV_OK != mvCtrlRegToAddrDec(&decRegs,&(pAddrDecWin->addrWin)))
+ {
+ mvOsPrintf("mvAhbToMbusWinGet: mvCtrlRegToAddrDec Failed \n");
+ return MV_ERROR;
+ }
+
+ /* attrib and targetId */
+ targetAttrib.attrib =
+ (decRegs.baseReg & ETH_WIN_ATTR_MASK) >> ETH_WIN_ATTR_OFFS;
+ targetAttrib.targetId =
+ (decRegs.baseReg & ETH_WIN_TARGET_MASK) >> ETH_WIN_TARGET_OFFS;
+
+ pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+ /* Check if window is enabled */
+ if (~(MV_REG_READ(ETH_BASE_ADDR_ENABLE_REG(port))) & (1 << winNum) )
+ {
+ pAddrDecWin->enable = MV_TRUE;
+ }
+ else
+ {
+ pAddrDecWin->enable = MV_FALSE;
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthWinEnable - Enable/disable a ETH to target address window
+*
+* DESCRIPTION:
+*       This function enables/disables an ETH to target address window.
+*       According to the 'enable' parameter the routine will enable the
+*       window, thus enabling ETH accesses (before enabling the window it is
+*       tested for overlapping). Otherwise, the window will be disabled.
+*
+* INPUT:
+* winNum - ETH to target address decode window number.
+* enable - Enable/disable parameter.
+*
+* OUTPUT:
+* N/A
+*
+* RETURN:
+*       MV_ERROR if the decode window number was wrong or the enabled window overlaps.
+*
+*******************************************************************************/
+MV_STATUS mvEthWinEnable(int port, MV_U32 winNum,MV_BOOL enable)
+{
+ MV_ETH_DEC_WIN addrDecWin;
+
+ /* Parameter checking */
+ if (winNum >= ETH_MAX_DECODE_WIN)
+ {
+ mvOsPrintf("mvEthTargetWinEnable:ERR. Invalid winNum%d\n",winNum);
+ return MV_ERROR;
+ }
+
+ if (enable == MV_TRUE)
+ { /* First check for overlap with other enabled windows */
+ /* Get current window */
+ if (MV_OK != mvEthWinGet(port, winNum, &addrDecWin))
+ {
+ mvOsPrintf("mvEthTargetWinEnable:ERR. targetWinGet fail\n");
+ return MV_ERROR;
+ }
+ /* Check for overlapping */
+ if (MV_FALSE == ethWinOverlapDetect(port, winNum, &(addrDecWin.addrWin)))
+ {
+ /* No Overlap. Enable address decode target window */
+ MV_REG_BIT_RESET(ETH_BASE_ADDR_ENABLE_REG(port), (1 << winNum));
+ }
+ else
+ { /* Overlap detected */
+ mvOsPrintf("mvEthTargetWinEnable:ERR. Overlap detected\n");
+ return MV_ERROR;
+ }
+ }
+ else
+ { /* Disable address decode target window */
+ MV_REG_BIT_SET(ETH_BASE_ADDR_ENABLE_REG(port), (1 << winNum));
+ }
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthWinTargetGet - Get Window number associated with target
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+* window number
+*
+*******************************************************************************/
+MV_U32 mvEthWinTargetGet(int port, MV_TARGET target)
+{
+ MV_ETH_DEC_WIN decWin;
+ MV_U32 winNum;
+
+ /* Check parameters */
+ if (target >= MAX_TARGETS)
+ {
+ mvOsPrintf("mvAhbToMbusWinTargetGet: target %d is Illigal\n", target);
+ return 0xffffffff;
+ }
+
+ for (winNum=0; winNum<ETH_MAX_DECODE_WIN; winNum++)
+ {
+ if (mvEthWinGet(port, winNum,&decWin) != MV_OK)
+ {
+ mvOsPrintf("mvAhbToMbusWinTargetGet: window returned error\n");
+ return 0xffffffff;
+ }
+
+ if (decWin.enable == MV_TRUE)
+ {
+ if (decWin.target == target)
+ {
+ return winNum;
+ }
+ }
+ }
+ return 0xFFFFFFFF;
+}
+
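+/*
+ * Usage sketch (illustrative only, not part of the original file): look up
+ * which window of port 0 currently maps SDRAM_CS0; 0xffffffff means no
+ * enabled window targets it.
+ *
+ *     MV_U32 win = mvEthWinTargetGet(0, SDRAM_CS0);
+ *
+ *     if (win != 0xffffffff)
+ *         mvOsPrintf("SDRAM_CS0 is mapped through eth window %d\n", win);
+ */
+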
+/*******************************************************************************
+* mvEthProtWinSet - Set access protection of Ethernet to target window.
+*
+* DESCRIPTION:
+* Each Ethernet port can be configured with access attributes for each
+*       of the Ethernet to target windows (address decode windows). This
+* function sets access attributes to a given window for the given channel.
+*
+* INPUTS:
+* ethPort - ETH channel number. See MV_ETH_CHANNEL enumerator.
+*       winNum      - ETH to target address decode window number.
+*       access      - ETH access rights. See MV_ACCESS_RIGHTS enumerator.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_ERROR in case window number is invalid or access right reserved.
+*
+*******************************************************************************/
+MV_STATUS mvEthProtWinSet(MV_U32 portNo, MV_U32 winNum, MV_ACCESS_RIGHTS access)
+{
+ MV_U32 protReg;
+
+ /* Parameter checking */
+ if(portNo >= mvCtrlEthMaxPortGet())
+ {
+ mvOsPrintf("mvEthProtWinSet:ERR. Invalid port number %d\n", portNo);
+ return MV_ERROR;
+ }
+
+ if (winNum >= ETH_MAX_DECODE_WIN)
+ {
+ mvOsPrintf("mvEthProtWinSet:ERR. Invalid winNum%d\n",winNum);
+ return MV_ERROR;
+ }
+
+ if((access == ACC_RESERVED) || (access >= MAX_ACC_RIGHTS))
+ {
+ mvOsPrintf("mvEthProtWinSet:ERR. Inv access param %d\n", access);
+ return MV_ERROR;
+ }
+ /* Read current protection register */
+ protReg = MV_REG_READ(ETH_ACCESS_PROTECT_REG(portNo));
+
+ /* Clear protection window field */
+ protReg &= ~(ETH_PROT_WIN_MASK(winNum));
+
+ /* Set new protection field value */
+ protReg |= (access << (ETH_PROT_WIN_OFFS(winNum)));
+
+ /* Write protection register back */
+ MV_REG_WRITE(ETH_ACCESS_PROTECT_REG(portNo), protReg);
+
+ return MV_OK;
+}
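+
+/*
+ * Usage sketch (illustrative only, not part of the original file): grant
+ * port 0 full access rights on its first decode window, as mvEthWinInit()
+ * does for every window it programs.
+ *
+ *     if (mvEthProtWinSet(0, 0, FULL_ACCESS) != MV_OK)
+ *         mvOsPrintf("failed to set access rights for eth window 0\n");
+ */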
+
+/*******************************************************************************
+* ethWinOverlapDetect - Detect ETH address windows overlapping
+*
+* DESCRIPTION:
+*       Unpredictable behaviour is expected if ETH address decode
+*       windows overlap.
+*       This function detects whether a specified ETH address decode window
+*       overlaps any other window. The function does not check the window
+*       itself for overlapping and skips disabled address decode windows.
+*
+* INPUT:
+* winNum - address decode window number.
+* pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE if the given address window overlap current address
+* decode map, MV_FALSE otherwise, MV_ERROR if reading invalid data
+* from registers.
+*
+*******************************************************************************/
+static MV_STATUS ethWinOverlapDetect(int port, MV_U32 winNum, MV_ADDR_WIN *pAddrWin)
+{
+ MV_U32 baseAddrEnableReg;
+ MV_U32 winNumIndex;
+ MV_ETH_DEC_WIN addrDecWin;
+
+ /* Read base address enable register. Do not check disabled windows */
+ baseAddrEnableReg = MV_REG_READ(ETH_BASE_ADDR_ENABLE_REG(port));
+
+ for (winNumIndex=0; winNumIndex<ETH_MAX_DECODE_WIN; winNumIndex++)
+ {
+ /* Do not check window itself */
+ if (winNumIndex == winNum)
+ {
+ continue;
+ }
+
+ /* Do not check disabled windows */
+ if (baseAddrEnableReg & (1 << winNumIndex))
+ {
+ continue;
+ }
+
+ /* Get window parameters */
+ if (MV_OK != mvEthWinGet(port, winNumIndex, &addrDecWin))
+ {
+ mvOsPrintf("ethWinOverlapDetect: ERR. TargetWinGet failed\n");
+ return MV_ERROR;
+ }
+/*
+ mvOsPrintf("ethWinOverlapDetect:\n
+ winNumIndex =%d baseHigh =0x%x baseLow=0x%x size=0x%x enable=0x%x\n",
+ winNumIndex,
+ addrDecWin.addrWin.baseHigh,
+ addrDecWin.addrWin.baseLow,
+ addrDecWin.addrWin.size,
+ addrDecWin.enable);
+*/
+ if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+ {
+ return MV_TRUE;
+ }
+ }
+ return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvEthPortAddrDecShow - Print the Ethernet address decode map of a port.
+*
+* DESCRIPTION:
+*		This function prints the Ethernet address decode map of a single port.
+*
+* INPUT:
+*       port - ETH port number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+void mvEthPortAddrDecShow(int port)
+{
+ MV_ETH_DEC_WIN win;
+ int i;
+
+ mvOsOutput( "\n" );
+ mvOsOutput( "ETH %d:\n", port );
+ mvOsOutput( "----\n" );
+
+ for( i = 0; i < ETH_MAX_DECODE_WIN; i++ )
+ {
+		memset( &win, 0, sizeof(MV_ETH_DEC_WIN) );
+
+ mvOsOutput( "win%d - ", i );
+
+ if( mvEthWinGet(port, i, &win ) == MV_OK )
+ {
+ if( win.enable )
+ {
+ mvOsOutput( "%s base %08x, ",
+ mvCtrlTargetNameGet(win.target), win.addrWin.baseLow );
+ mvOsOutput( "...." );
+ mvSizePrint( win.addrWin.size );
+
+ mvOsOutput( "\n" );
+ }
+ else
+ mvOsOutput( "disable\n" );
+ }
+ }
+ return;
+}
+
+void mvEthAddrDecShow(void)
+{
+ int port;
+
+ for(port=0; port<mvCtrlEthMaxPortGet(); port++)
+ {
+ if (MV_FALSE == mvCtrlPwrClckGet(ETH_GIG_UNIT_ID, port)) continue;
+
+ mvEthPortAddrDecShow(port);
+ }
+}
+
+
+void mvEthInit(void)
+{
+ MV_U32 port;
+
+	/* Power up and set up address decode windows for all active ports */
+ for(port=0; port<mvCtrlEthMaxPortGet(); port++)
+ {
+ if (MV_FALSE == mvCtrlPwrClckGet(ETH_GIG_UNIT_ID, port))
+ continue;
+
+ mvEthPortPowerUp(port);
+ mvEthWinInit(port);
+ }
+ mvEthHalInit();
+}
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.h
new file mode 100644
index 000000000..615af512d
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.h
@@ -0,0 +1,113 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSysGbeh
+#define __INCmvSysGbeh
+
+#include "mvCommon.h"
+#include "eth/mvEth.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+#define ETH_WIN_BASE_REG(port, win) (MV_ETH_REG_BASE(port) + 0x200 + ((win)<<3))
+#define ETH_WIN_SIZE_REG(port, win) (MV_ETH_REG_BASE(port) + 0x204 + ((win)<<3))
+#define ETH_WIN_REMAP_REG(port, win) (MV_ETH_REG_BASE(port) + 0x280 + ((win)<<2))
+#define ETH_BASE_ADDR_ENABLE_REG(port) (MV_ETH_REG_BASE(port) + 0x290)
+#define ETH_ACCESS_PROTECT_REG(port) (MV_ETH_REG_BASE(port) + 0x294)
+
+/**** Address decode parameters ****/
+
+/* Ethernet Base Address Register bits */
+#define ETH_MAX_DECODE_WIN 6
+#define ETH_MAX_HIGH_ADDR_REMAP_WIN 4
+
+/* Ethernet Port Access Protect (EPAP) register */
+
+/* The target associated with this window*/
+#define ETH_WIN_TARGET_OFFS 0
+#define ETH_WIN_TARGET_MASK (0xf << ETH_WIN_TARGET_OFFS)
+/* The target attributes associated with the window */
+#define ETH_WIN_ATTR_OFFS 8
+#define ETH_WIN_ATTR_MASK (0xff << ETH_WIN_ATTR_OFFS)
+
+/* Ethernet Port Access Protect Register (EPAPR) */
+#define ETH_PROT_NO_ACCESS NO_ACCESS_ALLOWED
+#define ETH_PROT_READ_ONLY READ_ONLY
+#define ETH_PROT_FULL_ACCESS FULL_ACCESS
+#define ETH_PROT_WIN_OFFS(winNum) (2 * (winNum))
+#define ETH_PROT_WIN_MASK(winNum) (0x3 << ETH_PROT_WIN_OFFS(winNum))
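+
+/* Example (illustrative sketch): every decode window owns a 2-bit field in
+   the EPAP register, so a value granting full access on window 0 and
+   read-only access on window 1 would be composed as
+
+	(FULL_ACCESS << ETH_PROT_WIN_OFFS(0)) | (READ_ONLY << ETH_PROT_WIN_OFFS(1))
+
+   mvEthProtWinSet() updates one such field at a time. */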
+
+MV_STATUS mvEthWinInit (int port);
+MV_STATUS mvEthWinEnable(int port, MV_U32 winNum, MV_BOOL enable);
+MV_U32 mvEthWinTargetGet(int port, MV_TARGET target);
+MV_STATUS mvEthProtWinSet(MV_U32 portNo, MV_U32 winNum, MV_ACCESS_RIGHTS
+ access);
+
+void mvEthPortAddrDecShow(int port);
+
+MV_VOID mvEthAddrDecShow(MV_VOID);
+
+void mvEthInit(void);
+
+#endif
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.c
new file mode 100644
index 000000000..b0cb466ab
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.c
@@ -0,0 +1,1697 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "ctrlEnv/sys/mvSysPex.h"
+
+/* this structure describes the mapping between a Pex Window and a CPU target*/
+typedef struct _pexWinToTarget
+{
+ MV_TARGET target;
+ MV_BOOL enable;
+
+}PEX_WIN_TO_TARGET;
+
+/* This array defines the priority in which PEX windows are configured.
+Only 6 PEX windows can be configured, but there are up to 9 CPU target
+windows. The lowest index in this array has the highest priority of being
+configured, and the highest index has the lowest priority. */
+
+MV_U32 pexDevBarPrioTable[] =
+{
+#if defined(MV_INCLUDE_DEVICE_CS0)
+ DEVICE_CS0,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS1)
+ DEVICE_CS1,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS2)
+ DEVICE_CS2,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS3)
+ DEVICE_CS3,
+#endif
+/*
+#if defined(MV_INCLUDE_DEVICE_CS4)
+ DEVICE_CS4,
+#endif
+*/
+ TBL_TERM
+};
+
+
+/* PEX window register offsets are not consecutive. This struct describes */
+/* the window register offsets and where each register is located.        */
+/* PEX address remap register offsets are likewise not consecutive; this  */
+/* struct describes the address remap register offsets as well.           */
+typedef struct _pexWinRegInfo
+{
+ MV_U32 baseLowRegOffs;
+ MV_U32 baseHighRegOffs;
+ MV_U32 sizeRegOffs;
+ MV_U32 remapLowRegOffs;
+ MV_U32 remapHighRegOffs;
+
+}PEX_WIN_REG_INFO;
+
+static MV_STATUS pexWinOverlapDetect(MV_U32 pexIf, MV_U32 winNum,
+ MV_ADDR_WIN *pAddrWin);
+static MV_STATUS pexWinRegInfoGet(MV_U32 pexIf, MV_U32 winNum,
+ PEX_WIN_REG_INFO *pWinRegInfo);
+
+static MV_STATUS pexBarIsValid(MV_U32 baseLow, MV_U32 size);
+
+static MV_BOOL pexIsWinWithinBar(MV_U32 pexIf,MV_ADDR_WIN *pAddrWin);
+static MV_BOOL pexBarOverlapDetect(MV_U32 pexIf,MV_U32 barNum,
+ MV_ADDR_WIN *pAddrWin);
+const MV_8* pexBarNameGet( MV_U32 bar );
+
+
+/*******************************************************************************
+* mvPexInit - Initialize PEX interfaces
+*
+* DESCRIPTION:
+*
+* This function is responsible for initialization of the PEX interface. It
+* configures the PEX BARs and windows in the following manner:
+*
+*  Assumptions :
+*				Bar0 is always the internal registers bar
+*			 	Bar1 is always the DRAM bar
+*				Bar2 is always the Device bar
+*
+*  1) Sets the internal registers bar base by obtaining the base from
+*	  the CPU Interface.
+*  2) Sets the DRAM bar base and size by getting them from the CPU
+*     Interface, where the size is the sum of all enabled DRAM chip
+*	  selects and the base is the base of CS0.
+*  3) Sets the Device bar base and size by getting these values from the
+*     CPU Interface, where the base is the lowest base of the Device chip
+*     selects and the size spans up to the end of the highest Device window.
+*
+*
+* INPUT:
+*
+* pexIf - PEX interface number.
+*
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_OK on success, otherwise MV_ERROR or MV_BAD_PARAM.
+*
+*******************************************************************************/
+MV_STATUS mvPexInit(MV_U32 pexIf, MV_PEX_TYPE pexType)
+{
+ MV_U32 bar;
+ MV_U32 winNum;
+ MV_PEX_BAR pexBar;
+ MV_PEX_DEC_WIN pexWin;
+ MV_CPU_DEC_WIN addrDecWin;
+ MV_TARGET target;
+ MV_U32 pexCurrWin=0;
+ MV_U32 status;
+	/* the default and expansion rom
+	windows are always configured */
+
+#ifndef MV_DISABLE_PEX_DEVICE_BAR
+ MV_U32 winIndex;
+ MV_U32 maxBase=0, sizeOfMaxBase=0;
+ MV_U32 pexStartWindow;
+#endif
+
+ /* Parameter checking */
+ if(pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexInit: ERR. Invalid PEX interface %d\n", pexIf);
+ return MV_BAD_PARAM;
+ }
+
+	/* Enable CPU access to PCI-Express */
+ mvCpuIfEnablePex(pexIf, pexType);
+
+ /* Start with bars */
+ /* First disable all PEX bars*/
+ for (bar = 0; bar < PEX_MAX_BARS; bar++)
+ {
+ if (PEX_INTER_REGS_BAR != bar)
+ {
+ if (MV_OK != mvPexBarEnable(pexIf, bar, MV_FALSE))
+ {
+ mvOsPrintf("mvPexInit:mvPexBarEnable bar =%d failed \n",bar);
+ return MV_ERROR;
+ }
+
+ }
+
+ }
+
+ /* and disable all PEX target windows */
+ for (winNum = 0; winNum < PEX_MAX_TARGET_WIN - 2; winNum++)
+ {
+ if (MV_OK != mvPexTargetWinEnable(pexIf, winNum, MV_FALSE))
+ {
+ mvOsPrintf("mvPexInit:mvPexTargetWinEnable winNum =%d failed \n",
+ winNum);
+ return MV_ERROR;
+
+ }
+ }
+
+ /* Now, go through all bars*/
+
+
+
+/******************************************************************************/
+/* Internal registers bar */
+/******************************************************************************/
+ bar = PEX_INTER_REGS_BAR;
+
+	/* we only open the bar; there is no need to open windows for this bar */
+
+ /* first get the CS attribute from the CPU Interface */
+ if (MV_OK !=mvCpuIfTargetWinGet(INTER_REGS,&addrDecWin))
+ {
+ mvOsPrintf("mvPexInit: ERR. mvCpuIfTargetWinGet failed target =%d\n",INTER_REGS);
+ return MV_ERROR;
+ }
+
+ pexBar.addrWin.baseHigh = addrDecWin.addrWin.baseHigh;
+ pexBar.addrWin.baseLow = addrDecWin.addrWin.baseLow;
+ pexBar.addrWin.size = addrDecWin.addrWin.size;
+ pexBar.enable = MV_TRUE;
+
+ if (MV_OK != mvPexBarSet(pexIf, bar, &pexBar))
+ {
+ mvOsPrintf("mvPexInit: ERR. mvPexBarSet %d failed\n", bar);
+ return MV_ERROR;
+ }
+
+/******************************************************************************/
+/* DRAM bar */
+/******************************************************************************/
+
+ bar = PEX_DRAM_BAR;
+
+ pexBar.addrWin.size = 0;
+
+ for (target = SDRAM_CS0;target < MV_DRAM_MAX_CS; target++ )
+ {
+
+ status = mvCpuIfTargetWinGet(target,&addrDecWin);
+
+ if((MV_NO_SUCH == status)&&(target != SDRAM_CS0))
+ {
+ continue;
+ }
+
+ /* first get attributes from CPU If */
+ if (MV_OK != status)
+ {
+ mvOsPrintf("mvPexInit: ERR. mvCpuIfTargetWinGet failed target =%d\n",target);
+ return MV_ERROR;
+ }
+ if (addrDecWin.enable == MV_TRUE)
+ {
+ /* the base is the base of DRAM CS0 always */
+ if (SDRAM_CS0 == target )
+ {
+ pexBar.addrWin.baseHigh = addrDecWin.addrWin.baseHigh;
+ pexBar.addrWin.baseLow = addrDecWin.addrWin.baseLow;
+
+ }
+
+			/* increment the bar size to be the sum of the sizes of all
+			DRAM chip selects */
+ pexBar.addrWin.size += addrDecWin.addrWin.size;
+
+			/* set a PEX window for this target.
+			A DRAM CS will always have a PEX window, and it is not
+			part of the priority table */
+ pexWin.addrWin.baseHigh = addrDecWin.addrWin.baseHigh;
+ pexWin.addrWin.baseLow = addrDecWin.addrWin.baseLow;
+ pexWin.addrWin.size = addrDecWin.addrWin.size;
+
+			/* the window is disabled at first because we are not yet
+			sure that it is within the bar boundaries */
+ pexWin.enable =MV_FALSE;
+ pexWin.target = target;
+ pexWin.targetBar = bar;
+
+ if (MV_OK != mvPexTargetWinSet(pexIf,pexCurrWin++,&pexWin))
+ {
+ mvOsPrintf("mvPexInit: ERR. mvPexTargetWinSet failed\n");
+ return MV_ERROR;
+ }
+ }
+ }
+
+	/* check if the size of the bar is illegal */
+ if (-1 == ctrlSizeToReg(pexBar.addrWin.size, PXBCR_BAR_SIZE_ALIGNMENT))
+ {
+ /* try to get a good size */
+ pexBar.addrWin.size = ctrlSizeRegRoundUp(pexBar.addrWin.size,
+ PXBCR_BAR_SIZE_ALIGNMENT);
+ }
+
+ /* check if the size and base are valid */
+ if (MV_TRUE == pexBarOverlapDetect(pexIf,bar,&pexBar.addrWin))
+ {
+		mvOsPrintf("mvPexInit: Warning: Bar %d overlaps another enabled BAR\n",bar);
+		mvOsPrintf("it will be disabled\n");
+		mvOsPrintf("please check PEX and CPU windows configuration\n");
+ }
+ else
+ {
+ pexBar.enable = MV_TRUE;
+
+ /* configure the bar */
+ if (MV_OK != mvPexBarSet(pexIf, bar, &pexBar))
+ {
+ mvOsPrintf("mvPexInit: ERR. mvPexBarSet %d failed\n", bar);
+ return MV_ERROR;
+ }
+
+ /* after the bar was configured then we enable the Pex windows*/
+ for (winNum = 0;winNum < pexCurrWin ;winNum++)
+ {
+ if (MV_OK != mvPexTargetWinEnable(pexIf, winNum, MV_TRUE))
+ {
+ mvOsPrintf("mvPexInit: Can't enable window =%d\n",winNum);
+ return MV_ERROR;
+ }
+
+ }
+ }
+
+/******************************************************************************/
+/* DEVICE bar */
+/******************************************************************************/
+
+/* Open the Device BAR for non linux only */
+#ifndef MV_DISABLE_PEX_DEVICE_BAR
+
+ /* then device bar*/
+ bar = PEX_DEVICE_BAR;
+
+ /* save the starting window */
+ pexStartWindow = pexCurrWin;
+ pexBar.addrWin.size = 0;
+ pexBar.addrWin.baseLow = 0xffffffff;
+ pexBar.addrWin.baseHigh = 0;
+ maxBase = 0;
+
+ for (target = DEV_TO_TARGET(START_DEV_CS);target < DEV_TO_TARGET(MV_DEV_MAX_CS); target++ )
+ {
+ status = mvCpuIfTargetWinGet(target,&addrDecWin);
+
+ if (MV_NO_SUCH == status)
+ {
+ continue;
+ }
+
+ if (MV_OK != status)
+ {
+ mvOsPrintf("mvPexInit: ERR. mvCpuIfTargetWinGet failed target =%d\n",target);
+ return MV_ERROR;
+ }
+
+ if (addrDecWin.enable == MV_TRUE)
+ {
+ /* get the minimum base */
+ if (addrDecWin.addrWin.baseLow < pexBar.addrWin.baseLow)
+ {
+ pexBar.addrWin.baseLow = addrDecWin.addrWin.baseLow;
+ }
+
+ /* get the maximum base */
+ if (addrDecWin.addrWin.baseLow > maxBase)
+ {
+ maxBase = addrDecWin.addrWin.baseLow;
+ sizeOfMaxBase = addrDecWin.addrWin.size;
+ }
+
+ /* search in the priority table for this target */
+ for (winIndex = 0; pexDevBarPrioTable[winIndex] != TBL_TERM;
+ winIndex++)
+ {
+ if (pexDevBarPrioTable[winIndex] != target)
+ {
+ continue;
+ }
+ else if (pexDevBarPrioTable[winIndex] == target)
+ {
+ /*found it */
+
+					/* if the index of this target in the priority table is
+					valid then we set the PEX window for this target; a valid
+					index is one that is lower than the number of windows that
+					have not been configured yet */
+
+					/* we always subtract 2 because the default and expansion
+					rom windows are always configured */
+ if ( pexCurrWin < PEX_MAX_TARGET_WIN - 2)
+ {
+ /* set a Pex window for this target ! */
+ pexWin.addrWin.baseHigh = addrDecWin.addrWin.baseHigh;
+ pexWin.addrWin.baseLow = addrDecWin.addrWin.baseLow;
+ pexWin.addrWin.size = addrDecWin.addrWin.size;
+
+					/* the window is disabled at first because we are not yet
+					sure that it is within the bar boundaries */
+ pexWin.enable = MV_FALSE;
+ pexWin.target = target;
+ pexWin.targetBar = bar;
+
+ if (MV_OK != mvPexTargetWinSet(pexIf,pexCurrWin++,
+ &pexWin))
+ {
+ mvOsPrintf("mvPexInit: ERR. Window Set failed\n");
+ return MV_ERROR;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ pexBar.addrWin.size = maxBase - pexBar.addrWin.baseLow + sizeOfMaxBase;
+ pexBar.enable = MV_TRUE;
+
+ /* check if the size of the bar is illegal */
+ if (-1 == ctrlSizeToReg(pexBar.addrWin.size, PXBCR_BAR_SIZE_ALIGNMENT))
+ {
+ /* try to get a good size */
+ pexBar.addrWin.size = ctrlSizeRegRoundUp(pexBar.addrWin.size,
+ PXBCR_BAR_SIZE_ALIGNMENT);
+ }
+
+ /* check if the size and base are valid */
+ if (MV_TRUE == pexBarOverlapDetect(pexIf,bar,&pexBar.addrWin))
+ {
+		mvOsPrintf("mvPexInit: Warning: Bar %d overlaps another enabled BAR\n",bar);
+		mvOsPrintf("it will be disabled\n");
+		mvOsPrintf("please check PEX and CPU windows configuration\n");
+ }
+ else
+ {
+ if (MV_OK != mvPexBarSet(pexIf, bar, &pexBar))
+ {
+ mvOsPrintf("mvPexInit: ERR. mvPexBarSet %d failed\n", bar);
+ return MV_ERROR;
+ }
+
+ /* now enable the windows */
+ for (winNum = pexStartWindow; winNum < pexCurrWin ; winNum++)
+ {
+ if (MV_OK != mvPexTargetWinEnable(pexIf, winNum, MV_TRUE))
+ {
+ mvOsPrintf("mvPexInit:mvPexTargetWinEnable winNum =%d failed \n",
+ winNum);
+ return MV_ERROR;
+ }
+ }
+ }
+
+#endif
+
+ return mvPexHalInit(pexIf, pexType);
+
+}
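+
+/* Example (illustrative sketch; the values below are assumptions rather than
+   taken from any board code): a board init sequence would typically call
+   mvPexInit() once per powered-up interface, e.g. as a root complex:
+
+	if (mvPexInit(0, MV_PEX_ROOT_COMPLEX) != MV_OK)
+		mvOsPrintf("PEX 0 init failed\n");
+
+   MV_PEX_ROOT_COMPLEX is assumed here to be one of the MV_PEX_TYPE
+   enumerators declared in the PEX HAL headers. */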
+
+/*******************************************************************************
+* mvPexTargetWinSet - Set PEX to peripheral target address window BAR
+*
+* DESCRIPTION:
+*       Configures a PEX address decode window: its base, size, target unit
+*       and the BAR it is mapped to.
+*
+* INPUT:
+*       pexIf       - PEX interface number.
+*       winNum      - Window number.
+*       pAddrDecWin - Window configuration data structure.
+*
+* OUTPUT:
+*       N/A
+*
+* RETURN:
+*       MV_OK if PEX BAR target window was set correctly,
+*		MV_BAD_PARAM on bad params
+*       MV_ERROR otherwise
+*	   (e.g. address window overlaps another active PEX target window).
+*
+*******************************************************************************/
+MV_STATUS mvPexTargetWinSet(MV_U32 pexIf, MV_U32 winNum,
+ MV_PEX_DEC_WIN *pAddrDecWin)
+{
+
+ MV_DEC_REGS decRegs;
+ PEX_WIN_REG_INFO winRegInfo;
+ MV_TARGET_ATTRIB targetAttribs;
+
+ /* Parameter checking */
+ if(pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexTargetWinSet: ERR. Invalid PEX interface %d\n", pexIf);
+ return MV_BAD_PARAM;
+ }
+
+ if (winNum >= PEX_MAX_TARGET_WIN)
+ {
+ mvOsPrintf("mvPexTargetWinSet: ERR. Invalid PEX winNum %d\n", winNum);
+ return MV_BAD_PARAM;
+
+ }
+
+ /* get the pex Window registers offsets */
+ pexWinRegInfoGet(pexIf,winNum,&winRegInfo);
+
+
+ if (MV_TRUE == pAddrDecWin->enable)
+ {
+
+ /* 2) Check if the requested window overlaps with current windows */
+ if (MV_TRUE == pexWinOverlapDetect(pexIf,winNum, &pAddrDecWin->addrWin))
+ {
+ mvOsPrintf("mvPexTargetWinSet: ERR. Target %d overlap\n", winNum);
+ return MV_BAD_PARAM;
+ }
+
+		/* 3) Check that the requested window is within the BAR boundaries */
+ if (MV_FALSE == pexIsWinWithinBar(pexIf,&pAddrDecWin->addrWin))
+ {
+			mvOsPrintf("mvPexTargetWinSet: Win %d should be in bar boundaries\n",
+ winNum);
+ return MV_BAD_PARAM;
+ }
+
+ }
+
+
+
+ /* read base register*/
+
+ if (winRegInfo.baseLowRegOffs)
+ {
+ decRegs.baseReg = MV_REG_READ(winRegInfo.baseLowRegOffs);
+ }
+ else
+ {
+ decRegs.baseReg = 0;
+ }
+
+ if (winRegInfo.sizeRegOffs)
+ {
+ decRegs.sizeReg = MV_REG_READ(winRegInfo.sizeRegOffs);
+ }
+ else
+ {
+ decRegs.sizeReg =0;
+ }
+
+ if (MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+ {
+ mvOsPrintf("mvPexTargetWinSet:mvCtrlAddrDecToReg Failed\n");
+ return MV_ERROR;
+ }
+
+ /* enable\Disable */
+ if (MV_TRUE == pAddrDecWin->enable)
+ {
+ decRegs.sizeReg |= PXWCR_WIN_EN;
+ }
+ else
+ {
+ decRegs.sizeReg &= ~PXWCR_WIN_EN;
+ }
+
+
+ /* clear bit location */
+ decRegs.sizeReg &= ~PXWCR_WIN_BAR_MAP_MASK;
+
+ /* set bar Mapping */
+ if (pAddrDecWin->targetBar == 1)
+ {
+ decRegs.sizeReg |= PXWCR_WIN_BAR_MAP_BAR1;
+ }
+ else if (pAddrDecWin->targetBar == 2)
+ {
+ decRegs.sizeReg |= PXWCR_WIN_BAR_MAP_BAR2;
+ }
+
+ mvCtrlAttribGet(pAddrDecWin->target,&targetAttribs);
+
+ /* set attributes */
+ decRegs.sizeReg &= ~PXWCR_ATTRIB_MASK;
+ decRegs.sizeReg |= targetAttribs.attrib << PXWCR_ATTRIB_OFFS;
+ /* set target ID */
+ decRegs.sizeReg &= ~PXWCR_TARGET_MASK;
+ decRegs.sizeReg |= targetAttribs.targetId << PXWCR_TARGET_OFFS;
+
+
+ /* 3) Write to address decode Base Address Register */
+
+ if (winRegInfo.baseLowRegOffs)
+ {
+ MV_REG_WRITE(winRegInfo.baseLowRegOffs, decRegs.baseReg);
+ }
+
+ /* write size reg */
+ if (winRegInfo.sizeRegOffs)
+ {
+ if ((MV_PEX_WIN_DEFAULT == winNum)||
+ (MV_PEX_WIN_EXP_ROM == winNum))
+ {
+ /* clear size because there is no size field*/
+ decRegs.sizeReg &= ~PXWCR_SIZE_MASK;
+
+ /* clear enable because there is no enable field*/
+ decRegs.sizeReg &= ~PXWCR_WIN_EN;
+
+ }
+
+ MV_REG_WRITE(winRegInfo.sizeRegOffs, decRegs.sizeReg);
+ }
+
+
+ return MV_OK;
+
+}
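+
+/* Example (illustrative sketch; the window number, base and size below are
+   arbitrary assumptions): mapping a 1MB window at 0xF0000000 to SDRAM_CS0
+   through the DRAM BAR could look like this:
+
+	MV_PEX_DEC_WIN win;
+
+	win.addrWin.baseHigh = 0;
+	win.addrWin.baseLow  = 0xF0000000;
+	win.addrWin.size     = _1M;
+	win.target           = SDRAM_CS0;
+	win.targetBar        = PEX_DRAM_BAR;
+	win.enable           = MV_TRUE;
+	if (mvPexTargetWinSet(0, 0, &win) != MV_OK)
+		mvOsPrintf("window set failed\n");
+
+   _1M is assumed to be the usual size constant from mvCommon.h. */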
+
+/*******************************************************************************
+* mvPexTargetWinGet - Get PEX to peripheral target address window
+*
+* DESCRIPTION:
+* Get the PEX to peripheral target address window BAR.
+*
+* INPUT:
+* pexIf - PEX interface number.
+*       winNum - Window number.
+*
+* OUTPUT:
+* pAddrBarWin - PEX target window information data structure.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPexTargetWinGet(MV_U32 pexIf, MV_U32 winNum,
+ MV_PEX_DEC_WIN *pAddrDecWin)
+{
+ MV_TARGET_ATTRIB targetAttrib;
+ MV_DEC_REGS decRegs;
+
+ PEX_WIN_REG_INFO winRegInfo;
+
+ /* Parameter checking */
+ if(pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexTargetWinGet: ERR. Invalid PEX interface %d\n", pexIf);
+ return MV_BAD_PARAM;
+ }
+
+ if (winNum >= PEX_MAX_TARGET_WIN)
+ {
+ mvOsPrintf("mvPexTargetWinGet: ERR. Invalid PEX winNum %d\n", winNum);
+ return MV_BAD_PARAM;
+
+ }
+
+ /* get the pex Window registers offsets */
+ pexWinRegInfoGet(pexIf,winNum,&winRegInfo);
+
+ /* read base register*/
+ if (winRegInfo.baseLowRegOffs)
+ {
+ decRegs.baseReg = MV_REG_READ(winRegInfo.baseLowRegOffs);
+ }
+ else
+ {
+ decRegs.baseReg = 0;
+ }
+
+ /* read size reg */
+ if (winRegInfo.sizeRegOffs)
+ {
+ decRegs.sizeReg = MV_REG_READ(winRegInfo.sizeRegOffs);
+ }
+ else
+ {
+ decRegs.sizeReg =0;
+ }
+
+ if (MV_OK != mvCtrlRegToAddrDec(&decRegs,&(pAddrDecWin->addrWin)))
+ {
+ mvOsPrintf("mvPexTargetWinGet: mvCtrlRegToAddrDec Failed \n");
+ return MV_ERROR;
+
+ }
+
+ if (decRegs.sizeReg & PXWCR_WIN_EN)
+ {
+ pAddrDecWin->enable = MV_TRUE;
+ }
+ else
+ {
+ pAddrDecWin->enable = MV_FALSE;
+
+ }
+
+
+ #if 0
+ if (-1 == pAddrDecWin->addrWin.size)
+ {
+ return MV_ERROR;
+ }
+ #endif
+
+
+ /* get target bar */
+ if ((decRegs.sizeReg & PXWCR_WIN_BAR_MAP_MASK) == PXWCR_WIN_BAR_MAP_BAR1 )
+ {
+ pAddrDecWin->targetBar = 1;
+ }
+ else if ((decRegs.sizeReg & PXWCR_WIN_BAR_MAP_MASK) ==
+ PXWCR_WIN_BAR_MAP_BAR2 )
+ {
+ pAddrDecWin->targetBar = 2;
+ }
+
+ /* attrib and targetId */
+ pAddrDecWin->attrib = (decRegs.sizeReg & PXWCR_ATTRIB_MASK) >>
+ PXWCR_ATTRIB_OFFS;
+ pAddrDecWin->targetId = (decRegs.sizeReg & PXWCR_TARGET_MASK) >>
+ PXWCR_TARGET_OFFS;
+
+ targetAttrib.attrib = pAddrDecWin->attrib;
+ targetAttrib.targetId = pAddrDecWin->targetId;
+
+ pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+ return MV_OK;
+
+}
+
+
+/*******************************************************************************
+* mvPexTargetWinEnable - Enable/disable a PEX BAR window
+*
+* DESCRIPTION:
+* This function enable/disable a PEX BAR window.
+* if parameter 'enable' == MV_TRUE the routine will enable the
+* window, thus enabling PEX accesses for that BAR (before enabling the
+* window it is tested for overlapping). Otherwise, the window will
+* be disabled.
+*
+* INPUT:
+* pexIf - PEX interface number.
+*       winNum - Window number.
+* enable - Enable/disable parameter.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPexTargetWinEnable(MV_U32 pexIf,MV_U32 winNum, MV_BOOL enable)
+{
+ PEX_WIN_REG_INFO winRegInfo;
+ MV_PEX_DEC_WIN addrDecWin;
+
+ /* Parameter checking */
+ if(pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexTargetWinEnable: ERR. Invalid PEX If %d\n", pexIf);
+ return MV_BAD_PARAM;
+ }
+
+ if (winNum >= PEX_MAX_TARGET_WIN)
+ {
+ mvOsPrintf("mvPexTargetWinEnable ERR. Invalid PEX winNum %d\n", winNum);
+ return MV_BAD_PARAM;
+
+ }
+
+
+ /* get the pex Window registers offsets */
+ pexWinRegInfoGet(pexIf,winNum,&winRegInfo);
+
+
+	/* if the address window is disabled, we only disable the appropriate
+	pex window and ignore other settings */
+
+ if (MV_FALSE == enable)
+ {
+
+		/* this is not relevant to the default and expansion rom
+		windows */
+ if (winRegInfo.sizeRegOffs)
+ {
+ if ((MV_PEX_WIN_DEFAULT != winNum)&&
+ (MV_PEX_WIN_EXP_ROM != winNum))
+ {
+ MV_REG_BIT_RESET(winRegInfo.sizeRegOffs, PXWCR_WIN_EN);
+ }
+ }
+
+ }
+ else
+ {
+ if (MV_OK != mvPexTargetWinGet(pexIf,winNum, &addrDecWin))
+ {
+ mvOsPrintf("mvPexTargetWinEnable: mvPexTargetWinGet Failed\n");
+ return MV_ERROR;
+ }
+
+ /* Check if the requested window overlaps with current windows */
+ if (MV_TRUE == pexWinOverlapDetect(pexIf,winNum, &addrDecWin.addrWin))
+ {
+ mvOsPrintf("mvPexTargetWinEnable: ERR. Target %d overlap\n", winNum);
+ return MV_BAD_PARAM;
+ }
+
+ if (MV_FALSE == pexIsWinWithinBar(pexIf,&addrDecWin.addrWin))
+ {
+			mvOsPrintf("mvPexTargetWinEnable: Win %d should be in bar boundaries\n",
+ winNum);
+ return MV_BAD_PARAM;
+ }
+
+
+		/* this is not relevant to the default and expansion rom
+		windows */
+ if (winRegInfo.sizeRegOffs)
+ {
+ if ((MV_PEX_WIN_DEFAULT != winNum)&&
+ (MV_PEX_WIN_EXP_ROM != winNum))
+ {
+ MV_REG_BIT_SET(winRegInfo.sizeRegOffs, PXWCR_WIN_EN);
+ }
+ }
+
+
+ }
+
+ return MV_OK;
+
+}
+
+
+
+/*******************************************************************************
+* mvPexTargetWinRemap - Set PEX to target address window remap.
+*
+* DESCRIPTION:
+* The PEX interface supports remap of the BAR original address window.
+* For each BAR it is possible to define a remap address. For example
+*       an address 0x12345678 that hits BAR 0x10 (SDRAM CS[0]) will be modified
+*       according to the remap register but will still be targeted to
+*       SDRAM CS[0].
+*
+* INPUT:
+* pexIf - PEX interface number.
+*       winNum   - Window number.
+*       pAddrWin - Remap window configuration (base address and enable).
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPexTargetWinRemap(MV_U32 pexIf, MV_U32 winNum,
+ MV_PEX_REMAP_WIN *pAddrWin)
+{
+
+ PEX_WIN_REG_INFO winRegInfo;
+
+ /* Parameter checking */
+ if (pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexTargetWinRemap: ERR. Invalid PEX interface num %d\n",
+ pexIf);
+ return MV_BAD_PARAM;
+ }
+ if (MV_PEX_WIN_DEFAULT == winNum)
+ {
+ mvOsPrintf("mvPexTargetWinRemap: ERR. Invalid PEX win num %d\n",
+ winNum);
+ return MV_BAD_PARAM;
+
+ }
+
+ if (MV_IS_NOT_ALIGN(pAddrWin->addrWin.baseLow, PXWRR_REMAP_ALIGNMENT))
+ {
+ mvOsPrintf("mvPexTargetWinRemap: Error remap PEX interface %d win %d."\
+ "\nAddress 0x%08x is unaligned to size 0x%x.\n",
+ pexIf,
+ winNum,
+ pAddrWin->addrWin.baseLow,
+ pAddrWin->addrWin.size);
+
+ return MV_ERROR;
+ }
+
+ pexWinRegInfoGet(pexIf, winNum, &winRegInfo);
+
+ /* Set remap low register value */
+ MV_REG_WRITE(winRegInfo.remapLowRegOffs, pAddrWin->addrWin.baseLow);
+
+ /* Skip base high settings if the BAR has only base low (32-bit) */
+ if (0 != winRegInfo.remapHighRegOffs)
+ {
+ MV_REG_WRITE(winRegInfo.remapHighRegOffs, pAddrWin->addrWin.baseHigh);
+ }
+
+
+ if (pAddrWin->enable == MV_TRUE)
+ {
+ MV_REG_BIT_SET(winRegInfo.remapLowRegOffs,PXWRR_REMAP_EN);
+ }
+ else
+ {
+ MV_REG_BIT_RESET(winRegInfo.remapLowRegOffs,PXWRR_REMAP_EN);
+ }
+
+ return MV_OK;
+}
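+
+/* Example (illustrative sketch; the addresses below are arbitrary
+   assumptions): remapping window 0 of PEX interface 0 so that hits are
+   translated to a different local base. The remap base must be aligned to
+   PXWRR_REMAP_ALIGNMENT (64KB).
+
+	MV_PEX_REMAP_WIN remap;
+
+	remap.addrWin.baseHigh = 0;
+	remap.addrWin.baseLow  = 0x10000000;
+	remap.addrWin.size     = _1M;
+	remap.enable           = MV_TRUE;
+	if (mvPexTargetWinRemap(0, 0, &remap) != MV_OK)
+		mvOsPrintf("remap failed\n");
+*/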
+
+/*******************************************************************************
+* mvPexTargetWinRemapEnable -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+
+MV_STATUS mvPexTargetWinRemapEnable(MV_U32 pexIf, MV_U32 winNum,
+ MV_BOOL enable)
+{
+ PEX_WIN_REG_INFO winRegInfo;
+
+ /* Parameter checking */
+ if (pexIf >= mvCtrlPexMaxIfGet())
+ {
+		mvOsPrintf("mvPexTargetWinRemapEnable: ERR. Invalid PEX interface num %d\n",
+ pexIf);
+ return MV_BAD_PARAM;
+ }
+ if (MV_PEX_WIN_DEFAULT == winNum)
+ {
+		mvOsPrintf("mvPexTargetWinRemapEnable: ERR. Invalid PEX win num %d\n",
+ winNum);
+ return MV_BAD_PARAM;
+
+ }
+
+
+ pexWinRegInfoGet(pexIf, winNum, &winRegInfo);
+
+ if (enable == MV_TRUE)
+ {
+ MV_REG_BIT_SET(winRegInfo.remapLowRegOffs,PXWRR_REMAP_EN);
+ }
+ else
+ {
+ MV_REG_BIT_RESET(winRegInfo.remapLowRegOffs,PXWRR_REMAP_EN);
+ }
+
+ return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvPexBarSet - Set PEX bar address and size
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPexBarSet(MV_U32 pexIf,
+ MV_U32 barNum,
+ MV_PEX_BAR *pAddrWin)
+{
+ MV_U32 regBaseLow;
+ MV_U32 regSize,sizeToReg;
+
+
+ /* check parameters */
+ if(pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexBarSet: ERR. Invalid PEX interface %d\n", pexIf);
+ return MV_BAD_PARAM;
+ }
+
+ if(barNum >= PEX_MAX_BARS)
+ {
+ mvOsPrintf("mvPexBarSet: ERR. Invalid bar number %d\n", barNum);
+ return MV_BAD_PARAM;
+ }
+
+
+ if (pAddrWin->addrWin.size == 0)
+ {
+		mvOsPrintf("mvPexBarSet: Size zero is illegal\n" );
+ return MV_BAD_PARAM;
+ }
+
+
+ /* Check if the window complies with PEX spec */
+ if (MV_TRUE != pexBarIsValid(pAddrWin->addrWin.baseLow,
+ pAddrWin->addrWin.size))
+ {
+ mvOsPrintf("mvPexBarSet: ERR. Target %d window invalid\n", barNum);
+ return MV_BAD_PARAM;
+ }
+
+ /* 2) Check if the requested bar overlaps with current bars */
+ if (MV_TRUE == pexBarOverlapDetect(pexIf,barNum, &pAddrWin->addrWin))
+ {
+ mvOsPrintf("mvPexBarSet: ERR. Target %d overlap\n", barNum);
+ return MV_BAD_PARAM;
+ }
+
+ /* Get size register value according to window size */
+ sizeToReg = ctrlSizeToReg(pAddrWin->addrWin.size, PXBCR_BAR_SIZE_ALIGNMENT);
+
+ /* Read bar size */
+ if (PEX_INTER_REGS_BAR != barNum) /* internal registers have no size */
+ {
+ regSize = MV_REG_READ(PEX_BAR_CTRL_REG(pexIf,barNum));
+
+ /* Size parameter validity check. */
+ if (-1 == sizeToReg)
+ {
+ mvOsPrintf("mvPexBarSet: ERR. Target BAR %d size invalid.\n",barNum);
+ return MV_BAD_PARAM;
+ }
+
+ regSize &= ~PXBCR_BAR_SIZE_MASK;
+ regSize |= (sizeToReg << PXBCR_BAR_SIZE_OFFS) ;
+
+ MV_REG_WRITE(PEX_BAR_CTRL_REG(pexIf,barNum),regSize);
+
+ }
+
+ /* set size */
+
+
+
+ /* Read base address low */
+ regBaseLow = MV_REG_READ(PEX_CFG_DIRECT_ACCESS(pexIf,
+ PEX_MV_BAR_BASE(barNum)));
+
+ /* clear current base */
+ if (PEX_INTER_REGS_BAR == barNum)
+ {
+ regBaseLow &= ~PXBIR_BASE_MASK;
+ regBaseLow |= (pAddrWin->addrWin.baseLow & PXBIR_BASE_MASK);
+ }
+ else
+ {
+ regBaseLow &= ~PXBR_BASE_MASK;
+ regBaseLow |= (pAddrWin->addrWin.baseLow & PXBR_BASE_MASK);
+ }
+
+	/* if we had a previous value that contains the bar type (Mem/IO), we want
+	to restore it */
+ regBaseLow |= PEX_BAR_DEFAULT_ATTRIB;
+
+
+
+ /* write base low */
+ MV_REG_WRITE(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_MV_BAR_BASE(barNum)),
+ regBaseLow);
+
+ if (pAddrWin->addrWin.baseHigh != 0)
+ {
+ /* Read base address high */
+ MV_REG_WRITE(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_MV_BAR_BASE_HIGH(barNum)),
+ pAddrWin->addrWin.baseHigh);
+
+ }
+
+ /* lastly enable the Bar */
+ if (pAddrWin->enable == MV_TRUE)
+ {
+ if (PEX_INTER_REGS_BAR != barNum) /* internal registers
+ are enabled always */
+ {
+ MV_REG_BIT_SET(PEX_BAR_CTRL_REG(pexIf,barNum),PXBCR_BAR_EN);
+ }
+ }
+ else if (MV_FALSE == pAddrWin->enable)
+ {
+ if (PEX_INTER_REGS_BAR != barNum) /* internal registers
+ are enabled always */
+ {
+ MV_REG_BIT_RESET(PEX_BAR_CTRL_REG(pexIf,barNum),PXBCR_BAR_EN);
+ }
+
+ }
+
+
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPexBarGet - Get PEX bar address and size
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+
+MV_STATUS mvPexBarGet(MV_U32 pexIf,
+ MV_U32 barNum,
+ MV_PEX_BAR *pAddrWin)
+{
+ /* check parameters */
+ if(pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexBarGet: ERR. Invalid PEX interface %d\n", pexIf);
+ return MV_BAD_PARAM;
+ }
+
+ if(barNum >= PEX_MAX_BARS)
+ {
+ mvOsPrintf("mvPexBarGet: ERR. Invalid bar number %d\n", barNum);
+ return MV_BAD_PARAM;
+ }
+
+ /* read base low */
+ pAddrWin->addrWin.baseLow =
+ MV_REG_READ(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_MV_BAR_BASE(barNum)));
+
+
+ if (PEX_INTER_REGS_BAR == barNum)
+ {
+ pAddrWin->addrWin.baseLow &= PXBIR_BASE_MASK;
+ }
+ else
+ {
+ pAddrWin->addrWin.baseLow &= PXBR_BASE_MASK;
+ }
+
+
+ /* read base high */
+ pAddrWin->addrWin.baseHigh =
+ MV_REG_READ(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_MV_BAR_BASE_HIGH(barNum)));
+
+
+ /* Read bar size */
+ if (PEX_INTER_REGS_BAR != barNum) /* internal registers have no size */
+ {
+ pAddrWin->addrWin.size = MV_REG_READ(PEX_BAR_CTRL_REG(pexIf,barNum));
+
+ /* check if enable or not */
+ if (pAddrWin->addrWin.size & PXBCR_BAR_EN)
+ {
+ pAddrWin->enable = MV_TRUE;
+ }
+ else
+ {
+ pAddrWin->enable = MV_FALSE;
+ }
+
+ /* now get the size */
+ pAddrWin->addrWin.size &= PXBCR_BAR_SIZE_MASK;
+ pAddrWin->addrWin.size >>= PXBCR_BAR_SIZE_OFFS;
+
+ pAddrWin->addrWin.size = ctrlRegToSize(pAddrWin->addrWin.size,
+ PXBCR_BAR_SIZE_ALIGNMENT);
+
+ }
+ else /* PEX_INTER_REGS_BAR */
+ {
+ pAddrWin->addrWin.size = INTER_REGS_SIZE;
+ pAddrWin->enable = MV_TRUE;
+ }
+
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvPexBarEnable -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+
+
+MV_STATUS mvPexBarEnable(MV_U32 pexIf, MV_U32 barNum, MV_BOOL enable)
+{
+
+ MV_PEX_BAR pexBar;
+
+ /* check parameters */
+ if(pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexBarEnable: ERR. Invalid PEX interface %d\n", pexIf);
+ return MV_BAD_PARAM;
+ }
+
+
+ if(barNum >= PEX_MAX_BARS)
+ {
+ mvOsPrintf("mvPexBarEnable: ERR. Invalid bar number %d\n", barNum);
+ return MV_BAD_PARAM;
+ }
+
+ if (PEX_INTER_REGS_BAR == barNum)
+ {
+ if (MV_TRUE == enable)
+ {
+ return MV_OK;
+ }
+ else
+ {
+ return MV_ERROR;
+ }
+ }
+
+
+ if (MV_FALSE == enable)
+ {
+ /* disable bar and quit */
+ MV_REG_BIT_RESET(PEX_BAR_CTRL_REG(pexIf,barNum),PXBCR_BAR_EN);
+ return MV_OK;
+ }
+
+ /* else */
+
+ if (mvPexBarGet(pexIf,barNum,&pexBar) != MV_OK)
+ {
+ mvOsPrintf("mvPexBarEnable: mvPexBarGet Failed\n");
+ return MV_ERROR;
+
+ }
+
+ if (MV_TRUE == pexBar.enable)
+ {
+		/* it is already enabled */
+ return MV_OK;
+ }
+
+ /* else enable the bar*/
+
+ pexBar.enable = MV_TRUE;
+
+ if (mvPexBarSet(pexIf,barNum,&pexBar) != MV_OK)
+ {
+ mvOsPrintf("mvPexBarEnable: mvPexBarSet Failed\n");
+ return MV_ERROR;
+
+ }
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* pexWinOverlapDetect - Detect address windows overlapping
+*
+* DESCRIPTION:
+*       This function detects whether a given address window overlaps any of
+*       the currently enabled PEX target windows.
+*
+* INPUT:
+*       pexIf    - PEX interface number.
+*       winNum   - Window number to be skipped during the check.
+*       pAddrWin - Address window to be checked.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlaps the current address
+*       decode map, MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_BOOL pexWinOverlapDetect(MV_U32 pexIf,
+ MV_U32 winNum,
+ MV_ADDR_WIN *pAddrWin)
+{
+ MV_U32 win;
+ MV_PEX_DEC_WIN addrDecWin;
+
+
+ for(win = 0; win < PEX_MAX_TARGET_WIN -2 ; win++)
+ {
+ /* don't check our target or illegal targets */
+ if (winNum == win)
+ {
+ continue;
+ }
+
+ /* Get window parameters */
+ if (MV_OK != mvPexTargetWinGet(pexIf, win, &addrDecWin))
+ {
+ mvOsPrintf("pexWinOverlapDetect: ERR. TargetWinGet failed win=%x\n",
+ win);
+ return MV_ERROR;
+ }
+
+ /* Do not check disabled windows */
+ if (MV_FALSE == addrDecWin.enable)
+ {
+ continue;
+ }
+
+
+ if(MV_TRUE == ctrlWinOverlapTest(pAddrWin, &addrDecWin.addrWin))
+ {
+ mvOsPrintf("pexWinOverlapDetect: winNum %d overlap current %d\n",
+ winNum, win);
+ return MV_TRUE;
+ }
+ }
+
+ return MV_FALSE;
+}
+
+/*******************************************************************************
+* pexIsWinWithinBar - Detect if an address window is within PEX bar boundaries
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_TRUE if the given address window is fully contained within one of
+*       the enabled BARs, MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_BOOL pexIsWinWithinBar(MV_U32 pexIf,
+ MV_ADDR_WIN *pAddrWin)
+{
+ MV_U32 bar;
+ MV_PEX_BAR addrDecWin;
+
+ for(bar = 0; bar < PEX_MAX_BARS; bar++)
+ {
+
+ /* Get window parameters */
+ if (MV_OK != mvPexBarGet(pexIf, bar, &addrDecWin))
+ {
+ mvOsPrintf("pexIsWinWithinBar: ERR. mvPexBarGet failed\n");
+ return MV_ERROR;
+ }
+
+ /* Do not check disabled bars */
+ if (MV_FALSE == addrDecWin.enable)
+ {
+ continue;
+ }
+
+
+ if(MV_TRUE == ctrlWinWithinWinTest(pAddrWin, &addrDecWin.addrWin))
+ {
+ return MV_TRUE;
+ }
+ }
+
+ return MV_FALSE;
+
+}
+
+/*******************************************************************************
+* pexBarOverlapDetect - Detect address windows overlapping
+*
+* DESCRIPTION:
+* This function detects address window overlapping of a given address
+* window in PEX BARs.
+*
+* INPUT:
+*       pexIf    - PEX interface number.
+*       barNum   - BAR number to be skipped during the check.
+*       pAddrWin - Address window to be checked.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlaps a currently enabled BAR,
+*       MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_BOOL pexBarOverlapDetect(MV_U32 pexIf,
+ MV_U32 barNum,
+ MV_ADDR_WIN *pAddrWin)
+{
+ MV_U32 bar;
+ MV_PEX_BAR barDecWin;
+
+
+ for(bar = 0; bar < PEX_MAX_BARS; bar++)
+ {
+ /* don't check our target or illegal targets */
+ if (barNum == bar)
+ {
+ continue;
+ }
+
+ /* Get window parameters */
+ if (MV_OK != mvPexBarGet(pexIf, bar, &barDecWin))
+ {
+ mvOsPrintf("pexBarOverlapDetect: ERR. TargetWinGet failed\n");
+ return MV_ERROR;
+ }
+
+		/* don't check disabled bars */
+ if (barDecWin.enable == MV_FALSE)
+ {
+ continue;
+ }
+
+
+ if(MV_TRUE == ctrlWinOverlapTest(pAddrWin, &barDecWin.addrWin))
+ {
+ mvOsPrintf("pexBarOverlapDetect: winNum %d overlap current %d\n",
+ barNum, bar);
+ return MV_TRUE;
+ }
+ }
+
+ return MV_FALSE;
+}
+
+/*******************************************************************************
+* pexBarIsValid - Check if the given address window is valid
+*
+* DESCRIPTION:
+* 		The PEX spec restricts the BAR base to be aligned to the BAR size.
+* This function checks if the given address window is valid.
+*
+* INPUT:
+* baseLow - 32bit low base address.
+* size - Window size.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE if the address window is valid, MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_STATUS pexBarIsValid(MV_U32 baseLow, MV_U32 size)
+{
+
+	/* the PCI spec restricts the BAR base to be aligned to the BAR size */
+ if(MV_IS_NOT_ALIGN(baseLow, size))
+ {
+ return MV_ERROR;
+ }
+ else
+ {
+ return MV_TRUE;
+ }
+
+ return MV_TRUE;
+}
+
+/*******************************************************************************
+* pexWinRegInfoGet - Get window register information
+*
+* DESCRIPTION:
+* 		PEX window register offsets are not consecutive.
+*		This function gets PEX window register information, such as the
+*		register offsets, for the given window.
+*
+* INPUT:
+* pexIf - PEX interface number.
+*       winNum      - The PEX window in question.
+*
+* OUTPUT:
+*       pWinRegInfo - Window register info struct.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+static MV_STATUS pexWinRegInfoGet(MV_U32 pexIf,
+ MV_U32 winNum,
+ PEX_WIN_REG_INFO *pWinRegInfo)
+{
+
+	if (winNum <= 3)	/* winNum is unsigned, so no lower-bound check is needed */
+ {
+ pWinRegInfo->baseLowRegOffs = PEX_WIN0_3_BASE_REG(pexIf,winNum);
+ pWinRegInfo->baseHighRegOffs = 0;
+ pWinRegInfo->sizeRegOffs = PEX_WIN0_3_CTRL_REG(pexIf,winNum);
+ pWinRegInfo->remapLowRegOffs = PEX_WIN0_3_REMAP_REG(pexIf,winNum);
+ pWinRegInfo->remapHighRegOffs = 0;
+ }
+ else if ((winNum >= 4)&&(winNum <=5))
+ {
+ pWinRegInfo->baseLowRegOffs = PEX_WIN4_5_BASE_REG(pexIf,winNum);
+ pWinRegInfo->baseHighRegOffs = 0;
+ pWinRegInfo->sizeRegOffs = PEX_WIN4_5_CTRL_REG(pexIf,winNum);
+ pWinRegInfo->remapLowRegOffs = PEX_WIN4_5_REMAP_REG(pexIf,winNum);
+ pWinRegInfo->remapHighRegOffs = PEX_WIN4_5_REMAP_HIGH_REG(pexIf,winNum);
+
+ }
+ else if (MV_PEX_WIN_DEFAULT == winNum)
+ {
+ pWinRegInfo->baseLowRegOffs = 0;
+ pWinRegInfo->baseHighRegOffs = 0;
+ pWinRegInfo->sizeRegOffs = PEX_WIN_DEFAULT_CTRL_REG(pexIf);
+ pWinRegInfo->remapLowRegOffs = 0;
+ pWinRegInfo->remapHighRegOffs = 0;
+ }
+ else if (MV_PEX_WIN_EXP_ROM == winNum)
+ {
+ pWinRegInfo->baseLowRegOffs = 0;
+ pWinRegInfo->baseHighRegOffs = 0;
+ pWinRegInfo->sizeRegOffs = PEX_WIN_EXP_ROM_CTRL_REG(pexIf);
+ pWinRegInfo->remapLowRegOffs = PEX_WIN_EXP_ROM_REMAP_REG(pexIf);
+ pWinRegInfo->remapHighRegOffs = 0;
+
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* pexBarNameGet - Get the string name of PEX BAR.
+*
+* DESCRIPTION:
+*		This function gets the string name of the PEX BAR.
+*
+* INPUT:
+* bar - PEX bar number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* pointer to the string name of PEX BAR.
+*
+*******************************************************************************/
+const MV_8* pexBarNameGet( MV_U32 bar )
+{
+ switch( bar )
+ {
+ case PEX_INTER_REGS_BAR:
+ return "Internal Regs Bar0....";
+ case PEX_DRAM_BAR:
+ return "DRAM Bar1.............";
+ case PEX_DEVICE_BAR:
+ return "Devices Bar2..........";
+ default:
+ return "Bar unknown";
+ }
+}
+/*******************************************************************************
+* mvPexAddrDecShow - Print the PEX address decode map (BARs and windows).
+*
+* DESCRIPTION:
+*		This function prints the PEX address decode map (BARs and windows).
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID mvPexAddrDecShow(MV_VOID)
+{
+ MV_PEX_BAR pexBar;
+ MV_PEX_DEC_WIN win;
+ MV_U32 pexIf;
+ MV_U32 bar,winNum;
+
+ for( pexIf = 0; pexIf < mvCtrlPexMaxIfGet(); pexIf++ )
+ {
+ if (MV_FALSE == mvCtrlPwrClckGet(PEX_UNIT_ID, pexIf)) continue;
+ mvOsOutput( "\n" );
+ mvOsOutput( "PEX%d:\n", pexIf );
+ mvOsOutput( "-----\n" );
+
+ mvOsOutput( "\nPex Bars \n\n");
+
+ for( bar = 0; bar < PEX_MAX_BARS; bar++ )
+ {
+ memset( &pexBar, 0, sizeof(MV_PEX_BAR) );
+
+ mvOsOutput( "%s ", pexBarNameGet(bar) );
+
+ if( mvPexBarGet( pexIf, bar, &pexBar ) == MV_OK )
+ {
+ if( pexBar.enable )
+ {
+ mvOsOutput( "base %08x, ", pexBar.addrWin.baseLow );
+ mvSizePrint( pexBar.addrWin.size );
+ mvOsOutput( "\n" );
+ }
+ else
+ mvOsOutput( "disable\n" );
+ }
+ }
+ mvOsOutput( "\nPex Decode Windows\n\n");
+
+ for( winNum = 0; winNum < PEX_MAX_TARGET_WIN - 2; winNum++)
+ {
+ memset( &win, 0,sizeof(MV_PEX_DEC_WIN) );
+
+ mvOsOutput( "win%d - ", winNum );
+
+ if ( mvPexTargetWinGet(pexIf,winNum,&win) == MV_OK)
+ {
+ if (win.enable)
+ {
+ mvOsOutput( "%s base %08x, ",
+ mvCtrlTargetNameGet(win.target), win.addrWin.baseLow );
+ mvOsOutput( "...." );
+ mvSizePrint( win.addrWin.size );
+
+ mvOsOutput( "\n" );
+ }
+ else
+ mvOsOutput( "disable\n" );
+
+
+ }
+ }
+
+ memset( &win, 0,sizeof(MV_PEX_DEC_WIN) );
+
+ mvOsOutput( "default win - " );
+
+ if ( mvPexTargetWinGet(pexIf, MV_PEX_WIN_DEFAULT, &win) == MV_OK)
+ {
+ mvOsOutput( "%s ",
+ mvCtrlTargetNameGet(win.target) );
+ mvOsOutput( "\n" );
+ }
+ memset( &win, 0,sizeof(MV_PEX_DEC_WIN) );
+
+ mvOsOutput( "Expansion ROM - " );
+
+ if ( mvPexTargetWinGet(pexIf, MV_PEX_WIN_EXP_ROM, &win) == MV_OK)
+ {
+ mvOsOutput( "%s ",
+ mvCtrlTargetNameGet(win.target) );
+ mvOsOutput( "\n" );
+ }
+
+ }
+}
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.h
new file mode 100644
index 000000000..3505613b7
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.h
@@ -0,0 +1,348 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCSysPEXH
+#define __INCSysPEXH
+
+#include "mvCommon.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+/* 4KB granularity */
+#define MINIMUM_WINDOW_SIZE 0x1000
+#define MINIMUM_BAR_SIZE 0x1000
+#define MINIMUM_BAR_SIZE_MASK 0xFFFFF000
+#define BAR_SIZE_OFFS 12
+#define BAR_SIZE_MASK (0xFFFFF << BAR_SIZE_OFFS)
+
+
+
+#define MV_PEX_WIN_DEFAULT 6
+#define MV_PEX_WIN_EXP_ROM 7
+#define PEX_MAX_TARGET_WIN 8
+
+
+#define PEX_MAX_BARS 3
+#define PEX_INTER_REGS_BAR 0
+#define PEX_DRAM_BAR 1
+#define PEX_DEVICE_BAR 2
+
+/*************************************/
+/* PCI Express BAR Control Registers */
+/*************************************/
+#define PEX_BAR_CTRL_REG(pexIf,bar) (0x41804 + (bar-1)*4- (pexIf)*0x10000)
+#define PEX_EXP_ROM_BAR_CTRL_REG(pexIf) (0x4180C - (pexIf)*0x10000)
+
+
+/* PCI Express BAR Control Register */
+/* PEX_BAR_CTRL_REG (PXBCR) */
+
+#define PXBCR_BAR_EN BIT0
+#define PXBCR_BAR_SIZE_OFFS 16
+#define PXBCR_BAR_SIZE_MASK (0xffff << PXBCR_BAR_SIZE_OFFS)
+#define PXBCR_BAR_SIZE_ALIGNMENT 0x10000
+
+
+
+/* PCI Express Expansion ROM BAR Control Register */
+/* PEX_EXP_ROM_BAR_CTRL_REG (PXERBCR) */
+
+#define PXERBCR_EXPROM_EN BIT0
+#define PXERBCR_EXPROMSZ_OFFS 19
+#define PXERBCR_EXPROMSZ_MASK (0xf << PXERBCR_EXPROMSZ_OFFS)
+#define PXERBCR_EXPROMSZ_512KB (0x0 << PXERBCR_EXPROMSZ_OFFS)
+#define PXERBCR_EXPROMSZ_1024KB (0x1 << PXERBCR_EXPROMSZ_OFFS)
+#define PXERBCR_EXPROMSZ_2048KB (0x3 << PXERBCR_EXPROMSZ_OFFS)
+#define PXERBCR_EXPROMSZ_4096KB (0x7 << PXERBCR_EXPROMSZ_OFFS)
+
+/************************************************/
+/* PCI Express Address Window Control Registers */
+/************************************************/
+#define PEX_WIN0_3_CTRL_REG(pexIf,winNum) \
+ (0x41820 + (winNum) * 0x10 - (pexIf) * 0x10000)
+#define PEX_WIN0_3_BASE_REG(pexIf,winNum) \
+ (0x41824 + (winNum) * 0x10 - (pexIf) * 0x10000)
+#define PEX_WIN0_3_REMAP_REG(pexIf,winNum) \
+ (0x4182C + (winNum) * 0x10 - (pexIf) * 0x10000)
+#define PEX_WIN4_5_CTRL_REG(pexIf,winNum) \
+ (0x41860 + (winNum - 4) * 0x20 - (pexIf) * 0x10000)
+#define PEX_WIN4_5_BASE_REG(pexIf,winNum) \
+ (0x41864 + (winNum - 4) * 0x20 - (pexIf) * 0x10000)
+#define PEX_WIN4_5_REMAP_REG(pexIf,winNum) \
+ (0x4186C + (winNum - 4) * 0x20 - (pexIf) * 0x10000)
+#define PEX_WIN4_5_REMAP_HIGH_REG(pexIf,winNum) \
+ (0x41870 + (winNum - 4) * 0x20 - (pexIf) * 0x10000)
+
+#define PEX_WIN_DEFAULT_CTRL_REG(pexIf) (0x418B0 - (pexIf) * 0x10000)
+#define PEX_WIN_EXP_ROM_CTRL_REG(pexIf) (0x418C0 - (pexIf) * 0x10000)
+#define PEX_WIN_EXP_ROM_REMAP_REG(pexIf) (0x418C4 - (pexIf) * 0x10000)
+
+/* PCI Express Window Control Register */
+/* PEX_WIN_CTRL_REG (PXWCR) */
+
+#define PXWCR_WIN_EN BIT0 /* Window Enable.*/
+
+#define PXWCR_WIN_BAR_MAP_OFFS 1 /* Mapping to BAR.*/
+#define PXWCR_WIN_BAR_MAP_MASK BIT1
+#define PXWCR_WIN_BAR_MAP_BAR1 (0 << PXWCR_WIN_BAR_MAP_OFFS)
+#define PXWCR_WIN_BAR_MAP_BAR2 (1 << PXWCR_WIN_BAR_MAP_OFFS)
+
+#define PXWCR_TARGET_OFFS 4 /*Unit ID */
+#define PXWCR_TARGET_MASK (0xf << PXWCR_TARGET_OFFS)
+
+#define PXWCR_ATTRIB_OFFS 8 /* target attributes */
+#define PXWCR_ATTRIB_MASK (0xff << PXWCR_ATTRIB_OFFS)
+
+#define PXWCR_SIZE_OFFS 16 /* size */
+#define PXWCR_SIZE_MASK (0xffff << PXWCR_SIZE_OFFS)
+#define PXWCR_SIZE_ALIGNMENT 0x10000
+
+/* PCI Express Window Base Register */
+/* PEX_WIN_BASE_REG (PXWBR)*/
+
+#define PXWBR_BASE_OFFS 16 /* address[31:16] */
+#define PXWBR_BASE_MASK (0xffff << PXWBR_BASE_OFFS)
+#define PXWBR_BASE_ALIGNMENT 0x10000
+
+/* PCI Express Window Remap Register */
+/* PEX_WIN_REMAP_REG (PXWRR)*/
+
+#define PXWRR_REMAP_EN BIT0
+#define PXWRR_REMAP_OFFS 16
+#define PXWRR_REMAP_MASK (0xffff << PXWRR_REMAP_OFFS)
+#define PXWRR_REMAP_ALIGNMENT 0x10000
+
+/* PCI Express Window Remap (High) Register */
+/* PEX_WIN_REMAP_HIGH_REG (PXWRHR)*/
+
+#define PXWRHR_REMAP_HIGH_OFFS 0
+#define PXWRHR_REMAP_HIGH_MASK (0xffffffff << PXWRHR_REMAP_HIGH_OFFS)
+
+/* PCI Express Default Window Control Register */
+/* PEX_WIN_DEFAULT_CTRL_REG (PXWDCR) */
+
+#define PXWDCR_TARGET_OFFS 4 /*Unit ID */
+#define PXWDCR_TARGET_MASK (0xf << PXWDCR_TARGET_OFFS)
+#define PXWDCR_ATTRIB_OFFS 8 /* target attributes */
+#define PXWDCR_ATTRIB_MASK (0xff << PXWDCR_ATTRIB_OFFS)
+
+/* PCI Express Expansion ROM Window Control Register */
+/* PEX_WIN_EXP_ROM_CTRL_REG (PXWERCR)*/
+
+#define PXWERCR_TARGET_OFFS 4 /*Unit ID */
+#define PXWERCR_TARGET_MASK (0xf << PXWERCR_TARGET_OFFS)
+#define PXWERCR_ATTRIB_OFFS 8 /* target attributes */
+#define PXWERCR_ATTRIB_MASK (0xff << PXWERCR_ATTRIB_OFFS)
+
+/* PCI Express Expansion ROM Window Remap Register */
+/* PEX_WIN_EXP_ROM_REMAP_REG (PXWERRR)*/
+
+#define PXWERRR_REMAP_EN BIT0
+#define PXWERRR_REMAP_OFFS 16
+#define PXWERRR_REMAP_MASK (0xffff << PXWERRR_REMAP_OFFS)
+#define PXWERRR_REMAP_ALIGNMENT 0x10000
+
+
+
+/*PEX_MEMORY_BAR_BASE_ADDR(barNum) (PXMBBA)*/
+/* PCI Express BAR0 Internal Register*/
+/*PEX BAR0_INTER_REG (PXBIR)*/
+
+#define PXBIR_IOSPACE BIT0 /* Memory Space Indicator */
+
+#define PXBIR_TYPE_OFFS 1 /* BAR Type/Init Val. */
+#define PXBIR_TYPE_MASK (0x3 << PXBIR_TYPE_OFFS)
+#define PXBIR_TYPE_32BIT_ADDR (0x0 << PXBIR_TYPE_OFFS)
+#define PXBIR_TYPE_64BIT_ADDR (0x2 << PXBIR_TYPE_OFFS)
+
+#define PXBIR_PREFETCH_EN BIT3 /* Prefetch Enable */
+
+#define PXBIR_BASE_OFFS 20 /* Base address. Address bits [31:20] */
+#define PXBIR_BASE_MASK (0xfff << PXBIR_BASE_OFFS)
+#define PXBIR_BASE_ALIGNMET (1 << PXBIR_BASE_OFFS)
+
+
+/* PCI Express BAR0 Internal (High) Register*/
+/*PEX BAR0_INTER_REG_HIGH (PXBIRH)*/
+
+#define PXBIRH_BASE_OFFS 0 /* Base address. Bits [63:32] */
+#define PXBIRH_BASE_MASK			(0xffffffff << PXBIRH_BASE_OFFS)
+
+
+#define PEX_BAR_DEFAULT_ATTRIB 0xc /* Memory - Prefetch - 64 bit address */
+#define PEX_BAR0_DEFAULT_ATTRIB PEX_BAR_DEFAULT_ATTRIB
+#define PEX_BAR1_DEFAULT_ATTRIB PEX_BAR_DEFAULT_ATTRIB
+#define PEX_BAR2_DEFAULT_ATTRIB PEX_BAR_DEFAULT_ATTRIB
+
+
+/* PCI Express BAR1 Register */
+/* PCI Express BAR2 Register*/
+/*PEX BAR1_REG (PXBR)*/
+/*PEX BAR2_REG (PXBR)*/
+
+#define PXBR_IOSPACE BIT0 /* Memory Space Indicator */
+
+#define PXBR_TYPE_OFFS 1 /* BAR Type/Init Val. */
+#define PXBR_TYPE_MASK (0x3 << PXBR_TYPE_OFFS)
+#define PXBR_TYPE_32BIT_ADDR (0x0 << PXBR_TYPE_OFFS)
+#define PXBR_TYPE_64BIT_ADDR (0x2 << PXBR_TYPE_OFFS)
+
+#define PXBR_PREFETCH_EN BIT3 /* Prefetch Enable */
+
+#define PXBR_BASE_OFFS 16 /* Base address. Address bits [31:16] */
+#define PXBR_BASE_MASK (0xffff << PXBR_BASE_OFFS)
+#define PXBR_BASE_ALIGNMET (1 << PXBR_BASE_OFFS)
+
+
+/* PCI Express BAR1 (High) Register*/
+/* PCI Express BAR2 (High) Register*/
+/*PEX BAR1_REG_HIGH (PXBRH)*/
+/*PEX BAR2_REG_HIGH (PXBRH)*/
+
+#define PXBRH_BASE_OFFS 0 /* Base address. Address bits [63:32] */
+#define PXBRH_BASE_MASK (0xffffffff << PXBRH_BASE_OFFS)
+
+/* PCI Express Expansion ROM BAR Register*/
+/*PEX_EXPANSION_ROM_BASE_ADDR_REG (PXERBAR)*/
+
+#define PXERBAR_EXPROMEN BIT0 /* Expansion ROM Enable */
+
+#define PXERBAR_BASE_512K_OFFS 19 /* Expansion ROM Base Address */
+#define PXERBAR_BASE_512K_MASK (0x1fff << PXERBAR_BASE_512K_OFFS)
+
+#define PXERBAR_BASE_1MB_OFFS 20 /* Expansion ROM Base Address */
+#define PXERBAR_BASE_1MB_MASK (0xfff << PXERBAR_BASE_1MB_OFFS)
+
+#define PXERBAR_BASE_2MB_OFFS 21 /* Expansion ROM Base Address */
+#define PXERBAR_BASE_2MB_MASK (0x7ff << PXERBAR_BASE_2MB_OFFS)
+
+#define PXERBAR_BASE_4MB_OFFS 22 /* Expansion ROM Base Address */
+#define PXERBAR_BASE_4MB_MASK (0x3ff << PXERBAR_BASE_4MB_OFFS)
+
+/* PEX Bar attributes */
+typedef struct _mvPexBar
+{
+ MV_ADDR_WIN addrWin; /* An address window*/
+ MV_BOOL enable; /* Address decode window is enabled/disabled */
+
+}MV_PEX_BAR;
+
+/* PEX Remap Window attributes */
+typedef struct _mvPexRemapWin
+{
+ MV_ADDR_WIN addrWin; /* An address window*/
+ MV_BOOL enable; /* Address decode window is enabled/disabled */
+
+}MV_PEX_REMAP_WIN;
+
+/* PEX Remap Window attributes */
+typedef struct _mvPexDecWin
+{
+ MV_TARGET target;
+ MV_ADDR_WIN addrWin; /* An address window*/
+ MV_U32 targetBar;
+ MV_U8 attrib; /* chip select attributes */
+ MV_TARGET_ID targetId; /* Target Id of this MV_TARGET */
+ MV_BOOL enable; /* Address decode window is enabled/disabled */
+
+}MV_PEX_DEC_WIN;
+
+/* Global Functions prototypes */
+/* mvPexInit - Initialize PEX interfaces*/
+MV_STATUS mvPexInit(MV_U32 pexIf, MV_PEX_TYPE pexType);
+
+
+/* mvPexTargetWinSet - Set PEX to peripheral target address window BAR*/
+MV_STATUS mvPexTargetWinSet(MV_U32 pexIf, MV_U32 winNum,
+ MV_PEX_DEC_WIN *pAddrDecWin);
+
+/* mvPexTargetWinGet - Get PEX to peripheral target address window*/
+MV_STATUS mvPexTargetWinGet(MV_U32 pexIf, MV_U32 winNum,
+ MV_PEX_DEC_WIN *pAddrDecWin);
+
+/* mvPexTargetWinEnable - Enable/disable a PEX BAR window*/
+MV_STATUS mvPexTargetWinEnable(MV_U32 pexIf,MV_U32 winNum, MV_BOOL enable);
+
+/* mvPexTargetWinRemap - Set PEX to target address window remap.*/
+MV_STATUS mvPexTargetWinRemap(MV_U32 pexIf, MV_U32 winNum,
+ MV_PEX_REMAP_WIN *pAddrWin);
+
+/* mvPexTargetWinRemapEnable -enable\disable a PEX Window remap.*/
+MV_STATUS mvPexTargetWinRemapEnable(MV_U32 pexIf, MV_U32 winNum,
+ MV_BOOL enable);
+
+/* mvPexBarSet - Set PEX bar address and size */
+MV_STATUS mvPexBarSet(MV_U32 pexIf, MV_U32 barNum, MV_PEX_BAR *addrWin);
+
+/* mvPexBarGet - Get PEX bar address and size */
+MV_STATUS mvPexBarGet(MV_U32 pexIf, MV_U32 barNum, MV_PEX_BAR *addrWin);
+
+/* mvPexBarEnable - enable\disable a PEX bar*/
+MV_STATUS mvPexBarEnable(MV_U32 pexIf, MV_U32 barNum, MV_BOOL enable);
+
+/* mvPexAddrDecShow - Display address decode windows attributes */
+MV_VOID mvPexAddrDecShow(MV_VOID);
+
+#endif
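As a quick orientation for the window/BAR API declared above, the following is an illustrative sketch of how a board-support routine might map PEX window 0 onto DRAM through the DRAM BAR. It is editorial and not part of the committed file: the include path, the SDRAM_CS0 target constant, the 256MB size, and leaving attrib/targetId for the HAL to derive are assumptions based on the other mvSys* units in this diff.

    /* Illustrative sketch only -- not part of the committed mvSysPEX sources. */
    #include "ctrlEnv/sys/mvSysPEX.h"   /* include path assumed */

    static MV_STATUS examplePexWin0ToDram(MV_U32 pexIf)
    {
    	MV_PEX_DEC_WIN win;

    	win.target           = SDRAM_CS0;      /* assumed target constant        */
    	win.addrWin.baseLow  = 0x00000000;     /* window base, low 32 bits       */
    	win.addrWin.baseHigh = 0x00000000;
    	win.addrWin.size     = 0x10000000;     /* 256MB; base must be size-aligned */
    	win.targetBar        = PEX_DRAM_BAR;   /* route the window via the DRAM BAR */
    	win.enable           = MV_TRUE;
    	/* attrib/targetId are assumed to be filled in by the HAL from 'target' */

    	if (mvPexTargetWinSet(pexIf, 0, &win) != MV_OK)
    		return MV_ERROR;

    	/* read the window back to confirm what was actually programmed */
    	return mvPexTargetWinGet(pexIf, 0, &win);
    }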
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.c
new file mode 100644
index 000000000..f100a12d1
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.c
@@ -0,0 +1,430 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#include "mvTypes.h"
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "cpu/mvCpu.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "sata/CoreDriver/mvRegs.h"
+#include "ctrlEnv/sys/mvSysSata.h"
+
+MV_TARGET sataAddrDecPrioTab[] =
+{
+#if defined(MV_INCLUDE_SDRAM_CS0)
+ SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+ SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+ SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+ SDRAM_CS3,
+#endif
+#if defined(MV_INCLUDE_PEX)
+ PEX0_MEM,
+#endif
+ TBL_TERM
+};
+
+
+/*******************************************************************************
+* sataWinOverlapDetect - Detect SATA address windows overlapping
+*
+* DESCRIPTION:
+*	Unpredictable behaviour is expected in case SATA address decode
+*	windows overlap.
+* This function detects SATA address decode windows overlapping of a
+* specified window. The function does not check the window itself for
+*	overlapping. The function also skips disabled address decode windows.
+*
+* INPUT:
+* winNum - address decode window number.
+* pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE if the given address window overlap current address
+* decode map, MV_FALSE otherwise, MV_ERROR if reading invalid data
+* from registers.
+*
+*******************************************************************************/
+static MV_STATUS sataWinOverlapDetect(int dev, MV_U32 winNum,
+ MV_ADDR_WIN *pAddrWin)
+{
+ MV_U32 winNumIndex;
+ MV_SATA_DEC_WIN addrDecWin;
+
+ for(winNumIndex=0; winNumIndex<MV_SATA_MAX_ADDR_DECODE_WIN; winNumIndex++)
+ {
+ /* Do not check window itself */
+ if (winNumIndex == winNum)
+ {
+ continue;
+ }
+
+ /* Get window parameters */
+ if (MV_OK != mvSataWinGet(dev, winNumIndex, &addrDecWin))
+ {
+ mvOsPrintf("%s: ERR. TargetWinGet failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ /* Do not check disabled windows */
+ if(addrDecWin.enable == MV_FALSE)
+ {
+ continue;
+ }
+
+ if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+ {
+ return MV_TRUE;
+ }
+ }
+ return MV_FALSE;
+}
+
+
+/*******************************************************************************
+* mvSataWinSet - Set SATA target address window
+*
+* DESCRIPTION:
+* This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+* address window, also known as address decode window.
+* After setting this target window, the SATA will be able to access the
+* target within the address window.
+*
+* INPUT:
+* winNum - SATA target address decode window number.
+* pAddrDecWin - SATA target window data structure.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*	MV_ERROR if address window overlaps with other address decode windows.
+* MV_BAD_PARAM if base address is invalid parameter or target is
+* unknown.
+*
+*******************************************************************************/
+MV_STATUS mvSataWinSet(int dev, MV_U32 winNum, MV_SATA_DEC_WIN *pAddrDecWin)
+{
+ MV_TARGET_ATTRIB targetAttribs;
+ MV_DEC_REGS decRegs;
+
+ /* Parameter checking */
+ if (winNum >= MV_SATA_MAX_ADDR_DECODE_WIN)
+ {
+ mvOsPrintf("%s: ERR. Invalid win num %d\n",__FUNCTION__, winNum);
+ return MV_BAD_PARAM;
+ }
+
+	/* Check if the requested window overlaps with current windows */
+ if (MV_TRUE == sataWinOverlapDetect(dev, winNum, &pAddrDecWin->addrWin))
+ {
+ mvOsPrintf("%s: ERR. Window %d overlap\n", __FUNCTION__, winNum);
+ return MV_ERROR;
+ }
+
+ /* check if address is aligned to the size */
+ if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+ {
+ mvOsPrintf("mvSataWinSet:Error setting SATA window %d to "\
+ "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+ winNum,
+ mvCtrlTargetNameGet(pAddrDecWin->target),
+ pAddrDecWin->addrWin.baseLow,
+ pAddrDecWin->addrWin.size);
+ return MV_ERROR;
+ }
+
+ decRegs.baseReg = 0;
+ decRegs.sizeReg = 0;
+
+ if (MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+ {
+ mvOsPrintf("%s: mvCtrlAddrDecToReg Failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ mvCtrlAttribGet(pAddrDecWin->target, &targetAttribs);
+
+ /* set attributes */
+ decRegs.sizeReg &= ~MV_SATA_WIN_ATTR_MASK;
+ decRegs.sizeReg |= (targetAttribs.attrib << MV_SATA_WIN_ATTR_OFFSET);
+
+ /* set target ID */
+ decRegs.sizeReg &= ~MV_SATA_WIN_TARGET_MASK;
+ decRegs.sizeReg |= (targetAttribs.targetId << MV_SATA_WIN_TARGET_OFFSET);
+
+ if (pAddrDecWin->enable == MV_TRUE)
+ {
+ decRegs.sizeReg |= MV_SATA_WIN_ENABLE_MASK;
+ }
+ else
+ {
+ decRegs.sizeReg &= ~MV_SATA_WIN_ENABLE_MASK;
+ }
+
+ MV_REG_WRITE( MV_SATA_WIN_CTRL_REG(dev, winNum), decRegs.sizeReg);
+ MV_REG_WRITE( MV_SATA_WIN_BASE_REG(dev, winNum), decRegs.baseReg);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvSataWinGet - Get SATA peripheral target address window.
+*
+* DESCRIPTION:
+* Get SATA peripheral target address window.
+*
+* INPUT:
+* winNum - SATA target address decode window number.
+*
+* OUTPUT:
+* pAddrDecWin - SATA target window data structure.
+*
+* RETURN:
+* MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvSataWinGet(int dev, MV_U32 winNum, MV_SATA_DEC_WIN *pAddrDecWin)
+{
+ MV_DEC_REGS decRegs;
+ MV_TARGET_ATTRIB targetAttrib;
+
+ /* Parameter checking */
+ if (winNum >= MV_SATA_MAX_ADDR_DECODE_WIN)
+ {
+ mvOsPrintf("%s (dev=%d): ERR. Invalid winNum %d\n",
+ __FUNCTION__, dev, winNum);
+ return MV_NOT_SUPPORTED;
+ }
+
+ decRegs.baseReg = MV_REG_READ( MV_SATA_WIN_BASE_REG(dev, winNum) );
+ decRegs.sizeReg = MV_REG_READ( MV_SATA_WIN_CTRL_REG(dev, winNum) );
+
+ if (MV_OK != mvCtrlRegToAddrDec(&decRegs, &pAddrDecWin->addrWin) )
+ {
+ mvOsPrintf("%s: mvCtrlRegToAddrDec Failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ /* attrib and targetId */
+ targetAttrib.attrib = (decRegs.sizeReg & MV_SATA_WIN_ATTR_MASK) >>
+ MV_SATA_WIN_ATTR_OFFSET;
+ targetAttrib.targetId = (decRegs.sizeReg & MV_SATA_WIN_TARGET_MASK) >>
+ MV_SATA_WIN_TARGET_OFFSET;
+
+ pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+ /* Check if window is enabled */
+ if(decRegs.sizeReg & MV_SATA_WIN_ENABLE_MASK)
+ {
+ pAddrDecWin->enable = MV_TRUE;
+ }
+ else
+ {
+ pAddrDecWin->enable = MV_FALSE;
+ }
+ return MV_OK;
+}
+/*******************************************************************************
+* mvSataAddrDecShow - Print the SATA address decode map.
+*
+* DESCRIPTION:
+*	This function prints the SATA address decode map.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID mvSataAddrDecShow(MV_VOID)
+{
+
+ MV_SATA_DEC_WIN win;
+ int i,j;
+
+
+
+ for( j = 0; j < MV_SATA_MAX_CHAN; j++ )
+ {
+ if (MV_FALSE == mvCtrlPwrClckGet(SATA_UNIT_ID, j))
+ return;
+
+ mvOsOutput( "\n" );
+ mvOsOutput( "SATA %d:\n", j );
+ mvOsOutput( "----\n" );
+
+ for( i = 0; i < MV_SATA_MAX_ADDR_DECODE_WIN; i++ )
+ {
+ memset( &win, 0, sizeof(MV_SATA_DEC_WIN) );
+
+ mvOsOutput( "win%d - ", i );
+
+ if( mvSataWinGet(j, i, &win ) == MV_OK )
+ {
+ if( win.enable )
+ {
+ mvOsOutput( "%s base %08x, ",
+ mvCtrlTargetNameGet(win.target), win.addrWin.baseLow );
+ mvOsOutput( "...." );
+
+ mvSizePrint( win.addrWin.size );
+
+ mvOsOutput( "\n" );
+ }
+ else
+ mvOsOutput( "disable\n" );
+ }
+ }
+ }
+}
+
+
+/*******************************************************************************
+* mvSataWinInit - Initialize the integrated SATA target address window.
+*
+* DESCRIPTION:
+* Initialize the SATA peripheral target address window.
+*
+* INPUT:
+*
+*
+* OUTPUT:
+*
+*
+* RETURN:
+* MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvSataWinInit(MV_VOID)
+{
+ int winNum;
+ MV_SATA_DEC_WIN sataWin;
+ MV_CPU_DEC_WIN cpuAddrDecWin;
+ MV_U32 status, winPrioIndex = 0;
+
+ /* Initiate Sata address decode */
+
+ /* First disable all address decode windows */
+ for(winNum = 0; winNum < MV_SATA_MAX_ADDR_DECODE_WIN; winNum++)
+ {
+ MV_U32 regVal = MV_REG_READ(MV_SATA_WIN_CTRL_REG(0, winNum));
+ regVal &= ~MV_SATA_WIN_ENABLE_MASK;
+ MV_REG_WRITE(MV_SATA_WIN_CTRL_REG(0, winNum), regVal);
+ }
+
+ winNum = 0;
+ while( (sataAddrDecPrioTab[winPrioIndex] != TBL_TERM) &&
+ (winNum < MV_SATA_MAX_ADDR_DECODE_WIN) )
+ {
+ /* first get attributes from CPU If */
+ status = mvCpuIfTargetWinGet(sataAddrDecPrioTab[winPrioIndex],
+ &cpuAddrDecWin);
+
+ if(MV_NO_SUCH == status)
+ {
+ winPrioIndex++;
+ continue;
+ }
+ if (MV_OK != status)
+ {
+ mvOsPrintf("%s: ERR. mvCpuIfTargetWinGet failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ if (cpuAddrDecWin.enable == MV_TRUE)
+ {
+ sataWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+ sataWin.addrWin.baseLow = cpuAddrDecWin.addrWin.baseLow;
+ sataWin.addrWin.size = cpuAddrDecWin.addrWin.size;
+ sataWin.enable = MV_TRUE;
+ sataWin.target = sataAddrDecPrioTab[winPrioIndex];
+
+ if(MV_OK != mvSataWinSet(0/*dev*/, winNum, &sataWin))
+ {
+ return MV_ERROR;
+ }
+ winNum++;
+ }
+ winPrioIndex++;
+ }
+ return MV_OK;
+}
+
+
+
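mvSataWinSet()/mvSataWinGet() above form a set-and-readback pair. The hedged sketch below shows the calling pattern a SATA bring-up path might follow, using only functions defined in this file; the DRAM base/size values are placeholders, and treating window 0 on device 0 as free is an assumption.

    /* Illustrative sketch only -- not part of the committed mvSysSata.c. */
    #include "ctrlEnv/sys/mvSysSata.h"

    static MV_STATUS exampleSataWindowToDram(void)
    {
    	MV_SATA_DEC_WIN win;

    	win.target           = SDRAM_CS0;      /* taken from sataAddrDecPrioTab  */
    	win.addrWin.baseLow  = 0x00000000;     /* placeholder DRAM base          */
    	win.addrWin.baseHigh = 0x00000000;
    	win.addrWin.size     = 0x10000000;     /* placeholder size, 256MB        */
    	win.enable           = MV_TRUE;

    	/* mvSataWinSet() itself rejects overlapping or unaligned windows */
    	if (mvSataWinSet(0 /* dev */, 0 /* winNum */, &win) != MV_OK)
    		return MV_ERROR;

    	/* read back and confirm the enable bit really stuck */
    	if (mvSataWinGet(0, 0, &win) != MV_OK || win.enable != MV_TRUE)
    		return MV_ERROR;

    	return MV_OK;
    }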
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.h
new file mode 100644
index 000000000..325fb8d06
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.h
@@ -0,0 +1,128 @@
+
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __INCMVSysSataAddrDech
+#define __INCMVSysSataAddrDech
+
+#include "mvCommon.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct _mvSataDecWin
+{
+ MV_TARGET target;
+ MV_ADDR_WIN addrWin; /* An address window*/
+ MV_BOOL enable; /* Address decode window is enabled/disabled */
+
+} MV_SATA_DEC_WIN;
+
+
+#define MV_SATA_MAX_ADDR_DECODE_WIN 4
+
+#define MV_SATA_WIN_CTRL_REG(dev, win) (SATA_REG_BASE + 0x30 + ((win)<<4))
+#define MV_SATA_WIN_BASE_REG(dev, win) (SATA_REG_BASE + 0x34 + ((win)<<4))
+
+/* BITs in Bridge Interrupt Cause and Mask registers */
+#define MV_SATA_ADDR_DECODE_ERROR_BIT 0
+#define MV_SATA_ADDR_DECODE_ERROR_MASK (1<<MV_SATA_ADDR_DECODE_ERROR_BIT)
+
+/* BITs in Windows 0-3 Control and Base Registers */
+#define MV_SATA_WIN_ENABLE_BIT 0
+#define MV_SATA_WIN_ENABLE_MASK (1<<MV_SATA_WIN_ENABLE_BIT)
+
+#define MV_SATA_WIN_TARGET_OFFSET 4
+#define MV_SATA_WIN_TARGET_MASK (0xF<<MV_SATA_WIN_TARGET_OFFSET)
+
+#define MV_SATA_WIN_ATTR_OFFSET 8
+#define MV_SATA_WIN_ATTR_MASK (0xFF<<MV_SATA_WIN_ATTR_OFFSET)
+
+#define MV_SATA_WIN_SIZE_OFFSET 16
+#define MV_SATA_WIN_SIZE_MASK (0xFFFF<<MV_SATA_WIN_SIZE_OFFSET)
+
+#define MV_SATA_WIN_BASE_OFFSET 16
+#define MV_SATA_WIN_BASE_MASK (0xFFFF<<MV_SATA_WIN_BASE_OFFSET)
+
+MV_STATUS mvSataWinGet(int dev, MV_U32 winNum, MV_SATA_DEC_WIN *pAddrDecWin);
+MV_STATUS mvSataWinSet(int dev, MV_U32 winNum, MV_SATA_DEC_WIN *pAddrDecWin);
+MV_STATUS mvSataWinByTargetGet(MV_TARGET target, MV_SATA_DEC_WIN *pAddrDecWin);
+MV_STATUS mvSataWinInit(MV_VOID);
+MV_VOID mvSataAddrDecShow(MV_VOID);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif
+
+
+
+
+
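The mask/offset macros above describe how a single SATA window control register packs the enable, target, attribute and size fields. As a hedged illustration of that layout (assuming MV_REG_READ returns the raw 32-bit register value, as mvSysSata.c uses it), a debug dump of one window could look like this:

    /* Illustrative sketch only -- decodes one SATA window control register. */
    #include "ctrlEnv/sys/mvSysSata.h"

    static void exampleDumpSataWinCtrl(int dev, MV_U32 winNum)
    {
    	MV_U32 ctrl = MV_REG_READ(MV_SATA_WIN_CTRL_REG(dev, winNum));

    	mvOsPrintf("SATA win %u: enable=%u target=0x%x attr=0x%02x size=0x%04x\n",
    		   winNum,
    		   (ctrl & MV_SATA_WIN_ENABLE_MASK) >> MV_SATA_WIN_ENABLE_BIT,
    		   (ctrl & MV_SATA_WIN_TARGET_MASK) >> MV_SATA_WIN_TARGET_OFFSET,
    		   (ctrl & MV_SATA_WIN_ATTR_MASK)   >> MV_SATA_WIN_ATTR_OFFSET,
    		   (ctrl & MV_SATA_WIN_SIZE_MASK)   >> MV_SATA_WIN_SIZE_OFFSET);
    }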
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.c
new file mode 100644
index 000000000..6d2a91924
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.c
@@ -0,0 +1,427 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#include "mvTypes.h"
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "cpu/mvCpu.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "mvRegs.h"
+#include "ctrlEnv/sys/mvSysSdmmc.h"
+
+MV_TARGET sdmmcAddrDecPrioTab[] =
+{
+#if defined(MV_INCLUDE_SDRAM_CS0)
+ SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+ SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+ SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+ SDRAM_CS3,
+#endif
+#if defined(MV_INCLUDE_PEX)
+ PEX0_MEM,
+#endif
+ TBL_TERM
+};
+
+
+/*******************************************************************************
+* sdmmcWinOverlapDetect - Detect SDMMC address windows overlapping
+*
+* DESCRIPTION:
+*	Unpredictable behaviour is expected in case SDMMC address decode
+*	windows overlap.
+* This function detects SDMMC address decode windows overlapping of a
+* specified window. The function does not check the window itself for
+*	overlapping. The function also skips disabled address decode windows.
+*
+* INPUT:
+* winNum - address decode window number.
+* pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE if the given address window overlap current address
+* decode map, MV_FALSE otherwise, MV_ERROR if reading invalid data
+* from registers.
+*
+*******************************************************************************/
+static MV_STATUS sdmmcWinOverlapDetect(int dev, MV_U32 winNum,
+ MV_ADDR_WIN *pAddrWin)
+{
+ MV_U32 winNumIndex;
+ MV_SDMMC_DEC_WIN addrDecWin;
+
+ for(winNumIndex=0; winNumIndex<MV_SDMMC_MAX_ADDR_DECODE_WIN; winNumIndex++)
+ {
+ /* Do not check window itself */
+ if (winNumIndex == winNum)
+ {
+ continue;
+ }
+
+ /* Get window parameters */
+ if (MV_OK != mvSdmmcWinGet(dev, winNumIndex, &addrDecWin))
+ {
+ mvOsPrintf("%s: ERR. TargetWinGet failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ /* Do not check disabled windows */
+ if(addrDecWin.enable == MV_FALSE)
+ {
+ continue;
+ }
+
+ if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+ {
+ return MV_TRUE;
+ }
+ }
+ return MV_FALSE;
+}
+
+
+/*******************************************************************************
+* mvSdmmcWinSet - Set SDMMC target address window
+*
+* DESCRIPTION:
+* This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+* address window, also known as address decode window.
+* After setting this target window, the SDMMC will be able to access the
+* target within the address window.
+*
+* INPUT:
+* winNum - SDMMC target address decode window number.
+* pAddrDecWin - SDMMC target window data structure.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*	MV_ERROR if address window overlaps with other address decode windows.
+* MV_BAD_PARAM if base address is invalid parameter or target is
+* unknown.
+*
+*******************************************************************************/
+MV_STATUS mvSdmmcWinSet(int dev, MV_U32 winNum, MV_SDMMC_DEC_WIN *pAddrDecWin)
+{
+ MV_TARGET_ATTRIB targetAttribs;
+ MV_DEC_REGS decRegs;
+
+ /* Parameter checking */
+ if (winNum >= MV_SDMMC_MAX_ADDR_DECODE_WIN)
+ {
+ mvOsPrintf("%s: ERR. Invalid win num %d\n",__FUNCTION__, winNum);
+ return MV_BAD_PARAM;
+ }
+
+	/* Check if the requested window overlaps with current windows */
+ if (MV_TRUE == sdmmcWinOverlapDetect(dev, winNum, &pAddrDecWin->addrWin))
+ {
+ mvOsPrintf("%s: ERR. Window %d overlap\n", __FUNCTION__, winNum);
+ return MV_ERROR;
+ }
+
+ /* check if address is aligned to the size */
+ if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+ {
+ mvOsPrintf("mvSdmmcWinSet:Error setting SDMMC window %d to "\
+ "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+ winNum,
+ mvCtrlTargetNameGet(pAddrDecWin->target),
+ pAddrDecWin->addrWin.baseLow,
+ pAddrDecWin->addrWin.size);
+ return MV_ERROR;
+ }
+
+ decRegs.baseReg = 0;
+ decRegs.sizeReg = 0;
+
+ if (MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+ {
+ mvOsPrintf("%s: mvCtrlAddrDecToReg Failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ mvCtrlAttribGet(pAddrDecWin->target, &targetAttribs);
+
+ /* set attributes */
+ decRegs.sizeReg &= ~MV_SDMMC_WIN_ATTR_MASK;
+ decRegs.sizeReg |= (targetAttribs.attrib << MV_SDMMC_WIN_ATTR_OFFSET);
+
+ /* set target ID */
+ decRegs.sizeReg &= ~MV_SDMMC_WIN_TARGET_MASK;
+ decRegs.sizeReg |= (targetAttribs.targetId << MV_SDMMC_WIN_TARGET_OFFSET);
+
+ if (pAddrDecWin->enable == MV_TRUE)
+ {
+ decRegs.sizeReg |= MV_SDMMC_WIN_ENABLE_MASK;
+ }
+ else
+ {
+ decRegs.sizeReg &= ~MV_SDMMC_WIN_ENABLE_MASK;
+ }
+
+ MV_REG_WRITE( MV_SDMMC_WIN_CTRL_REG(dev, winNum), decRegs.sizeReg);
+ MV_REG_WRITE( MV_SDMMC_WIN_BASE_REG(dev, winNum), decRegs.baseReg);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvSdmmcWinGet - Get SDMMC peripheral target address window.
+*
+* DESCRIPTION:
+* Get SDMMC peripheral target address window.
+*
+* INPUT:
+* winNum - SDMMC target address decode window number.
+*
+* OUTPUT:
+* pAddrDecWin - SDMMC target window data structure.
+*
+* RETURN:
+* MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvSdmmcWinGet(int dev, MV_U32 winNum, MV_SDMMC_DEC_WIN *pAddrDecWin)
+{
+ MV_DEC_REGS decRegs;
+ MV_TARGET_ATTRIB targetAttrib;
+
+ /* Parameter checking */
+ if (winNum >= MV_SDMMC_MAX_ADDR_DECODE_WIN)
+ {
+ mvOsPrintf("%s (dev=%d): ERR. Invalid winNum %d\n",
+ __FUNCTION__, dev, winNum);
+ return MV_NOT_SUPPORTED;
+ }
+
+ decRegs.baseReg = MV_REG_READ( MV_SDMMC_WIN_BASE_REG(dev, winNum) );
+ decRegs.sizeReg = MV_REG_READ( MV_SDMMC_WIN_CTRL_REG(dev, winNum) );
+
+ if (MV_OK != mvCtrlRegToAddrDec(&decRegs, &pAddrDecWin->addrWin) )
+ {
+ mvOsPrintf("%s: mvCtrlRegToAddrDec Failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ /* attrib and targetId */
+ targetAttrib.attrib = (decRegs.sizeReg & MV_SDMMC_WIN_ATTR_MASK) >>
+ MV_SDMMC_WIN_ATTR_OFFSET;
+ targetAttrib.targetId = (decRegs.sizeReg & MV_SDMMC_WIN_TARGET_MASK) >>
+ MV_SDMMC_WIN_TARGET_OFFSET;
+
+ pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+ /* Check if window is enabled */
+ if(decRegs.sizeReg & MV_SDMMC_WIN_ENABLE_MASK)
+ {
+ pAddrDecWin->enable = MV_TRUE;
+ }
+ else
+ {
+ pAddrDecWin->enable = MV_FALSE;
+ }
+ return MV_OK;
+}
+/*******************************************************************************
+* mvSdmmcAddrDecShow - Print the SDMMC address decode map.
+*
+* DESCRIPTION:
+*	This function prints the SDMMC address decode map.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID mvSdmmcAddrDecShow(MV_VOID)
+{
+
+ MV_SDMMC_DEC_WIN win;
+ int i,j=0;
+
+
+
+ if (MV_FALSE == mvCtrlPwrClckGet(SDIO_UNIT_ID, 0))
+ return;
+
+ mvOsOutput( "\n" );
+ mvOsOutput( "SDMMC %d:\n", j );
+ mvOsOutput( "----\n" );
+
+ for( i = 0; i < MV_SDMMC_MAX_ADDR_DECODE_WIN; i++ )
+ {
+ memset( &win, 0, sizeof(MV_SDMMC_DEC_WIN) );
+
+ mvOsOutput( "win%d - ", i );
+
+ if( mvSdmmcWinGet(j, i, &win ) == MV_OK )
+ {
+ if( win.enable )
+ {
+ mvOsOutput( "%s base %08x, ",
+ mvCtrlTargetNameGet(win.target), win.addrWin.baseLow );
+ mvOsOutput( "...." );
+
+ mvSizePrint( win.addrWin.size );
+
+ mvOsOutput( "\n" );
+ }
+ else
+ mvOsOutput( "disable\n" );
+ }
+ }
+}
+
+
+/*******************************************************************************
+* mvSdmmcWinInit - Initialize the integrated SDMMC target address window.
+*
+* DESCRIPTION:
+* Initialize the SDMMC peripheral target address window.
+*
+* INPUT:
+*
+*
+* OUTPUT:
+*
+*
+* RETURN:
+* MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvSdmmcWinInit(MV_VOID)
+{
+ int winNum;
+ MV_SDMMC_DEC_WIN sdmmcWin;
+ MV_CPU_DEC_WIN cpuAddrDecWin;
+ MV_U32 status, winPrioIndex = 0;
+
+ /* Initiate Sdmmc address decode */
+
+ /* First disable all address decode windows */
+ for(winNum = 0; winNum < MV_SDMMC_MAX_ADDR_DECODE_WIN; winNum++)
+ {
+ MV_U32 regVal = MV_REG_READ(MV_SDMMC_WIN_CTRL_REG(0, winNum));
+ regVal &= ~MV_SDMMC_WIN_ENABLE_MASK;
+ MV_REG_WRITE(MV_SDMMC_WIN_CTRL_REG(0, winNum), regVal);
+ }
+
+ winNum = 0;
+ while( (sdmmcAddrDecPrioTab[winPrioIndex] != TBL_TERM) &&
+ (winNum < MV_SDMMC_MAX_ADDR_DECODE_WIN) )
+ {
+ /* first get attributes from CPU If */
+ status = mvCpuIfTargetWinGet(sdmmcAddrDecPrioTab[winPrioIndex],
+ &cpuAddrDecWin);
+
+ if(MV_NO_SUCH == status)
+ {
+ winPrioIndex++;
+ continue;
+ }
+ if (MV_OK != status)
+ {
+ mvOsPrintf("%s: ERR. mvCpuIfTargetWinGet failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ if (cpuAddrDecWin.enable == MV_TRUE)
+ {
+ sdmmcWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+ sdmmcWin.addrWin.baseLow = cpuAddrDecWin.addrWin.baseLow;
+ sdmmcWin.addrWin.size = cpuAddrDecWin.addrWin.size;
+ sdmmcWin.enable = MV_TRUE;
+ sdmmcWin.target = sdmmcAddrDecPrioTab[winPrioIndex];
+
+ if(MV_OK != mvSdmmcWinSet(0/*dev*/, winNum, &sdmmcWin))
+ {
+ return MV_ERROR;
+ }
+ winNum++;
+ }
+ winPrioIndex++;
+ }
+ return MV_OK;
+}
+
+
+
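mvSdmmcWinInit() above copies whatever the CPU interface already decodes (via mvCpuIfTargetWinGet) into the SDMMC unit's own mbus windows, so it only makes sense after the CPU windows have been programmed. A hedged sketch of that ordering follows; the surrounding board-init function name is invented purely for illustration.

    /* Illustrative sketch only -- not part of the committed mvSysSdmmc.c. */
    #include "ctrlEnv/sys/mvSysSdmmc.h"

    static MV_STATUS exampleSdioBringUp(void)
    {
    	/* CPU interface windows are assumed to be programmed already;
    	 * mvSdmmcWinInit() mirrors them into the SDMMC address decode windows. */
    	if (mvSdmmcWinInit() != MV_OK) {
    		mvOsPrintf("SDMMC: address decode init failed\n");
    		return MV_ERROR;
    	}

    	/* optional: print the resulting decode map while debugging */
    	mvSdmmcAddrDecShow();

    	return MV_OK;
    }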
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.h
new file mode 100644
index 000000000..4c50a2b69
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.h
@@ -0,0 +1,125 @@
+
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __INCMVSysSdmmcAddrDech
+#define __INCMVSysSdmmcAddrDech
+
+#include "mvCommon.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct _mvSdmmcDecWin
+{
+ MV_TARGET target;
+ MV_ADDR_WIN addrWin; /* An address window*/
+ MV_BOOL enable; /* Address decode window is enabled/disabled */
+
+} MV_SDMMC_DEC_WIN;
+
+
+#define MV_SDMMC_MAX_ADDR_DECODE_WIN 4
+
+#define MV_SDMMC_WIN_CTRL_REG(dev, win) (MV_SDIO_REG_BASE + 0x108 + ((win)<<3))
+#define MV_SDMMC_WIN_BASE_REG(dev, win) (MV_SDIO_REG_BASE + 0x10c + ((win)<<3))
+
+
+/* BITs in Windows 0-3 Control and Base Registers */
+#define MV_SDMMC_WIN_ENABLE_BIT 0
+#define MV_SDMMC_WIN_ENABLE_MASK (1<<MV_SDMMC_WIN_ENABLE_BIT)
+
+#define MV_SDMMC_WIN_TARGET_OFFSET 4
+#define MV_SDMMC_WIN_TARGET_MASK (0xF<<MV_SDMMC_WIN_TARGET_OFFSET)
+
+#define MV_SDMMC_WIN_ATTR_OFFSET 8
+#define MV_SDMMC_WIN_ATTR_MASK (0xFF<<MV_SDMMC_WIN_ATTR_OFFSET)
+
+#define MV_SDMMC_WIN_SIZE_OFFSET 16
+#define MV_SDMMC_WIN_SIZE_MASK (0xFFFF<<MV_SDMMC_WIN_SIZE_OFFSET)
+
+#define MV_SDMMC_WIN_BASE_OFFSET 16
+#define MV_SDMMC_WIN_BASE_MASK (0xFFFF<<MV_SDMMC_WIN_BASE_OFFSET)
+
+MV_STATUS mvSdmmcWinGet(int dev, MV_U32 winNum, MV_SDMMC_DEC_WIN *pAddrDecWin);
+MV_STATUS mvSdmmcWinSet(int dev, MV_U32 winNum, MV_SDMMC_DEC_WIN *pAddrDecWin);
+MV_STATUS mvSdmmcWinByTargetGet(MV_TARGET target, MV_SDMMC_DEC_WIN *pAddrDecWin);
+MV_STATUS mvSdmmcWinInit(MV_VOID);
+MV_VOID mvSdmmcAddrDecShow(MV_VOID);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif
+
+
+
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.c
new file mode 100644
index 000000000..ecf6944b8
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.c
@@ -0,0 +1,462 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvSysTdm.h"
+
+
+/* defines */
+#ifdef MV_DEBUG
+ #define DB(x) x
+#else
+ #define DB(x)
+#endif
+
+static MV_TARGET tdmAddrDecPrioTap[] =
+{
+ PEX0_MEM,
+ SDRAM_CS0,
+ SDRAM_CS1,
+ SDRAM_CS2,
+ SDRAM_CS3,
+ DEVICE_CS0,
+ DEVICE_CS1,
+ DEVICE_CS2,
+ DEV_BOOCS,
+ PEX0_IO,
+ TBL_TERM
+};
+
+static MV_STATUS tdmWinOverlapDetect(MV_U32 winNum, MV_ADDR_WIN *pAddrWin);
+
+/*******************************************************************************
+* mvTdmWinInit - Initialize TDM address decode windows
+*
+* DESCRIPTION:
+*	This function initializes the TDM window decode unit. It sets the
+*	default address decode windows of the unit.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_ERROR if setting fail.
+*******************************************************************************/
+
+MV_STATUS mvTdmWinInit(void)
+{
+ MV_U32 winNum;
+ MV_U32 winPrioIndex = 0;
+ MV_CPU_DEC_WIN cpuAddrDecWin;
+ MV_TDM_DEC_WIN tdmWin;
+ MV_STATUS status;
+
+ /*Disable all windows*/
+ for (winNum = 0; winNum < TDM_MBUS_MAX_WIN; winNum++)
+ {
+ mvTdmWinEnable(winNum, MV_FALSE);
+ }
+
+ for (winNum = 0; ((tdmAddrDecPrioTap[winPrioIndex] != TBL_TERM) &&
+ (winNum < TDM_MBUS_MAX_WIN)); )
+ {
+ status = mvCpuIfTargetWinGet(tdmAddrDecPrioTap[winPrioIndex],
+ &cpuAddrDecWin);
+ if (MV_NO_SUCH == status)
+ {
+ winPrioIndex++;
+ continue;
+ }
+ if (MV_OK != status)
+ {
+ mvOsPrintf("mvTdmInit: ERR. mvCpuIfTargetWinGet failed\n");
+ return MV_ERROR;
+ }
+
+ if (cpuAddrDecWin.enable == MV_TRUE)
+ {
+ tdmWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+ tdmWin.addrWin.baseLow = cpuAddrDecWin.addrWin.baseLow;
+ tdmWin.addrWin.size = cpuAddrDecWin.addrWin.size;
+ tdmWin.enable = MV_TRUE;
+ tdmWin.target = tdmAddrDecPrioTap[winPrioIndex];
+ if (MV_OK != mvTdmWinSet(winNum, &tdmWin))
+ {
+ return MV_ERROR;
+ }
+ winNum++;
+ }
+ winPrioIndex++;
+ }
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvTdmWinSet - Set TDM target address window
+*
+* DESCRIPTION:
+* This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+* address window, also known as address decode window.
+* After setting this target window, the TDM will be able to access the
+* target within the address window.
+*
+* INPUT:
+* winNum - TDM to target address decode window number.
+* pAddrDecWin - TDM target window data structure.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*	MV_ERROR if address window overlaps with other address decode windows.
+* MV_BAD_PARAM if base address is invalid parameter or target is
+* unknown.
+*
+*******************************************************************************/
+
+MV_STATUS mvTdmWinSet(MV_U32 winNum, MV_TDM_DEC_WIN *pAddrDecWin)
+{
+ MV_TARGET_ATTRIB targetAttribs;
+ MV_DEC_REGS decRegs;
+ MV_U32 ctrlReg = 0;
+
+ /* Parameter checking */
+ if (winNum >= TDM_MBUS_MAX_WIN)
+ {
+ mvOsPrintf("mvTdmWinSet: ERR. Invalid win num %d\n",winNum);
+ return MV_BAD_PARAM;
+ }
+
+	/* Check if the requested window overlaps with current windows */
+ if (MV_TRUE == tdmWinOverlapDetect(winNum, &pAddrDecWin->addrWin))
+ {
+ mvOsPrintf("mvTdmWinSet: ERR. Window %d overlap\n", winNum);
+ return MV_ERROR;
+ }
+
+ /* check if address is aligned to the size */
+ if (MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+ {
+ mvOsPrintf("mvTdmWinSet: Error setting TDM window %d to "\
+ "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+ winNum,
+ mvCtrlTargetNameGet(pAddrDecWin->target),
+ pAddrDecWin->addrWin.baseLow,
+ pAddrDecWin->addrWin.size);
+ return MV_ERROR;
+ }
+
+ decRegs.baseReg = MV_REG_READ(TDM_WIN_BASE_REG(winNum));
+ decRegs.sizeReg = (MV_REG_READ(TDM_WIN_CTRL_REG(winNum)) & TDM_WIN_SIZE_MASK) >> TDM_WIN_SIZE_OFFS;
+
+ if (MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+ {
+ mvOsPrintf("mvTdmWinSet: mvCtrlAddrDecToReg Failed\n");
+ return MV_ERROR;
+ }
+
+ mvCtrlAttribGet(pAddrDecWin->target, &targetAttribs);
+
+ /* for the safe side we disable the window before writing the new
+ values */
+ mvTdmWinEnable(winNum, MV_FALSE);
+
+ ctrlReg |= (targetAttribs.attrib << TDM_WIN_ATTRIB_OFFS);
+ ctrlReg |= (targetAttribs.targetId << TDM_WIN_TARGET_OFFS);
+ ctrlReg |= (decRegs.sizeReg & TDM_WIN_SIZE_MASK);
+
+ /* Write to address base and control registers */
+ MV_REG_WRITE(TDM_WIN_BASE_REG(winNum), decRegs.baseReg);
+ MV_REG_WRITE(TDM_WIN_CTRL_REG(winNum), ctrlReg);
+ /* Enable address decode target window */
+ if (pAddrDecWin->enable == MV_TRUE)
+ {
+ mvTdmWinEnable(winNum, MV_TRUE);
+ }
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvTdmWinGet - Get peripheral target address window.
+*
+* DESCRIPTION:
+* Get TDM peripheral target address window.
+*
+* INPUT:
+* winNum - TDM to target address decode window number.
+*
+* OUTPUT:
+* pAddrDecWin - TDM target window data structure.
+*
+* RETURN:
+* MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+
+MV_STATUS mvTdmWinGet(MV_U32 winNum, MV_TDM_DEC_WIN *pAddrDecWin)
+{
+
+ MV_DEC_REGS decRegs;
+ MV_TARGET_ATTRIB targetAttrib;
+
+ /* Parameter checking */
+ if (winNum >= TDM_MBUS_MAX_WIN)
+ {
+ mvOsPrintf("mvTdmWinGet: ERR. Invalid winNum %d\n", winNum);
+ return MV_NOT_SUPPORTED;
+ }
+
+ decRegs.baseReg = MV_REG_READ(TDM_WIN_BASE_REG(winNum));
+ decRegs.sizeReg = (MV_REG_READ(TDM_WIN_CTRL_REG(winNum)) & TDM_WIN_SIZE_MASK) >> TDM_WIN_SIZE_OFFS;
+
+ if (MV_OK != mvCtrlRegToAddrDec(&decRegs,&(pAddrDecWin->addrWin)))
+ {
+ mvOsPrintf("mvTdmWinGet: mvCtrlRegToAddrDec Failed \n");
+ return MV_ERROR;
+ }
+
+ /* attrib and targetId */
+ targetAttrib.attrib =
+ (MV_REG_READ(TDM_WIN_CTRL_REG(winNum)) & TDM_WIN_ATTRIB_MASK) >> TDM_WIN_ATTRIB_OFFS;
+ targetAttrib.targetId =
+ (MV_REG_READ(TDM_WIN_CTRL_REG(winNum)) & TDM_WIN_TARGET_MASK) >> TDM_WIN_TARGET_OFFS;
+
+ pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+ /* Check if window is enabled */
+ if (MV_REG_READ(TDM_WIN_CTRL_REG(winNum)) & TDM_WIN_ENABLE_MASK)
+ {
+ pAddrDecWin->enable = MV_TRUE;
+ }
+ else
+ {
+ pAddrDecWin->enable = MV_FALSE;
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvTdmWinEnable - Enable/disable a TDM to target address window
+*
+* DESCRIPTION:
+* This function enable/disable a TDM to target address window.
+* According to parameter 'enable' the routine will enable the
+* window, thus enabling TDM accesses (before enabling the window it is
+* tested for overlapping). Otherwise, the window will be disabled.
+*
+* INPUT:
+* winNum - TDM to target address decode window number.
+* enable - Enable/disable parameter.
+*
+* OUTPUT:
+* N/A
+*
+* RETURN:
+*	MV_ERROR if decode window number was wrong or enabled window overlaps.
+*
+*******************************************************************************/
+MV_STATUS mvTdmWinEnable(int winNum, MV_BOOL enable)
+{
+ MV_TDM_DEC_WIN addrDecWin;
+
+ if (MV_TRUE == enable)
+ {
+ if (winNum >= TDM_MBUS_MAX_WIN)
+ {
+ mvOsPrintf("mvTdmWinEnable:ERR. Invalid winNum%d\n",winNum);
+ return MV_ERROR;
+ }
+
+ /* First check for overlap with other enabled windows */
+ /* Get current window */
+ if (MV_OK != mvTdmWinGet(winNum, &addrDecWin))
+ {
+ mvOsPrintf("mvTdmWinEnable:ERR. targetWinGet fail\n");
+ return MV_ERROR;
+ }
+ /* Check for overlapping */
+ if (MV_FALSE == tdmWinOverlapDetect(winNum, &(addrDecWin.addrWin)))
+ {
+ /* No Overlap. Enable address decode target window */
+ MV_REG_BIT_SET(TDM_WIN_CTRL_REG(winNum), TDM_WIN_ENABLE_MASK);
+ }
+ else
+ { /* Overlap detected */
+ mvOsPrintf("mvTdmWinEnable:ERR. Overlap detected\n");
+ return MV_ERROR;
+ }
+ }
+ else
+ {
+ MV_REG_BIT_RESET(TDM_WIN_CTRL_REG(winNum), TDM_WIN_ENABLE_MASK);
+ }
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* tdmWinOverlapDetect - Detect TDM address windows overlapping
+*
+* DESCRIPTION:
+*	Unpredictable behaviour is expected in case TDM address decode
+*	windows overlap.
+* This function detects TDM address decode windows overlapping of a
+* specified window. The function does not check the window itself for
+* overlapping. The function also skipps disabled address decode windows.
+*
+* INPUT:
+* winNum - address decode window number.
+* pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlaps the current address
+*       decode map, MV_FALSE otherwise, MV_ERROR if invalid data is read
+*       from the registers.
+*
+*******************************************************************************/
+static MV_STATUS tdmWinOverlapDetect(MV_U32 winNum, MV_ADDR_WIN *pAddrWin)
+{
+ MV_U32 winNumIndex;
+ MV_TDM_DEC_WIN addrDecWin;
+
+ for (winNumIndex = 0; winNumIndex < TDM_MBUS_MAX_WIN; winNumIndex++)
+ {
+ /* Do not check window itself */
+ if (winNumIndex == winNum)
+ {
+ continue;
+ }
+ /* Do not check disabled windows */
+		if (MV_REG_READ(TDM_WIN_CTRL_REG(winNumIndex)) & TDM_WIN_ENABLE_MASK)
+ {
+ /* Get window parameters */
+ if (MV_OK != mvTdmWinGet(winNumIndex, &addrDecWin))
+ {
+				DB(mvOsPrintf("tdmWinOverlapDetect: ERR. mvTdmWinGet failed\n"));
+ return MV_ERROR;
+ }
+
+ if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+ {
+ return MV_TRUE;
+ }
+ }
+ }
+ return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvTdmAddrDecShow - Print the TDM address decode map.
+*
+* DESCRIPTION:
+*       This function prints the TDM address decode map.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID mvTdmAddrDecShow(MV_VOID)
+{
+ MV_TDM_DEC_WIN win;
+ int i;
+
+ mvOsOutput( "\n" );
+ mvOsOutput( "TDM:\n" );
+ mvOsOutput( "----\n" );
+
+ for( i = 0; i < TDM_MBUS_MAX_WIN; i++ )
+ {
+ memset( &win, 0, sizeof(MV_TDM_DEC_WIN) );
+
+ mvOsOutput( "win%d - ", i );
+
+ if (mvTdmWinGet(i, &win ) == MV_OK )
+ {
+ if( win.enable )
+ {
+ mvOsOutput( "%s base %08x, ",
+ mvCtrlTargetNameGet(win.target), win.addrWin.baseLow);
+ mvOsOutput( "...." );
+ mvSizePrint( win.addrWin.size );
+ mvOsOutput( "\n" );
+ }
+ else
+ mvOsOutput( "disable\n" );
+ }
+ }
+}
+
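As a usage illustration only (not part of the patch), the sketch below shows how board code might drive the window API implemented above; mvTdmWinEnable() re-runs the overlap check before setting the enable bit and returns MV_ERROR on a collision. The board_tdm_reenable helper name is an assumption.

/* Illustrative sketch, assuming the mvSysTdm.h API introduced below. */
#include "ctrlEnv/sys/mvSysTdm.h"

static MV_STATUS board_tdm_reenable(MV_U32 winNum)	/* hypothetical helper */
{
	MV_TDM_DEC_WIN win;

	if (mvTdmWinGet(winNum, &win) != MV_OK)
		return MV_ERROR;

	if (win.enable == MV_FALSE)
	{
		/* mvTdmWinEnable() checks for overlap before enabling */
		if (mvTdmWinEnable(winNum, MV_TRUE) != MV_OK)
		{
			mvOsPrintf("board_tdm_reenable: win%d overlaps\n", winNum);
			return MV_ERROR;
		}
	}
	return MV_OK;
}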
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.h
new file mode 100644
index 000000000..0d3140f5e
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.h
@@ -0,0 +1,106 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSysTdmh
+#define __INCmvSysTdmh
+
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+typedef struct _mvTdmDecWin
+{
+ MV_TARGET target;
+ MV_ADDR_WIN addrWin; /* An address window*/
+ MV_BOOL enable; /* Address decode window is enabled/disabled */
+} MV_TDM_DEC_WIN;
+
+MV_STATUS mvTdmWinInit(MV_VOID);
+MV_STATUS mvTdmWinSet(MV_U32 winNum, MV_TDM_DEC_WIN *pAddrDecWin);
+MV_STATUS mvTdmWinGet(MV_U32 winNum, MV_TDM_DEC_WIN *pAddrDecWin);
+MV_STATUS mvTdmWinEnable(int winNum, MV_BOOL enable);
+MV_VOID mvTdmAddrDecShow(MV_VOID);
+
+
+#define TDM_MBUS_MAX_WIN 4
+#define TDM_WIN_CTRL_REG(win)	((TDM_REG_BASE + 0x4030) + ((win) << 4))
+#define TDM_WIN_BASE_REG(win)	((TDM_REG_BASE + 0x4034) + ((win) << 4))
+
+/* TDM_WIN_CTRL_REG bits */
+#define TDM_WIN_ENABLE_OFFS 0
+#define TDM_WIN_ENABLE_MASK (1<<TDM_WIN_ENABLE_OFFS)
+#define TDM_WIN_ENABLE 1
+#define TDM_WIN_TARGET_OFFS 4
+#define TDM_WIN_TARGET_MASK (0xf<<TDM_WIN_TARGET_OFFS)
+#define TDM_WIN_ATTRIB_OFFS 8
+#define TDM_WIN_ATTRIB_MASK (0xff<<TDM_WIN_ATTRIB_OFFS)
+#define TDM_WIN_SIZE_OFFS 16
+#define TDM_WIN_SIZE_MASK (0xffff<<TDM_WIN_SIZE_OFFS)
+
+/* TDM_WIN_BASE_REG bits */
+#define TDM_BASE_OFFS 16
+#define TDM_BASE_MASK (0xffff<<TDM_BASE_OFFS)
+
+#endif /*__INCmvSysTdmh*/
+
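For orientation only (not part of the patch), the snippet below spells out how a raw TDM window control register value maps onto the fields defined above; it mirrors the field extraction done in mvTdmWinGet().

/* Sketch: decode a TDM_WIN_CTRL_REG value with the masks defined above. */
MV_U32 ctrl   = MV_REG_READ(TDM_WIN_CTRL_REG(0));
MV_U32 target = (ctrl & TDM_WIN_TARGET_MASK) >> TDM_WIN_TARGET_OFFS;	/* bits 7:4   */
MV_U32 attrib = (ctrl & TDM_WIN_ATTRIB_MASK) >> TDM_WIN_ATTRIB_OFFS;	/* bits 15:8  */
MV_U32 size   = (ctrl & TDM_WIN_SIZE_MASK)   >> TDM_WIN_SIZE_OFFS;	/* bits 31:16 */
MV_BOOL en    = (ctrl & TDM_WIN_ENABLE_MASK) ? MV_TRUE : MV_FALSE;	/* bit 0      */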
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.c
new file mode 100644
index 000000000..4415c7c26
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.c
@@ -0,0 +1,591 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#include "ctrlEnv/sys/mvSysTs.h"
+
+
+typedef struct _mvTsuDecWin
+{
+ MV_TARGET target;
+ MV_ADDR_WIN addrWin; /* An address window*/
+ MV_BOOL enable; /* Address decode window is enabled/disabled */
+
+}MV_TSU_DEC_WIN;
+
+
+MV_TARGET tsuAddrDecPrioTap[] =
+{
+#if defined(MV_INCLUDE_PEX)
+ PEX0_MEM,
+#endif
+#if defined(MV_INCLUDE_PCI)
+ PCI0_MEM,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS0)
+ SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+ SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+ SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+ SDRAM_CS3,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS0)
+ DEVICE_CS0,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS1)
+ DEVICE_CS1,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS2)
+ DEVICE_CS2,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS3)
+ DEVICE_CS3,
+#endif
+#if defined(MV_INCLUDE_PEX)
+ PEX0_IO,
+#endif
+#if defined(MV_INCLUDE_PCI)
+ PCI0_IO,
+#endif
+ TBL_TERM
+};
+
+static MV_STATUS tsuWinOverlapDetect(MV_U32 winNum, MV_ADDR_WIN *pAddrWin);
+static MV_STATUS mvTsuWinSet(MV_U32 winNum, MV_TSU_DEC_WIN *pAddrDecWin);
+static MV_STATUS mvTsuWinGet(MV_U32 winNum, MV_TSU_DEC_WIN *pAddrDecWin);
+MV_STATUS mvTsuWinEnable(MV_U32 winNum,MV_BOOL enable);
+
+/*******************************************************************************
+* mvTsuWinInit
+*
+* DESCRIPTION:
+* Initialize the TSU unit address decode windows.
+*
+* INPUT:
+* None.
+* OUTPUT:
+* None.
+* RETURN:
+* MV_OK - on success,
+*
+*******************************************************************************/
+MV_STATUS mvTsuWinInit(void)
+{
+ MV_U32 winNum, status, winPrioIndex=0;
+ MV_TSU_DEC_WIN tsuWin;
+ MV_CPU_DEC_WIN cpuAddrDecWin;
+
+ /* First disable all address decode windows */
+ for(winNum = 0; winNum < TSU_MAX_DECODE_WIN; winNum++)
+ {
+ MV_REG_BIT_RESET(MV_TSU_WIN_CTRL_REG(winNum),
+ TSU_WIN_CTRL_EN_MASK);
+ }
+
+ /* Go through all windows in user table until table terminator */
+ for(winNum = 0; ((tsuAddrDecPrioTap[winPrioIndex] != TBL_TERM) &&
+ (winNum < TSU_MAX_DECODE_WIN));)
+ {
+ /* first get attributes from CPU If */
+ status = mvCpuIfTargetWinGet(tsuAddrDecPrioTap[winPrioIndex],
+ &cpuAddrDecWin);
+
+ if(MV_NO_SUCH == status)
+ {
+ winPrioIndex++;
+ continue;
+ }
+ if(MV_OK != status)
+ {
+ mvOsPrintf("mvTsuWinInit: ERR. mvCpuIfTargetWinGet failed\n");
+ return MV_ERROR;
+ }
+
+ if (cpuAddrDecWin.enable == MV_TRUE)
+ {
+ tsuWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+ tsuWin.addrWin.baseLow = cpuAddrDecWin.addrWin.baseLow;
+ tsuWin.addrWin.size = cpuAddrDecWin.addrWin.size;
+ tsuWin.enable = MV_TRUE;
+ tsuWin.target = tsuAddrDecPrioTap[winPrioIndex];
+
+ if(MV_OK != mvTsuWinSet(winNum, &tsuWin))
+ {
+ mvOsPrintf("mvTsuWinInit: ERR. mvTsuWinSet failed winNum=%d\n",
+ winNum);
+ return MV_ERROR;
+ }
+ winNum++;
+ }
+ winPrioIndex ++;
+ }
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvTsuWinSet
+*
+* DESCRIPTION:
+* This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+* address window, also known as address decode window.
+* After setting this target window, the TSU will be able to access the
+* target within the address window.
+*
+* INPUT:
+* winNum - TSU to target address decode window number.
+* pAddrDecWin - TSU target window data structure.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_ERROR       - if the address window overlaps other address decode
+*                        windows.
+* MV_BAD_PARAM - if base address is invalid parameter or target is
+* unknown.
+*
+*******************************************************************************/
+MV_STATUS mvTsuWinSet(MV_U32 winNum, MV_TSU_DEC_WIN *pAddrDecWin)
+{
+ MV_TARGET_ATTRIB targetAttribs;
+ MV_DEC_REGS decRegs;
+
+ /* Parameter checking */
+ if(winNum >= TSU_MAX_DECODE_WIN)
+ {
+ mvOsPrintf("mvTsuWinSet: ERR. Invalid win num %d\n",winNum);
+ return MV_BAD_PARAM;
+ }
+
+	/* Check if the requested window overlaps with the current windows */
+ if(MV_TRUE == tsuWinOverlapDetect(winNum, &pAddrDecWin->addrWin))
+ {
+ mvOsPrintf("mvTsuWinSet: ERR. Window %d overlap\n", winNum);
+ return MV_ERROR;
+ }
+
+ /* check if address is aligned to the size */
+ if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow,pAddrDecWin->addrWin.size))
+ {
+ mvOsPrintf("mvTsuWinSet: Error setting TSU window %d to target "
+ "%s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+ winNum, mvCtrlTargetNameGet(pAddrDecWin->target),
+ pAddrDecWin->addrWin.baseLow,
+ pAddrDecWin->addrWin.size);
+ return MV_ERROR;
+ }
+
+ decRegs.baseReg = MV_REG_READ(MV_TSU_WIN_BASE_REG(winNum));
+ decRegs.sizeReg = MV_REG_READ(MV_TSU_WIN_CTRL_REG(winNum));
+
+ if(MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+ {
+ mvOsPrintf("mvTsuWinSet: mvCtrlAddrDecToReg Failed\n");
+ return MV_ERROR;
+ }
+
+ mvCtrlAttribGet(pAddrDecWin->target,&targetAttribs);
+
+ /* set attributes */
+ decRegs.sizeReg &= ~TSU_WIN_CTRL_ATTR_MASK;
+ decRegs.sizeReg |= targetAttribs.attrib << TSU_WIN_CTRL_ATTR_OFFS;
+ /* set target ID */
+ decRegs.sizeReg &= ~TSU_WIN_CTRL_TARGET_MASK;
+ decRegs.sizeReg |= targetAttribs.targetId << TSU_WIN_CTRL_TARGET_OFFS;
+
+	/* To be on the safe side, disable the window before writing the new */
+	/* values. */
+ mvTsuWinEnable(winNum, MV_FALSE);
+ MV_REG_WRITE(MV_TSU_WIN_CTRL_REG(winNum),decRegs.sizeReg);
+
+	/* Write to address decode Base Register */
+ MV_REG_WRITE(MV_TSU_WIN_BASE_REG(winNum), decRegs.baseReg);
+
+ /* Enable address decode target window */
+ if(pAddrDecWin->enable == MV_TRUE)
+ {
+ mvTsuWinEnable(winNum,MV_TRUE);
+ }
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvTsuWinGet
+*
+* DESCRIPTION:
+* Get TSU peripheral target address window.
+*
+* INPUT:
+* winNum - TSU to target address decode window number.
+*
+* OUTPUT:
+* pAddrDecWin - TSU target window data structure.
+*
+* RETURN:
+* MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvTsuWinGet(MV_U32 winNum, MV_TSU_DEC_WIN *pAddrDecWin)
+{
+ MV_DEC_REGS decRegs;
+ MV_TARGET_ATTRIB targetAttrib;
+
+ /* Parameter checking */
+ if(winNum >= TSU_MAX_DECODE_WIN)
+ {
+ mvOsPrintf("mvTsuWinGet: ERR. Invalid winNum %d\n", winNum);
+ return MV_NOT_SUPPORTED;
+ }
+
+ decRegs.baseReg = MV_REG_READ(MV_TSU_WIN_BASE_REG(winNum));
+ decRegs.sizeReg = MV_REG_READ(MV_TSU_WIN_CTRL_REG(winNum));
+
+ if(MV_OK != mvCtrlRegToAddrDec(&decRegs,&(pAddrDecWin->addrWin)))
+ {
+ mvOsPrintf("mvTsuWinGet: mvCtrlRegToAddrDec Failed \n");
+ return MV_ERROR;
+ }
+
+ /* attrib and targetId */
+ targetAttrib.attrib =
+ (decRegs.sizeReg & TSU_WIN_CTRL_ATTR_MASK) >> TSU_WIN_CTRL_ATTR_OFFS;
+ targetAttrib.targetId =
+ (decRegs.sizeReg & TSU_WIN_CTRL_TARGET_MASK) >> TSU_WIN_CTRL_TARGET_OFFS;
+
+ pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+ /* Check if window is enabled */
+ if((MV_REG_READ(MV_TSU_WIN_CTRL_REG(winNum)) & TSU_WIN_CTRL_EN_MASK))
+ {
+ pAddrDecWin->enable = MV_TRUE;
+ }
+ else
+ {
+ pAddrDecWin->enable = MV_FALSE;
+ }
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvTsuWinEnable
+*
+* DESCRIPTION:
+*       This function enables or disables a TSU to target address window.
+* According to parameter 'enable' the routine will enable the
+* window, thus enabling TSU accesses (before enabling the window it is
+* tested for overlapping). Otherwise, the window will be disabled.
+*
+* INPUT:
+* winNum - TSU to target address decode window number.
+* enable - Enable / disable parameter.
+*
+* OUTPUT:
+* N/A
+*
+* RETURN:
+*       MV_ERROR if the decode window number is wrong or the enabled window overlaps.
+*
+*******************************************************************************/
+MV_STATUS mvTsuWinEnable(MV_U32 winNum,MV_BOOL enable)
+{
+ MV_TSU_DEC_WIN addrDecWin;
+
+ /* Parameter checking */
+ if(winNum >= TSU_MAX_DECODE_WIN)
+ {
+		mvOsPrintf("mvTsuWinEnable: ERR. Invalid winNum %d\n", winNum);
+ return MV_ERROR;
+ }
+
+ if(enable == MV_TRUE)
+ {
+ /* First check for overlap with other enabled windows */
+ /* Get current window. */
+ if(MV_OK != mvTsuWinGet(winNum,&addrDecWin))
+ {
+ mvOsPrintf("mvTsuWinEnable: ERR. targetWinGet fail\n");
+ return MV_ERROR;
+ }
+ /* Check for overlapping. */
+ if(MV_FALSE == tsuWinOverlapDetect(winNum,&(addrDecWin.addrWin)))
+ {
+ /* No Overlap. Enable address decode target window */
+ MV_REG_BIT_SET(MV_TSU_WIN_CTRL_REG(winNum),
+ TSU_WIN_CTRL_EN_MASK);
+ }
+ else
+ {
+ /* Overlap detected */
+ mvOsPrintf("mvTsuWinEnable: ERR. Overlap detected\n");
+ return MV_ERROR;
+ }
+ }
+ else
+ {
+ /* Disable address decode target window */
+ MV_REG_BIT_RESET(MV_TSU_WIN_CTRL_REG(winNum),
+ TSU_WIN_CTRL_EN_MASK);
+ }
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvTsuWinTargetGet
+*
+* DESCRIPTION:
+*       Get the window number associated with a target.
+*
+* INPUT:
+* target - Target ID to get the window number for.
+* OUTPUT:
+*
+* RETURN:
+* window number or 0xFFFFFFFF on error.
+*
+*******************************************************************************/
+MV_U32 mvTsuWinTargetGet(MV_TARGET target)
+{
+ MV_TSU_DEC_WIN decWin;
+ MV_U32 winNum;
+
+ /* Check parameters */
+ if(target >= MAX_TARGETS)
+ {
+		mvOsPrintf("mvTsuWinTargetGet: target %d is illegal\n", target);
+ return 0xffffffff;
+ }
+
+ for(winNum = 0; winNum < TSU_MAX_DECODE_WIN; winNum++)
+ {
+ if(mvTsuWinGet(winNum,&decWin) != MV_OK)
+ {
+			mvOsPrintf("mvTsuWinTargetGet: mvTsuWinGet failed\n");
+ return 0xffffffff;
+ }
+
+ if (decWin.enable == MV_TRUE)
+ {
+ if(decWin.target == target)
+ {
+ return winNum;
+ }
+ }
+ }
+ return 0xFFFFFFFF;
+}
+
+
+/*******************************************************************************
+* tsuWinOverlapDetect
+*
+* DESCRIPTION:
+* Detect TSU address windows overlapping
+*       Unpredictable behaviour is expected if TSU address decode
+*       windows overlap.
+*       This function detects TSU address decode windows overlapping with a
+*       specified window. The function does not check the window itself for
+*       overlapping. The function also skips disabled address decode windows.
+*
+* INPUT:
+* winNum - address decode window number.
+* pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlaps the current address
+*       decode map, MV_FALSE otherwise, MV_ERROR if invalid data is read
+*       from the registers.
+*
+*******************************************************************************/
+static MV_STATUS tsuWinOverlapDetect(MV_U32 winNum, MV_ADDR_WIN *pAddrWin)
+{
+ MV_U32 ctrlReg;
+ MV_U32 winNumIndex;
+ MV_TSU_DEC_WIN addrDecWin;
+
+ for(winNumIndex = 0; winNumIndex < TSU_MAX_DECODE_WIN; winNumIndex++)
+ {
+ /* Do not check window itself */
+ if(winNumIndex == winNum)
+ {
+ continue;
+ }
+
+ /* Do not check disabled windows */
+ ctrlReg = MV_REG_READ(MV_TSU_WIN_CTRL_REG(winNumIndex));
+ if((ctrlReg & TSU_WIN_CTRL_EN_MASK) == 0)
+ {
+ continue;
+ }
+
+ /* Get window parameters */
+ if (MV_OK != mvTsuWinGet(winNumIndex, &addrDecWin))
+ {
+ mvOsPrintf("tsuWinOverlapDetect: ERR. mvTsuWinGet failed\n");
+ return MV_ERROR;
+ }
+
+ if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+ {
+ return MV_TRUE;
+ }
+ }
+ return MV_FALSE;
+}
+
+
+/*******************************************************************************
+* mvTsuAddrDecShow
+*
+* DESCRIPTION:
+* Print the TSU address decode map.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+void mvTsuAddrDecShow(void)
+{
+ MV_TSU_DEC_WIN win;
+ int i;
+
+ if (MV_FALSE == mvCtrlPwrClckGet(TS_UNIT_ID, 0))
+ return;
+
+ mvOsOutput( "\n" );
+ mvOsOutput( "TSU:\n");
+ mvOsOutput( "----\n" );
+
+ for(i = 0; i < TSU_MAX_DECODE_WIN; i++)
+ {
+		memset(&win, 0, sizeof(MV_TSU_DEC_WIN));
+ mvOsOutput( "win%d - ", i );
+
+ if(mvTsuWinGet(i, &win ) == MV_OK )
+ {
+ if(win.enable == MV_TRUE)
+ {
+ mvOsOutput("%s base %08x, ",
+ mvCtrlTargetNameGet(win.target),
+ win.addrWin.baseLow);
+ mvOsOutput( "...." );
+ mvSizePrint(win.addrWin.size );
+ mvOsOutput( "\n" );
+ }
+ else
+ {
+ mvOsOutput( "disable\n" );
+ }
+ }
+ }
+ return;
+}
+
+
+/*******************************************************************************
+* mvTsuInit
+*
+* DESCRIPTION:
+* Initialize the TSU unit, and get unit out of reset.
+*
+* INPUT:
+* coreClock - The core clock at which the TSU should operate.
+* mode - The mode on configure the unit into (serial/parallel).
+* memHandle - Memory handle used for memory allocations.
+* OUTPUT:
+* None.
+* RETURN:
+* MV_OK - on success,
+*
+*******************************************************************************/
+MV_STATUS mvTsuInit(MV_TSU_CORE_CLOCK coreClock, MV_TSU_PORTS_MODE mode,
+ void *osHandle)
+{
+ MV_STATUS status;
+
+ status = mvTsuWinInit();
+ if(status == MV_OK)
+ status = mvTsuHalInit(coreClock,mode,osHandle);
+
+ return status;
+}
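A side note with an illustrative sketch (not part of the patch): mvTsuWinTargetGet() is defined with external linkage here but is not declared in mvSysTs.h below, so a caller needs its own prototype. SDRAM_CS0 is one of the targets listed in tsuAddrDecPrioTap above.

/* Sketch: look up which TSU window decodes DRAM bank 0. */
extern MV_U32 mvTsuWinTargetGet(MV_TARGET target);	/* not exported by mvSysTs.h */

MV_U32 tsWin = mvTsuWinTargetGet(SDRAM_CS0);
if (tsWin == 0xFFFFFFFF)
	mvOsPrintf("No TSU window maps SDRAM_CS0\n");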
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.h
new file mode 100644
index 000000000..42825893c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.h
@@ -0,0 +1,110 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSysTsh
+#define __INCmvSysTsh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* includes */
+#include "ts/mvTsu.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+#define TSU_MAX_DECODE_WIN 4
+
+
+/*******************************************/
+/* TSU Windows Registers */
+/*******************************************/
+#define MV_TSU_WIN_CTRL_REG(win)        (TSU_GLOBAL_REG_BASE + 0x30 + 0x10 * (win))
+#define MV_TSU_WIN_BASE_REG(win)        (TSU_GLOBAL_REG_BASE + 0x34 + 0x10 * (win))
+
+/* TSU windows control register. */
+#define TSU_WIN_CTRL_EN_MASK (0x1 << 0)
+#define TSU_WIN_CTRL_TARGET_OFFS 4
+#define TSU_WIN_CTRL_TARGET_MASK (0xF << TSU_WIN_CTRL_TARGET_OFFS)
+#define TSU_WIN_CTRL_ATTR_OFFS 8
+#define TSU_WIN_CTRL_ATTR_MASK (0xFF << TSU_WIN_CTRL_ATTR_OFFS)
+#define TSU_WIN_CTRL_SIZE_OFFS 16
+#define TSU_WIN_CTRL_SIZE_MASK (0xFFFF << TSU_WIN_CTRL_SIZE_OFFS)
+
+/* TSU windows base register. */
+#define TSU_WIN_BASE_OFFS 16
+#define TSU_WIN_BASE_MASK (0xFFFF << TSU_WIN_BASE_OFFS)
+
+MV_STATUS mvTsuWinInit(void);
+
+void mvTsuAddrDecShow(void);
+MV_STATUS mvTsuInit(MV_TSU_CORE_CLOCK coreClock, MV_TSU_PORTS_MODE mode,
+ void *osHandle);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvSysTsh */
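A minimal bring-up sketch, for illustration only (not part of the patch): the two calls exported above can be exercised on their own when the core-clock and port-mode parameters of mvTsuInit() are handled elsewhere.

/* Sketch: program the TSU windows from the CPU decode map and dump them. */
if (mvTsuWinInit() != MV_OK)
	mvOsPrintf("TSU window init failed\n");
else
	mvTsuAddrDecShow();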
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.c
new file mode 100644
index 000000000..195b5e13e
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.c
@@ -0,0 +1,497 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "ctrlEnv/sys/mvSysUsb.h"
+
+MV_TARGET usbAddrDecPrioTab[] =
+{
+#if defined(MV_INCLUDE_SDRAM_CS0)
+ SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+ SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+ SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+ SDRAM_CS3,
+#endif
+#if defined(MV_INCLUDE_CESA) && defined(USB_UNDERRUN_WA)
+ CRYPT_ENG,
+#endif
+#if defined(MV_INCLUDE_PEX)
+ PEX0_MEM,
+#endif
+ TBL_TERM
+};
+
+
+
+MV_STATUS mvUsbInit(int dev, MV_BOOL isHost)
+{
+ MV_STATUS status;
+
+ status = mvUsbWinInit(dev);
+ if(status != MV_OK)
+ return status;
+
+ return mvUsbHalInit(dev, isHost);
+}
+
+
+/*******************************************************************************
+* usbWinOverlapDetect - Detect USB address windows overlapping
+*
+* DESCRIPTION:
+*       Unpredictable behaviour is expected if USB address decode
+*       windows overlap.
+*       This function detects USB address decode windows overlapping with a
+*       specified window. The function does not check the window itself for
+*       overlapping. The function also skips disabled address decode windows.
+*
+* INPUT:
+* winNum - address decode window number.
+* pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlaps the current address
+*       decode map, MV_FALSE otherwise, MV_ERROR if invalid data is read
+*       from the registers.
+*
+*******************************************************************************/
+static MV_STATUS usbWinOverlapDetect(int dev, MV_U32 winNum,
+ MV_ADDR_WIN *pAddrWin)
+{
+ MV_U32 winNumIndex;
+ MV_DEC_WIN addrDecWin;
+
+ for(winNumIndex=0; winNumIndex<MV_USB_MAX_ADDR_DECODE_WIN; winNumIndex++)
+ {
+ /* Do not check window itself */
+ if (winNumIndex == winNum)
+ {
+ continue;
+ }
+
+ /* Get window parameters */
+ if (MV_OK != mvUsbWinGet(dev, winNumIndex, &addrDecWin))
+ {
+			mvOsPrintf("%s: ERR. mvUsbWinGet failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ /* Do not check disabled windows */
+ if(addrDecWin.enable == MV_FALSE)
+ {
+ continue;
+ }
+
+ if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+ {
+ return MV_TRUE;
+ }
+ }
+ return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvUsbWinSet - Set USB target address window
+*
+* DESCRIPTION:
+* This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+* address window, also known as address decode window.
+* After setting this target window, the USB will be able to access the
+* target within the address window.
+*
+* INPUT:
+* winNum - USB target address decode window number.
+* pAddrDecWin - USB target window data structure.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_ERROR if the address window overlaps with other address decode windows.
+* MV_BAD_PARAM if base address is invalid parameter or target is
+* unknown.
+*
+*******************************************************************************/
+MV_STATUS mvUsbWinSet(int dev, MV_U32 winNum, MV_DEC_WIN *pDecWin)
+{
+ MV_DEC_WIN_PARAMS winParams;
+ MV_U32 sizeReg, baseReg;
+
+ /* Parameter checking */
+ if (winNum >= MV_USB_MAX_ADDR_DECODE_WIN)
+ {
+ mvOsPrintf("%s: ERR. Invalid win num %d\n",__FUNCTION__, winNum);
+ return MV_BAD_PARAM;
+ }
+
+	/* Check if the requested window overlaps with the current windows */
+ if (MV_TRUE == usbWinOverlapDetect(dev, winNum, &pDecWin->addrWin))
+ {
+ mvOsPrintf("%s: ERR. Window %d overlap\n", __FUNCTION__, winNum);
+ return MV_ERROR;
+ }
+
+ /* check if address is aligned to the size */
+ if(MV_IS_NOT_ALIGN(pDecWin->addrWin.baseLow, pDecWin->addrWin.size))
+ {
+		mvOsPrintf("mvUsbWinSet: Error setting USB window %d to "\
+ "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+ winNum,
+ mvCtrlTargetNameGet(pDecWin->target),
+ pDecWin->addrWin.baseLow,
+ pDecWin->addrWin.size);
+ return MV_ERROR;
+ }
+
+ if(MV_OK != mvCtrlAddrDecToParams(pDecWin, &winParams))
+ {
+ mvOsPrintf("%s: mvCtrlAddrDecToParams Failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ /* set Size, Attributes and TargetID */
+ sizeReg = (((winParams.targetId << MV_USB_WIN_TARGET_OFFSET) & MV_USB_WIN_TARGET_MASK) |
+ ((winParams.attrib << MV_USB_WIN_ATTR_OFFSET) & MV_USB_WIN_ATTR_MASK) |
+ ((winParams.size << MV_USB_WIN_SIZE_OFFSET) & MV_USB_WIN_SIZE_MASK));
+
+#if defined(MV645xx) || defined(MV646xx)
+ /* If window is DRAM with HW cache coherency, make sure bit2 is set */
+ sizeReg &= ~MV_USB_WIN_BURST_WR_LIMIT_MASK;
+
+ if((MV_TARGET_IS_DRAM(pDecWin->target)) &&
+ (pDecWin->addrWinAttr.cachePolicy != NO_COHERENCY))
+ {
+ sizeReg |= MV_USB_WIN_BURST_WR_32BIT_LIMIT;
+ }
+ else
+ {
+ sizeReg |= MV_USB_WIN_BURST_WR_NO_LIMIT;
+ }
+#endif /* MV645xx || MV646xx */
+
+ if (pDecWin->enable == MV_TRUE)
+ {
+ sizeReg |= MV_USB_WIN_ENABLE_MASK;
+ }
+ else
+ {
+ sizeReg &= ~MV_USB_WIN_ENABLE_MASK;
+ }
+
+ /* Update Base value */
+ baseReg = (winParams.baseAddr & MV_USB_WIN_BASE_MASK);
+
+ MV_REG_WRITE( MV_USB_WIN_CTRL_REG(dev, winNum), sizeReg);
+ MV_REG_WRITE( MV_USB_WIN_BASE_REG(dev, winNum), baseReg);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvUsbWinGet - Get USB peripheral target address window.
+*
+* DESCRIPTION:
+* Get USB peripheral target address window.
+*
+* INPUT:
+* winNum - USB target address decode window number.
+*
+* OUTPUT:
+* pDecWin - USB target window data structure.
+*
+* RETURN:
+* MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvUsbWinGet(int dev, MV_U32 winNum, MV_DEC_WIN *pDecWin)
+{
+ MV_DEC_WIN_PARAMS winParam;
+ MV_U32 sizeReg, baseReg;
+
+ /* Parameter checking */
+ if (winNum >= MV_USB_MAX_ADDR_DECODE_WIN)
+ {
+ mvOsPrintf("%s (dev=%d): ERR. Invalid winNum %d\n",
+ __FUNCTION__, dev, winNum);
+ return MV_NOT_SUPPORTED;
+ }
+
+ baseReg = MV_REG_READ( MV_USB_WIN_BASE_REG(dev, winNum) );
+ sizeReg = MV_REG_READ( MV_USB_WIN_CTRL_REG(dev, winNum) );
+
+ /* Check if window is enabled */
+ if(sizeReg & MV_USB_WIN_ENABLE_MASK)
+ {
+ pDecWin->enable = MV_TRUE;
+
+ /* Extract window parameters from registers */
+ winParam.targetId = (sizeReg & MV_USB_WIN_TARGET_MASK) >> MV_USB_WIN_TARGET_OFFSET;
+ winParam.attrib = (sizeReg & MV_USB_WIN_ATTR_MASK) >> MV_USB_WIN_ATTR_OFFSET;
+ winParam.size = (sizeReg & MV_USB_WIN_SIZE_MASK) >> MV_USB_WIN_SIZE_OFFSET;
+ winParam.baseAddr = (baseReg & MV_USB_WIN_BASE_MASK);
+
+ /* Translate the decode window parameters to address decode struct */
+ if (MV_OK != mvCtrlParamsToAddrDec(&winParam, pDecWin))
+ {
+ mvOsPrintf("Failed to translate register parameters to USB address" \
+ " decode window structure\n");
+ return MV_ERROR;
+ }
+ }
+ else
+ {
+ pDecWin->enable = MV_FALSE;
+ }
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvUsbWinInit - Initialize the USB unit address decode windows.
+*
+* INPUT:
+*       dev - USB device (controller) number.
+* OUTPUT:
+*       None.
+* RETURN:
+* MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvUsbWinInit(int dev)
+{
+ MV_STATUS status;
+ MV_DEC_WIN usbWin;
+ MV_CPU_DEC_WIN cpuAddrDecWin;
+ int winNum;
+ MV_U32 winPrioIndex = 0;
+
+ /* First disable all address decode windows */
+ for(winNum = 0; winNum < MV_USB_MAX_ADDR_DECODE_WIN; winNum++)
+ {
+ MV_REG_BIT_RESET(MV_USB_WIN_CTRL_REG(dev, winNum), MV_USB_WIN_ENABLE_MASK);
+ }
+
+ /* Go through all windows in user table until table terminator */
+ winNum = 0;
+ while( (usbAddrDecPrioTab[winPrioIndex] != TBL_TERM) &&
+ (winNum < MV_USB_MAX_ADDR_DECODE_WIN) )
+ {
+ /* first get attributes from CPU If */
+ status = mvCpuIfTargetWinGet(usbAddrDecPrioTab[winPrioIndex],
+ &cpuAddrDecWin);
+
+ if(MV_NO_SUCH == status)
+ {
+ winPrioIndex++;
+ continue;
+ }
+ if (MV_OK != status)
+ {
+ mvOsPrintf("%s: ERR. mvCpuIfTargetWinGet failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ if (cpuAddrDecWin.enable == MV_TRUE)
+ {
+ usbWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+ usbWin.addrWin.baseLow = cpuAddrDecWin.addrWin.baseLow;
+ usbWin.addrWin.size = cpuAddrDecWin.addrWin.size;
+ usbWin.enable = MV_TRUE;
+ usbWin.target = usbAddrDecPrioTab[winPrioIndex];
+
+#if defined(MV645xx) || defined(MV646xx)
+ /* Get the default attributes for that target window */
+ mvCtrlDefAttribGet(usbWin.target, &usbWin.addrWinAttr);
+#endif /* MV645xx || MV646xx */
+
+ if(MV_OK != mvUsbWinSet(dev, winNum, &usbWin))
+ {
+ return MV_ERROR;
+ }
+ winNum++;
+ }
+ winPrioIndex++;
+ }
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvUsbAddrDecShow - Print the USB address decode map.
+*
+* DESCRIPTION:
+*       This function prints the USB address decode map.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID mvUsbAddrDecShow(MV_VOID)
+{
+ MV_DEC_WIN addrDecWin;
+ int i, winNum;
+
+ mvOsOutput( "\n" );
+ mvOsOutput( "USB:\n" );
+ mvOsOutput( "----\n" );
+
+ for(i=0; i<mvCtrlUsbMaxGet(); i++)
+ {
+ mvOsOutput( "Device %d:\n", i);
+
+ for(winNum = 0; winNum < MV_USB_MAX_ADDR_DECODE_WIN; winNum++)
+ {
+ memset(&addrDecWin, 0, sizeof(MV_DEC_WIN) );
+
+ mvOsOutput( "win%d - ", winNum );
+
+ if( mvUsbWinGet(i, winNum, &addrDecWin ) == MV_OK )
+ {
+ if( addrDecWin.enable )
+ {
+ mvOsOutput( "%s base %08x, ",
+ mvCtrlTargetNameGet(addrDecWin.target), addrDecWin.addrWin.baseLow );
+
+ mvSizePrint( addrDecWin.addrWin.size );
+
+#if defined(MV645xx) || defined(MV646xx)
+ switch( addrDecWin.addrWinAttr.swapType)
+ {
+ case MV_BYTE_SWAP:
+ mvOsOutput( "BYTE_SWAP, " );
+ break;
+ case MV_NO_SWAP:
+ mvOsOutput( "NO_SWAP , " );
+ break;
+ case MV_BYTE_WORD_SWAP:
+ mvOsOutput( "BYTE_WORD_SWAP, " );
+ break;
+ case MV_WORD_SWAP:
+ mvOsOutput( "WORD_SWAP, " );
+ break;
+ default:
+ mvOsOutput( "SWAP N/A , " );
+ }
+
+ switch( addrDecWin.addrWinAttr.cachePolicy )
+ {
+ case NO_COHERENCY:
+ mvOsOutput( "NO_COHERENCY , " );
+ break;
+ case WT_COHERENCY:
+ mvOsOutput( "WT_COHERENCY , " );
+ break;
+ case WB_COHERENCY:
+ mvOsOutput( "WB_COHERENCY , " );
+ break;
+ default:
+ mvOsOutput( "COHERENCY N/A, " );
+ }
+
+ switch( addrDecWin.addrWinAttr.pcixNoSnoop )
+ {
+ case 0:
+ mvOsOutput( "PCI-X NS inactive, " );
+ break;
+ case 1:
+ mvOsOutput( "PCI-X NS active , " );
+ break;
+ default:
+ mvOsOutput( "PCI-X NS N/A , " );
+ }
+
+ switch( addrDecWin.addrWinAttr.p2pReq64 )
+ {
+ case 0:
+ mvOsOutput( "REQ64 force" );
+ break;
+ case 1:
+ mvOsOutput( "REQ64 detect" );
+ break;
+ default:
+ mvOsOutput( "REQ64 N/A" );
+ }
+#endif /* MV645xx || MV646xx */
+ mvOsOutput( "\n" );
+ }
+ else
+ mvOsOutput( "disable\n" );
+ }
+ }
+ }
+}
+
+
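For context, an illustrative sketch (not part of the patch) of per-controller initialisation: mvCtrlUsbMaxGet() is the same enumeration helper mvUsbAddrDecShow() uses above, and mvUsbInit() programs the address windows before the USB HAL. The usb_bring_up name and the host-mode choice are assumptions.

/* Sketch: initialise every USB controller in host mode. */
static MV_STATUS usb_bring_up(void)	/* hypothetical helper */
{
	int dev;

	for (dev = 0; dev < mvCtrlUsbMaxGet(); dev++)
	{
		if (mvUsbInit(dev, MV_TRUE) != MV_OK)
			return MV_ERROR;
	}
	return MV_OK;
}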
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.h
new file mode 100644
index 000000000..07f98de9c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.h
@@ -0,0 +1,125 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSysUsbh
+#define __INCmvSysUsbh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* includes */
+#include "usb/mvUsb.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+#define MV_USB_MAX_ADDR_DECODE_WIN 4
+
+/*******************************************/
+/* USB Bridge Registers */
+/*******************************************/
+#define MV_USB_BRIDGE_CTRL_REG(dev) (USB_REG_BASE(dev) + 0x300)
+
+#define MV_USB_WIN_CTRL_REG(dev, win) (USB_REG_BASE(dev) + 0x320 + ((win)<<4))
+#define MV_USB_WIN_BASE_REG(dev, win) (USB_REG_BASE(dev) + 0x324 + ((win)<<4))
+
+/* BITs in Windows 0-3 Control and Base Registers */
+#define MV_USB_WIN_ENABLE_BIT 0
+#define MV_USB_WIN_ENABLE_MASK (1 << MV_USB_WIN_ENABLE_BIT)
+
+#define MV_USB_WIN_BURST_WR_LIMIT_BIT 1
+#define MV_USB_WIN_BURST_WR_LIMIT_MASK (1 << MV_USB_WIN_BURST_WR_LIMIT_BIT)
+#define MV_USB_WIN_BURST_WR_NO_LIMIT (0 << MV_USB_WIN_BURST_WR_LIMIT_BIT)
+#define MV_USB_WIN_BURST_WR_32BIT_LIMIT (1 << MV_USB_WIN_BURST_WR_LIMIT_BIT)
+
+#define MV_USB_WIN_TARGET_OFFSET 4
+#define MV_USB_WIN_TARGET_MASK (0xF << MV_USB_WIN_TARGET_OFFSET)
+
+#define MV_USB_WIN_ATTR_OFFSET 8
+#define MV_USB_WIN_ATTR_MASK (0xFF << MV_USB_WIN_ATTR_OFFSET)
+
+#define MV_USB_WIN_SIZE_OFFSET 16
+#define MV_USB_WIN_SIZE_MASK (0xFFFF << MV_USB_WIN_SIZE_OFFSET)
+
+#define MV_USB_WIN_BASE_OFFSET 16
+#define MV_USB_WIN_BASE_MASK (0xFFFF << MV_USB_WIN_BASE_OFFSET)
+
+
+#define MV_USB_BRIDGE_IPG_REG(dev) (USB_REG_BASE(dev) + 0x360)
+
+
+MV_STATUS mvUsbInit(int dev, MV_BOOL isHost);
+
+MV_STATUS mvUsbWinInit(int dev);
+MV_STATUS mvUsbWinSet(int dev, MV_U32 winNum, MV_DEC_WIN *pAddrWin);
+MV_STATUS mvUsbWinGet(int dev, MV_U32 winNum, MV_DEC_WIN *pAddrWin);
+
+void mvUsbAddrDecShow(void);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvSysUsbh */
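Illustrative only (not part of the patch): the control register above carries the enable bit, target ID, attributes and size, while the base register carries only the upper 16 address bits. The packing below mirrors what mvUsbWinSet() builds from the mvCtrlAddrDecToParams() output; targetId, attrib, size and baseAddr stand in for those decoded parameters.

/* Sketch: pack a USB window control/base register pair, window enabled. */
MV_U32 sizeReg =
	((targetId << MV_USB_WIN_TARGET_OFFSET) & MV_USB_WIN_TARGET_MASK) |
	((attrib   << MV_USB_WIN_ATTR_OFFSET)   & MV_USB_WIN_ATTR_MASK)   |
	((size     << MV_USB_WIN_SIZE_OFFSET)   & MV_USB_WIN_SIZE_MASK)   |
	MV_USB_WIN_ENABLE_MASK;
MV_U32 baseReg = baseAddr & MV_USB_WIN_BASE_MASK;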
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.c
new file mode 100644
index 000000000..f9d0ab37d
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.c
@@ -0,0 +1,662 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "xor/mvXor.h"
+#include "mvSysXor.h"
+
+/* defines */
+#ifdef MV_DEBUG
+ #define DB(x) x
+#else
+ #define DB(x)
+#endif
+
+
+static MV_STATUS xorWinOverlapDetect(MV_U32 unit,MV_U32 winNum, MV_ADDR_WIN *pAddrWin);
+
+MV_TARGET xorAddrDecPrioTap[] =
+{
+#if defined(MV_INCLUDE_DEVICE_CS0)
+ DEVICE_CS0,
+#endif
+#if defined(MV_INCLUDE_PEX)
+ PEX0_MEM,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS0)
+ SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+ SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+ SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+ SDRAM_CS3,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS1)
+ DEVICE_CS1,
+#endif
+#if defined(MV_INCLUDE_CESA)
+ CRYPT_ENG,
+#endif
+ TBL_TERM
+};
+static MV_STATUS mvXorInitWinsUnit (MV_U32 unit)
+{
+ MV_U32 winNum;
+ MV_XOR_DEC_WIN addrDecWin;
+ MV_CPU_DEC_WIN cpuAddrDecWin;
+ MV_U32 status;
+ MV_U32 winPrioIndex=0;
+
+ /* Initiate XOR address decode */
+
+ /* First disable all address decode windows */
+ for(winNum = 0; winNum < XOR_MAX_ADDR_DEC_WIN; winNum++)
+ {
+ mvXorTargetWinEnable(unit,winNum, MV_FALSE);
+ }
+
+ /* Go through all windows in user table until table terminator */
+ for (winNum = 0; ((xorAddrDecPrioTap[winPrioIndex] != TBL_TERM) &&
+ (winNum < XOR_MAX_ADDR_DEC_WIN));)
+ {
+ /* first get attributes from CPU If */
+ status = mvCpuIfTargetWinGet(xorAddrDecPrioTap[winPrioIndex],
+ &cpuAddrDecWin);
+
+ if(MV_NO_SUCH == status)
+ {
+ winPrioIndex++;
+ continue;
+ }
+ if (MV_OK != status)
+ {
+ mvOsPrintf("%s: ERR. mvCpuIfTargetWinGet failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+
+ if (cpuAddrDecWin.enable == MV_TRUE)
+ {
+
+ addrDecWin.target = xorAddrDecPrioTap[winPrioIndex];
+ addrDecWin.addrWin.baseLow = cpuAddrDecWin.addrWin.baseLow;
+ addrDecWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+ addrDecWin.addrWin.size = cpuAddrDecWin.addrWin.size;
+ addrDecWin.enable = MV_TRUE;
+
+ if (MV_OK != mvXorTargetWinSet(unit,winNum, &addrDecWin))
+ {
+			DB(mvOsPrintf("mvXorInitWinsUnit: ERR. mvXorTargetWinSet failed\n"));
+ return MV_ERROR;
+ }
+ winNum++;
+ }
+ winPrioIndex++;
+
+ }
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvXorInit - Initialize XOR engine
+*
+* DESCRIPTION:
+*       This function initializes the XOR unit. It sets the default address
+*       decode windows of the unit.
+* Note that if the address window is disabled in xorAddrDecMap, the
+* window parameters will be set but the window will remain disabled.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_BAD_PARAM if the parameters to the function are invalid, MV_OK otherwise.
+*******************************************************************************/
+MV_STATUS mvXorInit (MV_VOID)
+{
+ MV_U32 i;
+
+ /* Initiate XOR address decode */
+ for(i = 0; i < MV_XOR_MAX_UNIT; i++)
+ mvXorInitWinsUnit(i);
+
+ mvXorHalInit(MV_XOR_MAX_CHAN);
+
+ return MV_OK;
+}
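+
+/*
+ * Illustrative usage sketch (added for clarity, not part of the original
+ * Marvell sources): a board-level driver would typically call mvXorInit()
+ * once during bring-up and may dump the resulting decode map when debugging:
+ *
+ *	if (mvXorInit() != MV_OK)
+ *		mvOsPrintf("XOR engine init failed\n");
+ *	mvXorAddrDecShow();
+ */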
+
+/*******************************************************************************
+* mvXorTargetWinSet - Set XOR target address window
+*
+* DESCRIPTION:
+* This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+* address window. After setting this target window, the XOR will be
+* able to access the target within the address window.
+*
+* INPUT:
+* winNum - One of the possible XOR memory decode windows.
+* target - Peripheral target enumerator.
+* base - Window base address.
+* size - Window size.
+* enable - Window enable/disable.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvXorTargetWinSet(MV_U32 unit, MV_U32 winNum, MV_XOR_DEC_WIN *pAddrDecWin)
+{
+ MV_DEC_REGS xorDecRegs;
+ MV_TARGET_ATTRIB targetAttribs;
+ MV_U32 chan;
+
+ /* Parameter checking */
+ if (winNum >= XOR_MAX_ADDR_DEC_WIN)
+ {
+ DB(mvOsPrintf("%s: ERR. Invalid win num %d\n",__FUNCTION__, winNum));
+ return MV_BAD_PARAM;
+ }
+ if (pAddrDecWin == NULL)
+ {
+ DB(mvOsPrintf("%s: ERR. pAddrDecWin is NULL pointer\n", __FUNCTION__ ));
+ return MV_BAD_PTR;
+ }
+ /* Check if the requested window overlaps with current windows */
+ if (MV_TRUE == xorWinOverlapDetect(unit, winNum, &pAddrDecWin->addrWin))
+ {
+ DB(mvOsPrintf("%s: ERR. Window %d overlap\n",__FUNCTION__,winNum));
+ return MV_ERROR;
+ }
+
+ xorDecRegs.baseReg = MV_REG_READ(XOR_BASE_ADDR_REG(unit,winNum));
+ xorDecRegs.sizeReg = MV_REG_READ(XOR_SIZE_MASK_REG(unit,winNum));
+
+ /* Get Base Address and size registers values */
+ if(MV_OK != mvCtrlAddrDecToReg(&pAddrDecWin->addrWin, &xorDecRegs))
+ {
+ DB(mvOsPrintf("%s: ERR. Invalid addr dec window\n",__FUNCTION__));
+ return MV_BAD_PARAM;
+ }
+
+
+ mvCtrlAttribGet(pAddrDecWin->target,&targetAttribs);
+
+ /* set attributes */
+ xorDecRegs.baseReg &= ~XEBARX_ATTR_MASK;
+ xorDecRegs.baseReg |= targetAttribs.attrib << XEBARX_ATTR_OFFS;
+ /* set target ID */
+ xorDecRegs.baseReg &= ~XEBARX_TARGET_MASK;
+ xorDecRegs.baseReg |= targetAttribs.targetId << XEBARX_TARGET_OFFS;
+
+
+ /* Write to address decode Base Address Register */
+ MV_REG_WRITE(XOR_BASE_ADDR_REG(unit,winNum), xorDecRegs.baseReg);
+
+ /* Write to Size Register */
+ MV_REG_WRITE(XOR_SIZE_MASK_REG(unit,winNum), xorDecRegs.sizeReg);
+
+ for (chan = 0; chan < MV_XOR_MAX_CHAN_PER_UNIT; chan++)
+ {
+ if (pAddrDecWin->enable)
+ {
+ MV_REG_BIT_SET(XOR_WINDOW_CTRL_REG(unit,chan),
+ XEXWCR_WIN_EN_MASK(winNum));
+ }
+ else
+ {
+ MV_REG_BIT_RESET(XOR_WINDOW_CTRL_REG(unit,chan),
+ XEXWCR_WIN_EN_MASK(winNum));
+ }
+ }
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvXorTargetWinGet - Get xor peripheral target address window.
+*
+* DESCRIPTION:
+* Get xor peripheral target address window.
+*
+* INPUT:
+* winNum - One of the possible XOR memory decode windows.
+*
+* OUTPUT:
+* base - Window base address.
+* size - Window size.
+* enable - window enable/disable.
+*
+* RETURN:
+* MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvXorTargetWinGet(MV_U32 unit,MV_U32 winNum, MV_XOR_DEC_WIN *pAddrDecWin)
+{
+ MV_DEC_REGS xorDecRegs;
+ MV_TARGET_ATTRIB targetAttrib;
+ MV_U32 chan=0,chanWinEn;
+
+ /* Parameter checking */
+ if (winNum >= XOR_MAX_ADDR_DEC_WIN)
+ {
+ DB(mvOsPrintf("%s: ERR. Invalid win num %d\n",__FUNCTION__ , winNum));
+ return MV_ERROR;
+ }
+
+ if (NULL == pAddrDecWin)
+ {
+ DB(mvOsPrintf("%s: ERR. pAddrDecWin is NULL pointer\n", __FUNCTION__ ));
+ return MV_BAD_PTR;
+ }
+
+ chanWinEn = MV_REG_READ(XOR_WINDOW_CTRL_REG(unit,0)) & XEXWCR_WIN_EN_MASK(winNum);
+
+ for (chan = 0; chan < MV_XOR_MAX_CHAN_PER_UNIT; chan++) /* we should scan here all channels per unit */
+ {
+ /* Check if enable bit is equal for all channels */
+ if ((MV_REG_READ(XOR_WINDOW_CTRL_REG(unit,chan)) &
+ XEXWCR_WIN_EN_MASK(winNum)) != chanWinEn)
+ {
+ mvOsPrintf("%s: ERR. Window enable field must be equal in "
+ "all channels(chan=%d)\n",__FUNCTION__, chan);
+ return MV_ERROR;
+ }
+ }
+
+
+
+ xorDecRegs.baseReg = MV_REG_READ(XOR_BASE_ADDR_REG(unit,winNum));
+ xorDecRegs.sizeReg = MV_REG_READ(XOR_SIZE_MASK_REG(unit,winNum));
+
+ if (MV_OK != mvCtrlRegToAddrDec(&xorDecRegs, &pAddrDecWin->addrWin))
+ {
+ mvOsPrintf("%s: ERR. mvCtrlRegToAddrDec failed\n", __FUNCTION__);
+ return MV_ERROR;
+ }
+
+ /* attrib and targetId */
+ targetAttrib.attrib =
+ (xorDecRegs.baseReg & XEBARX_ATTR_MASK) >> XEBARX_ATTR_OFFS;
+ targetAttrib.targetId =
+ (xorDecRegs.baseReg & XEBARX_TARGET_MASK) >> XEBARX_TARGET_OFFS;
+
+
+ pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+ if(chanWinEn)
+ {
+ pAddrDecWin->enable = MV_TRUE;
+ }
+ else pAddrDecWin->enable = MV_FALSE;
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvXorTargetWinEnable - Enable/disable a Xor address decode window
+*
+* DESCRIPTION:
+*       This function enables/disables a XOR address decode window.
+*       If parameter 'enable' == MV_TRUE the routine enables the window,
+*       thus allowing XOR accesses (before enabling, the window is tested
+*       for overlap). Otherwise, the window is disabled.
+*
+* INPUT:
+* winNum - Decode window number.
+* enable - Enable/disable parameter.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvXorTargetWinEnable(MV_U32 unit,MV_U32 winNum, MV_BOOL enable)
+{
+ MV_XOR_DEC_WIN addrDecWin;
+ MV_U32 chan;
+
+ /* Parameter checking */
+ if (winNum >= XOR_MAX_ADDR_DEC_WIN)
+ {
+ DB(mvOsPrintf("%s: ERR. Invalid winNum%d\n", __FUNCTION__, winNum));
+ return MV_ERROR;
+ }
+
+ if (enable == MV_TRUE)
+ {
+ /* Get current window */
+ if (MV_OK != mvXorTargetWinGet(unit,winNum, &addrDecWin))
+ {
+ DB(mvOsPrintf("%s: ERR. targetWinGet fail\n", __FUNCTION__));
+ return MV_ERROR;
+ }
+
+ /* Check for overlapping */
+ if (MV_TRUE == xorWinOverlapDetect(unit,winNum, &(addrDecWin.addrWin)))
+ {
+ /* Overlap detected */
+ DB(mvOsPrintf("%s: ERR. Overlap detected\n", __FUNCTION__));
+ return MV_ERROR;
+ }
+
+ /* No Overlap. Enable address decode target window */
+ for (chan = 0; chan < MV_XOR_MAX_CHAN_PER_UNIT; chan++)
+ {
+ MV_REG_BIT_SET(XOR_WINDOW_CTRL_REG(unit,chan),
+ XEXWCR_WIN_EN_MASK(winNum));
+ }
+
+ }
+ else
+ {
+ /* Disable address decode target window */
+
+ for (chan = 0; chan < MV_XOR_MAX_CHAN_PER_UNIT; chan++)
+ {
+ MV_REG_BIT_RESET(XOR_WINDOW_CTRL_REG(unit,chan),
+ XEXWCR_WIN_EN_MASK(winNum));
+ }
+
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvXorProtWinSet - Configure access attributes of a XOR engine
+* to one of the XOR memory windows.
+*
+* DESCRIPTION:
+* Each engine can be configured with access attributes for each of the
+*       memory spaces. This function sets the access attributes of a given
+*       window for the given engine.
+*
+* INPUTS:
+* chan - One of the possible engines.
+* winNum - One of the possible XOR memory spaces.
+* access - Protection access rights.
+* write - Write rights.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvXorProtWinSet (MV_U32 unit,MV_U32 chan, MV_U32 winNum, MV_BOOL access,
+ MV_BOOL write)
+{
+ MV_U32 temp;
+
+ /* Parameter checking */
+ if (chan >= MV_XOR_MAX_CHAN_PER_UNIT)
+ {
+ DB(mvOsPrintf("%s: ERR. Invalid chan num %d\n", __FUNCTION__ , chan));
+ return MV_BAD_PARAM;
+ }
+ if (winNum >= XOR_MAX_ADDR_DEC_WIN)
+ {
+ DB(mvOsPrintf("%s: ERR. Invalid win num %d\n", __FUNCTION__, winNum));
+ return MV_BAD_PARAM;
+ }
+
+ temp = MV_REG_READ(XOR_WINDOW_CTRL_REG(unit,chan)) &
+ (~XEXWCR_WIN_ACC_MASK(winNum));
+
+	/* if access is disabled */
+ if (!access)
+ {
+ /* disable access */
+ temp |= XEXWCR_WIN_ACC_NO_ACC(winNum);
+ }
+	/* if access is enabled */
+ else
+ {
+		/* if write is enabled */
+ if (write)
+ {
+ /* enable write */
+ temp |= XEXWCR_WIN_ACC_RW(winNum);
+ }
+		/* if write is disabled */
+ else
+ {
+ /* disable write */
+ temp |= XEXWCR_WIN_ACC_RO(winNum);
+ }
+ }
+ MV_REG_WRITE(XOR_WINDOW_CTRL_REG(unit,chan),temp);
+ return MV_OK;
+}
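+
+/*
+ * Illustrative usage sketch (added for clarity, not part of the original
+ * Marvell sources): grant channel 0 of XOR unit 0 read-only access to
+ * decode window 1 and revoke access to window 2 entirely; the argument
+ * order follows the prototype above (unit, chan, winNum, access, write):
+ *
+ *	mvXorProtWinSet(0, 0, 1, MV_TRUE,  MV_FALSE);	read-only
+ *	mvXorProtWinSet(0, 0, 2, MV_FALSE, MV_FALSE);	no access
+ */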
+
+/*******************************************************************************
+* mvXorPciRemap - Set XOR remap register for PCI address windows.
+*
+* DESCRIPTION:
+*       Set the XOR high address remap register for the given window.
+*       Only windows 0-3 can be remapped.
+*
+* INPUT:
+* winNum - window number
+* pAddrDecWin - pointer to address space window structure
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvXorPciRemap(MV_U32 unit,MV_U32 winNum, MV_U32 addrHigh)
+{
+ /* Parameter checking */
+ if (winNum >= XOR_MAX_REMAP_WIN)
+ {
+ DB(mvOsPrintf("%s: ERR. Invalid win num %d\n", __FUNCTION__, winNum));
+ return MV_BAD_PARAM;
+ }
+
+ MV_REG_WRITE(XOR_HIGH_ADDR_REMAP_REG(unit,winNum), addrHigh);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* xorWinOverlapDetect - Detect XOR address window overlap
+*
+* DESCRIPTION:
+*       Unpredictable behaviour is expected if XOR address decode windows
+*       overlap.
+*       This function detects whether a specified window overlaps any other
+*       XOR address decode window. The window is not checked against itself,
+*       and disabled address decode windows are skipped.
+*
+* INPUT:
+* winNum - address decode window number.
+* pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE if the given address window overlap current address
+* decode map, MV_FALSE otherwise, MV_ERROR if reading invalid data
+* from registers.
+*
+*******************************************************************************/
+static MV_STATUS xorWinOverlapDetect(MV_U32 unit,MV_U32 winNum, MV_ADDR_WIN *pAddrWin)
+{
+ MV_U32 baseAddrEnableReg;
+ MV_U32 winNumIndex,chan;
+ MV_XOR_DEC_WIN addrDecWin;
+
+ if (pAddrWin == NULL)
+ {
+ DB(mvOsPrintf("%s: ERR. pAddrWin is NULL pointer\n", __FUNCTION__ ));
+ return MV_BAD_PTR;
+ }
+
+ for (chan = 0; chan < MV_XOR_MAX_CHAN_PER_UNIT; chan++)
+ {
+ /* Read base address enable register. Do not check disabled windows */
+ baseAddrEnableReg = MV_REG_READ(XOR_WINDOW_CTRL_REG(unit,chan));
+
+ for (winNumIndex = 0; winNumIndex < XOR_MAX_ADDR_DEC_WIN; winNumIndex++)
+ {
+ /* Do not check window itself */
+ if (winNumIndex == winNum)
+ {
+ continue;
+ }
+
+ /* Do not check disabled windows */
+ if ((baseAddrEnableReg & XEXWCR_WIN_EN_MASK(winNumIndex)) == 0)
+ {
+ continue;
+ }
+
+ /* Get window parameters */
+ if (MV_OK != mvXorTargetWinGet(unit,winNumIndex, &addrDecWin))
+ {
+ DB(mvOsPrintf("%s: ERR. TargetWinGet failed\n", __FUNCTION__ ));
+ return MV_ERROR;
+ }
+
+ if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+ {
+ return MV_TRUE;
+ }
+ }
+ }
+
+ return MV_FALSE;
+}
+
+static MV_VOID mvXorAddrDecShowUnit(MV_U32 unit)
+{
+ MV_XOR_DEC_WIN win;
+ int i;
+
+ mvOsOutput( "\n" );
+ mvOsOutput( "XOR %d:\n", unit );
+ mvOsOutput( "----\n" );
+
+ for( i = 0; i < XOR_MAX_ADDR_DEC_WIN; i++ )
+ {
+ memset( &win, 0, sizeof(MV_XOR_DEC_WIN) );
+
+ mvOsOutput( "win%d - ", i );
+
+ if( mvXorTargetWinGet(unit, i, &win ) == MV_OK )
+ {
+ if( win.enable )
+ {
+ mvOsOutput( "%s base %x, ",
+ mvCtrlTargetNameGet(win.target), win.addrWin.baseLow );
+
+ mvSizePrint( win.addrWin.size );
+
+ mvOsOutput( "\n" );
+ }
+ else
+ mvOsOutput( "disable\n" );
+ }
+ }
+}
+
+/*******************************************************************************
+* mvXorAddrDecShow - Print the XOR address decode map.
+*
+* DESCRIPTION:
+*       This function prints the XOR address decode map.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID mvXorAddrDecShow(MV_VOID)
+{
+ int i;
+
+ for( i = 0; i < MV_XOR_MAX_UNIT; i++ )
+ mvXorAddrDecShowUnit(i);
+
+}
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.h
new file mode 100644
index 000000000..73b2d9e6a
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.h
@@ -0,0 +1,140 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCMVSysXorh
+#define __INCMVSysXorh
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+#define XOR_MAX_ADDR_DEC_WIN 8 /* Maximum address decode windows */
+#define XOR_MAX_REMAP_WIN 4 /* Maximum address arbiter windows */
+
+/* XOR Engine Address Decoding Register Map */
+#define XOR_WINDOW_CTRL_REG(unit,chan) (XOR_UNIT_BASE(unit)+(0x240 + ((chan) * 4)))
+#define XOR_BASE_ADDR_REG(unit,winNum) (XOR_UNIT_BASE(unit)+(0x250 + ((winNum) * 4)))
+#define XOR_SIZE_MASK_REG(unit,winNum) (XOR_UNIT_BASE(unit)+(0x270 + ((winNum) * 4)))
+#define XOR_HIGH_ADDR_REMAP_REG(unit,winNum) (XOR_UNIT_BASE(unit)+(0x290 + ((winNum) * 4)))
+
+/* XOR Engine [0..1] Window Control Registers (XExWCR) */
+#define XEXWCR_WIN_EN_OFFS(winNum) (winNum)
+#define XEXWCR_WIN_EN_MASK(winNum) (1 << (XEXWCR_WIN_EN_OFFS(winNum)))
+#define XEXWCR_WIN_EN_ENABLE(winNum) (1 << (XEXWCR_WIN_EN_OFFS(winNum)))
+#define XEXWCR_WIN_EN_DISABLE(winNum) (0 << (XEXWCR_WIN_EN_OFFS(winNum)))
+
+#define XEXWCR_WIN_ACC_OFFS(winNum) ((2 * winNum) + 16)
+#define XEXWCR_WIN_ACC_MASK(winNum) (3 << (XEXWCR_WIN_ACC_OFFS(winNum)))
+#define XEXWCR_WIN_ACC_NO_ACC(winNum) (0 << (XEXWCR_WIN_ACC_OFFS(winNum)))
+#define XEXWCR_WIN_ACC_RO(winNum) (1 << (XEXWCR_WIN_ACC_OFFS(winNum)))
+#define XEXWCR_WIN_ACC_RW(winNum) (3 << (XEXWCR_WIN_ACC_OFFS(winNum)))
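+
+/*
+ * Worked example (added for clarity, not part of the original Marvell
+ * sources): for winNum = 2 the macros above resolve to
+ *	XEXWCR_WIN_EN_MASK(2)  = 1 << 2		(enable bit 2 of XExWCR)
+ *	XEXWCR_WIN_ACC_OFFS(2) = 20		(access field at bits [21:20])
+ *	XEXWCR_WIN_ACC_RW(2)   = 3 << 20	(read/write access)
+ */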
+
+/* XOR Engine Base Address Registers (XEBARx) */
+#define XEBARX_TARGET_OFFS (0)
+#define XEBARX_TARGET_MASK (0xF << XEBARX_TARGET_OFFS)
+#define XEBARX_ATTR_OFFS (8)
+#define XEBARX_ATTR_MASK (0xFF << XEBARX_ATTR_OFFS)
+#define XEBARX_BASE_OFFS (16)
+#define XEBARX_BASE_MASK (0xFFFF << XEBARX_BASE_OFFS)
+
+/* XOR Engine Size Mask Registers (XESMRx) */
+#define XESMRX_SIZE_MASK_OFFS (16)
+#define XESMRX_SIZE_MASK_MASK (0xFFFF << XESMRX_SIZE_MASK_OFFS)
+
+/* XOR Engine High Address Remap Register (XEHARRx1) */
+#define XEHARRX_REMAP_OFFS (0)
+#define XEHARRX_REMAP_MASK (0xFFFFFFFF << XEHARRX_REMAP_OFFS)
+
+typedef struct _mvXorDecWin
+{
+ MV_TARGET target;
+ MV_ADDR_WIN addrWin; /* An address window*/
+ MV_BOOL enable; /* Address decode window is enabled/disabled */
+
+}MV_XOR_DEC_WIN;
+
+MV_STATUS mvXorInit (MV_VOID);
+MV_STATUS mvXorTargetWinSet(MV_U32 unit, MV_U32 winNum,
+ MV_XOR_DEC_WIN *pAddrDecWin);
+MV_STATUS mvXorTargetWinGet(MV_U32 unit, MV_U32 winNum,
+ MV_XOR_DEC_WIN *pAddrDecWin);
+MV_STATUS mvXorTargetWinEnable(MV_U32 unit,
+ MV_U32 winNum, MV_BOOL enable);
+MV_STATUS mvXorProtWinSet (MV_U32 unit,MV_U32 chan, MV_U32 winNum, MV_BOOL access,
+ MV_BOOL write);
+MV_STATUS mvXorPciRemap(MV_U32 unit, MV_U32 winNum, MV_U32 addrHigh);
+
+MV_VOID mvXorAddrDecShow(MV_VOID);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.c
new file mode 100644
index 000000000..80325fc12
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.c
@@ -0,0 +1,75 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "device/mvDevice.h"
+
+/* defines */
+#ifdef MV_DEBUG
+ #define DB(x) x
+#else
+ #define DB(x)
+#endif
+
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.h
new file mode 100644
index 000000000..935077966
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.h
@@ -0,0 +1,74 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDeviceH
+#define __INCmvDeviceH
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "device/mvDeviceRegs.h"
+
+
+#endif /* #ifndef __INCmvDeviceH */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDeviceRegs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDeviceRegs.h
new file mode 100644
index 000000000..80778ad93
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDeviceRegs.h
@@ -0,0 +1,101 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDeviceRegsH
+#define __INCmvDeviceRegsH
+
+#ifndef MV_ASMLANGUAGE
+#include "ctrlEnv/mvCtrlEnvLib.h"
+/* This enumerator describes the possible devices that can be connected    */
+/* to the Marvell controller's device interface.                           */
+typedef enum _mvDevice
+{
+#if defined(MV_INCLUDE_DEVICE_CS0)
+ DEV_CS0 = 0, /* Device connected to dev CS[0] */
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS1)
+ DEV_CS1 = 1, /* Device connected to dev CS[1] */
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS2)
+ DEV_CS2 = 2, /* Device connected to dev CS[2] */
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS3)
+	DEV_CS3 = 3,        /* Device connected to dev CS[3]      */
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS4)
+ DEV_CS4 = 4, /* Device connected to BOOT dev */
+#endif
+ MV_DEV_MAX_CS = MV_DEVICE_MAX_CS
+}MV_DEVICE;
+
+
+#endif /* MV_ASMLANGUAGE */
+
+
+#define NAND_CTRL_REG 0x10470
+
+#define NAND_ACTCEBOOT_BIT BIT1
+
+
+#endif /* #ifndef __INCmvDeviceRegsH */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/mvCompVer.txt b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/mvCompVer.txt
new file mode 100644
index 000000000..40531164c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/kw_family/mvCompVer.txt
@@ -0,0 +1,4 @@
+Global HAL Version: FEROCEON_HAL_3_1_7
+Unit HAL Version: 3.1.5
+Description: This component includes an implementation of the unit HAL drivers
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.c
new file mode 100644
index 000000000..75f7e88cf
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.c
@@ -0,0 +1,211 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+/*******************************************************************************
+* mvOsCpuArchLib.c - Marvell CPU architecture library
+*
+* DESCRIPTION:
+*       This library introduces the Marvell API for OS-dependent CPU
+*       architecture services, providing a single CPU architecture services
+*       API across operating systems.
+*
+* DEPENDENCIES:
+* None.
+*
+*******************************************************************************/
+
+/* includes */
+#include <asm/processor.h>
+#include "mvOs.h"
+
+static MV_U32 read_p15_c0 (void);
+
+/* defines */
+#define ARM_ID_REVISION_OFFS 0
+#define ARM_ID_REVISION_MASK (0xf << ARM_ID_REVISION_OFFS)
+
+#define ARM_ID_PART_NUM_OFFS 4
+#define ARM_ID_PART_NUM_MASK (0xfff << ARM_ID_PART_NUM_OFFS)
+
+#define ARM_ID_ARCH_OFFS 16
+#define ARM_ID_ARCH_MASK (0xf << ARM_ID_ARCH_OFFS)
+
+#define ARM_ID_VAR_OFFS 20
+#define ARM_ID_VAR_MASK (0xf << ARM_ID_VAR_OFFS)
+
+#define ARM_ID_ASCII_OFFS 24
+#define ARM_ID_ASCII_MASK (0xff << ARM_ID_ASCII_OFFS)
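+
+/* Note (added for clarity): the masks above carve up the ARM Main ID
+ * register read by read_p15_c0() below - revision in bits [3:0], part
+ * number in [15:4], architecture in [19:16], variant in [23:20] and the
+ * implementer's ASCII code in [31:24]. */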
+
+
+
+void* mvOsIoCachedMalloc( void* osHandle, MV_U32 size, MV_ULONG* pPhyAddr,
+ MV_U32 *memHandle)
+{
+ void *p = kmalloc( size, GFP_KERNEL );
+ *pPhyAddr = pci_map_single( osHandle, p, 0, PCI_DMA_BIDIRECTIONAL );
+ return p;
+}
+void* mvOsIoUncachedMalloc( void* osHandle, MV_U32 size, MV_ULONG* pPhyAddr,
+ MV_U32 *memHandle)
+{
+ return pci_alloc_consistent( osHandle, size, (dma_addr_t *)pPhyAddr );
+}
+
+void mvOsIoUncachedFree( void* osHandle, MV_U32 size, MV_ULONG phyAddr, void* pVirtAddr,
+ MV_U32 memHandle)
+{
+ return pci_free_consistent( osHandle, size, pVirtAddr, (dma_addr_t)phyAddr );
+}
+
+void mvOsIoCachedFree( void* osHandle, MV_U32 size, MV_ULONG phyAddr, void* pVirtAddr,
+ MV_U32 memHandle )
+{
+ return kfree( pVirtAddr );
+}
+
+int mvOsRand(void)
+{
+ int rand;
+ get_random_bytes(&rand, sizeof(rand) );
+ return rand;
+}
+
+/*******************************************************************************
+* mvOsCpuRevGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32bit CPU Revision
+*
+*******************************************************************************/
+MV_U32 mvOsCpuRevGet( MV_VOID )
+{
+ return ((read_p15_c0() & ARM_ID_REVISION_MASK ) >> ARM_ID_REVISION_OFFS);
+}
+/*******************************************************************************
+* mvOsCpuPartGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32bit CPU Part number
+*
+*******************************************************************************/
+MV_U32 mvOsCpuPartGet( MV_VOID )
+{
+ return ((read_p15_c0() & ARM_ID_PART_NUM_MASK ) >> ARM_ID_PART_NUM_OFFS);
+}
+/*******************************************************************************
+* mvOsCpuArchGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       32bit CPU Architecture number
+*
+*******************************************************************************/
+MV_U32 mvOsCpuArchGet( MV_VOID )
+{
+ return ((read_p15_c0() & ARM_ID_ARCH_MASK ) >> ARM_ID_ARCH_OFFS);
+}
+/*******************************************************************************
+* mvOsCpuVarGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32bit CPU Variant number
+*
+*******************************************************************************/
+MV_U32 mvOsCpuVarGet( MV_VOID )
+{
+ return ((read_p15_c0() & ARM_ID_VAR_MASK ) >> ARM_ID_VAR_OFFS);
+}
+/*******************************************************************************
+* mvOsCpuAsciiGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       32bit CPU implementer (ASCII) code
+*
+*******************************************************************************/
+MV_U32 mvOsCpuAsciiGet( MV_VOID )
+{
+ return ((read_p15_c0() & ARM_ID_ASCII_MASK ) >> ARM_ID_ASCII_OFFS);
+}
+
+
+
+/*
+static unsigned long read_p15_c0 (void)
+*/
+/* read co-processor 15, register #0 (ID register) */
+static MV_U32 read_p15_c0 (void)
+{
+ MV_U32 value;
+
+ __asm__ __volatile__(
+        "mrc     p15, 0, %0, c0, c0, 0   @ read ID reg\n"
+ : "=r" (value)
+ :
+ : "memory");
+
+ return value;
+}
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.h
new file mode 100644
index 000000000..8da562a40
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.h
@@ -0,0 +1,423 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef _MV_OS_LNX_H_
+#define _MV_OS_LNX_H_
+
+
+#ifdef __KERNEL__
+/* for kernel space */
+#include <linux/autoconf.h>
+#include <linux/interrupt.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/hardirq.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+
+#include <linux/random.h>
+
+#include "dbg-trace.h"
+
+extern void mv_early_printk(char *fmt,...);
+
+#define MV_ASM __asm__ __volatile__
+#define INLINE inline
+#define MV_TRC_REC TRC_REC
+#define mvOsPrintf printk
+#define mvOsEarlyPrintf mv_early_printk
+#define mvOsOutput printk
+#define mvOsSPrintf sprintf
+#define mvOsMalloc(_size_) kmalloc(_size_,GFP_ATOMIC)
+#define mvOsFree kfree
+#define mvOsMemcpy memcpy
+#define mvOsSleep(_mils_) mdelay(_mils_)
+#define mvOsTaskLock()
+#define mvOsTaskUnlock()
+#define strtol simple_strtoul
+#define mvOsDelay(x) mdelay(x)
+#define mvOsUDelay(x) udelay(x)
+#define mvCopyFromOs copy_from_user
+#define mvCopyToOs copy_to_user
+
+
+#include "mvTypes.h"
+#include "mvCommon.h"
+
+#ifdef MV_NDEBUG
+#define mvOsAssert(cond)
+#else
+#define mvOsAssert(cond) { do { if(!(cond)) { BUG(); } }while(0); }
+#endif /* MV_NDEBUG */
+
+#else /* __KERNEL__ */
+
+/* for user space applications */
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#define INLINE inline
+#define mvOsPrintf printf
+#define mvOsOutput printf
+#define mvOsMalloc(_size_) malloc(_size_)
+#define mvOsFree free
+#define mvOsAssert(cond) assert(cond)
+
+#endif /* __KERNEL__ */
+#define mvOsIoVirtToPhy(pDev, pVirtAddr) \
+ pci_map_single( (pDev), (pVirtAddr), 0, PCI_DMA_BIDIRECTIONAL )
+
+#define mvOsCacheClear(pDev, p, size ) \
+ pci_map_single( (pDev), (p), (size), PCI_DMA_BIDIRECTIONAL)
+
+#define mvOsCacheFlush(pDev, p, size ) \
+ pci_map_single( (pDev), (p), (size), PCI_DMA_TODEVICE)
+
+#define mvOsCacheInvalidate(pDev, p, size) \
+ pci_map_single( (pDev), (p), (size), PCI_DMA_FROMDEVICE )
+
+#define mvOsCacheUnmap(pDev, phys, size) \
+ pci_unmap_single( (pDev), (dma_addr_t)(phys), (size), PCI_DMA_FROMDEVICE )
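+
+/* Note (added for clarity): the cache helpers above piggy-back on the Linux
+ * PCI DMA mapping calls - PCI_DMA_TODEVICE flushes the buffer,
+ * PCI_DMA_FROMDEVICE invalidates it and PCI_DMA_BIDIRECTIONAL does both;
+ * only mvOsIoVirtToPhy actually uses the returned bus address. */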
+
+
+#define CPU_PHY_MEM(x) (MV_U32)x
+#define CPU_MEMIO_CACHED_ADDR(x) (void*)x
+#define CPU_MEMIO_UNCACHED_ADDR(x) (void*)x
+
+
+/* CPU architecture dependent 32, 16, 8 bit read/write IO addresses */
+#define MV_MEMIO32_WRITE(addr, data) \
+ ((*((volatile unsigned int*)(addr))) = ((unsigned int)(data)))
+
+#define MV_MEMIO32_READ(addr) \
+ ((*((volatile unsigned int*)(addr))))
+
+#define MV_MEMIO16_WRITE(addr, data) \
+ ((*((volatile unsigned short*)(addr))) = ((unsigned short)(data)))
+
+#define MV_MEMIO16_READ(addr) \
+ ((*((volatile unsigned short*)(addr))))
+
+#define MV_MEMIO8_WRITE(addr, data) \
+ ((*((volatile unsigned char*)(addr))) = ((unsigned char)(data)))
+
+#define MV_MEMIO8_READ(addr) \
+ ((*((volatile unsigned char*)(addr))))
+
+
+/* No Fast Swap implementation (in assembler) for ARM */
+#define MV_32BIT_LE_FAST(val) MV_32BIT_LE(val)
+#define MV_16BIT_LE_FAST(val) MV_16BIT_LE(val)
+#define MV_32BIT_BE_FAST(val) MV_32BIT_BE(val)
+#define MV_16BIT_BE_FAST(val) MV_16BIT_BE(val)
+
+/* 32 and 16 bit read/write in big/little endian mode */
+
+/* 16bit write in little endian mode */
+#define MV_MEMIO_LE16_WRITE(addr, data) \
+ MV_MEMIO16_WRITE(addr, MV_16BIT_LE_FAST(data))
+
+/* 16bit read in little endian mode */
+static __inline MV_U16 MV_MEMIO_LE16_READ(MV_U32 addr)
+{
+ MV_U16 data;
+
+ data= (MV_U16)MV_MEMIO16_READ(addr);
+
+ return (MV_U16)MV_16BIT_LE_FAST(data);
+}
+
+/* 32bit write in little endian mode */
+#define MV_MEMIO_LE32_WRITE(addr, data) \
+ MV_MEMIO32_WRITE(addr, MV_32BIT_LE_FAST(data))
+
+/* 32bit read in little endian mode */
+static __inline MV_U32 MV_MEMIO_LE32_READ(MV_U32 addr)
+{
+ MV_U32 data;
+
+ data= (MV_U32)MV_MEMIO32_READ(addr);
+
+ return (MV_U32)MV_32BIT_LE_FAST(data);
+}
+
+static __inline void mvOsBCopy(char* srcAddr, char* dstAddr, int byteCount)
+{
+ while(byteCount != 0)
+ {
+ *dstAddr = *srcAddr;
+ dstAddr++;
+ srcAddr++;
+ byteCount--;
+ }
+}
+
+static INLINE MV_U64 mvOsDivMod64(MV_U64 divided, MV_U64 divisor, MV_U64* modulu)
+{
+ MV_U64 division = 0;
+
+ if(divisor == 1)
+ return divided;
+
+ while(divided >= divisor)
+ {
+ division++;
+ divided -= divisor;
+ }
+ if (modulu != NULL)
+ *modulu = divided;
+
+ return division;
+}
+
+#if defined(MV_BRIDGE_SYNC_REORDER)
+extern MV_U32 *mvUncachedParam;
+
+static __inline void mvOsBridgeReorderWA(void)
+{
+ volatile MV_U32 val = 0;
+
+ val = mvUncachedParam[0];
+}
+#endif
+
+
+/* Flash APIs */
+#define MV_FL_8_READ MV_MEMIO8_READ
+#define MV_FL_16_READ MV_MEMIO_LE16_READ
+#define MV_FL_32_READ MV_MEMIO_LE32_READ
+#define MV_FL_8_DATA_READ MV_MEMIO8_READ
+#define MV_FL_16_DATA_READ MV_MEMIO16_READ
+#define MV_FL_32_DATA_READ MV_MEMIO32_READ
+#define MV_FL_8_WRITE MV_MEMIO8_WRITE
+#define MV_FL_16_WRITE MV_MEMIO_LE16_WRITE
+#define MV_FL_32_WRITE MV_MEMIO_LE32_WRITE
+#define MV_FL_8_DATA_WRITE MV_MEMIO8_WRITE
+#define MV_FL_16_DATA_WRITE MV_MEMIO16_WRITE
+#define MV_FL_32_DATA_WRITE MV_MEMIO32_WRITE
+
+
+/* CPU cache information */
+#define CPU_I_CACHE_LINE_SIZE 32 /* 2do: replace 32 with linux core macro */
+#define CPU_D_CACHE_LINE_SIZE 32 /* 2do: replace 32 with linux core macro */
+
+#ifdef CONFIG_L2_CACHE_ENABLE
+/* Data cache flush one line */
+#define mvOsCacheLineFlushInv(handle, addr) \
+{ \
+ __asm__ __volatile__ ("mcr p15, 0, %0, c7, c14, 1" : : "r" (addr));\
+ __asm__ __volatile__ ("mcr p15, 1, %0, c15, c10, 1" : : "r" (addr));\
+ __asm__ __volatile__ ("mcr p15, 0, r0, c7, c10, 4"); \
+}
+
+#else
+
+/* Data cache flush one line */
+#define mvOsCacheLineFlushInv(handle, addr) \
+{ \
+ __asm__ __volatile__ ("mcr p15, 0, %0, c7, c14, 1" : : "r" (addr));\
+ __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" : : "r" (addr)); \
+}
+#endif
+
+#ifdef CONFIG_L2_CACHE_ENABLE
+#define mvOsCacheLineInv(handle,addr) \
+{ \
+ __asm__ __volatile__ ("mcr p15, 0, %0, c7, c6, 1" : : "r" (addr)); \
+ __asm__ __volatile__ ("mcr p15, 1, %0, c15, c11, 1" : : "r" (addr)); \
+}
+#else
+#define mvOsCacheLineInv(handle,addr) \
+{ \
+ __asm__ __volatile__ ("mcr p15, 0, %0, c7, c6, 1" : : "r" (addr)); \
+}
+#endif
+
+#ifdef CONFIG_L2_CACHE_ENABLE
+/* Data cache flush one line */
+#define mvOsCacheLineFlush(handle, addr) \
+{ \
+ __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 1" : : "r" (addr));\
+ __asm__ __volatile__ ("mcr p15, 1, %0, c15, c9, 1" : : "r" (addr));\
+ __asm__ __volatile__ ("mcr p15, 0, r0, c7, c10, 4"); \
+}
+
+#else
+/* Data cache flush one line */
+#define mvOsCacheLineFlush(handle, addr) \
+{ \
+ __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 1" : : "r" (addr));\
+ __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" : : "r" (addr)); \
+}
+#endif
+
+static __inline void mvOsPrefetch(const void *ptr)
+{
+#ifdef CONFIG_USE_DSP
+ __asm__ __volatile__(
+ "pld\t%0"
+ :
+ : "o" (*(char *)ptr)
+ : "cc");
+#else
+ return;
+#endif
+}
+
+
+/* Flush CPU pipe */
+#define CPU_PIPE_FLUSH
+
+
+
+
+
+/* register manipulations */
+
+/******************************************************************************
+* This debug facility records, in an array in DRAM, every register that
+* U-Boot writes; only MV_REG_WRITE accesses are recorded. It cannot operate
+* when booting from flash. The recorded array is printed with the printreg
+* command.
+******************************************************************************/
+/* #define REG_DEBUG */
+#if defined(REG_DEBUG)
+extern int reg_arry[2048][2];
+extern int reg_arry_index;
+#endif
+
+/* Marvell controller register read/write macros */
+#define MV_REG_VALUE(offset) \
+ (MV_MEMIO32_READ((INTER_REGS_BASE | (offset))))
+
+#define MV_REG_READ(offset) \
+ (MV_MEMIO_LE32_READ(INTER_REGS_BASE | (offset)))
+
+#if defined(REG_DEBUG)
+#define MV_REG_WRITE(offset, val) \
+ MV_MEMIO_LE32_WRITE((INTER_REGS_BASE | (offset)), (val)); \
+ { \
+ reg_arry[reg_arry_index][0] = (INTER_REGS_BASE | (offset));\
+ reg_arry[reg_arry_index][1] = (val);\
+ reg_arry_index++;\
+ }
+#else
+#define MV_REG_WRITE(offset, val) \
+ MV_MEMIO_LE32_WRITE((INTER_REGS_BASE | (offset)), (val));
+#endif
+
+#define MV_REG_BYTE_READ(offset) \
+ (MV_MEMIO8_READ((INTER_REGS_BASE | (offset))))
+
+#if defined(REG_DEBUG)
+#define MV_REG_BYTE_WRITE(offset, val) \
+ MV_MEMIO8_WRITE((INTER_REGS_BASE | (offset)), (val)); \
+ { \
+ reg_arry[reg_arry_index][0] = (INTER_REGS_BASE | (offset));\
+ reg_arry[reg_arry_index][1] = (val);\
+ reg_arry_index++;\
+ }
+#else
+#define MV_REG_BYTE_WRITE(offset, val) \
+ MV_MEMIO8_WRITE((INTER_REGS_BASE | (offset)), (val))
+#endif
+
+#if defined(REG_DEBUG)
+#define MV_REG_BIT_SET(offset, bitMask) \
+ (MV_MEMIO32_WRITE((INTER_REGS_BASE | (offset)), \
+ (MV_MEMIO32_READ(INTER_REGS_BASE | (offset)) | \
+ MV_32BIT_LE_FAST(bitMask)))); \
+ { \
+ reg_arry[reg_arry_index][0] = (INTER_REGS_BASE | (offset));\
+ reg_arry[reg_arry_index][1] = (MV_MEMIO32_READ(INTER_REGS_BASE | (offset)));\
+ reg_arry_index++;\
+ }
+#else
+#define MV_REG_BIT_SET(offset, bitMask) \
+ (MV_MEMIO32_WRITE((INTER_REGS_BASE | (offset)), \
+ (MV_MEMIO32_READ(INTER_REGS_BASE | (offset)) | \
+ MV_32BIT_LE_FAST(bitMask))))
+#endif
+
+#if defined(REG_DEBUG)
+#define MV_REG_BIT_RESET(offset,bitMask) \
+ (MV_MEMIO32_WRITE((INTER_REGS_BASE | (offset)), \
+ (MV_MEMIO32_READ(INTER_REGS_BASE | (offset)) & \
+ MV_32BIT_LE_FAST(~bitMask)))); \
+ { \
+ reg_arry[reg_arry_index][0] = (INTER_REGS_BASE | (offset));\
+ reg_arry[reg_arry_index][1] = (MV_MEMIO32_READ(INTER_REGS_BASE | (offset)));\
+ reg_arry_index++;\
+ }
+#else
+#define MV_REG_BIT_RESET(offset,bitMask) \
+ (MV_MEMIO32_WRITE((INTER_REGS_BASE | (offset)), \
+ (MV_MEMIO32_READ(INTER_REGS_BASE | (offset)) & \
+ MV_32BIT_LE_FAST(~bitMask))))
+#endif
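+
+/* Note (added for clarity): MV_REG_BIT_SET/MV_REG_BIT_RESET above are plain
+ * read-modify-write sequences and are not atomic; callers touching the same
+ * register from both process and interrupt context need their own locking. */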
+
+
+
+/* ARM architecture APIs */
+MV_U32 mvOsCpuRevGet (MV_VOID);
+MV_U32 mvOsCpuPartGet (MV_VOID);
+MV_U32 mvOsCpuArchGet (MV_VOID);
+MV_U32 mvOsCpuVarGet (MV_VOID);
+MV_U32 mvOsCpuAsciiGet (MV_VOID);
+
+/* Other APIs */
+void* mvOsIoCachedMalloc( void* osHandle, MV_U32 size, MV_ULONG* pPhyAddr, MV_U32 *memHandle);
+void* mvOsIoUncachedMalloc( void* osHandle, MV_U32 size, MV_ULONG* pPhyAddr, MV_U32 *memHandle );
+void mvOsIoUncachedFree( void* osHandle, MV_U32 size, MV_ULONG phyAddr, void* pVirtAddr, MV_U32 memHandle );
+void mvOsIoCachedFree( void* osHandle, MV_U32 size, MV_ULONG phyAddr, void* pVirtAddr, MV_U32 memHandle );
+int mvOsRand(void);
+
+#endif /* _MV_OS_LNX_H_ */
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/linux_oss/mvOsSata.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/linux_oss/mvOsSata.h
new file mode 100644
index 000000000..c925a9e9a
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/linux_oss/mvOsSata.h
@@ -0,0 +1,158 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+/*******************************************************************************
+* mvOsLinux.h - O.S. interface header file for Linux
+*
+* DESCRIPTION:
+*       This header file contains OS-dependent definitions under Linux
+*
+* DEPENDENCIES:
+* Linux kernel header files.
+*
+* FILE REVISION NUMBER:
+* $Revision: 1.1 $
+*******************************************************************************/
+
+#ifndef __INCmvOsLinuxh
+#define __INCmvOsLinuxh
+
+/* Includes */
+#include <linux/autoconf.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ide.h>
+#include <linux/pci.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include "mvOs.h"
+
+
+/* Definitions */
+#define MV_DEFAULT_QUEUE_DEPTH 2
+#define MV_SATA_SUPPORT_EDMA_SINGLE_DATA_REGION
+#define MV_SATA_SUPPORT_GEN2E_128_QUEUE_LEN
+
+#ifdef CONFIG_MV88F6082
+ #define MV_SATA_OVERRIDE_SW_QUEUE_SIZE
+ #define MV_SATA_REQUESTED_SW_QUEUE_SIZE 2
+ #undef MV_SATA_SUPPORT_GEN2E_128_QUEUE_LEN
+#endif
+
+/* System dependent macro for flushing CPU write cache */
+#if defined (MV_BRIDGE_SYNC_REORDER)
+#define MV_CPU_WRITE_BUFFER_FLUSH() do { \
+ wmb(); \
+ mvOsBridgeReorderWA(); \
+ } while (0)
+#else
+#define MV_CPU_WRITE_BUFFER_FLUSH() wmb()
+#endif /* MV_BRIDGE_SYNC_REORDER */
+
+/* System dependent little endian from / to CPU conversions */
+#define MV_CPU_TO_LE16(x) cpu_to_le16(x)
+#define MV_CPU_TO_LE32(x) cpu_to_le32(x)
+
+#define MV_LE16_TO_CPU(x) le16_to_cpu(x)
+#define MV_LE32_TO_CPU(x) le32_to_cpu(x)
+
+#ifdef __BIG_ENDIAN_BITFIELD
+#define MV_BIG_ENDIAN_BITFIELD
+#endif
+
+/* System dependent register read / write in byte/word/dword variants */
+#define MV_REG_WRITE_BYTE(base, offset, val) writeb(val, base + offset)
+#define MV_REG_WRITE_WORD(base, offset, val) writew(val, base + offset)
+#define MV_REG_WRITE_DWORD(base, offset, val) writel(val, base + offset)
+#define MV_REG_READ_BYTE(base, offset) readb(base + offset)
+#define MV_REG_READ_WORD(base, offset) readw(base + offset)
+#define MV_REG_READ_DWORD(base, offset) readl(base + offset)
+
+
+/* Typedefs */
+
+/* System dependent typedefs */
+typedef void *MV_VOID_PTR;
+typedef u32 *MV_U32_PTR;
+typedef u16 *MV_U16_PTR;
+typedef u8 *MV_U8_PTR;
+typedef char *MV_CHAR_PTR;
+typedef void *MV_BUS_ADDR_T;
+typedef unsigned long MV_CPU_FLAGS;
+
+
+/* Structures */
+/* System dependent structure */
+typedef struct mvOsSemaphore
+{
+ int notUsed;
+} MV_OS_SEMAPHORE;
+
+
+/* Functions (User implemented)*/
+
+/* Semaphore init, take and release */
+#define mvOsSemInit(x) MV_TRUE
+#define mvOsSemTake(x)
+#define mvOsSemRelease(x)
+
+/* Interrupt masking and unmasking functions */
+MV_CPU_FLAGS mvOsSaveFlagsAndMaskCPUInterrupts(MV_VOID);
+MV_VOID mvOsRestoreFlags(MV_CPU_FLAGS);
+
+/* Delay function in micro seconds resolution */
+void mvMicroSecondsDelay(MV_VOID_PTR, MV_U32);
+
+/* Typedefs */
+typedef enum mvBoolean
+{
+ MV_SFALSE, MV_STRUE
+} MV_BOOLEAN;
+
+/* System logging function */
+#include "mvLog.h"
+/* Enable READ/WRITE Long SCSI command only when driver is compiled for debugging */
+#ifdef MV_LOGGER
+#define MV_SATA_SUPPORT_READ_WRITE_LONG
+#endif
+
+#define MV_IAL_LOG_ID 3
+
+#endif /* __INCmvOsLinuxh */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mvSysHwConfig.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mvSysHwConfig.h
new file mode 100644
index 000000000..d761060c9
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mvSysHwConfig.h
@@ -0,0 +1,375 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+
+*******************************************************************************/
+/*******************************************************************************
+* mvSysHwCfg.h - Marvell system HW configuration file
+*
+* DESCRIPTION:
+* None.
+*
+* DEPENDENCIES:
+* None.
+*
+*******************************************************************************/
+
+#ifndef __INCmvSysHwConfigh
+#define __INCmvSysHwConfigh
+
+#include "../../../../include/linux/autoconf.h"
+
+#define CONFIG_MARVELL 1
+
+/* includes */
+#define _1K 0x00000400
+#define _4K 0x00001000
+#define _8K 0x00002000
+#define _16K 0x00004000
+#define _32K 0x00008000
+#define _64K 0x00010000
+#define _128K 0x00020000
+#define _256K 0x00040000
+#define _512K 0x00080000
+
+#define _1M 0x00100000
+#define _2M 0x00200000
+#define _4M 0x00400000
+#define _8M 0x00800000
+#define _16M 0x01000000
+#define _32M 0x02000000
+#define _64M 0x04000000
+#define _128M 0x08000000
+#define _256M 0x10000000
+#define _512M 0x20000000
+
+#define _1G 0x40000000
+#define _2G 0x80000000
+
+/****************************************/
+/* SoC supported units definitions */
+/****************************************/
+
+#ifdef CONFIG_MV_INCLUDE_PEX
+#define MV_INCLUDE_PEX
+#endif
+#ifdef CONFIG_MV_INCLUDE_TWSI
+#define MV_INCLUDE_TWSI
+#endif
+#ifdef CONFIG_MV_INCLUDE_CESA
+#define MV_INCLUDE_CESA
+#endif
+#ifdef CONFIG_MV_INCLUDE_GIG_ETH
+#define MV_INCLUDE_GIG_ETH
+#endif
+#ifdef CONFIG_MV_INCLUDE_INTEG_SATA
+#define MV_INCLUDE_INTEG_SATA
+#define MV_INCLUDE_SATA
+#endif
+#ifdef CONFIG_MV_INCLUDE_USB
+#define MV_INCLUDE_USB
+#define MV_USB_VOLTAGE_FIX
+#endif
+#ifdef CONFIG_MV_INCLUDE_NAND
+#define MV_INCLUDE_NAND
+#endif
+#ifdef CONFIG_MV_INCLUDE_TDM
+#define MV_INCLUDE_TDM
+#endif
+#ifdef CONFIG_MV_INCLUDE_XOR
+#define MV_INCLUDE_XOR
+#endif
+#ifdef CONFIG_MV_INCLUDE_TWSI
+#define MV_INCLUDE_TWSI
+#endif
+#ifdef CONFIG_MV_INCLUDE_UART
+#define MV_INCLUDE_UART
+#endif
+#ifdef CONFIG_MV_INCLUDE_SPI
+#define MV_INCLUDE_SPI
+#endif
+#ifdef CONFIG_MV_INCLUDE_SFLASH_MTD
+#define MV_INCLUDE_SFLASH_MTD
+#endif
+#ifdef CONFIG_MV_INCLUDE_AUDIO
+#define MV_INCLUDE_AUDIO
+#endif
+#ifdef CONFIG_MV_INCLUDE_TS
+#define MV_INCLUDE_TS
+#endif
+#ifdef CONFIG_MV_INCLUDE_SDIO
+#define MV_INCLUDE_SDIO
+#endif
+
+
+/* NAND flash stuff */
+#ifdef CONFIG_MV_NAND_BOOT
+#define MV_NAND_BOOT
+#endif
+#ifdef CONFIG_MV_NAND
+#define MV_NAND
+#endif
+
+/* SPI flash stuff */
+#ifdef CONFIG_MV_SPI_BOOT
+#define MV_SPI_BOOT
+#endif
+
+
+/****************************************************************/
+/************* General configuration ********************/
+/****************************************************************/
+
+/* Enable Clock Power Control */
+#define MV_INCLUDE_CLK_PWR_CNTRL
+
+/* Disable the DEVICE BAR in the PEX */
+#define MV_DISABLE_PEX_DEVICE_BAR
+
+/* Allow the usage of early printings during initialization */
+#define MV_INCLUDE_EARLY_PRINTK
+
+/****************************************************************/
+/************* NFP configuration ********************************/
+/****************************************************************/
+#define MV_NFP_SEC_Q_SIZE 64
+#define MV_NFP_SEC_REQ_Q_SIZE 1000
+
+
+
+/****************************************************************/
+/************* CESA configuration ********************/
+/****************************************************************/
+
+#ifdef MV_INCLUDE_CESA
+
+#define MV_CESA_MAX_CHAN 4
+
+/* Use 2K of SRAM */
+#define MV_CESA_MAX_BUF_SIZE 1600
+
+#endif /* MV_INCLUDE_CESA */
+
+#if defined(CONFIG_MV_INCLUDE_GIG_ETH)
+
+#ifdef CONFIG_MV_NFP_STATS
+#define MV_FP_STATISTICS
+#else
+#undef MV_FP_STATISTICS
+#endif
+/* Default configuration for SKB_REUSE: 0 - Disabled, 1 - Enabled */
+#define MV_ETH_SKB_REUSE_DEFAULT 1
+/* Default configuration for TX_EN workaround: 0 - Disabled, 1 - Enabled */
+#define MV_ETH_TX_EN_DEFAULT 0
+
+/* un-comment if you want to perform tx_done from within the poll function */
+/* #define ETH_TX_DONE_ISR */
+
+/* put descriptors in uncached memory */
+/* #define ETH_DESCR_UNCACHED */
+
+/* Descriptors location: DRAM/internal-SRAM */
+#define ETH_DESCR_IN_SDRAM
+#undef ETH_DESCR_IN_SRAM /* No integrated SRAM in 88Fxx81 devices */
+
+#if defined(ETH_DESCR_IN_SRAM)
+#if defined(ETH_DESCR_UNCACHED)
+ #define ETH_DESCR_CONFIG_STR "Uncached descriptors in integrated SRAM"
+#else
+ #define ETH_DESCR_CONFIG_STR "Cached descriptors in integrated SRAM"
+#endif
+#elif defined(ETH_DESCR_IN_SDRAM)
+#if defined(ETH_DESCR_UNCACHED)
+ #define ETH_DESCR_CONFIG_STR "Uncached descriptors in DRAM"
+#else
+ #define ETH_DESCR_CONFIG_STR "Cached descriptors in DRAM"
+#endif
+#else
+ #error "Ethernet descriptors location undefined"
+#endif /* ETH_DESCR_IN_SRAM or ETH_DESCR_IN_SDRAM*/
+
+/* SW Sync-Barrier: not relevant for 88fxx81 */
+/* Reasonable to define this macro when descriptors are in SRAM and buffers in DRAM. */
+/* In RX the CPU might theoretically see itself as the descriptor owner */
+/* although the buffer has not been written to DRAM yet. Has a performance cost. */
+/* #define INCLUDE_SYNC_BARR */
+
+/* Buffers cache coherency method (buffers in DRAM) */
+#ifndef MV_CACHE_COHER_SW
+/* Taken from mvCommon.h */
+/* Memory uncached, HW or SW cache coherency is not needed */
+#define MV_UNCACHED 0
+/* Memory cached, HW cache coherency supported in WriteThrough mode */
+#define MV_CACHE_COHER_HW_WT 1
+/* Memory cached, HW cache coherency supported in WriteBack mode */
+#define MV_CACHE_COHER_HW_WB 2
+/* Memory cached, No HW cache coherency, Cache coherency must be in SW */
+#define MV_CACHE_COHER_SW 3
+
+#endif
+
+/* DRAM cache coherency configuration */
+#define MV_CACHE_COHERENCY MV_CACHE_COHER_SW
+
+
+#define ETHER_DRAM_COHER MV_CACHE_COHER_SW /* No HW coherency in 88Fxx81 devices */
+
+#if (ETHER_DRAM_COHER == MV_CACHE_COHER_HW_WB)
+ #define ETH_SDRAM_CONFIG_STR "DRAM HW cache coherency (write-back)"
+#elif (ETHER_DRAM_COHER == MV_CACHE_COHER_HW_WT)
+ #define ETH_SDRAM_CONFIG_STR "DRAM HW cache coherency (write-through)"
+#elif (ETHER_DRAM_COHER == MV_CACHE_COHER_SW)
+ #define ETH_SDRAM_CONFIG_STR "DRAM SW cache-coherency"
+#elif (ETHER_DRAM_COHER == MV_UNCACHED)
+# define ETH_SDRAM_CONFIG_STR "DRAM uncached"
+#else
+ #error "Ethernet-DRAM undefined"
+#endif /* ETHER_DRAM_COHER */
+
+
+/****************************************************************/
+/************* Ethernet driver configuration ********************/
+/****************************************************************/
+
+/* port's default queues */
+#define ETH_DEF_TXQ 0
+#define ETH_DEF_RXQ 0
+
+#define MV_ETH_RX_Q_NUM CONFIG_MV_ETH_RX_Q_NUM
+#define MV_ETH_TX_Q_NUM CONFIG_MV_ETH_TX_Q_NUM
+
+/* interrupt coalescing setting */
+#define ETH_TX_COAL 200
+#define ETH_RX_COAL 200
+
+/* Checksum offloading */
+#define TX_CSUM_OFFLOAD
+#define RX_CSUM_OFFLOAD
+
+#endif /* CONFIG_MV_INCLUDE_GIG_ETH */
+
+/****************************************************************/
+/*************** Telephony configuration ************************/
+/****************************************************************/
+#if defined(CONFIG_MV_TDM_LINEAR_MODE)
+ #define MV_TDM_LINEAR_MODE
+#elif defined(CONFIG_MV_TDM_ULAW_MODE)
+ #define MV_TDM_ULAW_MODE
+#endif
+
+#if defined(CONFIG_MV_TDM_5CHANNELS)
+ #define MV_TDM_5CHANNELS
+#endif
+
+#if defined(CONFIG_MV_TDM_USE_EXTERNAL_PCLK_SOURCE)
+ #define MV_TDM_USE_EXTERNAL_PCLK_SOURCE
+#endif
+
+/* We use the following registers to store the DRAM interface pre-configuration */
+/* auto-detection results. */
+/* IMPORTANT: We are using mask registers for that purpose. Before writing */
+/* to a unit's mask register, make sure the main mask register is set to */
+/* disable all interrupts. */
+#define DRAM_BUF_REG0 0x30810 /* sdram bank 0 size */
+#define DRAM_BUF_REG1 0x30820 /* sdram config */
+#define DRAM_BUF_REG2 0x30830 /* sdram mode */
+#define DRAM_BUF_REG3 0x308c4 /* dunit control low */
+#define DRAM_BUF_REG4 0x60a90 /* sdram address control */
+#define DRAM_BUF_REG5 0x60a94 /* sdram timing control low */
+#define DRAM_BUF_REG6 0x60a98 /* sdram timing control high */
+#define DRAM_BUF_REG7 0x60a9c /* sdram ODT control low */
+#define DRAM_BUF_REG8 0x60b90 /* sdram ODT control high */
+#define DRAM_BUF_REG9 0x60b94 /* sdram Dunit ODT control */
+#define DRAM_BUF_REG10 0x60b98 /* sdram Extended Mode */
+#define DRAM_BUF_REG11 0x60b9c /* sdram Ddr2 Time Low Reg */
+#define DRAM_BUF_REG12 0x60a00 /* sdram Ddr2 Time High Reg */
+#define DRAM_BUF_REG13 0x60a04 /* dunit Ctrl High */
+#define DRAM_BUF_REG14 0x60b00 /* sdram second DIMM exist */
+
+/* The following default value is restored to the pre-configuration registers */
+/* after auto-detection is done */
+#define DRAM_BUF_REG_DV 0
+
+/* System Mapping */
+#define SDRAM_CS0_BASE 0x00000000
+#define SDRAM_CS0_SIZE _256M
+
+#define SDRAM_CS1_BASE 0x10000000
+#define SDRAM_CS1_SIZE _256M
+
+#define SDRAM_CS2_BASE 0x20000000
+#define SDRAM_CS2_SIZE _256M
+
+#define SDRAM_CS3_BASE 0x30000000
+#define SDRAM_CS3_SIZE _256M
+
+/* PEX */
+#define PEX0_MEM_BASE 0xe8000000
+#define PEX0_MEM_SIZE _128M
+
+#define PEX0_IO_BASE 0xf2000000
+#define PEX0_IO_SIZE _1M
+
+/* Device Chip Selects */
+#define NFLASH_CS_BASE 0xfa000000
+#define NFLASH_CS_SIZE _2M
+
+#define SPI_CS_BASE 0xf4000000
+#define SPI_CS_SIZE _16M
+
+#define CRYPT_ENG_BASE 0xf0000000
+#define CRYPT_ENG_SIZE _2M
+
+#define BOOTDEV_CS_BASE 0xff800000
+#define BOOTDEV_CS_SIZE _8M
+
+/* CS2 - BOOTROM */
+#define DEVICE_CS2_BASE 0xff900000
+#define DEVICE_CS2_SIZE _1M
+
+/* PEX workaround */
+/* the target we will use for the workaround */
+#define PEX_CONFIG_RW_WA_TARGET PEX0_MEM
+/* a flag that indicates whether we are going to use the
+size and base of the target we are using for the workaround
+window */
+#define PEX_CONFIG_RW_WA_USE_ORIGINAL_WIN_VALUES 1
+/* if the above flag is 0 then the following values
+will be used for the workaround window base and size,
+otherwise the following defines will be ignored */
+#define PEX_CONFIG_RW_WA_BASE 0xF3000000
+#define PEX_CONFIG_RW_WA_SIZE _16M
+
+/* Internal registers: size is defined in the controller environment */
+#define INTER_REGS_BASE 0xFEE00000
+
+/* DRAM detection stuff */
+#define MV_DRAM_AUTO_SIZE
+
+/* Board clock detection */
+#define TCLK_AUTO_DETECT /* Use Tclk auto detection */
+#define SYSCLK_AUTO_DETECT /* Use SysClk auto detection */
+#define PCLCK_AUTO_DETECT /* Use PClk auto detection */
+#define L2CLK_AUTO_DETECT /* Use L2Clk auto detection */
+
+/* PEX-PCI \ PCI-PCI bridge */
+#define PCI0_IF_PTP		0	/* Bridge exists on pciIf0 */
+
+
+
+#endif /* __INCmvSysHwConfigh */
+
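
A small illustrative helper (not part of the header above) showing how the size constants and the static memory map in mvSysHwConfig.h compose: it checks whether a physical address falls inside the PEX0 memory window. The function name is a placeholder.

static int example_addr_in_pex0_mem(MV_U32 physAddr)
{
	/* PEX0_MEM_BASE is 0xe8000000 and PEX0_MEM_SIZE is _128M (0x08000000) */
	return (physAddr >= PEX0_MEM_BASE) &&
	       (physAddr < (PEX0_MEM_BASE + PEX0_MEM_SIZE));
}
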
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.c
new file mode 100644
index 000000000..717c1507c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.c
@@ -0,0 +1,376 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCntmr.h"
+#include "cpu/mvCpu.h"
+
+/* defines */
+#ifdef MV_DEBUG
+ #define DB(x) x
+#else
+ #define DB(x)
+#endif
+
+extern unsigned int whoAmI(void);
+
+/*******************************************************************************
+* mvCntmrLoad -
+*
+* DESCRIPTION:
+* Load an init Value to a given counter/timer
+*
+* INPUT:
+* countNum - counter number
+* value - value to be loaded
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*	MV_BAD_PARAM on bad parameters, MV_ERROR on error, MV_OK on success
+*******************************************************************************/
+MV_STATUS mvCntmrLoad(MV_U32 countNum, MV_U32 value)
+{
+ if (countNum >= MV_CNTMR_MAX_COUNTER )
+ {
+
+		mvOsPrintf("mvCntmrLoad: Err. Illegal counter number\n");
+		return MV_BAD_PARAM;
+
+ }
+
+ MV_REG_WRITE(CNTMR_RELOAD_REG(countNum),value);
+ MV_REG_WRITE(CNTMR_VAL_REG(countNum),value);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvCntmrRead -
+*
+* DESCRIPTION:
+* Returns the value of the given Counter/Timer
+*
+* INPUT:
+* countNum - counter number
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_U32 counter value
+*******************************************************************************/
+MV_U32 mvCntmrRead(MV_U32 countNum)
+{
+ return MV_REG_READ(CNTMR_VAL_REG(countNum));
+}
+
+/*******************************************************************************
+* mvCntmrWrite -
+*
+* DESCRIPTION:
+*	Write a value to the given Counter/Timer
+*
+* INPUT:
+* countNum - counter number
+* countVal - value to write
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None
+*******************************************************************************/
+void mvCntmrWrite(MV_U32 countNum,MV_U32 countVal)
+{
+ MV_REG_WRITE(CNTMR_VAL_REG(countNum),countVal);
+}
+
+/*******************************************************************************
+* mvCntmrCtrlSet -
+*
+* DESCRIPTION:
+* Set the Control to a given counter/timer
+*
+* INPUT:
+* countNum - counter number
+* pCtrl - pointer to MV_CNTMR_CTRL structure
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*	MV_BAD_PARAM on bad parameters, MV_ERROR on error, MV_OK on success
+*******************************************************************************/
+MV_STATUS mvCntmrCtrlSet(MV_U32 countNum, MV_CNTMR_CTRL *pCtrl)
+{
+ MV_U32 cntmrCtrl;
+
+ if (countNum >= MV_CNTMR_MAX_COUNTER )
+ {
+
+		DB(mvOsPrintf("mvCntmrCtrlSet: Err. Illegal counter number\n"));
+		return MV_BAD_PARAM;
+
+ }
+
+ /* read control register */
+ cntmrCtrl = MV_REG_READ(CNTMR_CTRL_REG);
+
+
+ if (pCtrl->enable) /* enable counter\timer */
+ {
+ cntmrCtrl |= CTCR_ARM_TIMER_EN(countNum);
+ }
+ else /* disable counter\timer */
+ {
+ cntmrCtrl &= ~CTCR_ARM_TIMER_EN(countNum);
+ }
+
+ if ( pCtrl->autoEnable ) /* Auto mode */
+ {
+ cntmrCtrl |= CTCR_ARM_TIMER_AUTO_EN(countNum);
+
+ }
+ else /* no auto mode */
+ {
+ cntmrCtrl &= ~CTCR_ARM_TIMER_AUTO_EN(countNum);
+ }
+
+ MV_REG_WRITE(CNTMR_CTRL_REG,cntmrCtrl);
+
+ return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvCntmrCtrlGet -
+*
+* DESCRIPTION:
+* Get the Control value of a given counter/timer
+*
+* INPUT:
+* countNum - counter number
+* pCtrl - pointer to MV_CNTMR_CTRL structure
+*
+* OUTPUT:
+* Counter\Timer control value
+*
+* RETURN:
+*	MV_BAD_PARAM on bad parameters, MV_ERROR on error, MV_OK on success
+*******************************************************************************/
+MV_STATUS mvCntmrCtrlGet(MV_U32 countNum, MV_CNTMR_CTRL *pCtrl)
+{
+ MV_U32 cntmrCtrl;
+
+ if (countNum >= MV_CNTMR_MAX_COUNTER )
+ {
+		DB(mvOsPrintf("mvCntmrCtrlGet: Err. Illegal counter number\n"));
+		return MV_BAD_PARAM;
+ }
+
+ /* read control register */
+ cntmrCtrl = MV_REG_READ(CNTMR_CTRL_REG);
+
+ /* enable counter\timer */
+ if (cntmrCtrl & CTCR_ARM_TIMER_EN(countNum))
+ {
+ pCtrl->enable = MV_TRUE;
+ }
+ else
+ {
+ pCtrl->enable = MV_FALSE;
+ }
+
+ /* counter mode */
+ if (cntmrCtrl & CTCR_ARM_TIMER_AUTO_EN(countNum))
+ {
+ pCtrl->autoEnable = MV_TRUE;
+ }
+ else
+ {
+ pCtrl->autoEnable = MV_FALSE;
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvCntmrEnable -
+*
+* DESCRIPTION:
+* Set the Enable-Bit to logic '1' ==> starting the counter
+*
+* INPUT:
+* countNum - counter number
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*	MV_BAD_PARAM on bad parameters, MV_ERROR on error, MV_OK on success
+*******************************************************************************/
+MV_STATUS mvCntmrEnable(MV_U32 countNum)
+{
+ MV_U32 cntmrCtrl;
+
+ if (countNum >= MV_CNTMR_MAX_COUNTER )
+ {
+
+		DB(mvOsPrintf("mvCntmrEnable: Err. Illegal counter number\n"));
+		return MV_BAD_PARAM;
+
+ }
+
+ /* read control register */
+ cntmrCtrl = MV_REG_READ(CNTMR_CTRL_REG);
+
+ /* enable counter\timer */
+ cntmrCtrl |= CTCR_ARM_TIMER_EN(countNum);
+
+
+ MV_REG_WRITE(CNTMR_CTRL_REG,cntmrCtrl);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvCntmrDisable -
+*
+* DESCRIPTION:
+*	Stop the given counter/timer from running
+*
+* INPUT:
+* countNum - counter number
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*	MV_BAD_PARAM on bad parameters, MV_ERROR on error, MV_OK on success
+*******************************************************************************/
+MV_STATUS mvCntmrDisable(MV_U32 countNum)
+{
+ MV_U32 cntmrCtrl;
+
+ if (countNum >= MV_CNTMR_MAX_COUNTER )
+ {
+
+		DB(mvOsPrintf("mvCntmrDisable: Err. Illegal counter number\n"));
+		return MV_BAD_PARAM;
+
+ }
+
+ /* read control register */
+ cntmrCtrl = MV_REG_READ(CNTMR_CTRL_REG);
+
+ /* disable counter\timer */
+ cntmrCtrl &= ~CTCR_ARM_TIMER_EN(countNum);
+
+ MV_REG_WRITE(CNTMR_CTRL_REG,cntmrCtrl);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvCntmrStart -
+*
+* DESCRIPTION:
+*	Combines all the sub-operations above into one function: Load, setMode, Enable
+*
+* INPUT:
+* countNum - counter number
+* value - value of the counter\timer to be set
+* pCtrl - pointer to MV_CNTMR_CTRL structure
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*	MV_BAD_PARAM on bad parameters, MV_ERROR on error, MV_OK on success
+*******************************************************************************/
+MV_STATUS mvCntmrStart(MV_U32 countNum, MV_U32 value,
+ MV_CNTMR_CTRL *pCtrl)
+{
+
+ if (countNum >= MV_CNTMR_MAX_COUNTER )
+ {
+
+		mvOsPrintf("mvCntmrStart: Err. Illegal counter number\n");
+		return MV_BAD_PARAM;
+
+ }
+
+ /* load value onto counter\timer */
+ mvCntmrLoad(countNum,value);
+
+	/* set the counter's current value for the first run */
+ mvCntmrWrite(countNum,value);
+
+	/* set control for the timer \ counter and enable it */
+ mvCntmrCtrlSet(countNum,pCtrl);
+
+ return MV_OK;
+}
+
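
A usage sketch for the counter/timer API above (illustrative only): arm TIMER0 as an auto-reloading timer. The reload value is an arbitrary example; TIMER0 and MV_CNTMR_CTRL come from mvCntmr.h.

static MV_STATUS example_start_periodic_timer(void)
{
	MV_CNTMR_CTRL ctrl;

	ctrl.enable     = MV_TRUE;	/* start counting immediately         */
	ctrl.autoEnable = MV_TRUE;	/* reload from CNTMR_RELOAD_REG on 0  */

	/* load, configure and enable in one call */
	return mvCntmrStart(TIMER0, 0x00100000, &ctrl);
}
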
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.h
new file mode 100644
index 000000000..b911d0f04
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.h
@@ -0,0 +1,121 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvTmrWtdgh
+#define __INCmvTmrWtdgh
+
+/* includes */
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "cntmr/mvCntmrRegs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+
+
+/* This enumerator describes the counter\watchdog numbers */
+typedef enum _mvCntmrID
+{
+ TIMER0 = 0,
+ TIMER1,
+ WATCHDOG,
+ TIMER2,
+ TIMER3,
+}MV_CNTMR_ID;
+
+
+/* Counter / Timer control structure */
+typedef struct _mvCntmrCtrl
+{
+ MV_BOOL enable; /* enable */
+ MV_BOOL autoEnable; /* counter/Timer */
+}MV_CNTMR_CTRL;
+
+
+/* Functions */
+
+/* Load an init Value to a given counter/timer */
+MV_STATUS mvCntmrLoad(MV_U32 countNum, MV_U32 value);
+
+/* Returns the value of the given Counter/Timer */
+MV_U32 mvCntmrRead(MV_U32 countNum);
+
+/* Write a value of the given Counter/Timer */
+void mvCntmrWrite(MV_U32 countNum,MV_U32 countVal);
+
+/* Set the Control to a given counter/timer */
+MV_STATUS mvCntmrCtrlSet(MV_U32 countNum, MV_CNTMR_CTRL *pCtrl);
+
+/* Get the control value of a given counter/timer */
+MV_STATUS mvCntmrCtrlGet(MV_U32 countNum, MV_CNTMR_CTRL *pCtrl);
+
+/* Set the Enable-Bit to logic '1' ==> starting the counter. */
+MV_STATUS mvCntmrEnable(MV_U32 countNum);
+
+/* Stop the given counter/timer from running. */
+MV_STATUS mvCntmrDisable(MV_U32 countNum);
+
+/* Combines all the sub-operations above into one function: Load, setMode, Enable */
+MV_STATUS mvCntmrStart(MV_U32 countNum, MV_U32 value,
+ MV_CNTMR_CTRL *pCtrl);
+
+#endif /* __INCmvTmrWtdgh */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmrRegs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmrRegs.h
new file mode 100644
index 000000000..b69bc66ad
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmrRegs.h
@@ -0,0 +1,121 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvTmrwtdgRegsh
+#define __INCmvTmrwtdgRegsh
+
+/*******************************************/
+/* ARM Timers Registers Map */
+/*******************************************/
+
+#define CNTMR_RELOAD_REG(tmrNum) (CNTMR_BASE + 0x10 + (tmrNum)*8 + \
+ (((tmrNum) <= 3)?0:8))
+#define CNTMR_VAL_REG(tmrNum) (CNTMR_BASE + 0x14 + (tmrNum)*8 + \
+ (((tmrNum) <= 3)?0:8))
+#define CNTMR_CTRL_REG (CNTMR_BASE)
+
+/*For MV78XX0*/
+#define CNTMR_CAUSE_REG (CPU_AHB_MBUS_CAUSE_INT_REG(whoAmI()))
+#define CNTMR_MASK_REG (CPU_AHB_MBUS_MASK_INT_REG(whoAmI()))
+
+/* ARM Timers Registers Map */
+/*******************************************/
+
+
+/* ARM Timers Control Register */
+/* CPU_TIMERS_CTRL_REG (CTCR) */
+
+#define TIMER0_NUM 0
+#define TIMER1_NUM 1
+#define WATCHDOG_NUM 2
+#define TIMER2_NUM 3
+#define TIMER3_NUM 4
+
+#define CTCR_ARM_TIMER_EN_OFFS(cntr) (cntr * 2)
+#define CTCR_ARM_TIMER_EN_MASK(cntr)	(1 << CTCR_ARM_TIMER_EN_OFFS(cntr))
+#define CTCR_ARM_TIMER_EN(cntr) (1 << CTCR_ARM_TIMER_EN_OFFS(cntr))
+#define CTCR_ARM_TIMER_DIS(cntr) (0 << CTCR_ARM_TIMER_EN_OFFS(cntr))
+
+#define CTCR_ARM_TIMER_AUTO_OFFS(cntr) ((cntr * 2) + 1)
+#define CTCR_ARM_TIMER_AUTO_MASK(cntr)	(1 << CTCR_ARM_TIMER_AUTO_OFFS(cntr))
+#define CTCR_ARM_TIMER_AUTO_EN(cntr) (1 << CTCR_ARM_TIMER_AUTO_OFFS(cntr))
+#define CTCR_ARM_TIMER_AUTO_DIS(cntr) (0 << CTCR_ARM_TIMER_AUTO_OFFS(cntr))
+
+
+/* ARM Timer\Watchdog Reload Register */
+/* CNTMR_RELOAD_REG (TRR) */
+
+#define TRG_ARM_TIMER_REL_OFFS 0
+#define TRG_ARM_TIMER_REL_MASK 0xffffffff
+
+/* ARM Timer\Watchdog Register */
+/* CNTMR_VAL_REG (TVRG) */
+
+#define TVR_ARM_TIMER_OFFS 0
+#define TVR_ARM_TIMER_MASK 0xffffffff
+#define TVR_ARM_TIMER_MAX 0xffffffff
+
+
+
+#endif /* __INCmvTmrwtdgRegsh */
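
A worked expansion of the address macros above (illustrative comment plus a small helper, not part of the header): the extra 8-byte skip in CNTMR_RELOAD_REG/CNTMR_VAL_REG only affects TIMER3_NUM, since the watchdog sits between the two timer pairs.

/*
 * Offsets relative to CNTMR_BASE, assuming the definitions above:
 *   tmrNum 0 (TIMER0_NUM)   reload 0x10, value 0x14
 *   tmrNum 1 (TIMER1_NUM)   reload 0x18, value 0x1c
 *   tmrNum 2 (WATCHDOG_NUM) reload 0x20, value 0x24
 *   tmrNum 3 (TIMER2_NUM)   reload 0x28, value 0x2c
 *   tmrNum 4 (TIMER3_NUM)   reload 0x38, value 0x3c   (8-byte skip applied)
 */
static void example_load_watchdog(MV_U32 val)
{
	/* equivalent to mvCntmrLoad(WATCHDOG, val) in mvCntmr.c */
	MV_REG_WRITE(CNTMR_RELOAD_REG(WATCHDOG_NUM), val);
	MV_REG_WRITE(CNTMR_VAL_REG(WATCHDOG_NUM), val);
}
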
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCompVer.txt b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCompVer.txt
new file mode 100644
index 000000000..85bfa612c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCompVer.txt
@@ -0,0 +1,4 @@
+Global HAL Version: FEROCEON_HAL_3_1_7
+Unit HAL Version: 3.1.3
+Description: This component includes an implementation of the unit HAL drivers
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.c
new file mode 100644
index 000000000..609e674e3
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.c
@@ -0,0 +1,207 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "mvOs.h"
+#include "mvCpuCntrs.h"
+
+
+static const MV_CPU_CNTRS_OPS mvCpuCntrsOpsTbl[MV_CPU_CNTRS_NUM][MV_CPU_CNTRS_OPS_NUM] =
+{
+ /*0*/
+ {
+ MV_CPU_CNTRS_CYCLES, MV_CPU_CNTRS_DCACHE_READ_HIT, MV_CPU_CNTRS_DCACHE_READ_MISS,
+ MV_CPU_CNTRS_DCACHE_WRITE_HIT, MV_CPU_CNTRS_DCACHE_WRITE_MISS, MV_CPU_CNTRS_INSTRUCTIONS,
+ MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_INVALID,
+ MV_CPU_CNTRS_MMU_READ_LATENCY, MV_CPU_CNTRS_ICACHE_READ_LATENCY, MV_CPU_CNTRS_WB_WRITE_LATENCY,
+ MV_CPU_CNTRS_LDM_STM_HOLD, MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_INVALID,
+ MV_CPU_CNTRS_DATA_WRITE_ACCESS, MV_CPU_CNTRS_DATA_READ_ACCESS, MV_CPU_CNTRS_INVALID,
+ MV_CPU_CNTRS_BRANCH_PREDICT_COUNT,
+ },
+ /*1*/
+ {
+ MV_CPU_CNTRS_CYCLES, MV_CPU_CNTRS_ICACHE_READ_MISS, MV_CPU_CNTRS_DCACHE_READ_MISS,
+ MV_CPU_CNTRS_DCACHE_WRITE_MISS, MV_CPU_CNTRS_ITLB_MISS, MV_CPU_CNTRS_SINGLE_ISSUE,
+ MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_BRANCH_RETIRED, MV_CPU_CNTRS_INVALID,
+ MV_CPU_CNTRS_MMU_READ_BEAT, MV_CPU_CNTRS_ICACHE_READ_LATENCY, MV_CPU_CNTRS_WB_WRITE_BEAT,
+ MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_IS_HOLD, MV_CPU_CNTRS_DATA_READ_ACCESS,
+ MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_INVALID,
+ MV_CPU_CNTRS_INVALID,
+ },
+ /*2*/
+ {
+ MV_CPU_CNTRS_CYCLES, MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_DCACHE_ACCESS,
+ MV_CPU_CNTRS_DTLB_MISS, MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_INVALID,
+ MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_BRANCH_PREDICT_MISS, MV_CPU_CNTRS_WB_WRITE_BEAT,
+ MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_DCACHE_READ_LATENCY, MV_CPU_CNTRS_DCACHE_WRITE_LATENCY,
+ MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_BIU_SIMULT_ACCESS,
+ MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_INVALID,
+ MV_CPU_CNTRS_INVALID,
+ },
+ /*3*/
+ {
+ MV_CPU_CNTRS_CYCLES, MV_CPU_CNTRS_DCACHE_READ_MISS, MV_CPU_CNTRS_DCACHE_WRITE_MISS,
+ MV_CPU_CNTRS_TLB_MISS, MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_INVALID,
+ MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_BRANCH_TAKEN, MV_CPU_CNTRS_WB_FULL_CYCLES,
+ MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_DCACHE_READ_BEAT, MV_CPU_CNTRS_DCACHE_WRITE_BEAT,
+ MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_BIU_ANY_ACCESS,
+ MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_INVALID, MV_CPU_CNTRS_DATA_WRITE_ACCESS,
+ MV_CPU_CNTRS_INVALID,
+ }
+};
+
+MV_CPU_CNTRS_ENTRY mvCpuCntrsTbl[MV_CPU_CNTRS_NUM];
+
+MV_CPU_CNTRS_EVENT* mvCpuCntrsEventTbl[128];
+
+void mvCpuCntrsReset(void)
+{
+ MV_U32 reg = 0;
+
+ MV_ASM ("mcr p15, 0, %0, c15, c13, 0" : : "r" (reg));
+ MV_ASM ("mcr p15, 0, %0, c15, c13, 1" : : "r" (reg));
+ MV_ASM ("mcr p15, 0, %0, c15, c13, 2" : : "r" (reg));
+ MV_ASM ("mcr p15, 0, %0, c15, c13, 3" : : "r" (reg));
+ MV_ASM ("mcr p15, 0, %0, c15, c13, 4" : : "r" (reg));
+ MV_ASM ("mcr p15, 0, %0, c15, c13, 5" : : "r" (reg));
+ MV_ASM ("mcr p15, 0, %0, c15, c13, 6" : : "r" (reg));
+ MV_ASM ("mcr p15, 0, %0, c15, c13, 7" : : "r" (reg));
+}
+
+void program_counter(int counter, int op)
+{
+ MV_U32 reg = (1 << op) | 0x1; /*enable*/
+
+ switch(counter)
+ {
+ case 0:
+ __asm__ __volatile__ ("mcr p15, 0, %0, c15, c12, 0" : : "r" (reg));
+ return;
+
+ case 1:
+ __asm__ __volatile__ ("mcr p15, 0, %0, c15, c12, 1" : : "r" (reg));
+ return;
+
+ case 2:
+ __asm__ __volatile__ ("mcr p15, 0, %0, c15, c12, 2" : : "r" (reg));
+ return;
+
+ case 3:
+ __asm__ __volatile__ ("mcr p15, 0, %0, c15, c12, 3" : : "r" (reg));
+ return;
+
+ default:
+ mvOsPrintf("error in program_counter: bad counter number (%d)\n", counter);
+ }
+ return;
+}
+
+void mvCpuCntrsEventClear(MV_CPU_CNTRS_EVENT* pEvent)
+{
+ int i;
+
+ for(i=0; i<MV_CPU_CNTRS_NUM; i++)
+ {
+ pEvent->counters_sum[i] = 0;
+ }
+ pEvent->num_of_measurements = 0;
+}
+
+
+MV_CPU_CNTRS_EVENT* mvCpuCntrsEventCreate(char* name, MV_U32 print_threshold)
+{
+ int i;
+ MV_CPU_CNTRS_EVENT* event = mvOsMalloc(sizeof(MV_CPU_CNTRS_EVENT));
+
+ if(event)
+ {
+ strncpy(event->name, name, sizeof(event->name));
+ event->num_of_measurements = 0;
+ event->avg_sample_count = print_threshold;
+ for(i=0; i<MV_CPU_CNTRS_NUM; i++)
+ {
+ event->counters_before[i] = 0;
+ event->counters_after[i] = 0;
+ event->counters_sum[i] = 0;
+ }
+ }
+ return event;
+}
+
+void mvCpuCntrsEventDelete(MV_CPU_CNTRS_EVENT* event)
+{
+ if(event != NULL)
+ mvOsFree(event);
+}
+
+
+MV_STATUS mvCpuCntrsProgram(int counter, MV_CPU_CNTRS_OPS op,
+ char* name, MV_U32 overhead)
+{
+ int i;
+
+ /* Find required operations */
+ for(i=0; i<MV_CPU_CNTRS_OPS_NUM; i++)
+ {
+ if( mvCpuCntrsOpsTbl[counter][i] == op)
+ {
+ strncpy(mvCpuCntrsTbl[counter].name, name, sizeof(mvCpuCntrsTbl[counter].name));
+ mvCpuCntrsTbl[counter].operation = op;
+ mvCpuCntrsTbl[counter].opIdx = i+1;
+ mvCpuCntrsTbl[counter].overhead = overhead;
+ program_counter(counter, mvCpuCntrsTbl[counter].opIdx);
+ mvOsPrintf("Counter=%d, opIdx=%d, overhead=%d\n",
+ counter, mvCpuCntrsTbl[counter].opIdx, mvCpuCntrsTbl[counter].overhead);
+ return MV_OK;
+ }
+ }
+ return MV_NOT_FOUND;
+}
+
+void mvCpuCntrsShow(MV_CPU_CNTRS_EVENT* pEvent)
+{
+ int i;
+ MV_U64 counters_avg;
+
+ if(pEvent->num_of_measurements < pEvent->avg_sample_count)
+ return;
+
+ mvOsPrintf("%16s: ", pEvent->name);
+ for(i=0; i<MV_CPU_CNTRS_NUM; i++)
+ {
+ counters_avg = mvOsDivMod64(pEvent->counters_sum[i],
+ pEvent->num_of_measurements, NULL);
+ if(counters_avg >= mvCpuCntrsTbl[i].overhead)
+ counters_avg -= mvCpuCntrsTbl[i].overhead;
+ else
+ counters_avg = 0;
+
+ mvOsPrintf("%s=%5llu, ", mvCpuCntrsTbl[i].name, counters_avg);
+ }
+ mvOsPrintf("\n");
+ mvCpuCntrsEventClear(pEvent);
+ mvCpuCntrsReset();
+}
+
+void mvCpuCntrsStatus(void)
+{
+ int i;
+
+ for(i=0; i<MV_CPU_CNTRS_NUM; i++)
+ {
+ mvOsPrintf("#%d: %s, overhead=%d\n",
+ i, mvCpuCntrsTbl[i].name, mvCpuCntrsTbl[i].overhead);
+ }
+}
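
A usage sketch (illustrative only) for the CPU event-counter API above, assuming CONFIG_MV_CPU_PERF_CNTRS is enabled so the MV_CPU_CNTRS_* macros from mvCpuCntrs.h expand to real calls. The counter/operation pairs are taken from mvCpuCntrsOpsTbl; the names and overhead values are placeholders.

static void example_profile_region(void)
{
	MV_CPU_CNTRS_EVENT *ev;

	/* counter 0 supports CYCLES, counter 1 supports ICACHE_READ_MISS */
	mvCpuCntrsProgram(0, MV_CPU_CNTRS_CYCLES, "CYCLES", 0);
	mvCpuCntrsProgram(1, MV_CPU_CNTRS_ICACHE_READ_MISS, "IC_MISS", 0);

	/* threshold of 1: print averages after every measurement */
	ev = mvCpuCntrsEventCreate("example", 1);
	if (ev == NULL)
		return;

	MV_CPU_CNTRS_START(ev);
	/* ... code under measurement ... */
	MV_CPU_CNTRS_STOP(ev);
	MV_CPU_CNTRS_SHOW(ev);

	mvCpuCntrsEventDelete(ev);
}
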
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.h
new file mode 100644
index 000000000..8d49af009
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.h
@@ -0,0 +1,213 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef __mvCpuCntrs_h__
+#define __mvCpuCntrs_h__
+
+#include "mvTypes.h"
+#include "mvOs.h"
+
+
+#define MV_CPU_CNTRS_NUM 4
+#define MV_CPU_CNTRS_OPS_NUM 32
+
+typedef enum
+{
+ MV_CPU_CNTRS_INVALID = 0,
+ MV_CPU_CNTRS_CYCLES,
+ MV_CPU_CNTRS_ICACHE_READ_MISS,
+ MV_CPU_CNTRS_DCACHE_ACCESS,
+ MV_CPU_CNTRS_DCACHE_READ_MISS,
+ MV_CPU_CNTRS_DCACHE_READ_HIT,
+ MV_CPU_CNTRS_DCACHE_WRITE_MISS,
+ MV_CPU_CNTRS_DCACHE_WRITE_HIT,
+ MV_CPU_CNTRS_DTLB_MISS,
+ MV_CPU_CNTRS_TLB_MISS,
+ MV_CPU_CNTRS_ITLB_MISS,
+ MV_CPU_CNTRS_INSTRUCTIONS,
+ MV_CPU_CNTRS_SINGLE_ISSUE,
+ MV_CPU_CNTRS_MMU_READ_LATENCY,
+ MV_CPU_CNTRS_MMU_READ_BEAT,
+ MV_CPU_CNTRS_BRANCH_RETIRED,
+ MV_CPU_CNTRS_BRANCH_TAKEN,
+ MV_CPU_CNTRS_BRANCH_PREDICT_MISS,
+ MV_CPU_CNTRS_BRANCH_PREDICT_COUNT,
+ MV_CPU_CNTRS_WB_FULL_CYCLES,
+ MV_CPU_CNTRS_WB_WRITE_LATENCY,
+ MV_CPU_CNTRS_WB_WRITE_BEAT,
+ MV_CPU_CNTRS_ICACHE_READ_LATENCY,
+ MV_CPU_CNTRS_ICACHE_READ_BEAT,
+ MV_CPU_CNTRS_DCACHE_READ_LATENCY,
+ MV_CPU_CNTRS_DCACHE_READ_BEAT,
+ MV_CPU_CNTRS_DCACHE_WRITE_LATENCY,
+ MV_CPU_CNTRS_DCACHE_WRITE_BEAT,
+ MV_CPU_CNTRS_LDM_STM_HOLD,
+ MV_CPU_CNTRS_IS_HOLD,
+ MV_CPU_CNTRS_DATA_WRITE_ACCESS,
+ MV_CPU_CNTRS_DATA_READ_ACCESS,
+ MV_CPU_CNTRS_BIU_SIMULT_ACCESS,
+ MV_CPU_CNTRS_BIU_ANY_ACCESS,
+
+} MV_CPU_CNTRS_OPS;
+
+typedef struct
+{
+ char name[16];
+ MV_CPU_CNTRS_OPS operation;
+ int opIdx;
+ MV_U32 overhead;
+
+} MV_CPU_CNTRS_ENTRY;
+
+
+typedef struct
+{
+ char name[16];
+ MV_U32 num_of_measurements;
+ MV_U32 avg_sample_count;
+ MV_U64 counters_before[MV_CPU_CNTRS_NUM];
+ MV_U64 counters_after[MV_CPU_CNTRS_NUM];
+ MV_U64 counters_sum[MV_CPU_CNTRS_NUM];
+
+} MV_CPU_CNTRS_EVENT;
+
+extern MV_CPU_CNTRS_ENTRY mvCpuCntrsTbl[MV_CPU_CNTRS_NUM];
+
+
+MV_STATUS mvCpuCntrsProgram(int counter, MV_CPU_CNTRS_OPS op,
+ char* name, MV_U32 overhead);
+void mvCpuCntrsInit(void);
+MV_CPU_CNTRS_EVENT* mvCpuCntrsEventCreate(char* name, MV_U32 print_threshold);
+void mvCpuCntrsEventDelete(MV_CPU_CNTRS_EVENT* event);
+void mvCpuCntrsReset(void);
+void mvCpuCntrsShow(MV_CPU_CNTRS_EVENT* pEvent);
+void mvCpuCntrsEventClear(MV_CPU_CNTRS_EVENT* pEvent);
+
+/* internal */
+void program_counter(int counter, int op);
+
+static INLINE MV_U64 mvCpuCntrsRead(const int counter)
+{
+ MV_U32 low = 0, high = 0;
+ MV_U32 ll = 0;
+
+ switch(counter)
+ {
+ case 0:
+ MV_ASM ("mcr p15, 0, %0, c15, c12, 0" : : "r" (ll));
+ MV_ASM ("mrc p15, 0, %0, c15, c13, 0" : "=r" (low));
+ MV_ASM ("mrc p15, 0, %0, c15, c13, 1" : "=r" (high));
+ break;
+
+ case 1:
+ MV_ASM ("mcr p15, 0, %0, c15, c12, 1" : : "r" (ll));
+ MV_ASM ("mrc p15, 0, %0, c15, c13, 2" : "=r" (low));
+ MV_ASM ("mrc p15, 0, %0, c15, c13, 3" : "=r" (high));
+ break;
+
+ case 2:
+ MV_ASM ("mcr p15, 0, %0, c15, c12, 2" : : "r" (ll));
+ MV_ASM ("mrc p15, 0, %0, c15, c13, 4" : "=r" (low));
+ MV_ASM ("mrc p15, 0, %0, c15, c13, 5" : "=r" (high));
+ break;
+
+ case 3:
+ MV_ASM ("mcr p15, 0, %0, c15, c12, 3" : : "r" (ll));
+ MV_ASM ("mrc p15, 0, %0, c15, c13, 6" : "=r" (low));
+ MV_ASM ("mrc p15, 0, %0, c15, c13, 7" : "=r" (high));
+ break;
+
+ default:
+ mvOsPrintf("mv_cpu_cntrs_read: bad counter number (%d)\n", counter);
+ }
+ program_counter(counter, mvCpuCntrsTbl[counter].opIdx);
+ return (((MV_U64)high << 32 ) | low);
+
+}
+
+
+static INLINE void mvCpuCntrsReadBefore(MV_CPU_CNTRS_EVENT* pEvent)
+{
+#if 0
+ int i;
+
+ /* order is important - we want to measure the cycle count last here! */
+ for(i=0; i<MV_CPU_CNTRS_NUM; i++)
+ pEvent->counters_before[i] = mvCpuCntrsRead(i);
+#else
+ pEvent->counters_before[1] = mvCpuCntrsRead(1);
+ pEvent->counters_before[3] = mvCpuCntrsRead(3);
+ pEvent->counters_before[0] = mvCpuCntrsRead(0);
+ pEvent->counters_before[2] = mvCpuCntrsRead(2);
+#endif
+}
+
+static INLINE void mvCpuCntrsReadAfter(MV_CPU_CNTRS_EVENT* pEvent)
+{
+ int i;
+
+#if 0
+ /* order is important - we want to measure the cycle count first here! */
+ for(i=0; i<MV_CPU_CNTRS_NUM; i++)
+ pEvent->counters_after[i] = mvCpuCntrsRead(i);
+#else
+ pEvent->counters_after[2] = mvCpuCntrsRead(2);
+ pEvent->counters_after[0] = mvCpuCntrsRead(0);
+ pEvent->counters_after[3] = mvCpuCntrsRead(3);
+ pEvent->counters_after[1] = mvCpuCntrsRead(1);
+#endif
+
+ for(i=0; i<MV_CPU_CNTRS_NUM; i++)
+ {
+ pEvent->counters_sum[i] += (pEvent->counters_after[i] - pEvent->counters_before[i]);
+ }
+ pEvent->num_of_measurements++;
+}
+
+
+#ifdef CONFIG_MV_CPU_PERF_CNTRS
+
+#define MV_CPU_CNTRS_READ(counter) mvCpuCntrsRead(counter)
+
+#define MV_CPU_CNTRS_START(event) mvCpuCntrsReadBefore(event)
+
+#define MV_CPU_CNTRS_STOP(event) mvCpuCntrsReadAfter(event)
+
+#define MV_CPU_CNTRS_SHOW(event) mvCpuCntrsShow(event)
+
+#else
+
+#define MV_CPU_CNTRS_READ(counter)
+#define MV_CPU_CNTRS_START(event)
+#define MV_CPU_CNTRS_STOP(event)
+#define MV_CPU_CNTRS_SHOW(event)
+
+#endif /* CONFIG_MV_CPU_PERF_CNTRS */
+
+
+#endif /* __mvCpuCntrs_h__ */
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.c
new file mode 100644
index 000000000..033386224
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.c
@@ -0,0 +1,143 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "mvOs.h"
+#include "mvCpuL2Cntrs.h"
+
+
+
+MV_CPU_L2_CNTRS_ENTRY mvCpuL2CntrsTbl[MV_CPU_L2_CNTRS_NUM];
+
+MV_CPU_L2_CNTRS_EVENT* mvCpuL2CntrsEventTbl[128];
+
+void mvCpuL2CntrsReset(void)
+{
+ MV_U32 reg = 0;
+
+ MV_ASM ("mcr p15, 6, %0, c15, c13, 0" : : "r" (reg));
+ MV_ASM ("mcr p15, 6, %0, c15, c13, 1" : : "r" (reg));
+ MV_ASM ("mcr p15, 6, %0, c15, c13, 2" : : "r" (reg));
+ MV_ASM ("mcr p15, 6, %0, c15, c13, 3" : : "r" (reg));
+}
+
+static void mvCpuL2CntrConfig(int counter, int op)
+{
+ MV_U32 reg = (1 << op) | 0x1; /*enable*/
+
+ switch(counter)
+ {
+ case 0:
+ MV_ASM ("mcr p15, 6, %0, c15, c12, 0" : : "r" (reg));
+ return;
+
+ case 1:
+ MV_ASM ("mcr p15, 6, %0, c15, c12, 1" : : "r" (reg));
+ return;
+
+ default:
+ mvOsPrintf("mvCpuL2CntrConfig: bad counter number (%d)\n", counter);
+ }
+ return;
+}
+
+void mvCpuL2CntrsEventClear(MV_CPU_L2_CNTRS_EVENT* pEvent)
+{
+ int i;
+
+ for(i=0; i<MV_CPU_L2_CNTRS_NUM; i++)
+ {
+ pEvent->counters_sum[i] = 0;
+ }
+ pEvent->num_of_measurements = 0;
+}
+
+
+MV_CPU_L2_CNTRS_EVENT* mvCpuL2CntrsEventCreate(char* name, MV_U32 print_threshold)
+{
+ int i;
+ MV_CPU_L2_CNTRS_EVENT* event = mvOsMalloc(sizeof(MV_CPU_L2_CNTRS_EVENT));
+
+ if(event)
+ {
+ strncpy(event->name, name, sizeof(event->name));
+ event->num_of_measurements = 0;
+ event->avg_sample_count = print_threshold;
+ for(i=0; i<MV_CPU_L2_CNTRS_NUM; i++)
+ {
+ event->counters_before[i] = 0;
+ event->counters_after[i] = 0;
+ event->counters_sum[i] = 0;
+ }
+ }
+ return event;
+}
+
+void mvCpuL2CntrsEventDelete(MV_CPU_L2_CNTRS_EVENT* event)
+{
+ if(event != NULL)
+ mvOsFree(event);
+}
+
+
+MV_STATUS mvCpuL2CntrsProgram(int counter, MV_CPU_L2_CNTRS_OPS op,
+ char* name, MV_U32 overhead)
+{
+ strncpy(mvCpuL2CntrsTbl[counter].name, name, sizeof(mvCpuL2CntrsTbl[counter].name));
+ mvCpuL2CntrsTbl[counter].operation = op;
+ mvCpuL2CntrsTbl[counter].opIdx = op;
+ mvCpuL2CntrsTbl[counter].overhead = overhead;
+ mvCpuL2CntrConfig(counter, op);
+ mvOsPrintf("CPU L2 Counter %d: operation=%d, overhead=%d\n",
+ counter, op, overhead);
+ return MV_OK;
+}
+
+void mvCpuL2CntrsShow(MV_CPU_L2_CNTRS_EVENT* pEvent)
+{
+ int i;
+ MV_U64 counters_avg;
+
+ if(pEvent->num_of_measurements < pEvent->avg_sample_count)
+ return;
+
+ mvOsPrintf("%16s: ", pEvent->name);
+ for(i=0; i<MV_CPU_L2_CNTRS_NUM; i++)
+ {
+ counters_avg = mvOsDivMod64(pEvent->counters_sum[i],
+ pEvent->num_of_measurements, NULL);
+
+ if(counters_avg >= mvCpuL2CntrsTbl[i].overhead)
+ counters_avg -= mvCpuL2CntrsTbl[i].overhead;
+ else
+ counters_avg = 0;
+
+ mvOsPrintf("%s=%5llu, ", mvCpuL2CntrsTbl[i].name, counters_avg);
+ }
+ mvOsPrintf("\n");
+ mvCpuL2CntrsEventClear(pEvent);
+ mvCpuL2CntrsReset();
+}
+
+void mvCpuL2CntrsStatus(void)
+{
+ int i;
+
+ for(i=0; i<MV_CPU_L2_CNTRS_NUM; i++)
+ {
+ mvOsPrintf("#%d: %s, overhead=%d\n",
+ i, mvCpuL2CntrsTbl[i].name, mvCpuL2CntrsTbl[i].overhead);
+ }
+}
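
The analogous sketch for the two L2 cache counters (illustrative only, assuming CONFIG_MV_CPU_L2_PERF_CNTRS is enabled); the operation choices, names and overhead values are placeholders.

static void example_profile_l2(void)
{
	MV_CPU_L2_CNTRS_EVENT *ev;

	mvCpuL2CntrsProgram(0, MV_CPU_L2_CNTRS_DATA_REQ, "L2_DATA", 0);
	mvCpuL2CntrsProgram(1, MV_CPU_L2_CNTRS_DATA_MISS_REQ, "L2_MISS", 0);

	ev = mvCpuL2CntrsEventCreate("l2_example", 1);
	if (ev == NULL)
		return;

	MV_CPU_L2_CNTRS_START(ev);
	/* ... code under measurement ... */
	MV_CPU_L2_CNTRS_STOP(ev);
	MV_CPU_L2_CNTRS_SHOW(ev);

	mvCpuL2CntrsEventDelete(ev);
}
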
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.h
new file mode 100644
index 000000000..570d70195
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.h
@@ -0,0 +1,151 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef __mvCpuL2Cntrs_h__
+#define __mvCpuL2Cntrs_h__
+
+#include "mvTypes.h"
+#include "mvOs.h"
+
+
+#define MV_CPU_L2_CNTRS_NUM 2
+
+typedef enum
+{
+ MV_CPU_L2_CNTRS_ENABLE = 0,
+ MV_CPU_L2_CNTRS_DATA_REQ,
+ MV_CPU_L2_CNTRS_DATA_MISS_REQ,
+ MV_CPU_L2_CNTRS_INST_REQ,
+ MV_CPU_L2_CNTRS_INST_MISS_REQ,
+ MV_CPU_L2_CNTRS_DATA_READ_REQ,
+ MV_CPU_L2_CNTRS_DATA_READ_MISS_REQ,
+ MV_CPU_L2_CNTRS_DATA_WRITE_REQ,
+ MV_CPU_L2_CNTRS_DATA_WRITE_MISS_REQ,
+ MV_CPU_L2_CNTRS_RESERVED,
+ MV_CPU_L2_CNTRS_DIRTY_EVICT_REQ,
+ MV_CPU_L2_CNTRS_EVICT_BUFF_STALL,
+ MV_CPU_L2_CNTRS_ACTIVE_CYCLES,
+
+} MV_CPU_L2_CNTRS_OPS;
+
+typedef struct
+{
+ char name[16];
+ MV_CPU_L2_CNTRS_OPS operation;
+ int opIdx;
+ MV_U32 overhead;
+
+} MV_CPU_L2_CNTRS_ENTRY;
+
+
+typedef struct
+{
+ char name[16];
+ MV_U32 num_of_measurements;
+ MV_U32 avg_sample_count;
+ MV_U64 counters_before[MV_CPU_L2_CNTRS_NUM];
+ MV_U64 counters_after[MV_CPU_L2_CNTRS_NUM];
+ MV_U64 counters_sum[MV_CPU_L2_CNTRS_NUM];
+
+} MV_CPU_L2_CNTRS_EVENT;
+
+
+MV_STATUS mvCpuL2CntrsProgram(int counter, MV_CPU_L2_CNTRS_OPS op,
+ char* name, MV_U32 overhead);
+void mvCpuL2CntrsInit(void);
+MV_CPU_L2_CNTRS_EVENT* mvCpuL2CntrsEventCreate(char* name, MV_U32 print_threshold);
+void mvCpuL2CntrsEventDelete(MV_CPU_L2_CNTRS_EVENT* event);
+void mvCpuL2CntrsReset(void);
+void mvCpuL2CntrsShow(MV_CPU_L2_CNTRS_EVENT* pEvent);
+void mvCpuL2CntrsEventClear(MV_CPU_L2_CNTRS_EVENT* pEvent);
+
+static INLINE MV_U64 mvCpuL2CntrsRead(const int counter)
+{
+ MV_U32 low = 0, high = 0;
+
+ switch(counter)
+ {
+ case 0:
+ MV_ASM ("mrc p15, 6, %0, c15, c13, 0" : "=r" (low));
+ MV_ASM ("mrc p15, 6, %0, c15, c13, 1" : "=r" (high));
+ break;
+
+ case 1:
+ MV_ASM ("mrc p15, 6, %0, c15, c13, 2" : "=r" (low));
+ MV_ASM ("mrc p15, 6, %0, c15, c13, 3" : "=r" (high));
+ break;
+
+ default:
+ mvOsPrintf("mvCpuL2CntrsRead: bad counter number (%d)\n", counter);
+ }
+ return (((MV_U64)high << 32 ) | low);
+
+}
+
+static INLINE void mvCpuL2CntrsReadBefore(MV_CPU_L2_CNTRS_EVENT* pEvent)
+{
+ int i;
+
+ for(i=0; i<MV_CPU_L2_CNTRS_NUM; i++)
+ pEvent->counters_before[i] = mvCpuL2CntrsRead(i);
+}
+
+static INLINE void mvCpuL2CntrsReadAfter(MV_CPU_L2_CNTRS_EVENT* pEvent)
+{
+ int i;
+
+ for(i=0; i<MV_CPU_L2_CNTRS_NUM; i++)
+ {
+ pEvent->counters_after[i] = mvCpuL2CntrsRead(i);
+ pEvent->counters_sum[i] += (pEvent->counters_after[i] - pEvent->counters_before[i]);
+ }
+ pEvent->num_of_measurements++;
+}
+
+
+#ifdef CONFIG_MV_CPU_L2_PERF_CNTRS
+
+#define MV_CPU_L2_CNTRS_READ(counter) mvCpuL2CntrsRead(counter)
+
+#define MV_CPU_L2_CNTRS_START(event) mvCpuL2CntrsReadBefore(event)
+
+#define MV_CPU_L2_CNTRS_STOP(event) mvCpuL2CntrsReadAfter(event)
+
+#define MV_CPU_L2_CNTRS_SHOW(event) mvCpuL2CntrsShow(event)
+
+#else
+
+#define MV_CPU_L2_CNTRS_READ(counter)
+#define MV_CPU_L2_CNTRS_START(event)
+#define MV_CPU_L2_CNTRS_STOP(event)
+#define MV_CPU_L2_CNTRS_SHOW(event)
+
+#endif /* CONFIG_MV_CPU_L2_PERF_CNTRS */
+
+
+#endif /* __mvCpuL2Cntrs_h__ */
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvCompVer.txt b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvCompVer.txt
new file mode 100644
index 000000000..85bfa612c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvCompVer.txt
@@ -0,0 +1,4 @@
+Global HAL Version: FEROCEON_HAL_3_1_7
+Unit HAL Version: 3.1.3
+Description: This component includes an implementation of the unit HAL drivers
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.c
new file mode 100644
index 000000000..d1b8a3d37
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.c
@@ -0,0 +1,1479 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "ddr1_2/mvDram.h"
+#include "boardEnv/mvBoardEnvLib.h"
+
+#undef MV_DEBUG
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+static MV_VOID cpyDimm2BankInfo(MV_DIMM_INFO *pDimmInfo,
+ MV_DRAM_BANK_INFO *pBankInfo);
+static MV_U32 cas2ps(MV_U8 spd_byte);
+/*******************************************************************************
+* mvDramBankInfoGet - Get the DRAM bank parameters.
+*
+* DESCRIPTION:
+* This function retrieves the DRAM bank parameters, as described by the
+* MV_DRAM_BANK_INFO struct, for the controller DRAM unit. If the board
+* has its DRAM on DIMMs, the DIMM EEPROM is used to extract the SPD data
+* from it. Otherwise, if the DRAM is soldered on board, the function
+* should fill its bank information into the MV_DRAM_BANK_INFO struct.
+*
+* INPUT:
+* bankNum - Board DRAM bank number.
+*
+* OUTPUT:
+* pBankInfo - DRAM bank information struct.
+*
+* RETURN:
+* MV_OK - Bank parameters were read successfully.
+* MV_BAD_PARAM - Bad input parameters.
+* MV_FAIL - Bank parameters could not be read.
+*
+*******************************************************************************/
+MV_STATUS mvDramBankInfoGet(MV_U32 bankNum, MV_DRAM_BANK_INFO *pBankInfo)
+{
+ MV_DIMM_INFO dimmInfo;
+
+ DB(mvOsPrintf("Dram: mvDramBankInfoGet bank %d\n", bankNum));
+ /* zero pBankInfo structure */
+ memset(pBankInfo, 0, sizeof(*pBankInfo));
+
+ if((NULL == pBankInfo) || (bankNum >= MV_DRAM_MAX_CS ))
+ {
+ DB(mvOsPrintf("Dram: mvDramBankInfoGet bad params \n"));
+ return MV_BAD_PARAM;
+ }
+ if( MV_OK != dimmSpdGet((MV_U32)(bankNum/2), &dimmInfo))
+ {
+ DB(mvOsPrintf("Dram: ERR dimmSpdGet failed to get dimm info \n"));
+ return MV_FAIL;
+ }
+ if((dimmInfo.numOfModuleBanks == 1) && ((bankNum % 2) == 1))
+ {
+ DB(mvOsPrintf("Dram: ERR dimmSpdGet. Can't find DIMM bank 2 \n"));
+ return MV_FAIL;
+ }
+
+ /* convert Dimm info to Bank info */
+ cpyDimm2BankInfo(&dimmInfo, pBankInfo);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* cpyDimm2BankInfo - Convert a Dimm info struct into a bank info struct.
+*
+* DESCRIPTION:
+* Convert a Dimm info struct into a bank info struct.
+*
+* INPUT:
+* pDimmInfo - DIMM information structure.
+*
+* OUTPUT:
+* pBankInfo - DRAM bank information struct.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+static MV_VOID cpyDimm2BankInfo(MV_DIMM_INFO *pDimmInfo,
+ MV_DRAM_BANK_INFO *pBankInfo)
+{
+ pBankInfo->memoryType = pDimmInfo->memoryType;
+
+ /* DIMM dimensions */
+ pBankInfo->numOfRowAddr = pDimmInfo->numOfRowAddr;
+ pBankInfo->numOfColAddr = pDimmInfo->numOfColAddr;
+ pBankInfo->dataWidth = pDimmInfo->dataWidth;
+ pBankInfo->errorCheckType = pDimmInfo->errorCheckType;
+ pBankInfo->sdramWidth = pDimmInfo->sdramWidth;
+ pBankInfo->errorCheckDataWidth = pDimmInfo->errorCheckDataWidth;
+ pBankInfo->numOfBanksOnEachDevice = pDimmInfo->numOfBanksOnEachDevice;
+ pBankInfo->suportedCasLatencies = pDimmInfo->suportedCasLatencies;
+ pBankInfo->refreshInterval = pDimmInfo->refreshInterval;
+
+ /* DIMM timing parameters */
+ pBankInfo->minCycleTimeAtMaxCasLatPs = pDimmInfo->minCycleTimeAtMaxCasLatPs;
+ pBankInfo->minCycleTimeAtMaxCasLatMinus1Ps =
+ pDimmInfo->minCycleTimeAtMaxCasLatMinus1Ps;
+ pBankInfo->minCycleTimeAtMaxCasLatMinus2Ps =
+ pDimmInfo->minCycleTimeAtMaxCasLatMinus2Ps;
+
+ pBankInfo->minRowPrechargeTime = pDimmInfo->minRowPrechargeTime;
+ pBankInfo->minRowActiveToRowActive = pDimmInfo->minRowActiveToRowActive;
+ pBankInfo->minRasToCasDelay = pDimmInfo->minRasToCasDelay;
+ pBankInfo->minRasPulseWidth = pDimmInfo->minRasPulseWidth;
+ pBankInfo->minWriteRecoveryTime = pDimmInfo->minWriteRecoveryTime;
+ pBankInfo->minWriteToReadCmdDelay = pDimmInfo->minWriteToReadCmdDelay;
+ pBankInfo->minReadToPrechCmdDelay = pDimmInfo->minReadToPrechCmdDelay;
+ pBankInfo->minRefreshToActiveCmd = pDimmInfo->minRefreshToActiveCmd;
+
+ /* Parameters calculated from the extracted DIMM information */
+ pBankInfo->size = pDimmInfo->size/pDimmInfo->numOfModuleBanks;
+ pBankInfo->deviceDensity = pDimmInfo->deviceDensity;
+ pBankInfo->numberOfDevices = pDimmInfo->numberOfDevices /
+ pDimmInfo->numOfModuleBanks;
+
+ /* DIMM attributes (MV_TRUE for yes) */
+
+ if ((pDimmInfo->memoryType == MEM_TYPE_SDRAM) ||
+ (pDimmInfo->memoryType == MEM_TYPE_DDR1) )
+ {
+ if (pDimmInfo->dimmAttributes & BIT1)
+ pBankInfo->registeredAddrAndControlInputs = MV_TRUE;
+ else
+ pBankInfo->registeredAddrAndControlInputs = MV_FALSE;
+ }
+ else /* pDimmInfo->memoryType == MEM_TYPE_DDR2 */
+ {
+ if (pDimmInfo->dimmTypeInfo & (BIT0 | BIT4))
+ pBankInfo->registeredAddrAndControlInputs = MV_TRUE;
+ else
+ pBankInfo->registeredAddrAndControlInputs = MV_FALSE;
+ }
+
+ return;
+}
+
+/*******************************************************************************
+* dimmSpdCpy - Copy SPD parameters from DIMM 0 to DIMM 1.
+*
+* DESCRIPTION:
+* Read the SPD contents from the DIMM 0 EEPROM and write them into the
+* DIMM 1 SPD EEPROM.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_OK if the SPD contents were copied successfully, MV_FAIL otherwise.
+*
+*******************************************************************************/
+MV_STATUS dimmSpdCpy(MV_VOID)
+{
+ MV_U32 i;
+ MV_U32 spdChecksum;
+
+ MV_TWSI_SLAVE twsiSlave;
+ MV_U8 data[SPD_SIZE];
+
+ /* zero dimmInfo structure */
+ memset(data, 0, SPD_SIZE);
+
+ /* read the dimm eeprom */
+ DB(mvOsPrintf("DRAM: Read Dimm eeprom\n"));
+ twsiSlave.slaveAddr.address = MV_BOARD_DIMM0_I2C_ADDR;
+ twsiSlave.slaveAddr.type = ADDR7_BIT;
+ twsiSlave.validOffset = MV_TRUE;
+ twsiSlave.offset = 0;
+ twsiSlave.moreThen256 = MV_FALSE;
+
+ if( MV_OK != mvTwsiRead (MV_BOARD_DIMM_I2C_CHANNEL,
+ &twsiSlave, data, SPD_SIZE) )
+ {
+ DB(mvOsPrintf("DRAM: ERR. no DIMM in dimmNum 0\n"));
+ return MV_FAIL;
+ }
+ DB(puts("DRAM: Reading dimm info succeeded.\n"));
+
+ /* calculate SPD checksum */
+ spdChecksum = 0;
+
+ for(i = 0 ; i <= 62 ; i++)
+ {
+ spdChecksum += data[i];
+ }
+
+ if ((spdChecksum & 0xff) != data[63])
+ {
+ DB(mvOsPrintf("DRAM: Warning. Wrong SPD Checksum %2x, expValue=%2x\n",
+ (MV_U32)(spdChecksum & 0xff), data[63]));
+ }
+ else
+ {
+ DB(mvOsPrintf("DRAM: SPD Checksum ok!\n"));
+ }
+
+ /* copy the SPD content 1:1 into the DIMM 1 SPD */
+ twsiSlave.slaveAddr.address = MV_BOARD_DIMM1_I2C_ADDR;
+ twsiSlave.slaveAddr.type = ADDR7_BIT;
+ twsiSlave.validOffset = MV_TRUE;
+ twsiSlave.offset = 0;
+ twsiSlave.moreThen256 = MV_FALSE;
+
+ for(i = 0 ; i < SPD_SIZE ; i++)
+ {
+ twsiSlave.offset = i;
+ if( MV_OK != mvTwsiWrite (MV_BOARD_DIMM_I2C_CHANNEL,
+ &twsiSlave, &data[i], 1) )
+ {
+ mvOsPrintf("DRAM: ERR. no DIMM in dimmNum 1 byte %d \n",i);
+ return MV_FAIL;
+ }
+ mvOsDelay(5);
+ }
+
+ DB(puts("DRAM: Writing dimm info succeeded.\n"));
+ return MV_OK;
+}
+
+/*******************************************************************************
+* dimmSpdGet - Get the SPD parameters.
+*
+* DESCRIPTION:
+* Read the DIMM SPD parameters into given struct parameter.
+*
+* INPUT:
+* dimmNum - DIMM number. See MV_BOARD_DIMM_NUM enumerator.
+*
+* OUTPUT:
+* pDimmInfo - DIMM information structure.
+*
+* RETURN:
+* MV_OK if the DIMM parameters were read successfully, MV_BAD_PARAM on
+* bad input parameters, MV_FAIL otherwise.
+*
+*******************************************************************************/
+MV_STATUS dimmSpdGet(MV_U32 dimmNum, MV_DIMM_INFO *pDimmInfo)
+{
+ MV_U32 i;
+ MV_U32 density = 1;
+ MV_U32 spdChecksum;
+
+ MV_TWSI_SLAVE twsiSlave;
+ MV_U8 data[SPD_SIZE];
+
+ if((NULL == pDimmInfo)|| (dimmNum >= MAX_DIMM_NUM))
+ {
+ DB(mvOsPrintf("Dram: mvDramBankInfoGet bad params \n"));
+ return MV_BAD_PARAM;
+ }
+
+ /* zero dimmInfo structure */
+ memset(data, 0, SPD_SIZE);
+
+ /* read the dimm eeprom */
+ DB(mvOsPrintf("DRAM: Read Dimm eeprom\n"));
+ twsiSlave.slaveAddr.address = (dimmNum == 0) ?
+ MV_BOARD_DIMM0_I2C_ADDR : MV_BOARD_DIMM1_I2C_ADDR;
+ twsiSlave.slaveAddr.type = ADDR7_BIT;
+ twsiSlave.validOffset = MV_TRUE;
+ twsiSlave.offset = 0;
+ twsiSlave.moreThen256 = MV_FALSE;
+
+ if( MV_OK != mvTwsiRead (MV_BOARD_DIMM_I2C_CHANNEL,
+ &twsiSlave, data, SPD_SIZE) )
+ {
+ DB(mvOsPrintf("DRAM: ERR. no DIMM in dimmNum %d \n", dimmNum));
+ return MV_FAIL;
+ }
+ DB(puts("DRAM: Reading dimm info succeeded.\n"));
+
+ /* calculate SPD checksum */
+ spdChecksum = 0;
+
+ for(i = 0 ; i <= 62 ; i++)
+ {
+ spdChecksum += data[i];
+ }
+
+ if ((spdChecksum & 0xff) != data[63])
+ {
+ DB(mvOsPrintf("DRAM: Warning. Wrong SPD Checksum %2x, expValue=%2x\n",
+ (MV_U32)(spdChecksum & 0xff), data[63]));
+ }
+ else
+ {
+ DB(mvOsPrintf("DRAM: SPD Checksum ok!\n"));
+ }
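+ /* Example (illustrative): if SPD bytes 0..62 sum to 0x1234, the low */
+ /* byte 0x34 must match the checksum stored in SPD byte 63. */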
+
+ /* copy the SPD content 1:1 into the dimmInfo structure*/
+ for(i = 0 ; i < SPD_SIZE ; i++)
+ {
+ pDimmInfo->spdRawData[i] = data[i];
+ DB(mvOsPrintf("SPD-EEPROM Byte %3d = %3x (%3d)\n",i, data[i], data[i]));
+ }
+
+ DB(mvOsPrintf("DRAM SPD Information:\n"));
+
+ /* Memory type (DDR / SDRAM) */
+ switch (data[DIMM_MEM_TYPE])
+ {
+ case (DIMM_MEM_TYPE_SDRAM):
+ pDimmInfo->memoryType = MEM_TYPE_SDRAM;
+ DB(mvOsPrintf("DRAM Memeory type SDRAM\n"));
+ break;
+ case (DIMM_MEM_TYPE_DDR1):
+ pDimmInfo->memoryType = MEM_TYPE_DDR1;
+ DB(mvOsPrintf("DRAM Memeory type DDR1\n"));
+ break;
+ case (DIMM_MEM_TYPE_DDR2):
+ pDimmInfo->memoryType = MEM_TYPE_DDR2;
+ DB(mvOsPrintf("DRAM Memeory type DDR2\n"));
+ break;
+ default:
+ mvOsPrintf("ERROR: Undefined memory type!\n");
+ return MV_ERROR;
+ }
+
+
+ /* Number Of Row Addresses */
+ pDimmInfo->numOfRowAddr = data[DIMM_ROW_NUM];
+ DB(mvOsPrintf("DRAM numOfRowAddr[3] %d\n",pDimmInfo->numOfRowAddr));
+
+ /* Number Of Column Addresses */
+ pDimmInfo->numOfColAddr = data[DIMM_COL_NUM];
+ DB(mvOsPrintf("DRAM numOfColAddr[4] %d\n",pDimmInfo->numOfColAddr));
+
+ /* Number Of Module Banks */
+ pDimmInfo->numOfModuleBanks = data[DIMM_MODULE_BANK_NUM];
+ DB(mvOsPrintf("DRAM numOfModuleBanks[5] 0x%x\n",
+ pDimmInfo->numOfModuleBanks));
+
+ /* Number of module banks encoded differently for DDR2 */
+ if (pDimmInfo->memoryType == MEM_TYPE_DDR2)
+ pDimmInfo->numOfModuleBanks = (pDimmInfo->numOfModuleBanks & 0x7)+1;
+
+ /* Data Width */
+ pDimmInfo->dataWidth = data[DIMM_DATA_WIDTH];
+ DB(mvOsPrintf("DRAM dataWidth[6] 0x%x\n", pDimmInfo->dataWidth));
+
+ /* Minimum Cycle Time At Max CAS Latency */
+ pDimmInfo->minCycleTimeAtMaxCasLatPs = cas2ps(data[DIMM_MIN_CC_AT_MAX_CAS]);
+
+ /* Error Check Type */
+ pDimmInfo->errorCheckType = data[DIMM_ERR_CHECK_TYPE];
+ DB(mvOsPrintf("DRAM errorCheckType[11] 0x%x\n",
+ pDimmInfo->errorCheckType));
+
+ /* Refresh Interval */
+ pDimmInfo->refreshInterval = data[DIMM_REFRESH_INTERVAL];
+ DB(mvOsPrintf("DRAM refreshInterval[12] 0x%x\n",
+ pDimmInfo->refreshInterval));
+
+ /* Sdram Width */
+ pDimmInfo->sdramWidth = data[DIMM_SDRAM_WIDTH];
+ DB(mvOsPrintf("DRAM sdramWidth[13] 0x%x\n",pDimmInfo->sdramWidth));
+
+ /* Error Check Data Width */
+ pDimmInfo->errorCheckDataWidth = data[DIMM_ERR_CHECK_DATA_WIDTH];
+ DB(mvOsPrintf("DRAM errorCheckDataWidth[14] 0x%x\n",
+ pDimmInfo->errorCheckDataWidth));
+
+ /* Burst Length Supported */
+ /* SDRAM/DDR1:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ burst length = * Page | TBD | TBD | TBD | 8 | 4 | 2 | 1 *
+ *********************************************************/
+ /* DDR2:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ burst length = * Page | TBD | TBD | TBD | 8 | 4 | TBD | TBD *
+ *********************************************************/
+
+ pDimmInfo->burstLengthSupported = data[DIMM_BURST_LEN_SUP];
+ DB(mvOsPrintf("DRAM burstLengthSupported[16] 0x%x\n",
+ pDimmInfo->burstLengthSupported));
+
+ /* Number Of Banks On Each Device */
+ pDimmInfo->numOfBanksOnEachDevice = data[DIMM_DEV_BANK_NUM];
+ DB(mvOsPrintf("DRAM numOfBanksOnEachDevice[17] 0x%x\n",
+ pDimmInfo->numOfBanksOnEachDevice));
+
+ /* Supported CAS Latencies */
+
+ /* SDRAM:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ CAS = * TBD | 7 | 6 | 5 | 4 | 3 | 2 | 1 *
+ ********************************************************/
+
+ /* DDR 1:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ CAS = * TBD | 4 | 3.5 | 3 | 2.5 | 2 | 1.5 | 1 *
+ *********************************************************/
+
+ /* DDR 2:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ CAS = * TBD | TBD | 5 | 4 | 3 | 2 | TBD | TBD *
+ *********************************************************/
+
+ pDimmInfo->suportedCasLatencies = data[DIMM_SUP_CAL];
+ DB(mvOsPrintf("DRAM suportedCasLatencies[18] 0x%x\n",
+ pDimmInfo->suportedCasLatencies));
+
+ /* For DDR2 only, get the DIMM type information */
+ if (pDimmInfo->memoryType == MEM_TYPE_DDR2)
+ {
+ pDimmInfo->dimmTypeInfo = data[DIMM_DDR2_TYPE_INFORMATION];
+ DB(mvOsPrintf("DRAM dimmTypeInfo[20] (DDR2) 0x%x\n",
+ pDimmInfo->dimmTypeInfo));
+ }
+
+ /* SDRAM Modules Attributes */
+ pDimmInfo->dimmAttributes = data[DIMM_BUF_ADDR_CONT_IN];
+ DB(mvOsPrintf("DRAM dimmAttributes[21] 0x%x\n",
+ pDimmInfo->dimmAttributes));
+
+ /* Minimum Cycle Time At Max CAS Latency Minus 1 */
+ pDimmInfo->minCycleTimeAtMaxCasLatMinus1Ps =
+ cas2ps(data[DIMM_MIN_CC_AT_MAX_CAS_MINUS1]);
+
+ /* Minimum Cycle Time At Max CAS Latency Minus 2 */
+ pDimmInfo->minCycleTimeAtMaxCasLatMinus2Ps =
+ cas2ps(data[DIMM_MIN_CC_AT_MAX_CAS_MINUS2]);
+
+ pDimmInfo->minRowPrechargeTime = data[DIMM_MIN_ROW_PRECHARGE_TIME];
+ DB(mvOsPrintf("DRAM minRowPrechargeTime[27] 0x%x\n",
+ pDimmInfo->minRowPrechargeTime));
+ pDimmInfo->minRowActiveToRowActive = data[DIMM_MIN_ROW_ACTIVE_TO_ROW_ACTIVE];
+ DB(mvOsPrintf("DRAM minRowActiveToRowActive[28] 0x%x\n",
+ pDimmInfo->minRowActiveToRowActive));
+ pDimmInfo->minRasToCasDelay = data[DIMM_MIN_RAS_TO_CAS_DELAY];
+ DB(mvOsPrintf("DRAM minRasToCasDelay[29] 0x%x\n",
+ pDimmInfo->minRasToCasDelay));
+ pDimmInfo->minRasPulseWidth = data[DIMM_MIN_RAS_PULSE_WIDTH];
+ DB(mvOsPrintf("DRAM minRasPulseWidth[30] 0x%x\n",
+ pDimmInfo->minRasPulseWidth));
+
+ /* DIMM Bank Density */
+ pDimmInfo->dimmBankDensity = data[DIMM_BANK_DENSITY];
+ DB(mvOsPrintf("DRAM dimmBankDensity[31] 0x%x\n",
+ pDimmInfo->dimmBankDensity));
+
+ /* Only DDR2 includes Write Recovery Time field. Other SDRAM ignore */
+ pDimmInfo->minWriteRecoveryTime = data[DIMM_MIN_WRITE_RECOVERY_TIME];
+ DB(mvOsPrintf("DRAM minWriteRecoveryTime[36] 0x%x\n",
+ pDimmInfo->minWriteRecoveryTime));
+
+ /* Only DDR2 includes Internal Write To Read Command Delay field. */
+ pDimmInfo->minWriteToReadCmdDelay = data[DIMM_MIN_WRITE_TO_READ_CMD_DELAY];
+ DB(mvOsPrintf("DRAM minWriteToReadCmdDelay[37] 0x%x\n",
+ pDimmInfo->minWriteToReadCmdDelay));
+
+ /* Only DDR2 includes Internal Read To Precharge Command Delay field. */
+ pDimmInfo->minReadToPrechCmdDelay = data[DIMM_MIN_READ_TO_PRECH_CMD_DELAY];
+ DB(mvOsPrintf("DRAM minReadToPrechCmdDelay[38] 0x%x\n",
+ pDimmInfo->minReadToPrechCmdDelay));
+
+ /* Only DDR2 includes Minimum Refresh to Activate/Refresh Command field */
+ pDimmInfo->minRefreshToActiveCmd = data[DIMM_MIN_REFRESH_TO_ACTIVATE_CMD];
+ DB(mvOsPrintf("DRAM minRefreshToActiveCmd[42] 0x%x\n",
+ pDimmInfo->minRefreshToActiveCmd));
+
+ /* Calculate the SDRAM device density. The density is represented in */
+ /* units of 2^20 (Mbit) to allow representation of 4Gbit and above. */
+ /* For example, a density of 512Mbit (0x20000000) is represented in */
+ /* deviceDensity as 0x20000000 >> 20 --> 0x00000200. Another example */
+ /* is a density of 8Gbit: 0x200000000 >> 20 --> 0x00002000. */
+ density = (1 << ((pDimmInfo->numOfRowAddr + pDimmInfo->numOfColAddr) - 20));
+ pDimmInfo->deviceDensity = density *
+ pDimmInfo->numOfBanksOnEachDevice *
+ pDimmInfo->sdramWidth;
+ DB(mvOsPrintf("DRAM deviceDensity %d\n",pDimmInfo->deviceDensity));
+
+ /* Number of devices including error correction devices */
+ pDimmInfo->numberOfDevices = (pDimmInfo->dataWidth/pDimmInfo->sdramWidth) *
+ pDimmInfo->numOfModuleBanks;
+ DB(mvOsPrintf("DRAM numberOfDevices %d\n",
+ pDimmInfo->numberOfDevices));
+
+ pDimmInfo->size = 0;
+
+ /* Note that pDimmInfo->size is in MB units */
+ if (pDimmInfo->memoryType == MEM_TYPE_SDRAM)
+ {
+ if (pDimmInfo->dimmBankDensity & BIT0)
+ pDimmInfo->size += 1024; /* Equal to 1GB */
+ else if (pDimmInfo->dimmBankDensity & BIT1)
+ pDimmInfo->size += 8; /* Equal to 8MB */
+ else if (pDimmInfo->dimmBankDensity & BIT2)
+ pDimmInfo->size += 16; /* Equal to 16MB */
+ else if (pDimmInfo->dimmBankDensity & BIT3)
+ pDimmInfo->size += 32; /* Equal to 32MB */
+ else if (pDimmInfo->dimmBankDensity & BIT4)
+ pDimmInfo->size += 64; /* Equal to 64MB */
+ else if (pDimmInfo->dimmBankDensity & BIT5)
+ pDimmInfo->size += 128; /* Equal to 128MB */
+ else if (pDimmInfo->dimmBankDensity & BIT6)
+ pDimmInfo->size += 256; /* Equal to 256MB */
+ else if (pDimmInfo->dimmBankDensity & BIT7)
+ pDimmInfo->size += 512; /* Equal to 512MB */
+ }
+ else if (pDimmInfo->memoryType == MEM_TYPE_DDR1)
+ {
+ if (pDimmInfo->dimmBankDensity & BIT0)
+ pDimmInfo->size += 1024; /* Equal to 1GB */
+ else if (pDimmInfo->dimmBankDensity & BIT1)
+ pDimmInfo->size += 2048; /* Equal to 2GB */
+ else if (pDimmInfo->dimmBankDensity & BIT2)
+ pDimmInfo->size += 16; /* Equal to 16MB */
+ else if (pDimmInfo->dimmBankDensity & BIT3)
+ pDimmInfo->size += 32; /* Equal to 32MB */
+ else if (pDimmInfo->dimmBankDensity & BIT4)
+ pDimmInfo->size += 64; /* Equal to 64MB */
+ else if (pDimmInfo->dimmBankDensity & BIT5)
+ pDimmInfo->size += 128; /* Equal to 128MB */
+ else if (pDimmInfo->dimmBankDensity & BIT6)
+ pDimmInfo->size += 256; /* Equal to 256MB */
+ else if (pDimmInfo->dimmBankDensity & BIT7)
+ pDimmInfo->size += 512; /* Equal to 512MB */
+ }
+ else /* if (dimmInfo.memoryType == MEM_TYPE_DDR2) */
+ {
+ if (pDimmInfo->dimmBankDensity & BIT0)
+ pDimmInfo->size += 1024; /* Equal to 1GB */
+ else if (pDimmInfo->dimmBankDensity & BIT1)
+ pDimmInfo->size += 2048; /* Equal to 2GB */
+ else if (pDimmInfo->dimmBankDensity & BIT2)
+ pDimmInfo->size += 4096; /* Equal to 4GB */
+ else if (pDimmInfo->dimmBankDensity & BIT3)
+ pDimmInfo->size += 8192; /* Equal to 8GB */
+ else if (pDimmInfo->dimmBankDensity & BIT4)
+ pDimmInfo->size += 16384; /* Equal to 16GB */
+ else if (pDimmInfo->dimmBankDensity & BIT5)
+ pDimmInfo->size += 128; /* Equal to 128MB */
+ else if (pDimmInfo->dimmBankDensity & BIT6)
+ pDimmInfo->size += 256; /* Equal to 256MB */
+ else if (pDimmInfo->dimmBankDensity & BIT7)
+ pDimmInfo->size += 512; /* Equal to 512MB */
+ }
+
+ pDimmInfo->size *= pDimmInfo->numOfModuleBanks;
+
+ DB(mvOsPrintf("Dram: dimm size %dMB \n",pDimmInfo->size));
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* dimmSpdPrint - Print the SPD parameters.
+*
+* DESCRIPTION:
+* Print the Dimm SPD parameters.
+*
+* INPUT:
+* dimmNum - DIMM number. See MV_BOARD_DIMM_NUM enumerator.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID dimmSpdPrint(MV_U32 dimmNum)
+{
+ MV_DIMM_INFO dimmInfo;
+ MV_U32 i, temp = 0;
+ MV_U32 k, maskLeftOfPoint = 0, maskRightOfPoint = 0;
+ MV_U32 rightOfPoint = 0,leftOfPoint = 0, div, time_tmp, shift;
+ MV_U32 busClkPs;
+ MV_U8 trp_clocks=0, trcd_clocks, tras_clocks, trrd_clocks,
+ temp_buf[40], *spdRawData;
+
+ busClkPs = 1000000000 / (mvBoardSysClkGet() / 100); /* in 10 ps units */
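+ /* Worked example (illustrative): with a 200MHz system clock, */
+ /* busClkPs = 1000000000 / (200000000 / 100) = 500, i.e. a 5ns cycle */
+ /* time expressed in 10 ps units. */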
+
+ spdRawData = dimmInfo.spdRawData;
+
+ if(MV_OK != dimmSpdGet(dimmNum, &dimmInfo))
+ {
+ mvOsOutput("ERROR: Could not read SPD information!\n");
+ return;
+ }
+
+ /* find manufacturer of DIMM module */
+ mvOsOutput("\nManufacturer's JEDEC ID Code: ");
+ for(i = 0 ; i < DIMM_MODULE_MANU_SIZE ; i++)
+ {
+ mvOsOutput("%x",spdRawData[DIMM_MODULE_MANU_OFFS + i]);
+ }
+ mvOsOutput("\n");
+
+ /* Manufacturer's Specific Data */
+ for(i = 0 ; i < DIMM_MODULE_ID_SIZE ; i++)
+ {
+ temp_buf[i] = spdRawData[DIMM_MODULE_ID_OFFS + i];
+ }
+ temp_buf[i] = '\0'; /* terminate the string before printing with %s */
+ mvOsOutput("Manufacturer's Specific Data: %s\n", temp_buf);
+
+ /* Module Part Number */
+ for(i = 0 ; i < DIMM_MODULE_VEN_SIZE ; i++)
+ {
+ temp_buf[i] = spdRawData[DIMM_MODULE_VEN_OFFS + i];
+ }
+ temp_buf[i] = '\0'; /* terminate the string before printing with %s */
+ mvOsOutput("Module Part Number: %s\n", temp_buf);
+
+ /* Module Serial Number */
+ for(i = 0; i < sizeof(MV_U32); i++)
+ {
+ temp |= spdRawData[95+i] << 8*i;
+ }
+ mvOsOutput("DIMM Serial No. %ld (%lx)\n", (long)temp,
+ (long)temp);
+
+ /* find manufacturing date of DIMM module */
+ mvOsOutput("Manufacturing Date: Year 20%d%d/ ww %d%d\n",
+ ((spdRawData[93] & 0xf0) >> 4), (spdRawData[93] & 0xf),
+ ((spdRawData[94] & 0xf0) >> 4), (spdRawData[94] & 0xf));
+ /* find module revision of DIMM module */
+ mvOsOutput("Module Revision: %d.%d\n",
+ spdRawData[91], spdRawData[92]);
+
+ /* find manufacturing location of DIMM module */
+ mvOsOutput("Manufacturing Location: %d\n", spdRawData[72]);
+
+ /* go over SPD bytes 2 through 36 */
+ for(i = 2 ; i <= 36 ; i++)
+ switch(i)
+ {
+ case 2: /* Memory type (DDR1/2 / SDRAM) */
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ mvOsOutput("Dram Type is: SDRAM\n");
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+ mvOsOutput("Dram Type is: SDRAM DDR1\n");
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+ mvOsOutput("Dram Type is: SDRAM DDR2\n");
+ else
+ mvOsOutput("Dram Type unknown\n");
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 3: /* Number Of Row Addresses */
+ mvOsOutput("Module Number of row addresses: %d\n",
+ dimmInfo.numOfRowAddr);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 4: /* Number Of Column Addresses */
+ mvOsOutput("Module Number of col addresses: %d\n",
+ dimmInfo.numOfColAddr);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 5: /* Number Of Module Banks */
+ mvOsOutput("Number of Banks on Mod.: %d\n",
+ dimmInfo.numOfModuleBanks);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 6: /* Data Width */
+ mvOsOutput("Module Data Width: %d bit\n",
+ dimmInfo.dataWidth);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 8: /* Voltage Interface */
+ switch(spdRawData[i])
+ {
+ case 0x0:
+ mvOsOutput("Module is TTL_5V_TOLERANT\n");
+ break;
+ case 0x1:
+ mvOsOutput("Module is LVTTL\n");
+ break;
+ case 0x2:
+ mvOsOutput("Module is HSTL_1_5V\n");
+ break;
+ case 0x3:
+ mvOsOutput("Module is SSTL_3_3V\n");
+ break;
+ case 0x4:
+ mvOsOutput("Module is SSTL_2_5V\n");
+ break;
+ case 0x5:
+ if (dimmInfo.memoryType != MEM_TYPE_SDRAM)
+ {
+ mvOsOutput("Module is SSTL_1_8V\n");
+ break;
+ }
+ default:
+ mvOsOutput("Module is VOLTAGE_UNKNOWN\n");
+ break;
+ }
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 9: /* Minimum Cycle Time At Max CAS Latency */
+ leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+ rightOfPoint = (spdRawData[i] & 0x0f) * 10;
+
+ /* DDR2 addition of right of point */
+ if ((spdRawData[i] & 0x0f) == 0xA)
+ {
+ rightOfPoint = 25;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xB)
+ {
+ rightOfPoint = 33;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xC)
+ {
+ rightOfPoint = 66;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xD)
+ {
+ rightOfPoint = 75;
+ }
+ mvOsOutput("Minimum Cycle Time At Max CL: %d.%d [ns]\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 10: /* Clock To Data Out */
+ div = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 10:100;
+ time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+ ((spdRawData[i] & 0x0f));
+ leftOfPoint = time_tmp / div;
+ rightOfPoint = time_tmp % div;
+ mvOsOutput("Clock To Data Out: %d.%d [ns]\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 11: /* Error Check Type */
+ mvOsOutput("Error Check Type (0=NONE): %d\n",
+ dimmInfo.errorCheckType);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 12: /* Refresh Interval */
+ mvOsOutput("Refresh Rate: %x\n",
+ dimmInfo.refreshInterval);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 13: /* Sdram Width */
+ mvOsOutput("Sdram Width: %d bits\n",
+ dimmInfo.sdramWidth);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 14: /* Error Check Data Width */
+ mvOsOutput("Error Check Data Width: %d bits\n",
+ dimmInfo.errorCheckDataWidth);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 15: /* Minimum Clock Delay back to back (SDRAM/DDR1 only) */
+ if ((dimmInfo.memoryType == MEM_TYPE_SDRAM) ||
+ (dimmInfo.memoryType == MEM_TYPE_DDR1))
+ {
+ mvOsOutput("Minimum Clk Delay back to back: %d\n",
+ spdRawData[i]);
+ }
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 16: /* Burst Length Supported */
+ /* SDRAM/DDR1:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ burst length = * Page | TBD | TBD | TBD | 8 | 4 | 2 | 1 *
+ *********************************************************/
+ /* DDR2:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ burst length = * Page | TBD | TBD | TBD | 8 | 4 | TBD | TBD *
+ *********************************************************/
+ mvOsOutput("Burst Length Supported: ");
+ if ((dimmInfo.memoryType == MEM_TYPE_SDRAM) ||
+ (dimmInfo.memoryType == MEM_TYPE_DDR1))
+ {
+ if (dimmInfo.burstLengthSupported & BIT0)
+ mvOsOutput("1, ");
+ if (dimmInfo.burstLengthSupported & BIT1)
+ mvOsOutput("2, ");
+ }
+ if (dimmInfo.burstLengthSupported & BIT2)
+ mvOsOutput("4, ");
+ if (dimmInfo.burstLengthSupported & BIT3)
+ mvOsOutput("8, ");
+
+ mvOsOutput(" Bit \n");
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 17: /* Number Of Banks On Each Device */
+ mvOsOutput("Number Of Banks On Each Chip: %d\n",
+ dimmInfo.numOfBanksOnEachDevice);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 18: /* Supported CAS Latencies */
+
+ /* SDRAM:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ CAS = * TBD | 7 | 6 | 5 | 4 | 3 | 2 | 1 *
+ ********************************************************/
+
+ /* DDR 1:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ CAS = * TBD | 4 | 3.5 | 3 | 2.5 | 2 | 1.5 | 1 *
+ *********************************************************/
+
+ /* DDR 2:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ CAS = * TBD | TBD | 5 | 4 | 3 | 2 | TBD | TBD *
+ *********************************************************/
+
+ mvOsOutput("Suported Cas Latencies: (CL) ");
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ for (k = 0; k <=7; k++)
+ {
+ if (dimmInfo.suportedCasLatencies & (1 << k))
+ mvOsOutput("%d, ", k+1);
+ }
+ }
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+ {
+ if (dimmInfo.suportedCasLatencies & BIT0)
+ mvOsOutput("1, ");
+ if (dimmInfo.suportedCasLatencies & BIT1)
+ mvOsOutput("1.5, ");
+ if (dimmInfo.suportedCasLatencies & BIT2)
+ mvOsOutput("2, ");
+ if (dimmInfo.suportedCasLatencies & BIT3)
+ mvOsOutput("2.5, ");
+ if (dimmInfo.suportedCasLatencies & BIT4)
+ mvOsOutput("3, ");
+ if (dimmInfo.suportedCasLatencies & BIT5)
+ mvOsOutput("3.5, ");
+ }
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+ {
+ if (dimmInfo.suportedCasLatencies & BIT2)
+ mvOsOutput("2, ");
+ if (dimmInfo.suportedCasLatencies & BIT3)
+ mvOsOutput("3, ");
+ if (dimmInfo.suportedCasLatencies & BIT4)
+ mvOsOutput("4, ");
+ if (dimmInfo.suportedCasLatencies & BIT5)
+ mvOsOutput("5, ");
+ }
+ else
+ mvOsOutput("?.?, ");
+ mvOsOutput("\n");
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 20: /* DDR2 DIMM type info */
+ if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+ {
+ if (dimmInfo.dimmTypeInfo & (BIT0 | BIT4))
+ mvOsOutput("Registered DIMM (RDIMM)\n");
+ else if (dimmInfo.dimmTypeInfo & (BIT1 | BIT5))
+ mvOsOutput("Unbuffered DIMM (UDIMM)\n");
+ else
+ mvOsOutput("Unknown DIMM type.\n");
+ }
+
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 21: /* SDRAM Modules Attributes */
+ mvOsOutput("\nModule Attributes (SPD Byte 21): \n");
+
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ if (dimmInfo.dimmAttributes & BIT0)
+ mvOsOutput(" Buffered Addr/Control Input: Yes\n");
+ else
+ mvOsOutput(" Buffered Addr/Control Input: No\n");
+
+ if (dimmInfo.dimmAttributes & BIT1)
+ mvOsOutput(" Registered Addr/Control Input: Yes\n");
+ else
+ mvOsOutput(" Registered Addr/Control Input: No\n");
+
+ if (dimmInfo.dimmAttributes & BIT2)
+ mvOsOutput(" On-Card PLL (clock): Yes \n");
+ else
+ mvOsOutput(" On-Card PLL (clock): No \n");
+
+ if (dimmInfo.dimmAttributes & BIT3)
+ mvOsOutput(" Bufferd DQMB Input: Yes \n");
+ else
+ mvOsOutput(" Bufferd DQMB Inputs: No \n");
+
+ if (dimmInfo.dimmAttributes & BIT4)
+ mvOsOutput(" Registered DQMB Inputs: Yes \n");
+ else
+ mvOsOutput(" Registered DQMB Inputs: No \n");
+
+ if (dimmInfo.dimmAttributes & BIT5)
+ mvOsOutput(" Differential Clock Input: Yes \n");
+ else
+ mvOsOutput(" Differential Clock Input: No \n");
+
+ if (dimmInfo.dimmAttributes & BIT6)
+ mvOsOutput(" redundant Row Addressing: Yes \n");
+ else
+ mvOsOutput(" redundant Row Addressing: No \n");
+ }
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+ {
+ if (dimmInfo.dimmAttributes & BIT0)
+ mvOsOutput(" Buffered Addr/Control Input: Yes\n");
+ else
+ mvOsOutput(" Buffered Addr/Control Input: No\n");
+
+ if (dimmInfo.dimmAttributes & BIT1)
+ mvOsOutput(" Registered Addr/Control Input: Yes\n");
+ else
+ mvOsOutput(" Registered Addr/Control Input: No\n");
+
+ if (dimmInfo.dimmAttributes & BIT2)
+ mvOsOutput(" On-Card PLL (clock): Yes \n");
+ else
+ mvOsOutput(" On-Card PLL (clock): No \n");
+
+ if (dimmInfo.dimmAttributes & BIT3)
+ mvOsOutput(" FET Switch On-Card Enabled: Yes \n");
+ else
+ mvOsOutput(" FET Switch On-Card Enabled: No \n");
+
+ if (dimmInfo.dimmAttributes & BIT4)
+ mvOsOutput(" FET Switch External Enabled: Yes \n");
+ else
+ mvOsOutput(" FET Switch External Enabled: No \n");
+
+ if (dimmInfo.dimmAttributes & BIT5)
+ mvOsOutput(" Differential Clock Input: Yes \n");
+ else
+ mvOsOutput(" Differential Clock Input: No \n");
+ }
+ else /* if (dimmInfo.memoryType == MEM_TYPE_DDR2) */
+ {
+ mvOsOutput(" Number of Active Registers on the DIMM: %d\n",
+ (dimmInfo.dimmAttributes & 0x3) + 1);
+
+ mvOsOutput(" Number of PLLs on the DIMM: %d\n",
+ ((dimmInfo.dimmAttributes) >> 2) & 0x3);
+
+ if (dimmInfo.dimmAttributes & BIT4)
+ mvOsOutput(" FET Switch External Enabled: Yes \n");
+ else
+ mvOsOutput(" FET Switch External Enabled: No \n");
+
+ if (dimmInfo.dimmAttributes & BIT6)
+ mvOsOutput(" Analysis probe installed: Yes \n");
+ else
+ mvOsOutput(" Analysis probe installed: No \n");
+ }
+
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 22: /* Supported Auto Precharge */
+ mvOsOutput("\nModule Attributes (SPD Byte 22): \n");
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ if ( spdRawData[i] & BIT0 )
+ mvOsOutput(" Early Ras Precharge: Yes \n");
+ else
+ mvOsOutput(" Early Ras Precharge: No \n");
+
+ if ( spdRawData[i] & BIT1 )
+ mvOsOutput(" AutoPreCharge: Yes \n");
+ else
+ mvOsOutput(" AutoPreCharge: No \n");
+
+ if ( spdRawData[i] & BIT2 )
+ mvOsOutput(" Precharge All: Yes \n");
+ else
+ mvOsOutput(" Precharge All: No \n");
+
+ if ( spdRawData[i] & BIT3 )
+ mvOsOutput(" Write 1/ReadBurst: Yes \n");
+ else
+ mvOsOutput(" Write 1/ReadBurst: No \n");
+
+ if ( spdRawData[i] & BIT4 )
+ mvOsOutput(" lower VCC tolerance: 5%%\n");
+ else
+ mvOsOutput(" lower VCC tolerance: 10%%\n");
+
+ if ( spdRawData[i] & BIT5 )
+ mvOsOutput(" upper VCC tolerance: 5%%\n");
+ else
+ mvOsOutput(" upper VCC tolerance: 10%%\n");
+ }
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+ {
+ if ( spdRawData[i] & BIT0 )
+ mvOsOutput(" Supports Weak Driver: Yes \n");
+ else
+ mvOsOutput(" Supports Weak Driver: No \n");
+
+ if ( !(spdRawData[i] & BIT4) )
+ mvOsOutput(" lower VCC tolerance: 0.2V\n");
+
+ if ( !(spdRawData[i] & BIT5) )
+ mvOsOutput(" upper VCC tolerance: 0.2V\n");
+
+ if ( spdRawData[i] & BIT6 )
+ mvOsOutput(" Concurrent Auto Preharge: Yes \n");
+ else
+ mvOsOutput(" Concurrent Auto Preharge: No \n");
+
+ if ( spdRawData[i] & BIT7 )
+ mvOsOutput(" Supports Fast AP: Yes \n");
+ else
+ mvOsOutput(" Supports Fast AP: No \n");
+ }
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+ {
+ if ( spdRawData[i] & BIT0 )
+ mvOsOutput(" Supports Weak Driver: Yes \n");
+ else
+ mvOsOutput(" Supports Weak Driver: No \n");
+ }
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 23:
+ /* Minimum Cycle Time At Maximum CAS Latency Minus 1 (2nd highest CL) */
+ leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+ rightOfPoint = (spdRawData[i] & 0x0f) * 10;
+
+ /* DDR2 addition of right of point */
+ if ((spdRawData[i] & 0x0f) == 0xA)
+ {
+ rightOfPoint = 25;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xB)
+ {
+ rightOfPoint = 33;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xC)
+ {
+ rightOfPoint = 66;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xD)
+ {
+ rightOfPoint = 75;
+ }
+
+ mvOsOutput("Minimum Cycle Time At 2nd highest CasLatancy"
+ "(0 = Not supported): %d.%d [ns]\n",
+ leftOfPoint, rightOfPoint );
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 24: /* Clock To Data Out 2nd highest Cas Latency Value*/
+ div = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ? 10:100;
+ time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+ ((spdRawData[i] & 0x0f));
+ leftOfPoint = time_tmp / div;
+ rightOfPoint = time_tmp % div;
+ mvOsOutput("Clock To Data Out (2nd CL value): %d.%d [ns]\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 25:
+ /* Minimum Cycle Time At Maximum CAS Latency Minus 2 (3rd highest CL) */
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ leftOfPoint = (spdRawData[i] & 0xfc) >> 2;
+ rightOfPoint = (spdRawData[i] & 0x3) * 25;
+ }
+ else /* DDR1 or DDR2 */
+ {
+ leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+ rightOfPoint = (spdRawData[i] & 0x0f) * 10;
+
+ /* DDR2 addition of right of point */
+ if ((spdRawData[i] & 0x0f) == 0xA)
+ {
+ rightOfPoint = 25;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xB)
+ {
+ rightOfPoint = 33;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xC)
+ {
+ rightOfPoint = 66;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xD)
+ {
+ rightOfPoint = 75;
+ }
+ }
+ mvOsOutput("Minimum Cycle Time At 3rd highest CasLatancy"
+ "(0 = Not supported): %d.%d [ns]\n",
+ leftOfPoint, rightOfPoint );
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 26: /* Clock To Data Out 3rd highest Cas Latency Value*/
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ leftOfPoint = (spdRawData[i] & 0xfc) >> 2;
+ rightOfPoint = (spdRawData[i] & 0x3) * 25;
+ }
+ else /* DDR1 or DDR2 */
+ {
+ time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+ ((spdRawData[i] & 0x0f));
+ leftOfPoint = 0;
+ rightOfPoint = time_tmp;
+ }
+ mvOsOutput("Clock To Data Out (3rd CL value): %d.%2d[ns]\n",
+ leftOfPoint, rightOfPoint );
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 27: /* Minimum Row Precharge Time */
+ shift = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 0:2;
+ maskLeftOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+ 0xff : 0xfc;
+ maskRightOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+ 0x00 : 0x03;
+ leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> shift);
+ rightOfPoint = (spdRawData[i] & maskRightOfPoint)*25;
+ temp = ((leftOfPoint*100) + rightOfPoint);/* in 10ps Intervals*/
+ trp_clocks = (temp + (busClkPs-1)) / busClkPs;
+ mvOsOutput("Minimum Row Precharge Time [ns]: %d.%d = "
+ "in Clk cycles %d\n",
+ leftOfPoint, rightOfPoint, trp_clocks);
+ break;
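+ /* Worked example (illustrative): a DDR SPD byte of 0x3C gives */
+ /* leftOfPoint = 0x3C >> 2 = 15 and rightOfPoint = 0, i.e. 15.0ns. */
+ /* temp = 1500 (10ps units); with busClkPs = 500 (5ns cycle), */
+ /* trp_clocks = (1500 + 499) / 500 = 3 clock cycles. */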
+/*----------------------------------------------------------------------------*/
+
+ case 28: /* Minimum Row Active to Row Active Time */
+ shift = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 0:2;
+ maskLeftOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+ 0xff : 0xfc;
+ maskRightOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+ 0x00 : 0x03;
+ leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> shift);
+ rightOfPoint = (spdRawData[i] & maskRightOfPoint)*25;
+ temp = ((leftOfPoint*100) + rightOfPoint);/* in 10ps Intervals*/
+ trrd_clocks = (temp + (busClkPs-1)) / busClkPs;
+ mvOsOutput("Minimum Row Active -To- Row Active Delay [ns]: "
+ "%d.%d = in Clk cycles %d\n",
+ leftOfPoint, rightOfPoint, trrd_clocks);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 29: /* Minimum Ras-To-Cas Delay */
+ shift = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 0:2;
+ maskLeftOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+ 0xff : 0xfc;
+ maskRightOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+ 0x00 : 0x03;
+ leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> shift);
+ rightOfPoint = (spdRawData[i] & maskRightOfPoint)*25;
+ temp = ((leftOfPoint*100) + rightOfPoint);/* in 10ps Intervals*/
+ trcd_clocks = (temp + (busClkPs-1) )/ busClkPs;
+ mvOsOutput("Minimum Ras-To-Cas Delay [ns]: %d.%d = "
+ "in Clk cycles %d\n",
+ leftOfPoint, rightOfPoint, trcd_clocks);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 30: /* Minimum Ras Pulse Width */
+ tras_clocks = (cas2ps(spdRawData[i])+(busClkPs-1)) / busClkPs;
+ mvOsOutput("Minimum Ras Pulse Width [ns]: %d = "
+ "in Clk cycles %d\n", spdRawData[i], tras_clocks);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 31: /* Module Bank Density */
+ mvOsOutput("Module Bank Density (more than 1= Multisize-Module):");
+
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ if (dimmInfo.dimmBankDensity & BIT0)
+ mvOsOutput("1GB, ");
+ if (dimmInfo.dimmBankDensity & BIT1)
+ mvOsOutput("8MB, ");
+ if (dimmInfo.dimmBankDensity & BIT2)
+ mvOsOutput("16MB, ");
+ if (dimmInfo.dimmBankDensity & BIT3)
+ mvOsOutput("32MB, ");
+ if (dimmInfo.dimmBankDensity & BIT4)
+ mvOsOutput("64MB, ");
+ if (dimmInfo.dimmBankDensity & BIT5)
+ mvOsOutput("128MB, ");
+ if (dimmInfo.dimmBankDensity & BIT6)
+ mvOsOutput("256MB, ");
+ if (dimmInfo.dimmBankDensity & BIT7)
+ mvOsOutput("512MB, ");
+ }
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+ {
+ if (dimmInfo.dimmBankDensity & BIT0)
+ mvOsOutput("1GB, ");
+ if (dimmInfo.dimmBankDensity & BIT1)
+ mvOsOutput("2GB, ");
+ if (dimmInfo.dimmBankDensity & BIT2)
+ mvOsOutput("16MB, ");
+ if (dimmInfo.dimmBankDensity & BIT3)
+ mvOsOutput("32MB, ");
+ if (dimmInfo.dimmBankDensity & BIT4)
+ mvOsOutput("64MB, ");
+ if (dimmInfo.dimmBankDensity & BIT5)
+ mvOsOutput("128MB, ");
+ if (dimmInfo.dimmBankDensity & BIT6)
+ mvOsOutput("256MB, ");
+ if (dimmInfo.dimmBankDensity & BIT7)
+ mvOsOutput("512MB, ");
+ }
+ else /* if (dimmInfo.memoryType == MEM_TYPE_DDR2) */
+ {
+ if (dimmInfo.dimmBankDensity & BIT0)
+ mvOsOutput("1GB, ");
+ if (dimmInfo.dimmBankDensity & BIT1)
+ mvOsOutput("2GB, ");
+ if (dimmInfo.dimmBankDensity & BIT2)
+ mvOsOutput("4GB, ");
+ if (dimmInfo.dimmBankDensity & BIT3)
+ mvOsOutput("8GB, ");
+ if (dimmInfo.dimmBankDensity & BIT4)
+ mvOsOutput("16GB, ");
+ if (dimmInfo.dimmBankDensity & BIT5)
+ mvOsOutput("128MB, ");
+ if (dimmInfo.dimmBankDensity & BIT6)
+ mvOsOutput("256MB, ");
+ if (dimmInfo.dimmBankDensity & BIT7)
+ mvOsOutput("512MB, ");
+ }
+ mvOsOutput("\n");
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 32: /* Address And Command Setup Time (measured in ns/1000) */
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ rightOfPoint = (spdRawData[i] & 0x0f);
+ leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+ if(leftOfPoint > 7)
+ {
+ leftOfPoint *= -1;
+ }
+ }
+ else /* DDR1 or DDR2 */
+ {
+ time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+ ((spdRawData[i] & 0x0f));
+ leftOfPoint = time_tmp / 100;
+ rightOfPoint = time_tmp % 100;
+ }
+ mvOsOutput("Address And Command Setup Time [ns]: %d.%d\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 33: /* Address And Command Hold Time */
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ rightOfPoint = (spdRawData[i] & 0x0f);
+ leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+ if(leftOfPoint > 7)
+ {
+ leftOfPoint *= -1;
+ }
+ }
+ else /* DDR1 or DDR2 */
+ {
+ time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+ ((spdRawData[i] & 0x0f));
+ leftOfPoint = time_tmp / 100;
+ rightOfPoint = time_tmp % 100;
+ }
+ mvOsOutput("Address And Command Hold Time [ns]: %d.%d\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 34: /* Data Input Setup Time */
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ rightOfPoint = (spdRawData[i] & 0x0f);
+ leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+ if(leftOfPoint > 7)
+ {
+ leftOfPoint *= -1;
+ }
+ }
+ else /* DDR1 or DDR2 */
+ {
+ time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+ ((spdRawData[i] & 0x0f));
+ leftOfPoint = time_tmp / 100;
+ rightOfPoint = time_tmp % 100;
+ }
+ mvOsOutput("Data Input Setup Time [ns]: %d.%d\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 35: /* Data Input Hold Time */
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ rightOfPoint = (spdRawData[i] & 0x0f);
+ leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+ if(leftOfPoint > 7)
+ {
+ leftOfPoint *= -1;
+ }
+ }
+ else /* DDR1 or DDR2 */
+ {
+ time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+ ((spdRawData[i] & 0x0f));
+ leftOfPoint = time_tmp / 100;
+ rightOfPoint = time_tmp % 100;
+ }
+ mvOsOutput("Data Input Hold Time [ns]: %d.%d\n\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 36: /* Relevant for DDR2 only: Write Recovery Time */
+ leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> 2);
+ rightOfPoint = (spdRawData[i] & maskRightOfPoint) * 25;
+ mvOsOutput("Write Recovery Time [ns]: %d.%d\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+ }
+
+}
+
+
+/*
+ * translate ns.ns/10 coding of SPD timing values
+ * into ps unit values
+ */
+/*******************************************************************************
+* cas2ps - Translate x.y ns parameter to pico-seconds values
+*
+* DESCRIPTION:
+* This function translates x.y nano seconds to its value in pico seconds.
+* For example 3.75ns will return 3750.
+*
+* INPUT:
+* spd_byte - DIMM SPD byte.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* value in pico seconds.
+*
+*******************************************************************************/
+static MV_U32 cas2ps(MV_U8 spd_byte)
+{
+ MV_U32 ns, ns10;
+
+ /* isolate upper nibble */
+ ns = (spd_byte >> 4) & 0x0F;
+ /* isolate lower nibble */
+ ns10 = (spd_byte & 0x0F);
+
+ if( ns10 < 10 ) {
+ ns10 *= 10;
+ }
+ else if( ns10 == 10 )
+ ns10 = 25;
+ else if( ns10 == 11 )
+ ns10 = 33;
+ else if( ns10 == 12 )
+ ns10 = 66;
+ else if( ns10 == 13 )
+ ns10 = 75;
+ else
+ {
+ mvOsOutput("cas2ps Err. unsupported cycle time.\n");
+ }
+
+ return (ns*1000 + ns10*10);
+}
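+
+/*
+ * Worked examples (illustrative): an SPD byte of 0x3D decodes as 3.75ns,
+ * so cas2ps() returns 3*1000 + 75*10 = 3750ps; a byte of 0x75 decodes as
+ * 7.5ns and returns 7*1000 + 50*10 = 7500ps.
+ */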
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.h
new file mode 100644
index 000000000..678e22456
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.h
@@ -0,0 +1,191 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDram
+#define __INCmvDram
+
+#include "ddr1_2/mvDramIf.h"
+#include "twsi/mvTwsi.h"
+
+#define MAX_DIMM_NUM 2
+#define SPD_SIZE 128
+
+/* Dimm spd offsets */
+#define DIMM_MEM_TYPE 2
+#define DIMM_ROW_NUM 3
+#define DIMM_COL_NUM 4
+#define DIMM_MODULE_BANK_NUM 5
+#define DIMM_DATA_WIDTH 6
+#define DIMM_VOLT_IF 8
+#define DIMM_MIN_CC_AT_MAX_CAS 9
+#define DIMM_ERR_CHECK_TYPE 11
+#define DIMM_REFRESH_INTERVAL 12
+#define DIMM_SDRAM_WIDTH 13
+#define DIMM_ERR_CHECK_DATA_WIDTH 14
+#define DIMM_MIN_CLK_DEL 15
+#define DIMM_BURST_LEN_SUP 16
+#define DIMM_DEV_BANK_NUM 17
+#define DIMM_SUP_CAL 18
+#define DIMM_DDR2_TYPE_INFORMATION 20 /* DDR2 only */
+#define DIMM_BUF_ADDR_CONT_IN 21
+#define DIMM_MIN_CC_AT_MAX_CAS_MINUS1 23
+#define DIMM_MIN_CC_AT_MAX_CAS_MINUS2 25
+#define DIMM_MIN_ROW_PRECHARGE_TIME 27
+#define DIMM_MIN_ROW_ACTIVE_TO_ROW_ACTIVE 28
+#define DIMM_MIN_RAS_TO_CAS_DELAY 29
+#define DIMM_MIN_RAS_PULSE_WIDTH 30
+#define DIMM_BANK_DENSITY 31
+#define DIMM_MIN_WRITE_RECOVERY_TIME 36
+#define DIMM_MIN_WRITE_TO_READ_CMD_DELAY 37
+#define DIMM_MIN_READ_TO_PRECH_CMD_DELAY 38
+#define DIMM_MIN_REFRESH_TO_ACTIVATE_CMD 42
+
+/* Dimm Memory Type values */
+#define DIMM_MEM_TYPE_SDRAM 0x4
+#define DIMM_MEM_TYPE_DDR1 0x7
+#define DIMM_MEM_TYPE_DDR2 0x8
+
+#define DIMM_MODULE_MANU_OFFS 64
+#define DIMM_MODULE_MANU_SIZE 8
+#define DIMM_MODULE_VEN_OFFS 73
+#define DIMM_MODULE_VEN_SIZE 25
+#define DIMM_MODULE_ID_OFFS 99
+#define DIMM_MODULE_ID_SIZE 18
+
+/* enumeration for voltage levels. */
+typedef enum _mvDimmVoltageIf
+{
+ TTL_5V_TOLERANT,
+ LVTTL,
+ HSTL_1_5V,
+ SSTL_3_3V,
+ SSTL_2_5V,
+ VOLTAGE_UNKNOWN,
+} MV_DIMM_VOLTAGE_IF;
+
+
+/* enumeration for SDRAM CAS Latencies. */
+typedef enum _mvDimmSdramCas
+{
+ SD_CL_1 =1,
+ SD_CL_2,
+ SD_CL_3,
+ SD_CL_4,
+ SD_CL_5,
+ SD_CL_6,
+ SD_CL_7,
+ SD_FAULT
+}MV_DIMM_SDRAM_CAS;
+
+
+/* DIMM information structure */
+typedef struct _mvDimmInfo
+{
+ MV_MEMORY_TYPE memoryType; /* DDR or SDRAM */
+
+ MV_U8 spdRawData[SPD_SIZE]; /* Content of SPD-EEPROM copied 1:1 */
+
+ /* DIMM dimensions */
+ MV_U32 numOfRowAddr;
+ MV_U32 numOfColAddr;
+ MV_U32 numOfModuleBanks;
+ MV_U32 dataWidth;
+ MV_U32 errorCheckType; /* ECC , PARITY..*/
+ MV_U32 sdramWidth; /* 4,8,16 or 32 */
+ MV_U32 errorCheckDataWidth; /* 0 - no, 1 - Yes */
+ MV_U32 burstLengthSupported;
+ MV_U32 numOfBanksOnEachDevice;
+ MV_U32 suportedCasLatencies;
+ MV_U32 refreshInterval;
+ MV_U32 dimmBankDensity;
+ MV_U32 dimmTypeInfo; /* DDR2 only */
+ MV_U32 dimmAttributes;
+
+ /* DIMM timing parameters */
+ MV_U32 minCycleTimeAtMaxCasLatPs;
+ MV_U32 minCycleTimeAtMaxCasLatMinus1Ps;
+ MV_U32 minCycleTimeAtMaxCasLatMinus2Ps;
+ MV_U32 minRowPrechargeTime;
+ MV_U32 minRowActiveToRowActive;
+ MV_U32 minRasToCasDelay;
+ MV_U32 minRasPulseWidth;
+ MV_U32 minWriteRecoveryTime; /* DDR2 only */
+ MV_U32 minWriteToReadCmdDelay; /* DDR2 only */
+ MV_U32 minReadToPrechCmdDelay; /* DDR2 only */
+ MV_U32 minRefreshToActiveCmd; /* DDR2 only */
+
+ /* Parameters calculated from the extracted DIMM information */
+ MV_U32 size; /* DIMM size in MB units (e.g. 64, 128, 256, 512) */
+ MV_U32 deviceDensity; /* SDRAM device density in Mbit units */
+ MV_U32 numberOfDevices;
+
+} MV_DIMM_INFO;
+
+
+MV_STATUS mvDramBankInfoGet(MV_U32 bankNum, MV_DRAM_BANK_INFO *pBankInfo);
+MV_STATUS dimmSpdGet(MV_U32 dimmNum, MV_DIMM_INFO *pDimmInfo);
+MV_VOID dimmSpdPrint(MV_U32 dimmNum);
+MV_STATUS dimmSpdCpy(MV_VOID);
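+
+/*
+ * Usage sketch (illustrative only, not part of the original Marvell sources):
+ * a typical boot-time flow for a board with one DIMM socket; mvOsPrintf()
+ * is assumed to be available from the OS services layer.
+ *
+ *	MV_DRAM_BANK_INFO bankInfo;
+ *
+ *	if (mvDramBankInfoGet(0, &bankInfo) != MV_OK)
+ *		mvOsPrintf("DRAM: failed to read bank 0 info\n");
+ *	dimmSpdPrint(0);
+ */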
+
+#endif /* __INCmvDram */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.c
new file mode 100644
index 000000000..12fb26ad0
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.c
@@ -0,0 +1,1599 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/* includes */
+#include "ddr1_2/mvDramIf.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+
+
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+/* DRAM bank presence encoding */
+#define BANK_PRESENT_CS0 0x1
+#define BANK_PRESENT_CS0_CS1 0x3
+#define BANK_PRESENT_CS0_CS2 0x5
+#define BANK_PRESENT_CS0_CS1_CS2 0x7
+#define BANK_PRESENT_CS0_CS2_CS3 0xd
+#define BANK_PRESENT_CS0_CS2_CS3_CS4 0xf
+
+/* locals */
+static MV_BOOL sdramIfWinOverlap(MV_TARGET target, MV_ADDR_WIN *pAddrWin);
+#if defined(MV_INC_BOARD_DDIM)
+static void sdramDDr2OdtConfig(MV_DRAM_BANK_INFO *pBankInfo);
+static MV_U32 dunitCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 minCas);
+static MV_U32 sdramModeRegCalc(MV_U32 minCas);
+static MV_U32 sdramExtModeRegCalc(MV_DRAM_BANK_INFO *pBankInfo);
+static MV_U32 sdramAddrCtrlRegCalc(MV_DRAM_BANK_INFO *pBankInfo);
+static MV_U32 sdramConfigRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk);
+static MV_U32 minCasCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk,
+ MV_U32 forcedCl);
+static MV_U32 sdramTimeCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo,
+ MV_U32 minCas, MV_U32 busClk);
+static MV_U32 sdramTimeCtrlHighRegCalc(MV_DRAM_BANK_INFO *pBankInfo,
+ MV_U32 busClk);
+
+/*******************************************************************************
+* mvDramIfDetect - Prepare DRAM interface configuration values.
+*
+* DESCRIPTION:
+* This function implements the full DRAM detection and timing
+* configuration for best system performance.
+* Since this routine runs from a ROM device (Boot Flash), its stack
+* resides in RAM, which might be the system DRAM. Changing DRAM
+* configuration values while keeping vital data in DRAM is risky. That
+* is why the function does not perform the configuration settings but
+* prepares them in predefined 32-bit registers (in this case IDMA
+* registers are used) for another routine to apply.
+* The function requests the board DRAM SPD information for each DRAM
+* chip select. It then analyzes the SPD parameters of all DRAM banks
+* in order to decide on a DRAM configuration compatible with all of
+* them.
+* The function also sets the CPU DRAM address decode registers.
+* Note: This routine prepares values that will override the configuration
+* done by mvDramBasicAsmInit().
+*
+* INPUT:
+* forcedCl - Forced CAS Latency. If equal to zero, do not force.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_OK on success, MV_ERROR or MV_BAD_PARAM on failure.
+*
+*******************************************************************************/
+MV_STATUS mvDramIfDetect(MV_U32 forcedCl)
+{
+ MV_U32 retVal = MV_OK; /* return value */
+ MV_DRAM_BANK_INFO bankInfo[MV_DRAM_MAX_CS];
+ MV_U32 busClk, size, base = 0, i, temp, deviceW, dimmW;
+ MV_U8 minCas;
+ MV_DRAM_DEC_WIN dramDecWin;
+
+ dramDecWin.addrWin.baseHigh = 0;
+
+ busClk = mvBoardSysClkGet();
+
+ if (0 == busClk)
+ {
+ mvOsPrintf("Dram: ERR. Can't detect system clock! \n");
+ return MV_ERROR;
+ }
+
+ /* Close DRAM banks except bank 0 (in case code is executing from it...) */
+#if defined(MV_INCLUDE_SDRAM_CS1)
+ for(i= SDRAM_CS1; i < MV_DRAM_MAX_CS; i++)
+ mvCpuIfTargetWinEnable(i, MV_FALSE);
+#endif
+
+ /* we will use bank 0 as the representative of all the DRAM banks, */
+ /* since bank 0 must exist. */
+ for(i = 0; i < MV_DRAM_MAX_CS; i++)
+ {
+ /* if Bank exist */
+ if(MV_OK == mvDramBankInfoGet(i, &bankInfo[i]))
+ {
+ /* check it isn't SDRAM */
+ if(bankInfo[i].memoryType == MEM_TYPE_SDRAM)
+ {
+ mvOsPrintf("Dram: ERR. SDRAM type not supported !!!\n");
+ return MV_ERROR;
+ }
+ /* All banks must have the same registered-inputs setting as bank 0 */
+ if(bankInfo[i].registeredAddrAndControlInputs !=
+ bankInfo[0].registeredAddrAndControlInputs)
+ {
+ mvOsPrintf("Dram: ERR. different Registered settings !!!\n");
+ return MV_ERROR;
+ }
+
+ /* Init the CPU window decode */
+ /* Note that the size in Bank info is in MB units */
+ /* Note that the DIMM width might be different from the device DRAM width */
+ temp = MV_REG_READ(SDRAM_CONFIG_REG);
+
+ deviceW = ((temp & SDRAM_DWIDTH_MASK) == SDRAM_DWIDTH_16BIT )? 16 : 32;
+ dimmW = bankInfo[0].dataWidth - (bankInfo[0].dataWidth % 16);
+ size = ((bankInfo[i].size << 20) / (dimmW/deviceW));
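+ /* Illustrative example: a 256 MB DIMM with a 64-bit data path on a */
+ /* 32-bit DRAM interface gives dimmW/deviceW = 64/32 = 2, so the */
+ /* mapped window is (256 << 20) / 2 bytes, i.e. 128 MB. */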
+
+ /* We cannot change DRAM window settings while executing */
+ /* code from it. That is why we skip DRAM CS[0], leaving */
+ /* it to the ROM configuration routine */
+ if(i == SDRAM_CS0)
+ {
+ MV_U32 sizeToReg;
+
+ /* Translate the given window size to register format */
+ sizeToReg = ctrlSizeToReg(size, SCSR_SIZE_ALIGNMENT);
+
+ /* Size parameter validity check. */
+ if (-1 == sizeToReg)
+ {
+ mvOsPrintf("mvCtrlAddrDecToReg: ERR. Win %d size invalid.\n"
+ ,i);
+ return MV_BAD_PARAM;
+ }
+
+ /* Size is located at upper 16 bits */
+ sizeToReg <<= SCSR_SIZE_OFFS;
+
+ /* enable it */
+ sizeToReg |= SCSR_WIN_EN;
+
+ MV_REG_WRITE(DRAM_BUF_REG0, sizeToReg);
+ }
+ else
+ {
+ dramDecWin.addrWin.baseLow = base;
+ dramDecWin.addrWin.size = size;
+ dramDecWin.enable = MV_TRUE;
+
+ if (MV_OK != mvDramIfWinSet(SDRAM_CS0 + i, &dramDecWin))
+ {
+ mvOsPrintf("Dram: ERR. Fail to set bank %d!!!\n",
+ SDRAM_CS0 + i);
+ return MV_ERROR;
+ }
+ }
+
+ base += size;
+
+ /* update the suportedCasLatencies mask */
+ bankInfo[0].suportedCasLatencies &= bankInfo[i].suportedCasLatencies;
+
+ }
+ else
+ {
+ if( i == 0 ) /* bank 0 doesn't exist */
+ {
+ mvOsPrintf("Dram: ERR. Fail to detect bank 0 !!!\n");
+ return MV_ERROR;
+ }
+ else
+ {
+ DB(mvOsPrintf("Dram: Could not find bank %d\n", i));
+ bankInfo[i].size = 0; /* Mark this bank as non-existent */
+ }
+ }
+ }
+
+ /* calculate minimum CAS */
+ minCas = minCasCalc(&bankInfo[0], busClk, forcedCl);
+ if (0 == minCas)
+ {
+ mvOsOutput("Dram: Warn: Could not find CAS compatible to SysClk %dMhz\n",
+ (busClk / 1000000));
+
+ if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+ {
+ minCas = DDR2_CL_4; /* Continue with this CAS */
+ mvOsPrintf("Set default CAS latency 4\n");
+ }
+ else
+ {
+ minCas = DDR1_CL_3; /* Continue with this CAS */
+ mvOsPrintf("Set default CAS latency 3\n");
+ }
+ }
+
+ /* calc SDRAM_CONFIG_REG and save it to temp register */
+ temp = sdramConfigRegCalc(&bankInfo[0], busClk);
+ if(-1 == temp)
+ {
+ mvOsPrintf("Dram: ERR. sdramConfigRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ MV_REG_WRITE(DRAM_BUF_REG1, temp);
+
+ /* calc SDRAM_MODE_REG and save it to temp register */
+ temp = sdramModeRegCalc(minCas);
+ if(-1 == temp)
+ {
+ mvOsPrintf("Dram: ERR. sdramModeRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ MV_REG_WRITE(DRAM_BUF_REG2, temp);
+
+ /* calc SDRAM_EXTENDED_MODE_REG and save it to temp register */
+ temp = sdramExtModeRegCalc(&bankInfo[0]);
+ if(-1 == temp)
+ {
+ mvOsPrintf("Dram: ERR. sdramModeRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ MV_REG_WRITE(DRAM_BUF_REG10, temp);
+
+ /* calc D_UNIT_CONTROL_LOW and save it to temp register */
+ temp = dunitCtrlLowRegCalc(&bankInfo[0], minCas);
+ if(-1 == temp)
+ {
+ mvOsPrintf("Dram: ERR. dunitCtrlLowRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ MV_REG_WRITE(DRAM_BUF_REG3, temp);
+
+ /* calc SDRAM_ADDR_CTRL_REG and save it to temp register */
+ temp = sdramAddrCtrlRegCalc(&bankInfo[0]);
+ if(-1 == temp)
+ {
+ mvOsPrintf("Dram: ERR. sdramAddrCtrlRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ MV_REG_WRITE(DRAM_BUF_REG4, temp);
+
+ /* calc SDRAM_TIMING_CTRL_LOW_REG and save it to temp register */
+ temp = sdramTimeCtrlLowRegCalc(&bankInfo[0], minCas, busClk);
+ if(-1 == temp)
+ {
+ mvOsPrintf("Dram: ERR. sdramTimeCtrlLowRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ MV_REG_WRITE(DRAM_BUF_REG5, temp);
+
+ /* calc SDRAM_TIMING_CTRL_HIGH_REG and save it to temp register */
+ temp = sdramTimeCtrlHighRegCalc(&bankInfo[0], busClk);
+ if(-1 == temp)
+ {
+ mvOsPrintf("Dram: ERR. sdramTimeCtrlHighRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ MV_REG_WRITE(DRAM_BUF_REG6, temp);
+
+ /* Config DDR2 On Die Termination (ODT) registers */
+ if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+ {
+ sdramDDr2OdtConfig(bankInfo);
+ }
+
+ /* Note that DDR SDRAM Address/Control and Data pad calibration */
+ /* settings are done in mvSdramIfConfig.s */
+
+ return retVal;
+}
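+
+/* For reference, the scratch registers prepared above are: DRAM_BUF_REG0 -
+ * SDRAM CS0 size/enable, REG1 - SDRAM_CONFIG_REG, REG2 - SDRAM_MODE_REG,
+ * REG3 - D_UNIT_CONTROL_LOW, REG4 - SDRAM_ADDR_CTRL_REG, REG5/REG6 - SDRAM
+ * timing control low/high, REG7/REG8/REG9 - DDR2 ODT controls and REG10 -
+ * SDRAM_EXTENDED_MODE_REG. They are presumably applied later by the
+ * low-level init code (see the note about mvSdramIfConfig.s above). */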
+
+/*******************************************************************************
+* minCasCalc - Calculate the Minimum CAS latency which can be used.
+*
+* DESCRIPTION:
+* Calculate the minimum CAS latency that can be used, based on the DRAM
+* parameters and the SDRAM bus Clock freq.
+*
+* INPUT:
+* busClk - the DRAM bus Clock.
+* pBankInfo - bank info parameters.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* The minimum CAS Latency. The function returns 0 if max CAS latency
+* supported by the banks is incompatible with the system bus clock frequency.
+*
+*******************************************************************************/
+static MV_U32 minCasCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk,
+ MV_U32 forcedCl)
+{
+ MV_U32 count = 1, j;
+ MV_U32 busClkPs = 1000000000 / (busClk / 1000); /* in ps units */
+ MV_U32 startBit, stopBit;
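+
+ /* Example: for busClk = 200000000 (200 MHz), busClkPs = */
+ /* 1000000000 / 200000 = 5000 ps, i.e. a 5 ns clock period. */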
+
+ /* DDR 1:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ CAS = * TBD | 4 | 3.5 | 3 | 2.5 | 2 | 1.5 | 1 *
+ *********************************************************/
+
+ /* DDR 2:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ CAS = * TBD | TBD | 5 | 4 | 3 | 2 | TBD | TBD *
+ *********************************************************/
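+
+ /* For example, an SPD CAS-latency byte of 0x18 (bits 3 and 4 set) */
+ /* means the DDR2 DIMM supports CL3 and CL4 per the table above. */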
+
+
+ /* If we are asked to use a forced CAS latency */
+ if (forcedCl)
+ {
+ mvOsPrintf("DRAM: Using forced CL %d.%d\n", (forcedCl / 10),
+ (forcedCl % 10));
+
+ if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+ {
+ if (forcedCl == 30)
+ pBankInfo->suportedCasLatencies = 0x08;
+ else if (forcedCl == 40)
+ pBankInfo->suportedCasLatencies = 0x10;
+ else
+ {
+ mvOsPrintf("Forced CL %d.%d not supported. Set default CL 4\n",
+ (forcedCl / 10), (forcedCl % 10));
+ pBankInfo->suportedCasLatencies = 0x10;
+ }
+ }
+ else
+ {
+ if (forcedCl == 15)
+ pBankInfo->suportedCasLatencies = 0x02;
+ else if (forcedCl == 20)
+ pBankInfo->suportedCasLatencies = 0x04;
+ else if (forcedCl == 25)
+ pBankInfo->suportedCasLatencies = 0x08;
+ else if (forcedCl == 30)
+ pBankInfo->suportedCasLatencies = 0x10;
+ else if (forcedCl == 40)
+ pBankInfo->suportedCasLatencies = 0x40;
+ else
+ {
+ mvOsPrintf("Forced CL %d.%d not supported. Set default CL 3\n",
+ (forcedCl / 10), (forcedCl % 10));
+ pBankInfo->suportedCasLatencies = 0x10;
+ }
+ }
+
+ return pBankInfo->suportedCasLatencies;
+ }
+
+ /* Go over the supported CAS mask from max CAS down and check whether */
+ /* the SysClk meets its timing requirements. */
+
+
+ DB(mvOsPrintf("Dram: minCasCalc supported mask = %x busClkPs = %x \n",
+ pBankInfo->suportedCasLatencies,busClkPs ));
+ for(j = 7; j > 0; j--)
+ {
+ if((pBankInfo->suportedCasLatencies >> j) & BIT0 )
+ {
+ /* Reset the bits for CL incompatible for the sysClk */
+ switch (count)
+ {
+ case 1:
+ if (pBankInfo->minCycleTimeAtMaxCasLatPs > busClkPs)
+ pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+ count++;
+ break;
+ case 2:
+ if (pBankInfo->minCycleTimeAtMaxCasLatMinus1Ps > busClkPs)
+ pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+ count++;
+ break;
+ case 3:
+ if (pBankInfo->minCycleTimeAtMaxCasLatMinus2Ps > busClkPs)
+ pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+ count++;
+ break;
+ default:
+ pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+ break;
+ }
+ }
+ }
+
+ DB(mvOsPrintf("Dram: minCasCalc support = %x (after SysCC calc)\n",
+ pBankInfo->suportedCasLatencies ));
+
+ /* SDRAM DDR1 controller supports CL 1.5 to 3.5 */
+ /* SDRAM DDR2 controller supports CL 3 to 5 */
+ if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+ {
+ startBit = 3; /* DDR2 supported CL starts with CL3 (bit 3) */
+ stopBit = 5; /* DDR2 supported CL stops at CL5 (bit 5) */
+ }
+ else
+ {
+ startBit = 1; /* DDR1 supported CL starts with CL1.5 (bit 1) */
+ stopBit = 4; /* DDR1 supported CL stops at CL3 (bit 4) */
+ }
+
+ for(j = startBit; j <= stopBit ; j++)
+ {
+ if((pBankInfo->suportedCasLatencies >> j) & BIT0 )
+ {
+ DB(mvOsPrintf("Dram: minCasCalc choose CAS %x \n",(BIT0 << j)));
+ return (BIT0 << j);
+ }
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+* sdramConfigRegCalc - Calculate sdram config register
+*
+* DESCRIPTION: Calculate sdram config register optimized value based
+* on the bank info parameters.
+*
+* INPUT:
+* pBankInfo - sdram bank parameters
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* sdram config reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramConfigRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk)
+{
+ MV_U32 sdramConfig = 0;
+ MV_U32 refreshPeriod;
+
+ busClk /= 1000000; /* we work with busClk in MHz */
+
+ sdramConfig = MV_REG_READ(SDRAM_CONFIG_REG);
+
+ /* figure out the memory refresh interval */
+ switch (pBankInfo->refreshInterval & 0xf)
+ {
+ case 0x0: /* refresh period is 15.625 usec */
+ refreshPeriod = 15625;
+ break;
+ case 0x1: /* refresh period is 3.9 usec */
+ refreshPeriod = 3900;
+ break;
+ case 0x2: /* refresh period is 7.8 usec */
+ refreshPeriod = 7800;
+ break;
+ case 0x3: /* refresh period is 31.3 usec */
+ refreshPeriod = 31300;
+ break;
+ case 0x4: /* refresh period is 62.5 usec */
+ refreshPeriod = 62500;
+ break;
+ case 0x5: /* refresh period is 125 usec */
+ refreshPeriod = 125000;
+ break;
+ default: /* refresh period undefined */
+ mvOsPrintf("Dram: ERR. DRAM refresh period is unknown!\n");
+ return -1;
+ }
+
+ /* Convert the refresh period (ns) to register format (clock cycles) */
+ refreshPeriod = (busClk * refreshPeriod) / 1000;
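+ /* Example: at busClk = 200 MHz with a 7.8 us refresh period, */
+ /* refreshPeriod = (200 * 7800) / 1000 = 1560 clock cycles. */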
+
+ DB(mvOsPrintf("Dram: sdramConfigRegCalc calculated refresh interval %0x\n",
+ refreshPeriod));
+
+ /* make sure the refresh value is only 14 bits */
+ if(refreshPeriod > SDRAM_REFRESH_MAX)
+ {
+ refreshPeriod = SDRAM_REFRESH_MAX;
+ DB(mvOsPrintf("Dram: sdramConfigRegCalc adjusted refresh interval %0x\n",
+ refreshPeriod));
+ }
+
+ /* Clear the refresh field */
+ sdramConfig &= ~SDRAM_REFRESH_MASK;
+
+ /* Set new value to refresh field */
+ sdramConfig |= (refreshPeriod & SDRAM_REFRESH_MASK);
+
+ /* registered DRAM ? */
+ if ( pBankInfo->registeredAddrAndControlInputs )
+ {
+ /* it's registered DRAM, so set the reg. DRAM bit */
+ sdramConfig |= SDRAM_REGISTERED;
+ mvOsPrintf("DRAM Attribute: Registered address and control inputs.\n");
+ }
+
+ /* set DDR SDRAM devices configuration */
+ sdramConfig &= ~SDRAM_DCFG_MASK; /* Clear Dcfg field */
+
+ switch (pBankInfo->sdramWidth)
+ {
+ case 8: /* memory is x8 */
+ sdramConfig |= SDRAM_DCFG_X8_DEV;
+ DB(mvOsPrintf("Dram: sdramConfigRegCalc SDRAM device width x8\n"));
+ break;
+ case 16:
+ sdramConfig |= SDRAM_DCFG_X16_DEV;
+ DB(mvOsPrintf("Dram: sdramConfigRegCalc SDRAM device width x16\n"));
+ break;
+ default: /* memory width unsupported */
+ mvOsPrintf("Dram: ERR. DRAM chip width is unknown!\n");
+ return -1;
+ }
+
+ /* Set static default settings */
+ sdramConfig |= SDRAM_CONFIG_DV;
+
+ DB(mvOsPrintf("Dram: sdramConfigRegCalc set sdramConfig to 0x%x\n",
+ sdramConfig));
+
+ return sdramConfig;
+}
+
+/*******************************************************************************
+* sdramModeRegCalc - Calculate sdram mode register
+*
+* DESCRIPTION: Calculate sdram mode register optimized value based
+* on the bank info parameters and the minCas.
+*
+* INPUT:
+* minCas - minimum CAS supported.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* sdram mode reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramModeRegCalc(MV_U32 minCas)
+{
+ MV_U32 sdramMode;
+
+ sdramMode = MV_REG_READ(SDRAM_MODE_REG);
+
+ /* Clear CAS Latency field */
+ sdramMode &= ~SDRAM_CL_MASK;
+
+ mvOsPrintf("DRAM CAS Latency ");
+
+ if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+ {
+ switch (minCas)
+ {
+ case DDR2_CL_3:
+ sdramMode |= SDRAM_DDR2_CL_3;
+ mvOsPrintf("3.\n");
+ break;
+ case DDR2_CL_4:
+ sdramMode |= SDRAM_DDR2_CL_4;
+ mvOsPrintf("4.\n");
+ break;
+ case DDR2_CL_5:
+ sdramMode |= SDRAM_DDR2_CL_5;
+ mvOsPrintf("5.\n");
+ break;
+ default:
+ mvOsPrintf("\nsdramModeRegCalc ERROR: Max. CL out of range\n");
+ return -1;
+ }
+ sdramMode |= DDR2_MODE_REG_DV;
+ }
+ else /* DDR1 */
+ {
+ switch (minCas)
+ {
+ case DDR1_CL_1_5:
+ sdramMode |= SDRAM_DDR1_CL_1_5;
+ mvOsPrintf("1.5\n");
+ break;
+ case DDR1_CL_2:
+ sdramMode |= SDRAM_DDR1_CL_2;
+ mvOsPrintf("2\n");
+ break;
+ case DDR1_CL_2_5:
+ sdramMode |= SDRAM_DDR1_CL_2_5;
+ mvOsPrintf("2.5\n");
+ break;
+ case DDR1_CL_3:
+ sdramMode |= SDRAM_DDR1_CL_3;
+ mvOsPrintf("3\n");
+ break;
+ case DDR1_CL_4:
+ sdramMode |= SDRAM_DDR1_CL_4;
+ mvOsPrintf("4\n");
+ break;
+ default:
+ mvOsPrintf("\nsdramModeRegCalc ERROR: Max. CL out of range\n");
+ return -1;
+ }
+ sdramMode |= DDR1_MODE_REG_DV;
+ }
+
+ DB(mvOsPrintf("nsdramModeRegCalc register 0x%x\n", sdramMode ));
+
+ return sdramMode;
+}
+
+/*******************************************************************************
+* sdramExtModeRegCalc - Calculate sdram Extended mode register
+*
+* DESCRIPTION:
+* Return sdram Extended mode register value based
+* on the bank info parameters and bank presence.
+*
+* INPUT:
+* pBankInfo - sdram bank parameters
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* sdram Extended mode reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramExtModeRegCalc(MV_DRAM_BANK_INFO *pBankInfo)
+{
+ MV_U32 populateBanks = 0;
+ int bankNum;
+ if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+ {
+ /* Represent the populated banks in binary form */
+ for(bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+ {
+ if (0 != pBankInfo[bankNum].size)
+ {
+ populateBanks |= (1 << bankNum);
+ }
+ }
+
+ switch(populateBanks)
+ {
+ case(BANK_PRESENT_CS0):
+ return DDR_SDRAM_EXT_MODE_CS0_DV;
+
+ case(BANK_PRESENT_CS0_CS1):
+ return DDR_SDRAM_EXT_MODE_CS0_DV;
+
+ case(BANK_PRESENT_CS0_CS2):
+ return DDR_SDRAM_EXT_MODE_CS0_CS2_DV;
+
+ case(BANK_PRESENT_CS0_CS1_CS2):
+ return DDR_SDRAM_EXT_MODE_CS0_CS2_DV;
+
+ case(BANK_PRESENT_CS0_CS2_CS3):
+ return DDR_SDRAM_EXT_MODE_CS0_CS2_DV;
+
+ case(BANK_PRESENT_CS0_CS2_CS3_CS4):
+ return DDR_SDRAM_EXT_MODE_CS0_CS2_DV;
+
+ default:
+ mvOsPrintf("sdramExtModeRegCalc: Invalid DRAM bank presence\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/*******************************************************************************
+* dunitCtrlLowRegCalc - Calculate sdram dunit control low register
+*
+* DESCRIPTION: Calculate sdram dunit control low register optimized value based
+* on the bank info parameters and the minCas.
+*
+* INPUT:
+* pBankInfo - sdram bank parameters
+* minCas - minimum CAS supported.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* sdram dunit control low reg value.
+*
+*******************************************************************************/
+static MV_U32 dunitCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 minCas)
+{
+ MV_U32 dunitCtrlLow;
+
+ dunitCtrlLow = MV_REG_READ(SDRAM_DUNIT_CTRL_REG);
+
+ /* Clear StBurstDel field */
+ dunitCtrlLow &= ~SDRAM_ST_BURST_DEL_MASK;
+
+#ifdef MV_88W8660
+ /* Clear address/control output timing field */
+ dunitCtrlLow &= ~SDRAM_CTRL_POS_RISE;
+#endif /* MV_88W8660 */
+
+ DB(mvOsPrintf("Dram: dunitCtrlLowRegCalc\n"));
+
+ /* For proper sample of read data set the Dunit Control register's */
+ /* stBurstDel bits [27:24] */
+ /********-********-********-********-********-*********
+ * CL=1.5 | CL=2 | CL=2.5 | CL=3 | CL=4 | CL=5 *
+ *********-********-********-********-********-*********
+Not Reg. * 0011 | 0011 | 0100 | 0100 | 0101 | TBD *
+ *********-********-********-********-********-*********
+Registered * 0100 | 0100 | 0101 | 0101 | 0110 | TBD *
+ *********-********-********-********-********-*********/
+
+ if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+ {
+ switch (minCas)
+ {
+ case DDR2_CL_3:
+ /* registered DDR SDRAM? */
+ if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+ dunitCtrlLow |= 0x5 << SDRAM_ST_BURST_DEL_OFFS;
+ else
+ dunitCtrlLow |= 0x4 << SDRAM_ST_BURST_DEL_OFFS;
+ break;
+ case DDR2_CL_4:
+ /* registered DDR SDRAM? */
+ if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+ dunitCtrlLow |= 0x6 << SDRAM_ST_BURST_DEL_OFFS;
+ else
+ dunitCtrlLow |= 0x5 << SDRAM_ST_BURST_DEL_OFFS;
+ break;
+ default:
+ mvOsPrintf("Dram: dunitCtrlLowRegCalc Max. CL out of range %d\n",
+ minCas);
+ return -1;
+ }
+ }
+ else /* DDR1 */
+ {
+ switch (minCas)
+ {
+ case DDR1_CL_1_5:
+ /* registered DDR SDRAM? */
+ if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+ dunitCtrlLow |= 0x4 << SDRAM_ST_BURST_DEL_OFFS;
+ else
+ dunitCtrlLow |= 0x3 << SDRAM_ST_BURST_DEL_OFFS;
+ break;
+ case DDR1_CL_2:
+ /* registered DDR SDRAM? */
+ if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+ dunitCtrlLow |= 0x4 << SDRAM_ST_BURST_DEL_OFFS;
+ else
+ dunitCtrlLow |= 0x3 << SDRAM_ST_BURST_DEL_OFFS;
+ break;
+ case DDR1_CL_2_5:
+ /* registered DDR SDRAM? */
+ if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+ dunitCtrlLow |= 0x5 << SDRAM_ST_BURST_DEL_OFFS;
+ else
+ dunitCtrlLow |= 0x4 << SDRAM_ST_BURST_DEL_OFFS;
+ break;
+ case DDR1_CL_3:
+ /* registered DDR SDRAM? */
+ if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+ dunitCtrlLow |= 0x5 << SDRAM_ST_BURST_DEL_OFFS;
+ else
+ dunitCtrlLow |= 0x4 << SDRAM_ST_BURST_DEL_OFFS;
+ break;
+ case DDR1_CL_4:
+ /* registered DDR SDRAM? */
+ if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+ dunitCtrlLow |= 0x6 << SDRAM_ST_BURST_DEL_OFFS;
+ else
+ dunitCtrlLow |= 0x5 << SDRAM_ST_BURST_DEL_OFFS;
+ break;
+ default:
+ mvOsPrintf("Dram: dunitCtrlLowRegCalc Max. CL out of range %d\n",
+ minCas);
+ return -1;
+ }
+
+ }
+ DB(mvOsPrintf("Dram: Reg dunit control low = %x\n", dunitCtrlLow ));
+
+ return dunitCtrlLow;
+}
+
+/*******************************************************************************
+* sdramAddrCtrlRegCalc - Calculate sdram address control register
+*
+* DESCRIPTION: Calculate sdram address control register optimized value based
+* on the bank info parameters and the minCas.
+*
+* INPUT:
+* pBankInfo - sdram bank parameters
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* sdram address control reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramAddrCtrlRegCalc(MV_DRAM_BANK_INFO *pBankInfo)
+{
+ MV_U32 addrCtrl = 0;
+
+ /* Set Address Control register static configuration bits */
+ addrCtrl = MV_REG_READ(SDRAM_ADDR_CTRL_REG);
+
+ /* Set address control default value */
+ addrCtrl |= SDRAM_ADDR_CTRL_DV;
+
+ /* Clear DSize field */
+ addrCtrl &= ~SDRAM_DSIZE_MASK;
+
+ /* Note that density is in Mbit units */
+ switch (pBankInfo->deviceDensity)
+ {
+ case 128: /* 128 Mbit */
+ DB(mvOsPrintf("DRAM Device Density 128Mbit\n"));
+ addrCtrl |= SDRAM_DSIZE_128Mb;
+ break;
+ case 256: /* 256 Mbit */
+ DB(mvOsPrintf("DRAM Device Density 256Mbit\n"));
+ addrCtrl |= SDRAM_DSIZE_256Mb;
+ break;
+ case 512: /* 512 Mbit */
+ DB(mvOsPrintf("DRAM Device Density 512Mbit\n"));
+ addrCtrl |= SDRAM_DSIZE_512Mb;
+ break;
+ default:
+ mvOsPrintf("Dram: sdramAddrCtrl unsupported RAM-Device size %d\n",
+ pBankInfo->deviceDensity);
+ return -1;
+ }
+
+ /* SDRAM address control */
+ DB(mvOsPrintf("Dram: setting sdram address control with: %x \n", addrCtrl));
+
+ return addrCtrl;
+}
+
+/*******************************************************************************
+* sdramTimeCtrlLowRegCalc - Calculate sdram timing control low register
+*
+* DESCRIPTION:
+* This function calculates sdram timing control low register
+* optimized value based on the bank info parameters and the minCas.
+*
+* INPUT:
+* pBankInfo - sdram bank parameters
+* busClk - Bus clock
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* sdram timing control low reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramTimeCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo,
+ MV_U32 minCas, MV_U32 busClk)
+{
+ MV_U32 tRp = 0;
+ MV_U32 tRrd = 0;
+ MV_U32 tRcd = 0;
+ MV_U32 tRas = 0;
+ MV_U32 tWr = 0;
+ MV_U32 tWtr = 0;
+ MV_U32 tRtp = 0;
+
+ MV_U32 bankNum;
+
+ busClk = busClk / 1000000; /* In MHz */
+
+ /* Scan all DRAM banks to find maximum timing values */
+ for (bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+ {
+ tRp = MV_MAX(tRp, pBankInfo[bankNum].minRowPrechargeTime);
+ tRrd = MV_MAX(tRrd, pBankInfo[bankNum].minRowActiveToRowActive);
+ tRcd = MV_MAX(tRcd, pBankInfo[bankNum].minRasToCasDelay);
+ tRas = MV_MAX(tRas, pBankInfo[bankNum].minRasPulseWidth);
+ }
+
+ /* Extract timing (in ns) from the SPD value. We ignore the tenth-ns part */
+ /* by shifting the data two bits right. */
+ tRp = tRp >> 2; /* For example 0x50 -> 20ns */
+ tRrd = tRrd >> 2;
+ tRcd = tRcd >> 2;
+
+ /* Extract clock cycles from time parameter. We need to round up */
+ tRp = ((busClk * tRp) / 1000) + (((busClk * tRp) % 1000) ? 1 : 0);
+ /* Micron work around for 133MHz */
+ if (busClk == 133)
+ tRp += 1;
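+ /* Example: an SPD tRp of 0x50 is 20 ns; at 166 MHz this gives */
+ /* (166 * 20) / 1000 = 3 cycles plus a remainder, rounded up to 4. */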
+ DB(mvOsPrintf("Dram Timing Low: tRp = %d ", tRp));
+ tRrd = ((busClk * tRrd) / 1000) + (((busClk * tRrd) % 1000) ? 1 : 0);
+ /* JEDEC minimum requirement: tRrd = 2 */
+ if (tRrd < 2)
+ tRrd = 2;
+ DB(mvOsPrintf("tRrd = %d ", tRrd));
+ tRcd = ((busClk * tRcd) / 1000) + (((busClk * tRcd) % 1000) ? 1 : 0);
+ DB(mvOsPrintf("tRcd = %d ", tRcd));
+ tRas = ((busClk * tRas) / 1000) + (((busClk * tRas) % 1000) ? 1 : 0);
+ DB(mvOsPrintf("tRas = %d ", tRas));
+
+ /* tWr and tWtr is different for DDR1 and DDR2. tRtp is only for DDR2 */
+ if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+ {
+ /* Scan all DRAM banks to find maximum timing values */
+ for (bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+ {
+ tWr = MV_MAX(tWr, pBankInfo[bankNum].minWriteRecoveryTime);
+ tWtr = MV_MAX(tWtr, pBankInfo[bankNum].minWriteToReadCmdDelay);
+ tRtp = MV_MAX(tRtp, pBankInfo[bankNum].minReadToPrechCmdDelay);
+ }
+
+ /* Extract timing (in ns) from SPD value. We ignore the tenth ns */
+ /* part by shifting the data two bits right. */
+ tWr = tWr >> 2; /* For example 0x50 -> 20ns */
+ tWtr = tWtr >> 2;
+ tRtp = tRtp >> 2;
+
+ /* Extract clock cycles from time parameter. We need to round up */
+ tWr = ((busClk * tWr) / 1000) + (((busClk * tWr) % 1000) ? 1 : 0);
+ DB(mvOsPrintf("tWr = %d ", tWr));
+ tWtr = ((busClk * tWtr) / 1000) + (((busClk * tWtr) % 1000) ? 1 : 0);
+ /* JEDEC minimum requirement: tWtr = 2 */
+ if (tWtr < 2)
+ tWtr = 2;
+ DB(mvOsPrintf("tWtr = %d ", tWtr));
+ tRtp = ((busClk * tRtp) / 1000) + (((busClk * tRtp) % 1000) ? 1 : 0);
+ /* JEDEC minimum requirement: tRtp = 2 */
+ if (tRtp < 2)
+ tRtp = 2;
+ DB(mvOsPrintf("tRtp = %d ", tRtp));
+ }
+ else
+ {
+ tWr = ((busClk*SDRAM_TWR) / 1000) + (((busClk*SDRAM_TWR) % 1000)?1:0);
+
+ if ((200 == busClk) || ((100 == busClk) && (DDR1_CL_1_5 == minCas)))
+ {
+ tWtr = 2;
+ }
+ else
+ {
+ tWtr = 1;
+ }
+
+ tRtp = 2; /* Must be set to 0x1 (two cycles) when using DDR1 */
+ }
+
+ DB(mvOsPrintf("tWtr = %d\n", tWtr));
+
+ /* Note: value of 0 in register means one cycle, 1 means two and so on */
+ return (((tRp - 1) << SDRAM_TRP_OFFS) |
+ ((tRrd - 1) << SDRAM_TRRD_OFFS) |
+ ((tRcd - 1) << SDRAM_TRCD_OFFS) |
+ ((tRas - 1) << SDRAM_TRAS_OFFS) |
+ ((tWr - 1) << SDRAM_TWR_OFFS) |
+ ((tWtr - 1) << SDRAM_TWTR_OFFS) |
+ ((tRtp - 1) << SDRAM_TRTP_OFFS));
+}
+
+/*******************************************************************************
+* sdramTimeCtrlHighRegCalc - Calculate sdram timing control high register
+*
+* DESCRIPTION:
+* This function calculates sdram timing control high register
+* optimized value based on the bank info parameters and the bus clock.
+*
+* INPUT:
+* pBankInfo - sdram bank parameters
+* busClk - Bus clock
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* sdram timing control high reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramTimeCtrlHighRegCalc(MV_DRAM_BANK_INFO *pBankInfo,
+ MV_U32 busClk)
+{
+ MV_U32 tRfc;
+ MV_U32 timeNs = 0;
+ int bankNum;
+ MV_U32 sdramTw2wCyc = 0;
+
+ busClk = busClk / 1000000; /* In MHz */
+
+ /* tRfc is different for DDR1 and DDR2. */
+ if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+ {
+ MV_U32 bankNum;
+
+ /* Scan all DRAM banks to find maximum timing values */
+ for (bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+ timeNs = MV_MAX(timeNs, pBankInfo[bankNum].minRefreshToActiveCmd);
+ }
+ else
+ {
+ if (pBankInfo[0].deviceDensity == _1G)
+ {
+ timeNs = SDRAM_TRFC_1G;
+ }
+ else
+ {
+ if (200 == busClk)
+ {
+ timeNs = SDRAM_TRFC_64_512M_AT_200MHZ;
+ }
+ else
+ {
+ timeNs = SDRAM_TRFC_64_512M;
+ }
+ }
+ }
+
+ tRfc = ((busClk * timeNs) / 1000) + (((busClk * timeNs) % 1000) ? 1 : 0);
+
+ DB(mvOsPrintf("Dram Timing High: tRfc = %d\n", tRfc));
+
+
+ /* Count the populated banks */
+ for(bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+ {
+ if (0 != pBankInfo[bankNum].size)
+ sdramTw2wCyc++;
+ }
+
+ /* If we have more than 1 bank we need TW2W set to 1 for the ODT switch */
+ if (sdramTw2wCyc > 1)
+ sdramTw2wCyc = 1;
+ else
+ sdramTw2wCyc = 0;
+
+ /* Note: value of 0 in register means one cycle, 1 means two and so on */
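+ /* Note also that (tRfc - 1) is split across two fields: the low bits */
+ /* (SDRAM_TRFC_MASK, presumably 4 bits wide) go into the TRFC field and */
+ /* the bits from bit 4 up go into the TRFC extension field. */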
+ return ((((tRfc - 1) & SDRAM_TRFC_MASK) << SDRAM_TRFC_OFFS) |
+ ((SDRAM_TR2R_CYC - 1) << SDRAM_TR2R_OFFS) |
+ ((SDRAM_TR2WW2R_CYC - 1) << SDRAM_TR2W_W2R_OFFS) |
+ (((tRfc - 1) >> 4) << SDRAM_TRFC_EXT_OFFS) |
+ (sdramTw2wCyc << SDRAM_TW2W_OFFS));
+
+}
+
+/*******************************************************************************
+* sdramDDr2OdtConfig - Set DRAM DDR2 On Die Termination registers.
+*
+* DESCRIPTION:
+* This function configures the DDR2 On Die Termination (ODT) registers.
+* ODT configuration is done according to DIMM presence:
+*
+* Presence Ctrl Low Ctrl High Dunit Ctrl Ext Mode
+* CS0 0x84210000 0x00000000 0x0000780F 0x00000440
+* CS0+CS1 0x84210000 0x00000000 0x0000780F 0x00000440
+* CS0+CS2 0x030C030C 0x00000000 0x0000740F 0x00000404
+* CS0+CS1+CS2 0x030C030C 0x00000000 0x0000740F 0x00000404
+* CS0+CS2+CS3 0x030C030C 0x00000000 0x0000740F 0x00000404
+* CS0+CS1+CS2+CS3 0x030C030C 0x00000000 0x0000740F 0x00000404
+*
+* INPUT:
+* pBankInfo - bank info parameters.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* None
+*******************************************************************************/
+static void sdramDDr2OdtConfig(MV_DRAM_BANK_INFO *pBankInfo)
+{
+ MV_U32 populateBanks = 0;
+ MV_U32 odtCtrlLow, odtCtrlHigh, dunitOdtCtrl;
+ int bankNum;
+
+ /* Represent the populated banks in binary form */
+ for(bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+ {
+ if (0 != pBankInfo[bankNum].size)
+ {
+ populateBanks |= (1 << bankNum);
+ }
+ }
+
+ switch(populateBanks)
+ {
+ case(BANK_PRESENT_CS0):
+ odtCtrlLow = DDR2_ODT_CTRL_LOW_CS0_DV;
+ odtCtrlHigh = DDR2_ODT_CTRL_HIGH_CS0_DV;
+ dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_DV;
+ break;
+ case(BANK_PRESENT_CS0_CS1):
+ odtCtrlLow = DDR2_ODT_CTRL_LOW_CS0_DV;
+ odtCtrlHigh = DDR2_ODT_CTRL_HIGH_CS0_DV;
+ dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_DV;
+ break;
+ case(BANK_PRESENT_CS0_CS2):
+ odtCtrlLow = DDR2_ODT_CTRL_LOW_CS0_CS2_DV;
+ odtCtrlHigh = DDR2_ODT_CTRL_HIGH_CS0_CS2_DV;
+ dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_CS2_DV;
+ break;
+ case(BANK_PRESENT_CS0_CS1_CS2):
+ odtCtrlLow = DDR2_ODT_CTRL_LOW_CS0_CS2_DV;
+ odtCtrlHigh = DDR2_ODT_CTRL_HIGH_CS0_CS2_DV;
+ dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_CS2_DV;
+ break;
+ case(BANK_PRESENT_CS0_CS2_CS3):
+ odtCtrlLow = DDR2_ODT_CTRL_LOW_CS0_CS2_DV;
+ odtCtrlHigh = DDR2_ODT_CTRL_HIGH_CS0_CS2_DV;
+ dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_CS2_DV;
+ break;
+ case(BANK_PRESENT_CS0_CS2_CS3_CS4):
+ odtCtrlLow = DDR2_ODT_CTRL_LOW_CS0_CS2_DV;
+ odtCtrlHigh = DDR2_ODT_CTRL_HIGH_CS0_CS2_DV;
+ dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_CS2_DV;
+ break;
+ default:
+ mvOsPrintf("sdramDDr2OdtConfig: Invalid DRAM bank presence\n");
+ return;
+ }
+ MV_REG_WRITE(DRAM_BUF_REG7, odtCtrlLow);
+ MV_REG_WRITE(DRAM_BUF_REG8, odtCtrlHigh);
+ MV_REG_WRITE(DRAM_BUF_REG9, dunitOdtCtrl);
+ return;
+}
+#endif /* defined(MV_INC_BOARD_DDIM) */
+
+/*******************************************************************************
+* mvDramIfWinSet - Set DRAM interface address decode window
+*
+* DESCRIPTION:
+* This function sets DRAM interface address decode window.
+*
+* INPUT:
+* target - System target. Use only SDRAM targets.
+* pAddrDecWin - SDRAM address window structure.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* MV_BAD_PARAM if parameters are invalid or window is invalid, MV_OK
+* otherwise.
+*******************************************************************************/
+MV_STATUS mvDramIfWinSet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin)
+{
+ MV_U32 baseReg=0,sizeReg=0;
+ MV_U32 baseToReg=0 , sizeToReg=0;
+
+ /* Check parameters */
+ if (!MV_TARGET_IS_DRAM(target))
+ {
+ mvOsPrintf("mvDramIfWinSet: target %d is not SDRAM\n", target);
+ return MV_BAD_PARAM;
+ }
+
+ /* Check if the requested window overlaps with current enabled windows */
+ if (MV_TRUE == sdramIfWinOverlap(target, &pAddrDecWin->addrWin))
+ {
+ mvOsPrintf("mvDramIfWinSet: ERR. Target %d overlaps\n", target);
+ return MV_BAD_PARAM;
+ }
+
+ /* check if address is aligned to the size */
+ if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+ {
+ mvOsPrintf("mvDramIfWinSet:Error setting DRAM interface window %d."\
+ "\nAddress 0x%08x is unaligned to size 0x%x.\n",
+ target,
+ pAddrDecWin->addrWin.baseLow,
+ pAddrDecWin->addrWin.size);
+ return MV_ERROR;
+ }
+
+ /* read base register*/
+ baseReg = MV_REG_READ(SDRAM_BASE_ADDR_REG(target));
+
+ /* read size register */
+ sizeReg = MV_REG_READ(SDRAM_SIZE_REG(target));
+
+ /* BaseLow[31:16] => base register [31:16] */
+ baseToReg = pAddrDecWin->addrWin.baseLow & SCBAR_BASE_MASK;
+
+ /* Write to address decode Base Address Register */
+ baseReg &= ~SCBAR_BASE_MASK;
+ baseReg |= baseToReg;
+
+ /* Translate the given window size to register format */
+ sizeToReg = ctrlSizeToReg(pAddrDecWin->addrWin.size, SCSR_SIZE_ALIGNMENT);
+
+ /* Size parameter validity check. */
+ if (-1 == sizeToReg)
+ {
+ mvOsPrintf("mvCtrlAddrDecToReg: ERR. Win %d size invalid.\n",target);
+ return MV_BAD_PARAM;
+ }
+
+ /* set size */
+ sizeReg &= ~SCSR_SIZE_MASK;
+ /* Size is located at upper 16 bits */
+ sizeReg |= (sizeToReg << SCSR_SIZE_OFFS);
+
+ /* enable/Disable */
+ if (MV_TRUE == pAddrDecWin->enable)
+ {
+ sizeReg |= SCSR_WIN_EN;
+ }
+ else
+ {
+ sizeReg &= ~SCSR_WIN_EN;
+ }
+
+ /* Write to address decode Base Address Register */
+ MV_REG_WRITE(SDRAM_BASE_ADDR_REG(target), baseReg);
+
+ /* Write to address decode Size Register */
+ MV_REG_WRITE(SDRAM_SIZE_REG(target), sizeReg);
+
+ return MV_OK;
+}
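+
+/* Illustrative usage sketch: to map a 256 MB window on a chip select one
+ * would fill an MV_DRAM_DEC_WIN and call this routine, e.g.
+ *
+ * MV_DRAM_DEC_WIN win;
+ * win.addrWin.baseLow = 0x10000000; (base must be aligned to the size)
+ * win.addrWin.baseHigh = 0;
+ * win.addrWin.size = 0x10000000; (256 MB)
+ * win.enable = MV_TRUE;
+ * if (mvDramIfWinSet(SDRAM_CS1, &win) != MV_OK)
+ * mvOsPrintf("window set failed\n");
+ */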
+/*******************************************************************************
+* mvDramIfWinGet - Get DRAM interface address decode window
+*
+* DESCRIPTION:
+* This function gets DRAM interface address decode window.
+*
+* INPUT:
+* target - System target. Use only SDRAM targets.
+*
+* OUTPUT:
+* pAddrDecWin - SDRAM address window structure.
+*
+* RETURN:
+* MV_BAD_PARAM if parameters are invalid or window is invalid, MV_OK
+* otherwise.
+*******************************************************************************/
+MV_STATUS mvDramIfWinGet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin)
+{
+ MV_U32 baseReg,sizeReg;
+ MV_U32 sizeRegVal;
+
+ /* Check parameters */
+ if (!MV_TARGET_IS_DRAM(target))
+ {
+ mvOsPrintf("mvDramIfWinGet: target %d is Illigal\n", target);
+ return MV_ERROR;
+ }
+
+ /* Read base and size registers */
+ sizeReg = MV_REG_READ(SDRAM_SIZE_REG(target));
+ baseReg = MV_REG_READ(SDRAM_BASE_ADDR_REG(target));
+
+ sizeRegVal = (sizeReg & SCSR_SIZE_MASK) >> SCSR_SIZE_OFFS;
+
+ pAddrDecWin->addrWin.size = ctrlRegToSize(sizeRegVal,
+ SCSR_SIZE_ALIGNMENT);
+
+ /* Check if ctrlRegToSize returned OK */
+ if (-1 == pAddrDecWin->addrWin.size)
+ {
+ mvOsPrintf("mvDramIfWinGet: size of target %d is Illigal\n", target);
+ return MV_ERROR;
+ }
+
+ /* Extract base address */
+ /* Base register [31:16] ==> baseLow[31:16] */
+ pAddrDecWin->addrWin.baseLow = baseReg & SCBAR_BASE_MASK;
+
+ pAddrDecWin->addrWin.baseHigh = 0;
+
+
+ if (sizeReg & SCSR_WIN_EN)
+ {
+ pAddrDecWin->enable = MV_TRUE;
+ }
+ else
+ {
+ pAddrDecWin->enable = MV_FALSE;
+ }
+
+ return MV_OK;
+}
+/*******************************************************************************
+* mvDramIfWinEnable - Enable/Disable SDRAM address decode window
+*
+* DESCRIPTION:
+* This function enables or disables an SDRAM address decode window.
+*
+* INPUT:
+* target - System target. Use only SDRAM targets.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_ERROR in case function parameters are invalid, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvDramIfWinEnable(MV_TARGET target,MV_BOOL enable)
+{
+ MV_DRAM_DEC_WIN addrDecWin;
+
+ /* Check parameters */
+ if (!MV_TARGET_IS_DRAM(target))
+ {
+ mvOsPrintf("mvDramIfWinEnable: target %d is Illigal\n", target);
+ return MV_ERROR;
+ }
+
+ if (enable == MV_TRUE)
+ { /* First check for overlap with other enabled windows */
+ if (MV_OK != mvDramIfWinGet(target, &addrDecWin))
+ {
+ mvOsPrintf("mvDramIfWinEnable:ERR. Getting target %d failed.\n",
+ target);
+ return MV_ERROR;
+ }
+ /* Check for overlapping */
+ if (MV_FALSE == sdramIfWinOverlap(target, &(addrDecWin.addrWin)))
+ {
+ /* No overlap. Enable the address decode window */
+ MV_REG_BIT_SET(SDRAM_SIZE_REG(target), SCSR_WIN_EN);
+ }
+ else
+ { /* Overlap detected */
+ mvOsPrintf("mvDramIfWinEnable: ERR. Target %d overlap detect\n",
+ target);
+ return MV_ERROR;
+ }
+ }
+ else
+ { /* Disable the address decode window */
+ MV_REG_BIT_RESET(SDRAM_SIZE_REG(target), SCSR_WIN_EN);
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* sdramIfWinOverlap - Check if an address window overlaps an SDRAM address window
+*
+* DESCRIPTION:
+* This function scans each SDRAM address decode window to test whether it
+* overlaps the given address window.
+*
+* INPUT:
+* target - SDRAM target that the function skips when checking.
+* pAddrDecWin - The tested address window for overlapping with
+* SDRAM windows.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE if the given address window overlaps any enabled address
+* decode map, MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_BOOL sdramIfWinOverlap(MV_TARGET target, MV_ADDR_WIN *pAddrWin)
+{
+ MV_TARGET targetNum;
+ MV_DRAM_DEC_WIN addrDecWin;
+
+ for(targetNum = SDRAM_CS0; targetNum < MV_DRAM_MAX_CS ; targetNum++)
+ {
+ /* don't check our own window or illegal targets */
+ if (targetNum == target)
+ {
+ continue;
+ }
+
+ /* Get window parameters */
+ if (MV_OK != mvDramIfWinGet(targetNum, &addrDecWin))
+ {
+ mvOsPrintf("sdramIfWinOverlap: ERR. TargetWinGet failed\n");
+ return MV_ERROR;
+ }
+
+ /* Do not check disabled windows */
+ if (MV_FALSE == addrDecWin.enable)
+ {
+ continue;
+ }
+
+ if(MV_TRUE == ctrlWinOverlapTest(pAddrWin, &addrDecWin.addrWin))
+ {
+ mvOsPrintf(
+ "sdramIfWinOverlap: Required target %d overlap winNum %d\n",
+ target, targetNum);
+ return MV_TRUE;
+ }
+ }
+
+ return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvDramIfBankSizeGet - Get DRAM interface bank size.
+*
+* DESCRIPTION:
+* This function returns the size of a given DRAM bank.
+*
+* INPUT:
+* bankNum - Bank number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* DRAM bank size. If the bank is disabled the function returns '0'. In case
+* a parameter is invalid, the function returns -1.
+*
+*******************************************************************************/
+MV_32 mvDramIfBankSizeGet(MV_U32 bankNum)
+{
+ MV_DRAM_DEC_WIN addrDecWin;
+
+ /* Check parameters */
+ if (!MV_TARGET_IS_DRAM(bankNum))
+ {
+ mvOsPrintf("mvDramIfBankBaseGet: bankNum %d is invalid\n", bankNum);
+ return -1;
+ }
+ /* Get window parameters */
+ if (MV_OK != mvDramIfWinGet(bankNum, &addrDecWin))
+ {
+ mvOsPrintf("sdramIfWinOverlap: ERR. TargetWinGet failed\n");
+ return -1;
+ }
+
+ if (MV_TRUE == addrDecWin.enable)
+ {
+ return addrDecWin.addrWin.size;
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+
+/*******************************************************************************
+* mvDramIfSizeGet - Get DRAM interface total size.
+*
+* DESCRIPTION:
+* This function gets the total DRAM size.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Total DRAM size. In case a parameter is invalid, the function
+* returns -1.
+*
+*******************************************************************************/
+MV_32 mvDramIfSizeGet(MV_VOID)
+{
+ MV_U32 totalSize = 0, bankSize = 0, bankNum;
+
+ for(bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+ {
+ bankSize = mvDramIfBankSizeGet(bankNum);
+
+ if (-1 == bankSize)
+ {
+ mvOsPrintf("Dram: mvDramIfSizeGet error with bank %d \n",bankNum);
+ return -1;
+ }
+ else
+ {
+ totalSize += bankSize;
+ }
+ }
+
+ DB(mvOsPrintf("Dram: Total DRAM size is 0x%x \n",totalSize));
+
+ return totalSize;
+}
+
+/*******************************************************************************
+* mvDramIfBankBaseGet - Get DRAM interface bank base.
+*
+* DESCRIPTION:
+* This function returns the 32 bit base address of a given DRAM bank.
+*
+* INPUT:
+* bankNum - Bank number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* DRAM bank base address. If the bank is disabled or a parameter is
+* invalid, the function returns -1.
+*
+*******************************************************************************/
+MV_32 mvDramIfBankBaseGet(MV_U32 bankNum)
+{
+ MV_DRAM_DEC_WIN addrDecWin;
+
+ /* Check parameters */
+ if (!MV_TARGET_IS_DRAM(bankNum))
+ {
+ mvOsPrintf("mvDramIfBankBaseGet: bankNum %d is invalid\n", bankNum);
+ return -1;
+ }
+ /* Get window parameters */
+ if (MV_OK != mvDramIfWinGet(bankNum, &addrDecWin))
+ {
+ mvOsPrintf("sdramIfWinOverlap: ERR. TargetWinGet failed\n");
+ return -1;
+ }
+
+ if (MV_TRUE == addrDecWin.enable)
+ {
+ return addrDecWin.addrWin.baseLow;
+ }
+ else
+ {
+ return -1;
+ }
+}
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.h
new file mode 100644
index 000000000..8bfa3e883
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.h
@@ -0,0 +1,179 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvDramIfh
+#define __INCmvDramIfh
+
+/* includes */
+#include "ddr1_2/mvDramIfRegs.h"
+#include "ddr1_2/mvDramIfConfig.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+/* defines */
+/* DRAM Timing parameters */
+#define SDRAM_TWR 15 /* ns tWr */
+#define SDRAM_TRFC_64_512M_AT_200MHZ 70 /* ns tRfc for dens 64-512 @ 200MHz */
+#define SDRAM_TRFC_64_512M 75 /* ns tRfc for dens 64-512 */
+#define SDRAM_TRFC_1G 120 /* ns tRfc for dens 1GB */
+#define SDRAM_TR2R_CYC 1 /* cycle for tR2r */
+#define SDRAM_TR2WW2R_CYC 1 /* cycle for tR2wW2r */
+
+/* typedefs */
+
+/* enumeration for memory types */
+typedef enum _mvMemoryType
+{
+ MEM_TYPE_SDRAM,
+ MEM_TYPE_DDR1,
+ MEM_TYPE_DDR2
+}MV_MEMORY_TYPE;
+
+/* enumeration for DDR1 supported CAS Latencies */
+typedef enum _mvDimmDdr1Cas
+{
+ DDR1_CL_1_5 = 0x02,
+ DDR1_CL_2 = 0x04,
+ DDR1_CL_2_5 = 0x08,
+ DDR1_CL_3 = 0x10,
+ DDR1_CL_4 = 0x40,
+ DDR1_CL_FAULT
+} MV_DIMM_DDR1_CAS;
+
+/* enumeration for DDR2 supported CAS Latencies */
+typedef enum _mvDimmDdr2Cas
+{
+ DDR2_CL_3 = 0x08,
+ DDR2_CL_4 = 0x10,
+ DDR2_CL_5 = 0x20,
+ DDR2_CL_FAULT
+} MV_DIMM_DDR2_CAS;
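+
+/* The CAS latency values above are one-hot masks matching the SPD "supported
+ * CAS latencies" byte (e.g. DDR2_CL_4 = 0x10 = bit 4), so they can be tested
+ * directly against the suportedCasLatencies field of MV_DRAM_BANK_INFO. */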
+
+
+typedef struct _mvDramBankInfo
+{
+ MV_MEMORY_TYPE memoryType; /* DDR1, DDR2 or SDRAM */
+
+ /* DIMM dimensions */
+ MV_U32 numOfRowAddr;
+ MV_U32 numOfColAddr;
+ MV_U32 dataWidth;
+ MV_U32 errorCheckType; /* ECC , PARITY..*/
+ MV_U32 sdramWidth; /* 4,8,16 or 32 */
+ MV_U32 errorCheckDataWidth; /* 0 - no, 1 - Yes */
+ MV_U32 burstLengthSupported;
+ MV_U32 numOfBanksOnEachDevice;
+ MV_U32 suportedCasLatencies;
+ MV_U32 refreshInterval;
+
+ /* DIMM timing parameters */
+ MV_U32 minCycleTimeAtMaxCasLatPs;
+ MV_U32 minCycleTimeAtMaxCasLatMinus1Ps;
+ MV_U32 minCycleTimeAtMaxCasLatMinus2Ps;
+ MV_U32 minRowPrechargeTime;
+ MV_U32 minRowActiveToRowActive;
+ MV_U32 minRasToCasDelay;
+ MV_U32 minRasPulseWidth;
+ MV_U32 minWriteRecoveryTime; /* DDR2 only */
+ MV_U32 minWriteToReadCmdDelay; /* DDR2 only */
+ MV_U32 minReadToPrechCmdDelay; /* DDR2 only */
+ MV_U32 minRefreshToActiveCmd; /* DDR2 only */
+
+ /* Parameters calculated from the extracted DIMM information */
+ MV_U32 size;
+ MV_U32 deviceDensity; /* 16,64,128,256 or 512 Mbit */
+ MV_U32 numberOfDevices;
+
+ /* DIMM attributes (MV_TRUE for yes) */
+ MV_BOOL registeredAddrAndControlInputs;
+
+}MV_DRAM_BANK_INFO;
+
+/* This structure describes a DRAM interface address decode window */
+typedef struct _mvDramIfDecWin
+{
+ MV_ADDR_WIN addrWin; /* An address window*/
+ MV_BOOL enable; /* Address decode window is enabled/disabled */
+}MV_DRAM_DEC_WIN;
+
+#include "ddr1_2/mvDram.h"
+
+/* mvDramIf.h API list */
+MV_VOID mvDramIfBasicAsmInit(MV_VOID);
+MV_STATUS mvDramIfDetect(MV_U32 forcedCl);
+MV_VOID _mvDramIfConfig(MV_VOID);
+
+MV_STATUS mvDramIfWinSet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin);
+MV_STATUS mvDramIfWinGet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin);
+MV_STATUS mvDramIfWinEnable(MV_TARGET target,MV_BOOL enable);
+MV_32 mvDramIfBankSizeGet(MV_U32 bankNum);
+MV_32 mvDramIfBankBaseGet(MV_U32 bankNum);
+MV_32 mvDramIfSizeGet(MV_VOID);
+
+#if 0
+MV_STATUS mvDramIfMbusCtrlSet(MV_XBAR_TARGET *pPizzaArbArray);
+MV_STATUS mvDramIfMbusToutSet(MV_U32 timeout, MV_BOOL enable);
+#endif
+
+#endif /* __INCmvDramIfh */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfBasicInit.S b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfBasicInit.S
new file mode 100644
index 000000000..f2a9365c0
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfBasicInit.S
@@ -0,0 +1,988 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#define MV_ASMLANGUAGE
+#include "mvSysHwConfig.h"
+#include "mvOsAsm.h"
+#include "mvBoardEnvSpec.h"
+#include "mvCpuIfRegs.h"
+#include "mvDramIfConfig.h"
+#include "mvDramIfRegs.h"
+#include "pex/mvPexRegs.h"
+#include "pci/mvPciRegs.h"
+#include "mvCtrlEnvSpec.h"
+#include "mvCtrlEnvAsm.h"
+#include "cpu/mvCpuArm.h"
+#include "mvCommon.h"
+
+/* defines */
+
+#if !defined(MV_INC_BOARD_DDIM)
+.globl dramBoot1
+dramBoot1:
+ .word 0
+
+/******************************************************************************
+* Static SDRAM controller settings.
+*
+* When the board has no DIMM SPD support (MV_INC_BOARD_DDIM is not defined),
+* the DRAM registers are loaded from the board-specific constants below
+* instead of values detected at run time.
+*******************************************************************************/
+#if defined(DB_PRPMC) || defined(DB_PEX_PCI) || defined(DB_MNG)
+
+/* PEX_PCI and PRPMC boards 256 MB*/
+#define STATIC_SDRAM0_BANK0_SIZE 0x0fff0001
+#define STATIC_SDRAM_CONFIG 0x03248400
+#define STATIC_SDRAM_MODE 0x62
+#define STATIC_DUNIT_CTRL_LOW 0x4041000
+#define STATIC_SDRAM_ADDR_CTRL 0x00000020
+#define STATIC_SDRAM_TIME_CTRL_LOW 0x11602220
+#define STATIC_SDRAM_TIME_CTRL_HI 0x0000030F
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x0
+#define STATIC_SDRAM_ODT_CTRL_HI 0x0
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x0
+#define STATIC_SDRAM_EXT_MODE 0x0
+
+#elif defined(DB_FPGA)
+
+/* FPGA DC boards 256 MB*/
+#define STATIC_SDRAM0_BANK0_SIZE 0x0fff0001
+#define STATIC_SDRAM_CONFIG 0x03208400 /* 32bit */
+#define STATIC_SDRAM_MODE 0x22
+#define STATIC_DUNIT_CTRL_LOW 0x03041000
+#define STATIC_SDRAM_ADDR_CTRL 0x00000020
+#define STATIC_SDRAM_TIME_CTRL_LOW 0x11112220
+#define STATIC_SDRAM_TIME_CTRL_HI 0x0000000D
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x0
+#define STATIC_SDRAM_ODT_CTRL_HI 0x0
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x0
+#define STATIC_SDRAM_EXT_MODE 0x1
+
+#elif defined(RD_88F6183GP) || defined(DB_CUSTOMER)
+
+/* Customer 1: DDR2, two 512Mbit x16 devices */
+#define STATIC_SDRAM0_BANK0_SIZE 0x07ff0001
+#define STATIC_SDRAM_CONFIG 0x03158400
+#define STATIC_SDRAM_MODE 0x452
+#define STATIC_DUNIT_CTRL_LOW 0x06041000
+#define STATIC_SDRAM_ADDR_CTRL 0x00000020
+#define STATIC_SDRAM_TIME_CTRL_LOW 0x11912220
+#define STATIC_SDRAM_TIME_CTRL_HI 0x00000502
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x00010000
+#define STATIC_SDRAM_ODT_CTRL_HI 0x00000002
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x00000601
+#define STATIC_SDRAM_EXT_MODE 0x00000440
+
+
+#elif defined(RD_88F6183AP)
+
+/* DDR2, one 512Mbit x16 device */
+#define STATIC_SDRAM0_BANK0_SIZE 0x03ff0001
+#define STATIC_SDRAM_CONFIG 0x1f154400
+#define STATIC_SDRAM_MODE 0x432
+#define STATIC_DUNIT_CTRL_LOW 0x04041000
+#define STATIC_SDRAM_ADDR_CTRL 0x00000020
+#define STATIC_SDRAM_TIME_CTRL_LOW 0x11912220
+#define STATIC_SDRAM_TIME_CTRL_HI 0x00000502
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x00010000
+#define STATIC_SDRAM_ODT_CTRL_HI 0x00000002
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x00000601
+#define STATIC_SDRAM_EXT_MODE 0x00000440
+
+/* 6082L MARVELL DIMM */
+#elif defined(DB_88F6082LBP)
+#define STATIC_SDRAM0_BANK0_SIZE 0x07ff0001
+#define STATIC_SDRAM_CONFIG 0x7f158400
+#define STATIC_SDRAM_MODE 0x432
+#define STATIC_DUNIT_CTRL_LOW 0x04041040
+#define STATIC_SDRAM_ADDR_CTRL 0x00000020
+#define STATIC_SDRAM_TIME_CTRL_LOW 0x11612220
+#define STATIC_SDRAM_TIME_CTRL_HI 0x00000501
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x00010000
+#define STATIC_SDRAM_ODT_CTRL_HI 0x00000002
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x00000a01
+#define STATIC_SDRAM_EXT_MODE 0x00000440
+
+#elif defined(RD_88W8660_AP82S)
+
+/* Shark RD */
+
+#if defined(MV_DRAM_32M)
+#define STATIC_SDRAM0_BANK0_SIZE 0x01ff0001
+#define STATIC_SDRAM_ADDR_CTRL 0x00000010
+#elif defined(MV_DRAM_16M)
+
+#define STATIC_SDRAM0_BANK0_SIZE 0x00ff0001
+#define STATIC_SDRAM_ADDR_CTRL 0x00000000
+
+#else
+#error "NO DDR size selected"
+#endif
+
+#define STATIC_SDRAM_CONFIG 0x03144400
+#define STATIC_SDRAM_MODE 0x62
+#define STATIC_DUNIT_CTRL_LOW 0x4041000
+
+#define STATIC_SDRAM_TIME_CTRL_LOW 0x11602220
+#define STATIC_SDRAM_TIME_CTRL_HI 0x0000040b
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x0
+#define STATIC_SDRAM_ODT_CTRL_HI 0x0
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x0
+#define STATIC_SDRAM_EXT_MODE 0x0
+
+#elif defined(RD_88W8660)
+
+/* Shark RD */
+#define STATIC_SDRAM0_BANK0_SIZE 0x03ff0001
+#define STATIC_SDRAM_CONFIG 0x03144400
+#define STATIC_SDRAM_MODE 0x62
+#define STATIC_DUNIT_CTRL_LOW 0x4041000
+#define STATIC_SDRAM_ADDR_CTRL 0x00000010
+#define STATIC_SDRAM_TIME_CTRL_LOW 0x11602220
+#define STATIC_SDRAM_TIME_CTRL_HI 0x0000040b
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x0
+#define STATIC_SDRAM_ODT_CTRL_HI 0x0
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x0
+#define STATIC_SDRAM_EXT_MODE 0x0
+
+#else /* NAS */
+
+
+#if defined(RD_88F5182)
+
+#if defined(MV_88F5082)
+#define STATIC_SDRAM0_BANK0_SIZE 0x3ff0001
+#define STATIC_SDRAM_ADDR_CTRL 0x20
+#else
+#define STATIC_SDRAM0_BANK0_SIZE 0x7ff0001
+#define STATIC_SDRAM_ADDR_CTRL 0x20
+#endif
+
+#elif defined(RD_88F5182_3)
+
+#if defined(MV_88F5082)
+#define STATIC_SDRAM0_BANK0_SIZE 0x3ff0001
+#define STATIC_SDRAM_ADDR_CTRL 0x20
+#else
+#define STATIC_SDRAM0_BANK0_SIZE 0x7ff0001
+#define STATIC_SDRAM_ADDR_CTRL 0x20
+#endif
+
+#else
+
+#define STATIC_SDRAM0_BANK0_SIZE 0x1ff0001
+#define STATIC_SDRAM_ADDR_CTRL 0x0
+
+#endif
+
+#if defined(MV_88F5082)
+#define STATIC_SDRAM_CONFIG 0x3144400
+#else
+#define STATIC_SDRAM_CONFIG 0x3148400
+#endif
+#define STATIC_SDRAM_MODE 0x62
+#define STATIC_DUNIT_CTRL_LOW 0x4041000
+#define STATIC_SDRAM_TIME_CTRL_LOW 0x11602220
+#define STATIC_SDRAM_TIME_CTRL_HI 0x40c
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x0
+#define STATIC_SDRAM_ODT_CTRL_HI 0x0
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x0
+#define STATIC_SDRAM_EXT_MODE 0x0
+
+#endif
+
+ .globl _mvDramIfStaticInit
+_mvDramIfStaticInit:
+
+ mov r11, LR /* Save link register */
+ mov r10, r2
+
+	/* If we boot from NAND jump to DRAM address */
+
+ mov r5, #1
+ ldr r6, =dramBoot1
+ str r5, [r6] /* We started executing from DRAM */
+
+ ldr r6, dramBoot1
+ cmp r6, #0
+ bne 1f
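+	/* The store above only takes effect if this code is already running
+	   from RAM; a non-zero read-back therefore means DRAM is initialized
+	   and the static init below can be skipped */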
+
+
+ /* set all dram windows to 0 */
+ mov r6, #0
+ MV_REG_WRITE_ASM(r6, r5, 0x1504)
+ MV_REG_WRITE_ASM(r6, r5, 0x150c)
+ MV_REG_WRITE_ASM(r6, r5, 0x1514)
+ MV_REG_WRITE_ASM(r6, r5, 0x151c)
+
+ /* set all dram configuration in temp registers */
+ ldr r6, = STATIC_SDRAM0_BANK0_SIZE
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG0)
+ ldr r6, = STATIC_SDRAM_CONFIG
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG1)
+ ldr r6, = STATIC_SDRAM_MODE
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG2)
+ ldr r6, = STATIC_DUNIT_CTRL_LOW
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG3)
+ ldr r6, = STATIC_SDRAM_ADDR_CTRL
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG4)
+ ldr r6, = STATIC_SDRAM_TIME_CTRL_LOW
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG5)
+ ldr r6, = STATIC_SDRAM_TIME_CTRL_HI
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG6)
+ ldr r6, = STATIC_SDRAM_ODT_CTRL_LOW
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG7)
+ ldr r6, = STATIC_SDRAM_ODT_CTRL_HI
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG8)
+ ldr r6, = STATIC_SDRAM_DUNIT_ODT_CTRL
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG9)
+ ldr r6, = STATIC_SDRAM_EXT_MODE
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG10)
+
+ mov sp, #0
+ bl _mvDramIfConfig
+1:
+ mov r2, r10
+ mov PC, r11 /* r11 is saved link register */
+
+#else /* #if !defined(MV_INC_BOARD_DDIM) */
+
+.globl dramBoot1
+dramBoot1:
+ .word 0
+
+/*******************************************************************************
+* mvDramIfBasicInit - Basic initialization of DRAM interface
+*
+* DESCRIPTION:
+* The function will initialize the DRAM for basic usage. The function
+* will use the TWSI assembly API to extract DIMM parameters, according
+* to which the DRAM interface will be initialized.
+* The function refers to the following DRAM parameters:
+* 1) DIMM is registered or not.
+* 2) DIMM width detection.
+* 3) DIMM density.
+*
+* INPUT:
+* r3 - required size for initial DRAM.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+* Note:
+* r4 holds I2C EEPROM address
+* r5 holds SDRAM register base address
+* r7 holds returned values
+* r8 holds SDRAM various configuration registers value.
+* r11 holds return function address.
+*******************************************************************************/
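+/* Rough flow of the routine below: read the SPD over TWSI, set device
+   width and the registered bit in the SDRAM config register, program the
+   mode register through the NOP/MRS polling sequence, derive Dunit control
+   from the maximum CAS latency, set address control from the DIMM density,
+   apply DDR2 extended mode and ODT settings, run the pad calibration and
+   FTDLL guidelines, program the timing registers and open the CS[0] window */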
+/* SPD (DIMM EEPROM) byte offsets, read over the I2C/TWSI bus */
+#define NUM_OF_ROWS_OFFSET 3
+#define NUM_OF_COLS_OFFSET 4
+#define NUM_OF_RANKS 5
+#define SDRAM_WIDTH_OFFSET 13
+#define NUM_OF_BANKS_OFFSET 17
+#define SUPPORTED_CL_OFFSET 18
+#define DIMM_TYPE_INFO_OFFSET 20 /* DDR2 only */
+#define SDRAM_MODULES_ATTR_OFFSET 21
+
+#define DRAM_DEV_DENSITY_128M 0x080
+#define DRAM_DEV_DENSITY_256M 0x100
+#define DRAM_DEV_DENSITY_512M 0x200
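+/* Device density expressed in Mbit, i.e. (density in bits) >> 20, which is
+   what the computed density is compared against further below */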
+ .globl _mvDramIfBasicInit
+ .extern _i2cInit
+
+_mvDramIfBasicInit:
+
+ mov r11, LR /* Save link register */
+
+ mov r5, #1
+ ldr r8, =dramBoot1
+ str r5, [r8] /* We started executing from DRAM */
+
+	/* If we boot from NAND jump to DRAM address */
+ ldr r8, dramBoot1
+ cmp r8, #0
+ movne pc, r11
+
+
+
+ bl _i2cInit /* Initialize TWSI master */
+
+ /* Get default SDRAM Config values */
+ MV_REG_READ_ASM (r8, r5, SDRAM_CONFIG_REG)
+ bic r8, r8, #SDRAM_DCFG_MASK
+
+
+ /* Read device ID */
+ MV_CTRL_MODEL_GET_ASM(r4, r5);
+
+ /* Return if OrionN */
+ ldr r5, =MV_5180_DEV_ID
+ cmp r4, r5
+ beq cat_through_end
+
+ /* Return if Orion1 */
+ ldr r5, =MV_5181_DEV_ID
+ cmp r4, r5
+ beq cat_through_end
+
+ /* Return if Nas */
+ ldr r5, =MV_5182_DEV_ID
+ cmp r4, r5
+ beq cat_through_end
+
+ /* Return if Shark */
+ ldr r5, =MV_8660_DEV_ID
+ cmp r4, r5
+ beq cat_through_end
+
+ /* goto calcConfigReg if bigger than Orion2*/
+ ldr r5, =MV_5281_DEV_ID
+ cmp r4, r5
+ bne cat_through
+
+cat_through:
+ /* set cat through - for better performance - in orion2 b0 and higher*/
+ orr r8, r8, #SDRAM_CATTHR_EN
+
+cat_through_end:
+
+
+ /* Get registered/non registered info from DIMM */
+ bl _is_Registered
+ beq nonRegistered
+
+setRegistered:
+ orr r8, r8, #SDRAM_REGISTERED /* Set registered bit(17) */
+
+nonRegistered:
+ /* Get SDRAM width */
+ bl _get_width
+
+ orr r6, r8, #SDRAM_DCFG_X16_DEV /* x16 devices */
+ cmp r7, #16
+ beq setConfigReg
+
+ orr r6, r8, #SDRAM_DCFG_X8_DEV /* x8 devices */
+ cmp r7, #8
+ beq setConfigReg
+
+ /* This is an error. return */
+ b exit_ddrAutoConfig
+
+setConfigReg:
+ mov r8, r6
+ ldr r6, =SDRAM_CONFIG_DV
+ orr r8, r8, r6 /* Add default settings */
+ mov r6, r8 /* Do not swap r8 content */
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_CONFIG_REG)
+
+ /* Set maximum CL supported by DIMM */
+ bl _get_CAL
+
+ /* r7 is DIMM supported CAS (e.g: 3 --> 0x1C) */
+ clz r6, r7
+ rsb r6, r6, #31 /* r6 = the bit number of MAX CAS supported */
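+	/* e.g. for DDR1 an SPD CAS byte of 0x1C (CL 2, 2.5 and 3 supported)
+	   gives r6 = 4, which selects CL = 3 in the table below */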
+
+ /* Check the DDR version */
+ tst r8, #SDRAM_DTYPE_DDR2
+ bne casDdr2
+
+casDdr1:
+ ldr r7, =3 /* stBurstDel field value */
+ ldr r8, =0x52 /* Assuming MAX CL = 1.5 */
+ cmp r6, #1 /* If CL = 1.5 break */
+ beq setModeReg
+
+ ldr r7, =3 /* stBurstDel field value */
+ ldr r8, =0x22 /* Assuming MAX CL = 2 */
+ cmp r6, #2 /* If CL = 2 break */
+ beq setModeReg
+
+ ldr r7, =4 /* stBurstDel field value */
+ ldr r8, =0x62 /* Assuming MAX CL = 2.5 */
+ cmp r6, #3 /* If CL = 2.5 break */
+ beq setModeReg
+
+ ldr r7, =4 /* stBurstDel field value */
+ ldr r8, =0x32 /* Assuming MAX CL = 3 */
+ cmp r6, #4 /* If CL = 3 break */
+ beq setModeReg
+
+ ldr r7, =5 /* stBurstDel field value */
+ ldr r8, =0x42 /* Assuming MAX CL = 4 */
+ cmp r6, #6 /* If CL = 4 break */
+ b setModeReg
+
+ b exit_ddrAutoConfig /* This is an error !! */
+
+casDdr2:
+ ldr r7, =4 /* stBurstDel field value */
+ ldr r8, =0x32 /* Assuming MAX CL = 3 */
+ cmp r6, #3 /* If CL = 3 break */
+ beq casDdr2Cont
+
+ ldr r7, =5 /* stBurstDel field value */
+ ldr r8, =0x42 /* Assuming MAX CL = 4 */
+ cmp r6, #4 /* If CL = 4 break */
+ beq casDdr2Cont
+
+ /* CL 5 currently unsupported. We use CL 4 instead */
+ ldr r7, =5 /* stBurstDel field value */
+ ldr r8, =0x42 /* Assuming MAX CL = 5 */
+ cmp r6, #5 /* If CL = 5 break */
+ beq casDdr2Cont
+
+ b exit_ddrAutoConfig /* This is an error !! */
+casDdr2Cont:
+ /* Write recovery for auto-precharge relevant only in DDR2 */
+ orr r8, r8, #0x400 /* Default value */
+
+setModeReg:
+ /* The CPU must not attempt to change the SDRAM Mode register setting */
+ /* prior to DRAM controller completion of the DRAM initialization */
+ /* sequence. To guarantee this restriction, it is recommended that */
+ /* the CPU sets the SDRAM Operation register to NOP command, performs */
+ /* read polling until the register is back in Normal operation value, */
+	/* and then sets SDRAM Mode register to its new value.                */
+
+ /* write 'nop' to SDRAM operation */
+ mov r6, #0x5 /* 'NOP' command */
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_OPERATION_REG)
+
+	/* poll SDRAM operation. Make sure it's back to normal operation */
+_sdramOpPoll1:
+ ldr r6, [r5]
+ cmp r6, #0 /* '0' = Normal SDRAM Mode */
+ bne _sdramOpPoll1
+
+	/* Now it's safe to write the new value to SDRAM Mode register */
+ MV_REG_WRITE_ASM (r8, r5, SDRAM_MODE_REG)
+
+ /* Make the Dunit write the DRAM its new mode */
+ mov r6, #0x3 /* Mode Register Set command */
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_OPERATION_REG)
+
+	/* poll SDRAM operation. Make sure it's back to normal operation */
+_sdramOpPoll2:
+ ldr r6, [r5]
+ cmp r6, #0 /* '0' = Normal SDRAM Mode */
+ bne _sdramOpPoll2
+
+ /* Set Dunit control register according to max CL detected */
+ /* If we use registered DIMM, add 1 to stBurstDel */
+ MV_REG_READ_ASM (r6, r5, SDRAM_CONFIG_REG)
+ tst r6, #SDRAM_REGISTERED
+ beq setDunitReg
+ add r7, r7, #1
+
+setDunitReg:
+ ldr r6, =SDRAM_DUNIT_CTRL_LOW_DV
+ orr r6, r6, r7, LSL #SDRAM_ST_BURST_DEL_OFFS
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_DUNIT_CTRL_REG)
+
+
+ /* DIMM density configuration*/
+ /* Density = (1 << (rowNum + colNum)) * dramWidth * dramBankNum */
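+	/* e.g. a hypothetical DIMM with 14 row and 10 column bits, x8 devices
+	   and 4 banks: (1 << 24) * 8 * 4 = 0x20000000 bits (512Mbit), i.e.
+	   0x200 (DRAM_DEV_DENSITY_512M) after the shift below */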
+Density:
+ bl _getDensity
+ mov r8, r7
+ mov r8, r8, LSR #20 /* Move density 20 bits to the right */
+						/* For example 0x10000000 --> 0x100   */
+
+ mov r6, #0x00
+ cmp r8, #DRAM_DEV_DENSITY_128M
+ beq densCont
+
+ mov r6, #0x10
+ cmp r8, #DRAM_DEV_DENSITY_256M
+ beq densCont
+
+ mov r6, #0x20
+ cmp r8, #DRAM_DEV_DENSITY_512M
+ beq densCont
+
+ /* This is an error. return */
+ b exit_ddrAutoConfig
+
+densCont:
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_ADDR_CTRL_REG)
+
+ /* Config DDR2 registers (Extended mode, ODTs and pad calibration) */
+ MV_REG_READ_ASM (r8, r5, SDRAM_CONFIG_REG)
+ tst r8, #SDRAM_DTYPE_DDR2
+ beq _extModeODTEnd
+
+
+ /* Set DDR Extended Mode register for working with CS[0] */
+ /* write 'nop' to SDRAM operation */
+ mov r6, #0x5 /* 'NOP' command */
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_OPERATION_REG)
+
+	/* poll SDRAM operation. Make sure it's back to normal operation */
+_sdramOpPoll3:
+ ldr r6, [r5]
+ cmp r6, #0 /* '0' = Normal SDRAM Mode */
+ bne _sdramOpPoll3
+
+	/* Now it's safe to write the new value to SDRAM Extended Mode register */
+ ldr r6, =DDR_SDRAM_EXT_MODE_CS0_DV
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_EXTENDED_MODE_REG)
+
+ /* Make the Dunit write the DRAM its new extended mode */
+ mov r6, #0x4 /* Extended Mode Register Set command */
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_OPERATION_REG)
+
+	/* poll SDRAM operation. Make sure it's back to normal operation */
+_sdramOpPoll4:
+ ldr r6, [r5]
+ cmp r6, #0 /* '0' = Normal SDRAM Mode */
+ bne _sdramOpPoll4
+
+ /* ODT configuration is done for single bank CS[0] only */
+ /* Config DDR2 On Die Termination (ODT) registers */
+ ldr r6, =DDR2_ODT_CTRL_LOW_CS0_DV
+ MV_REG_WRITE_ASM (r6, r5, DDR2_SDRAM_ODT_CTRL_LOW_REG)
+
+ ldr r6, =DDR2_ODT_CTRL_HIGH_CS0_DV
+ MV_REG_WRITE_ASM (r6, r5, DDR2_SDRAM_ODT_CTRL_HIGH_REG)
+
+ ldr r6, =DDR2_DUNIT_ODT_CTRL_CS0_DV
+ MV_REG_WRITE_ASM (r6, r5, DDR2_DUNIT_ODT_CONTROL_REG)
+
+
+	/* Check which device we are running on and perform the
+	   initialization according to the device ID */
+
+_extModeODTEnd:
+
+ /* Implement Guideline (GL# MEM-2) P_CAL Automatic Calibration */
+ /* Does Not Work for Address/Control and Data Pads. */
+ /* Relevant for: 88F5181-A1/B0 and 88F5281-A0 */
+
+ /* Read device ID */
+ MV_CTRL_MODEL_GET_ASM(r6, r5);
+ /* Read device revision */
+ MV_CTRL_REV_GET_ASM(r8, r5);
+
+ /* Continue if OrionN */
+ ldr r5, =MV_5180_DEV_ID
+ cmp r6, r5
+ bne 1f
+ b glMem2End
+1:
+
+ /* Continue if Orion1 and device revision B1 */
+ ldr r5, =MV_5181_DEV_ID
+ cmp r6, r5
+ bne 1f
+
+ cmp r8, #MV_5181_B1_REV
+ bge glMem2End
+ b glMem2Start
+1:
+
+ /* Orion NAS */
+ ldr r5, =MV_5182_DEV_ID
+ cmp r6, r5
+ beq glMem2Start
+
+ /* Orion Shark */
+ ldr r5, =MV_8660_DEV_ID
+ cmp r6, r5
+ beq glMem2Start
+
+ b glMem2End
+
+glMem2Start:
+
+ /* DDR SDRAM Address/Control Pads Calibration */
+ MV_REG_READ_ASM (r6, r5, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+ /* Set Bit [31] to make the register writable */
+ orr r8, r6, #SDRAM_WR_EN
+
+ MV_REG_WRITE_ASM (r8, r5, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+ bic r6, r6, #SDRAM_WR_EN /* Make register read-only */
+ bic r6, r6, #SDRAM_TUNE_EN /* Disable auto calibration */
+ bic r6, r6, #SDRAM_DRVN_MASK /* Clear r5[5:0]<DrvN> */
+ bic r6, r6, #SDRAM_DRVP_MASK /* Clear r5[11:6]<DrvP> */
+
+ /* Get the final N locked value of driving strength [22:17] */
+ mov r5, r6
+ mov r5, r5, LSL #9
+ mov r5, r5, LSR #26 /* r5[5:0]<DrvN> = r6[22:17]<LockN> */
+ orr r5, r5, r5, LSL #6 /* r5[11:6]<DrvP> = r5[5:0]<DrvN> */
+
+ /* Write to both <DrvN> bits [5:0] and <DrvP> bits [11:6] */
+ orr r6, r6, r5
+
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+
+ /* DDR SDRAM Data Pads Calibration */
+ MV_REG_READ_ASM (r6, r5, SDRAM_DATA_PADS_CAL_REG)
+
+ /* Set Bit [31] to make the register writable */
+ orr r8, r6, #SDRAM_WR_EN
+
+ MV_REG_WRITE_ASM (r8, r5, SDRAM_DATA_PADS_CAL_REG)
+
+ bic r6, r6, #SDRAM_WR_EN /* Make register read-only */
+ bic r6, r6, #SDRAM_TUNE_EN /* Disable auto calibration */
+ bic r6, r6, #SDRAM_DRVN_MASK /* Clear r5[5:0]<DrvN> */
+ bic r6, r6, #SDRAM_DRVP_MASK /* Clear r5[11:6]<DrvP> */
+
+ /* Get the final N locked value of driving strength [22:17] */
+ mov r5, r6
+ mov r5, r5, LSL #9
+ mov r5, r5, LSR #26
+ orr r5, r5, r5, LSL #6 /* r5[5:0] = r6[22:17]<LockN> */
+
+ /* Write to both <DrvN> bits [5:0] and <DrvP> bits [11:6] */
+ orr r6, r6, r5
+
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_DATA_PADS_CAL_REG)
+
+glMem2End:
+ /* Implement Guideline (GL# MEM-3) Drive Strength Value */
+ /* Relevant for: 88F5181-A1/B0/B1 and 88F5281-A0/B0 */
+
+ /* Get SDRAM Config value */
+ MV_REG_READ_ASM (r8, r5, SDRAM_CONFIG_REG)
+
+ /* Get DIMM type */
+ tst r8, #SDRAM_DTYPE_DDR2
+ beq ddr1StrengthVal
+
+ddr2StrengthVal:
+ ldr r4, =DDR2_ADDR_CTRL_PAD_STRENGTH_TYPICAL_DV
+ ldr r8, =DDR2_DATA_PAD_STRENGTH_TYPICAL_DV
+ b setDrvStrength
+ddr1StrengthVal:
+ ldr r4, =DDR1_ADDR_CTRL_PAD_STRENGTH_TYPICAL_DV
+ ldr r8, =DDR1_DATA_PAD_STRENGTH_TYPICAL_DV
+
+setDrvStrength:
+ /* DDR SDRAM Address/Control Pads Calibration */
+ MV_REG_READ_ASM (r6, r5, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+ orr r6, r6, #SDRAM_WR_EN /* Make register writeable */
+
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+ HTOLL(r6,r5)
+
+ bic r6, r6, #SDRAM_WR_EN /* Make register read-only */
+ bic r6, r6, #SDRAM_PRE_DRIVER_STRENGTH_MASK
+ orr r6, r4, r6 /* Set default value for DDR */
+
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+
+ /* DDR SDRAM Data Pads Calibration */
+ MV_REG_READ_ASM (r6, r5, SDRAM_DATA_PADS_CAL_REG)
+
+ orr r6, r6, #SDRAM_WR_EN /* Make register writeable */
+
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_DATA_PADS_CAL_REG)
+ HTOLL(r6,r5)
+
+ bic r6, r6, #SDRAM_WR_EN /* Make register read-only */
+ bic r6, r6, #SDRAM_PRE_DRIVER_STRENGTH_MASK
+ orr r6, r8, r6 /* Set default value for DDR */
+
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_DATA_PADS_CAL_REG)
+
+
+ /* Implement Guideline (GL# MEM-4) DQS Reference Delay Tuning */
+ /* Relevant for: 88F5181-A1/B0/B1 and 88F5281-A0/B0 */
+	/* Get the "sample on reset" register for the DDR frequency */
+
+#if defined(MV_RUN_FROM_FLASH)
+ /* Calc the absolute address of the _cpuARMDDRCLK[] in the boot flash */
+ ldr r7, = _cpuARMDDRCLK
+ ldr r4, =_start
+ ldr r4, [r4]
+ sub r7, r7, r4
+ ldr r4, = Lrom_start_of_data
+ ldr r4, [r4]
+ add r7, r4, r7
+#else
+ /* Calc the absolute address of the _cpuARMDDRCLK[] in the boot flash */
+ ldr r7, = _cpuARMDDRCLK
+ ldr r4, =_start
+ sub r7, r7, r4
+ add r7, r7, #CFG_MONITOR_BASE
+#endif
+	/* Get the "sample on reset" register for the DDR frequency */
+ MV_REG_READ_ASM (r4, r5, MPP_SAMPLE_AT_RESET)
+ ldr r5, =MSAR_ARMDDRCLCK_MASK
+ and r5, r4, r5
+#if 0 /* YOTAM TO BE FIX */
+ mov r5, r5, LSR #MSAR_ARMDDRCLCK_OFFS
+#endif
+
+ /* Read device ID */
+ MV_CTRL_MODEL_GET_ASM(r6, r8);
+
+ /* Continue if TC90 */
+ ldr r8, =MV_1281_DEV_ID
+	cmp	r6, r8		/* compare device ID against MV_1281_DEV_ID */
+ beq armClkMsb
+
+ /* Continue if Orion2 */
+ ldr r8, =MV_5281_DEV_ID
+ cmp r6, r8
+#if 0 /* YOTAM TO BE FIX */
+ bne 1f
+#endif
+
+armClkMsb:
+#if 0 /* YOTAM TO BE FIX */
+ tst r4, #MSAR_ARMDDRCLCK_H_MASK
+ beq 1f
+ orr r5, r5, #BIT4
+1:
+ ldr r4, =MV_CPU_ARM_CLK_ELM_SIZE
+ mul r5, r4, r5
+ add r7, r7, r5
+ add r7, r7, #MV_CPU_ARM_CLK_DDR_OFF
+ ldr r5, [r7]
+#endif
+
+ /* Get SDRAM Config value */
+ MV_REG_READ_ASM (r8, r4, SDRAM_CONFIG_REG)
+
+ /* Get DIMM type */
+ tst r8, #SDRAM_DTYPE_DDR2
+ beq ddr1FtdllVal
+
+ddr2FtdllVal:
+ ldr r4, =FTDLL_DDR2_250MHZ
+ ldr r7, =_250MHz
+ cmp r5, r7
+ beq setFtdllReg
+ ldr r4, =FTDLL_DDR2_200MHZ
+ ldr r7, =_200MHz
+ cmp r5, r7
+ beq setFtdllReg
+ ldr r4, =FTDLL_DDR2_166MHZ
+ ldr r7, =_166MHz
+ cmp r5, r7
+ beq setFtdllReg
+ ldr r4, =FTDLL_DDR2_133MHZ
+ b setFtdllReg
+
+ddr1FtdllVal:
+ ldr r4, =FTDLL_DDR1_200MHZ
+ ldr r7, =_200MHz
+ cmp r5, r7
+ beq setFtdllReg
+ ldr r4, =FTDLL_DDR1_166MHZ
+ ldr r7, =_166MHz
+ cmp r5, r7
+ beq setFtdllReg
+ ldr r4, =FTDLL_DDR1_133MHZ
+ ldr r7, =_133MHz
+ cmp r5, r7
+ beq setFtdllReg
+ ldr r4, =0
+
+setFtdllReg:
+
+#if !defined(MV_88W8660) && !defined(MV_88F6183) && !defined(MV_88F6183L)
+ MV_REG_READ_ASM (r8, r5, SDRAM_FTDLL_CONFIG_REG)
+ orr r8, r8, r4
+ MV_REG_WRITE_ASM (r8, r5, SDRAM_FTDLL_CONFIG_REG)
+ bic r8, r8, #1
+ MV_REG_WRITE_ASM (r8, r5, SDRAM_FTDLL_CONFIG_REG)
+#endif /* !defined(MV_88W8660) && !defined(MV_88F6183) && !defined(MV_88F6183L)*/
+
+
+setTimingReg:
+ /* Set default Timing parameters */
+ MV_REG_READ_ASM (r8, r5, SDRAM_CONFIG_REG)
+ tst r8, #SDRAM_DTYPE_DDR2
+ bne ddr2TimeParam
+
+ddr1TimeParam:
+ ldr r6, =DDR1_TIMING_LOW_DV
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_TIMING_CTRL_LOW_REG)
+ ldr r6, =DDR1_TIMING_HIGH_DV
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_TIMING_CTRL_HIGH_REG)
+ b timeParamDone
+
+ddr2TimeParam:
+ ldr r6, =DDR2_TIMING_LOW_DV
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_TIMING_CTRL_LOW_REG)
+ ldr r6, =DDR2_TIMING_HIGH_DV
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_TIMING_CTRL_HIGH_REG)
+
+timeParamDone:
+ /* Open CS[0] window to requested size and enable it. Disable other */
+ /* windows */
+ ldr r6, =SCBAR_BASE_MASK
+ sub r3, r3, #1
+ and r3, r3, r6
+ orr r3, r3, #1 /* Enable bank */
+ MV_REG_WRITE_ASM (r3, r5, SDRAM_SIZE_REG(0))
+ ldr r6, =0
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(1))
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(2))
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(3))
+
+exit_ddrAutoConfig:
+ mov PC, r11 /* r11 is saved link register */
+
+
+/***************************************************************************************/
+/* r4 holds I2C EEPROM address
+ * r7 holds the I2C EEPROM offset parameter for _i2cRead on entry and its returned value on exit
+ * r8 holds the value of various SDRAM configuration registers.
+ * r13 holds Link register
+ */
+/**************************/
+_getDensity:
+ mov r13, LR /* Save link register */
+
+ mov r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0 */
+ mov r7, #NUM_OF_ROWS_OFFSET /* offset 3 */
+ bl _i2cRead
+ mov r8, r7 /* r8 save number of rows */
+
+ mov r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0 */
+ mov r7, #NUM_OF_COLS_OFFSET /* offset 4 */
+ bl _i2cRead
+ add r8, r8, r7 /* r8 = number of rows + number of col */
+
+ mov r7, #0x1
+ mov r8, r7, LSL r8 /* r8 = (1 << r8) */
+
+ mov r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0 */
+ mov r7, #SDRAM_WIDTH_OFFSET /* offset 13 */
+ bl _i2cRead
+ mul r8, r7, r8
+
+ mov r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0 */
+ mov r7, #NUM_OF_BANKS_OFFSET /* offset 17 */
+ bl _i2cRead
+ mul r7, r8, r7
+
+ mov PC, r13
+
+/**************************/
+_get_width:
+ mov r13, LR /* Save link register */
+
+ /* Get SDRAM width (SPD offset 13) */
+ mov r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0 */
+ mov r7, #SDRAM_WIDTH_OFFSET
+ bl _i2cRead /* result in r7 */
+
+ mov PC, r13
+
+/**************************/
+_get_CAL:
+ mov r13, LR /* Save link register */
+
+ /* Set maximum CL supported by DIMM */
+ mov r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0 */
+ mov r7, #SUPPORTED_CL_OFFSET /* offset 18 */
+ bl _i2cRead
+
+ mov PC, r13
+
+/**************************/
+/* R8 - sdram configuration register.
+ * Return value in flag if no-registered then Z-flag is set
+ */
+_is_Registered:
+ mov r13, LR /* Save link register */
+
+ /* Get registered/non registered info from DIMM */
+ tst r8, #SDRAM_DTYPE_DDR2
+ bne regDdr2
+
+regDdr1:
+ mov r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0 */
+ mov r7, #SDRAM_MODULES_ATTR_OFFSET
+ bl _i2cRead /* result in r7 */
+ tst r7, #0x2
+ b exit
+regDdr2:
+ mov r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0 */
+ mov r7, #DIMM_TYPE_INFO_OFFSET
+ bl _i2cRead /* result in r7 */
+ tst r7, #0x11 /* DIMM type = regular RDIMM (0x01) */
+ /* or Mini-RDIMM (0x10) */
+exit:
+ mov PC, r13
+
+
+#endif
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.S b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.S
new file mode 100644
index 000000000..e34ebbf29
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.S
@@ -0,0 +1,668 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvDramIfBasicAsm.s
+*
+* DESCRIPTION:
+* Full memory detection and best timing configuration are done in
+* C code, and the C runtime environment requires a stack. This module's
+* API therefore initializes DRAM interface chip select 0 with basic
+* functionality so that a stack can be used.
+* The module API assumes DRAM information is stored in an I2C EEPROM
+* residing at a given I2C address, MV_BOARD_DIMM0_I2C_ADDR. The I2C EEPROM
+* internal data structure is assumed to be organized in the common DRAM
+* vendor SPD format.
+* NOTE: DFCDL values are assumed to be already initialized prior to
+* this module API activity.
+*
+*
+* DEPENDENCIES:
+* None.
+*
+*******************************************************************************/
+
+/* includes */
+#define MV_ASMLANGUAGE
+#include "mvOsAsm.h"
+#include "mvSysHwConfig.h"
+#include "mvDramIfRegs.h"
+#include "mvDramIfConfig.h"
+#include "mvCpuIfRegs.h"
+#include "pex/mvPexRegs.h"
+#include "pci/mvPciRegs.h"
+#include "mvCtrlEnvSpec.h"
+#include "mvCtrlEnvAsm.h"
+#include "cpu/mvCpuArm.h"
+#include "mvCommon.h"
+
+/* defines */
+
+/* locals */
+.data
+.globl _mvDramIfConfig
+
+.text
+
+/*******************************************************************************
+* _mvDramIfConfig - Basic DRAM interface initialization.
+*
+* DESCRIPTION:
+* The function will initialize the following DRAM parameters using the
+* values prepared by mvDramIfDetect routine. Values are located
+* in predefined registers.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
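+/* The DRAM_BUF_REGx scratch registers read below are expected to have been
+   filled beforehand, either by _mvDramIfStaticInit or by the C detection
+   code */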
+
+_mvDramIfConfig:
+
+ /* Save register on stack */
+ cmp sp, #0
+ beq no_stack_s
+save_on_stack:
+ stmdb sp!, {r1, r2, r3, r4, r7, r11}
+no_stack_s:
+
+	/* 1) Write to SDRAM configuration register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG1)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_CONFIG_REG)
+ str r4, [r1]
+
+ /* 2) Write Dunit control low register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG3)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_DUNIT_CTRL_REG)
+ str r4, [r1]
+
+ /* 3) Write SDRAM address control register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG4)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_ADDR_CTRL_REG)
+ str r4, [r1]
+
+ /* 4) Write SDRAM bank 0 size register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG0)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_SIZE_REG(0))
+ str r4, [r1]
+
+ /* 5) Write SDRAM open pages control register */
+ ldr r1, =(INTER_REGS_BASE + SDRAM_OPEN_PAGE_CTRL_REG)
+ ldr r4, =SDRAM_OPEN_PAGES_CTRL_REG_DV
+ str r4, [r1]
+
+ /* 6) Write SDRAM timing Low register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG5)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_TIMING_CTRL_LOW_REG)
+ str r4, [r1]
+
+ /* 7) Write SDRAM timing High register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG6)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_TIMING_CTRL_HIGH_REG)
+ str r4, [r1]
+
+ /* 8) Write SDRAM mode register */
+ /* The CPU must not attempt to change the SDRAM Mode register setting */
+ /* prior to DRAM controller completion of the DRAM initialization */
+ /* sequence. To guarantee this restriction, it is recommended that */
+ /* the CPU sets the SDRAM Operation register to NOP command, performs */
+ /* read polling until the register is back in Normal operation value, */
+	/* and then sets SDRAM Mode register to its new value.               */
+
+ /* 8.1 write 'nop' to SDRAM operation */
+ mov r4, #0x5 /* 'NOP' command */
+ MV_REG_WRITE_ASM(r4, r1, SDRAM_OPERATION_REG)
+
+	/* 8.2 poll SDRAM operation. Make sure it's back to normal operation */
+_sdramOpPoll1:
+ ldr r4, [r1]
+ cmp r4, #0 /* '0' = Normal SDRAM Mode */
+ bne _sdramOpPoll1
+
+	/* 8.3 Now it's safe to write the new value to SDRAM Mode register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG2)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_MODE_REG)
+ str r4, [r1]
+
+ /* 8.4 Make the Dunit write the DRAM its new mode */
+ mov r4, #0x3 /* Mode Register Set command */
+ MV_REG_WRITE_ASM (r4, r1, SDRAM_OPERATION_REG)
+
+	/* 8.5 poll SDRAM operation. Make sure it's back to normal operation */
+_sdramOpPoll2:
+ ldr r4, [r1]
+ cmp r4, #0 /* '0' = Normal SDRAM Mode */
+ bne _sdramOpPoll2
+
+#ifndef DB_FPGA
+ /* Config DDR2 registers (Extended mode, ODTs and pad calibration) */
+ MV_REG_READ_ASM (r4, r1, SDRAM_CONFIG_REG)
+ tst r4, #SDRAM_DTYPE_DDR2
+ beq _extModeODTEnd
+#endif /* DB_FPGA */
+
+ /* 9) Write SDRAM Extended mode register This operation should be */
+ /* done for each memory bank */
+ /* write 'nop' to SDRAM operation */
+ mov r4, #0x5 /* 'NOP' command */
+ MV_REG_WRITE_ASM (r4, r1, SDRAM_OPERATION_REG)
+
+	/* poll SDRAM operation. Make sure it's back to normal operation */
+_sdramOpPoll3:
+ ldr r4, [r1]
+ cmp r4, #0 /* '0' = Normal SDRAM Mode */
+ bne _sdramOpPoll3
+
+	/* Now it's safe to write the new value to SDRAM Extended Mode register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG10)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_EXTENDED_MODE_REG)
+ str r4, [r1]
+
+ /* Go over each of the Banks */
+ ldr r3, =0 /* r3 = DRAM bank Num */
+
+extModeLoop:
+ /* Set the SDRAM Operation Control to each of the DRAM banks */
+ mov r2, r3 /* Do not swap the bank counter value */
+ MV_REG_WRITE_ASM (r2, r1, SDRAM_OPERATION_CTRL_REG)
+
+ /* Make the Dunit write the DRAM its new mode */
+ mov r4, #0x4 /* Extended Mode Register Set command */
+ MV_REG_WRITE_ASM (r4, r1, SDRAM_OPERATION_REG)
+
+	/* poll SDRAM operation. Make sure it's back to normal operation */
+_sdramOpPoll4:
+ ldr r4, [r1]
+ cmp r4, #0 /* '0' = Normal SDRAM Mode */
+ bne _sdramOpPoll4
+#ifndef DB_FPGA
+ add r3, r3, #1
+ cmp r3, #4 /* 4 = Number of banks */
+ bne extModeLoop
+
+extModeEnd:
+ /* Config DDR2 On Die Termination (ODT) registers */
+ /* Write SDRAM DDR2 ODT control low register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG7)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + DDR2_SDRAM_ODT_CTRL_LOW_REG)
+ str r4, [r1]
+
+ /* Write SDRAM DDR2 ODT control high register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG8)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + DDR2_SDRAM_ODT_CTRL_HIGH_REG)
+ str r4, [r1]
+
+ /* Write SDRAM DDR2 Dunit ODT control register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG9)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + DDR2_DUNIT_ODT_CONTROL_REG)
+ str r4, [r1]
+
+#endif /* DB_FPGA */
+_extModeODTEnd:
+#ifndef DB_FPGA
+ /* Implement Guideline (GL# MEM-2) P_CAL Automatic Calibration */
+ /* Does Not Work for Address/Control and Data Pads. */
+ /* Relevant for: 88F5181-A1/B0 and 88F5281-A0 */
+
+ /* Read device ID */
+ MV_CTRL_MODEL_GET_ASM(r3, r1);
+ /* Read device revision */
+ MV_CTRL_REV_GET_ASM(r2, r1);
+
+ /* Continue if OrionN */
+ ldr r1, =MV_5180_DEV_ID
+ cmp r3, r1
+ bne 1f
+ b glMem2End
+1:
+ /* Continue if Orion1 and device revision B1 */
+ ldr r1, =MV_5181_DEV_ID
+ cmp r3, r1
+ bne 1f
+
+ cmp r2, #MV_5181_B1_REV
+ bge glMem2End
+ b glMem2Start
+1:
+
+ /* Orion NAS */
+ ldr r1, =MV_5182_DEV_ID
+ cmp r3, r1
+ beq glMem2Start
+
+ /* Orion NAS */
+ ldr r1, =MV_5082_DEV_ID
+ cmp r3, r1
+ beq glMem2Start
+
+ /* Orion Shark */
+ ldr r1, =MV_8660_DEV_ID
+ cmp r3, r1
+ beq glMem2Start
+
+ b glMem2End
+
+glMem2Start:
+
+ /* DDR SDRAM Address/Control Pads Calibration */
+ MV_REG_READ_ASM (r3, r1, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+ /* Set Bit [31] to make the register writable */
+ orr r2, r3, #SDRAM_WR_EN
+
+ MV_REG_WRITE_ASM (r2, r1, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+ bic r3, r3, #SDRAM_WR_EN /* Make register read-only */
+ bic r3, r3, #SDRAM_TUNE_EN /* Disable auto calibration */
+ bic r3, r3, #SDRAM_DRVN_MASK /* Clear r1[5:0]<DrvN> */
+ bic r3, r3, #SDRAM_DRVP_MASK /* Clear r1[11:6]<DrvP> */
+
+ /* Get the final N locked value of driving strength [22:17] */
+ mov r1, r3
+ mov r1, r1, LSL #9
+ mov r1, r1, LSR #26 /* r1[5:0]<DrvN> = r3[22:17]<LockN> */
+ orr r1, r1, r1, LSL #6 /* r1[11:6]<DrvP> = r1[5:0]<DrvN> */
+
+ /* Write to both <DrvN> bits [5:0] and <DrvP> bits [11:6] */
+ orr r3, r3, r1
+
+ MV_REG_WRITE_ASM (r3, r1, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+
+ /* DDR SDRAM Data Pads Calibration */
+ MV_REG_READ_ASM (r3, r1, SDRAM_DATA_PADS_CAL_REG)
+
+ /* Set Bit [31] to make the register writable */
+ orr r2, r3, #SDRAM_WR_EN
+
+ MV_REG_WRITE_ASM (r2, r1, SDRAM_DATA_PADS_CAL_REG)
+
+ bic r3, r3, #SDRAM_WR_EN /* Make register read-only */
+ bic r3, r3, #SDRAM_TUNE_EN /* Disable auto calibration */
+ bic r3, r3, #SDRAM_DRVN_MASK /* Clear r1[5:0]<DrvN> */
+ bic r3, r3, #SDRAM_DRVP_MASK /* Clear r1[11:6]<DrvP> */
+
+ /* Get the final N locked value of driving strength [22:17] */
+ mov r1, r3
+ mov r1, r1, LSL #9
+ mov r1, r1, LSR #26
+ orr r1, r1, r1, LSL #6 /* r1[5:0] = r3[22:17]<LockN> */
+
+ /* Write to both <DrvN> bits [5:0] and <DrvP> bits [11:6] */
+ orr r3, r3, r1
+
+ MV_REG_WRITE_ASM (r3, r1, SDRAM_DATA_PADS_CAL_REG)
+
+glMem2End:
+
+
+ /* Implement Guideline (GL# MEM-3) Drive Strength Value */
+ /* Relevant for: 88F5181-A1/B0/B1, 88F5281-A0/B0/C/D, 88F5182, */
+ /* 88F5082, 88F5181L, 88F6082/L, 88F6183, 88F6183L */
+
+ /* Get SDRAM Config value */
+ MV_REG_READ_ASM (r2, r1, SDRAM_CONFIG_REG)
+
+ /* Get DIMM type */
+ tst r2, #SDRAM_DTYPE_DDR2
+ beq ddr1StrengthVal
+
+ddr2StrengthVal:
+ ldr r4, =DDR2_ADDR_CTRL_PAD_STRENGTH_TYPICAL_DV
+ ldr r2, =DDR2_DATA_PAD_STRENGTH_TYPICAL_DV
+ b setDrvStrength
+ddr1StrengthVal:
+ ldr r4, =DDR1_ADDR_CTRL_PAD_STRENGTH_TYPICAL_DV
+ ldr r2, =DDR1_DATA_PAD_STRENGTH_TYPICAL_DV
+
+setDrvStrength:
+ /* DDR SDRAM Address/Control Pads Calibration */
+ MV_REG_READ_ASM (r3, r1, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+ orr r3, r3, #SDRAM_WR_EN /* Make register writeable */
+
+ MV_REG_WRITE_ASM (r3, r1, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+ HTOLL(r3,r1)
+
+ bic r3, r3, #SDRAM_WR_EN /* Make register read-only */
+ bic r3, r3, #SDRAM_PRE_DRIVER_STRENGTH_MASK
+ orr r3, r4, r3 /* Set default value for DDR */
+
+ MV_REG_WRITE_ASM (r3, r1, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+
+ /* DDR SDRAM Data Pads Calibration */
+ MV_REG_READ_ASM (r3, r1, SDRAM_DATA_PADS_CAL_REG)
+
+ orr r3, r3, #SDRAM_WR_EN /* Make register writeable */
+
+ MV_REG_WRITE_ASM (r3, r1, SDRAM_DATA_PADS_CAL_REG)
+ HTOLL(r3,r1)
+
+ bic r3, r3, #SDRAM_WR_EN /* Make register read-only */
+ bic r3, r3, #SDRAM_PRE_DRIVER_STRENGTH_MASK
+ orr r3, r2, r3 /* Set default value for DDR */
+
+ MV_REG_WRITE_ASM (r3, r1, SDRAM_DATA_PADS_CAL_REG)
+
+#if !defined(MV_88W8660) && !defined(MV_88F6183) && !defined(MV_88F6183L)
+ /* Implement Guideline (GL# MEM-4) DQS Reference Delay Tuning */
+ /* Relevant for: 88F5181-A1/B0/B1 and 88F5281-A0/B0/C/D, 88F5182 */
+ /* 88F5082, 88F5181L, 88F6082/L */
+
+ /* Calc the absolute address of the _cpuARMDDRCLK[] in the boot flash */
+ ldr r7, = _cpuARMDDRCLK
+ ldr r4, =_start
+ sub r7, r7, r4
+ add r7, r7, #CFG_MONITOR_BASE
+
+	/* Get the "sample on reset" register for the DDR frequency */
+ MV_REG_READ_ASM (r4, r1, MPP_SAMPLE_AT_RESET)
+ ldr r1, =MSAR_ARMDDRCLCK_MASK
+ and r1, r4, r1
+#if 0 /* YOTAM TO BE FIX */
+ mov r1, r1, LSR #MSAR_ARMDDRCLCK_OFFS
+#endif
+
+ /* Read device ID */
+ MV_CTRL_MODEL_GET_ASM(r3, r2);
+
+ /* Continue if TC90 */
+ ldr r2, =MV_1281_DEV_ID
+ cmp r3, r2
+ beq armClkMsb
+
+ /* Continue if Orion2 */
+ ldr r2, =MV_5281_DEV_ID
+ cmp r3, r2
+#if 0 /* YOTAM TO BE FIX */
+ bne 1f
+#endif
+
+armClkMsb:
+#if 0 /* YOTAM TO BE FIX */
+ tst r4, #MSAR_ARMDDRCLCK_H_MASK
+ beq 1f
+ orr r1, r1, #BIT4
+1:
+ ldr r4, =MV_CPU_ARM_CLK_ELM_SIZE
+ mul r1, r4, r1
+ add r7, r7, r1
+ add r7, r7, #MV_CPU_ARM_CLK_DDR_OFF
+ ldr r1, [r7]
+#endif
+
+ /* Get SDRAM Config value */
+ MV_REG_READ_ASM (r2, r4, SDRAM_CONFIG_REG)
+
+ /* Get DIMM type */
+ tst r2, #SDRAM_DTYPE_DDR2
+ beq ddr1FtdllVal
+
+ddr2FtdllVal:
+ ldr r2, =MV_5281_DEV_ID
+ cmp r3, r2
+ bne 2f
+ MV_CTRL_REV_GET_ASM(r3, r2)
+ cmp r3, #MV_5281_D0_REV
+ beq orin2_d0_ddr2_ftdll_val
+ cmp r3, #MV_5281_D1_REV
+ beq orin2_d1_ddr2_ftdll_val
+ cmp r3, #MV_5281_D2_REV
+ beq orin2_d1_ddr2_ftdll_val
+ b ddr2_default_val
+
+/* Set Orion 2 D1 ftdll values for DDR2 */
+orin2_d1_ddr2_ftdll_val:
+ ldr r4, =FTDLL_DDR2_250MHZ_5281_D1
+ ldr r7, =_250MHz
+ cmp r1, r7
+ beq setFtdllReg
+ ldr r4, =FTDLL_DDR2_200MHZ_5281_D1
+ ldr r7, =_200MHz
+ cmp r1, r7
+ beq setFtdllReg
+ ldr r4, =FTDLL_DDR2_166MHZ_5281_D0
+ ldr r7, =_166MHz
+ cmp r1, r7
+ beq setFtdllReg
+ b ddr2_default_val
+
+/* Set Orion 2 D0 ftdll values for DDR2 */
+orin2_d0_ddr2_ftdll_val:
+ ldr r4, =FTDLL_DDR2_250MHZ_5281_D0
+ ldr r7, =_250MHz
+ cmp r1, r7
+ beq setFtdllReg
+ ldr r4, =FTDLL_DDR2_200MHZ_5281_D0
+ ldr r7, =_200MHz
+ cmp r1, r7
+ beq setFtdllReg
+ ldr r4, =FTDLL_DDR2_166MHZ_5281_D0
+ ldr r7, =_166MHz
+ cmp r1, r7
+ beq setFtdllReg
+ b ddr2_default_val
+2:
+ ldr r2, =MV_5182_DEV_ID
+ cmp r3, r2
+ bne 3f
+
+/* Set Orion nas ftdll values for DDR2 */
+orin_nas_ftdll_val:
+ ldr r4, =FTDLL_DDR2_166MHZ_5182
+ ldr r7, =_166MHz
+ cmp r1, r7
+ beq setFtdllReg
+
+/* default values for all other devices */
+3:
+ddr2_default_val:
+ ldr r4, =FTDLL_DDR2_250MHZ
+ ldr r7, =_250MHz
+ cmp r1, r7
+ beq setFtdllReg
+ ldr r4, =FTDLL_DDR2_200MHZ
+ ldr r7, =_200MHz
+ cmp r1, r7
+ beq setFtdllReg
+ ldr r4, =FTDLL_DDR2_166MHZ
+ ldr r7, =_166MHz
+ cmp r1, r7
+ beq setFtdllReg
+ ldr r4, =FTDLL_DDR2_133MHZ
+ ldr r7, =_133MHz
+ cmp r1, r7
+ beq setFtdllReg
+ ldr r4, =0
+ b setFtdllReg
+
+ddr1FtdllVal:
+ ldr r2, =MV_5281_DEV_ID
+ cmp r3, r2
+ bne 2f
+ MV_CTRL_REV_GET_ASM(r3, r2)
+ cmp r3, #MV_5281_D0_REV
+ bge orin2_ddr1_ftdll_val
+ b ddr1_default_val
+
+/* Set Orion 2 D0 and above ftdll values for DDR1 */
+orin2_ddr1_ftdll_val:
+ ldr r4, =FTDLL_DDR1_200MHZ_5281_D0
+ ldr r7, =_200MHz
+ cmp r1, r7
+ beq setFtdllReg
+ ldr r4, =FTDLL_DDR1_166MHZ_5281_D0
+ ldr r7, =_166MHz
+ cmp r1, r7
+ beq setFtdllReg
+ b ddr1_default_val
+2:
+ ldr r2, =MV_5181_DEV_ID
+ cmp r3, r2
+ bne 3f
+ MV_CTRL_REV_GET_ASM(r3, r2)
+ cmp r3, #MV_5181_B1_REV
+ bge orin1_ddr1_ftdll_val
+ b ddr1_default_val
+
+/* Set Orion 1 ftdll values for DDR1 */
+orin1_ddr1_ftdll_val:
+ ldr r4, =FTDLL_DDR1_166MHZ_5181_B1
+ ldr r7, =_166MHz
+ cmp r1, r7
+ beq setFtdllReg
+3:
+ddr1_default_val:
+ ldr r4, =FTDLL_DDR1_133MHZ
+ ldr r7, =_133MHz
+ cmp r1, r7
+ beq setFtdllReg
+
+ ldr r4, =FTDLL_DDR1_166MHZ
+ ldr r7, =_166MHz
+ cmp r1, r7
+ beq setFtdllReg
+
+ ldr r4, =FTDLL_DDR1_200MHZ
+ ldr r7, =_200MHz
+ cmp r1, r7
+ beq setFtdllReg
+
+ ldr r4, =0
+
+setFtdllReg:
+
+ MV_REG_WRITE_ASM (r4, r1, SDRAM_FTDLL_CONFIG_REG)
+ HTOLL(r4,r1)
+ bic r4, r4, #1
+ MV_REG_WRITE_ASM (r4, r1, SDRAM_FTDLL_CONFIG_REG)
+
+#endif /* !defined(MV_88W8660) && !defined(MV_88F6183) && !defined(MV_88F6183L) */
+#endif /* DB_FPGA */
+
+restoreTmpRegs:
+ /* Restore the registers we used to save the DDR detect values */
+
+ ldr r4, =DRAM_BUF_REG0_DV
+ MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG0)
+
+ ldr r4, =DRAM_BUF_REG1_DV
+ MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG1)
+
+ ldr r4, =DRAM_BUF_REG2_DV
+ MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG2)
+
+ ldr r4, =DRAM_BUF_REG3_DV
+ MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG3)
+
+ ldr r4, =DRAM_BUF_REG4_DV
+ MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG4)
+
+ ldr r4, =DRAM_BUF_REG5_DV
+ MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG5)
+
+ ldr r4, =DRAM_BUF_REG6_DV
+ MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG6)
+
+ ldr r4, =DRAM_BUF_REG7_DV
+ MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG7)
+
+ ldr r4, =DRAM_BUF_REG8_DV
+ MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG8)
+
+ ldr r4, =DRAM_BUF_REG9_DV
+ MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG9)
+
+ ldr r4, =DRAM_BUF_REG10_DV
+ MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG10)
+
+
+	/* Restore registers from the stack, if one was available */
+ cmp sp, #0
+ beq no_stack_l
+load_from_stack:
+ ldmia sp!, {r1, r2, r3, r4, r7, r11}
+no_stack_l:
+
+ mov pc, lr
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.h
new file mode 100644
index 000000000..a7c66444a
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.h
@@ -0,0 +1,192 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvDramIfConfigh
+#define __INCmvDramIfConfigh
+
+/* includes */
+
+/* defines */
+
+/* registers defaults values */
+
+#define SDRAM_CONFIG_DV \
+ (SDRAM_PERR_WRITE | \
+ SDRAM_SRMODE | \
+ SDRAM_SRCLK_GATED)
+
+#define SDRAM_DUNIT_CTRL_LOW_DV \
+ (SDRAM_CTRL_POS_RISE | \
+ SDRAM_CLK1DRV_NORMAL | \
+ SDRAM_LOCKEN_ENABLE)
+
+#define SDRAM_ADDR_CTRL_DV 0
+
+#define SDRAM_TIMING_CTRL_LOW_REG_DV \
+ ((0x2 << SDRAM_TRCD_OFFS) | \
+ (0x2 << SDRAM_TRP_OFFS) | \
+ (0x1 << SDRAM_TWR_OFFS) | \
+ (0x0 << SDRAM_TWTR_OFFS) | \
+ (0x5 << SDRAM_TRAS_OFFS) | \
+ (0x1 << SDRAM_TRRD_OFFS))
+/* TRFC 0x27, TW2W 0x1 */
+#define SDRAM_TIMING_CTRL_HIGH_REG_DV (( 0x7 << SDRAM_TRFC_OFFS ) |\
+ ( 0x2 << SDRAM_TRFC_EXT_OFFS) |\
+ ( 0x1 << SDRAM_TW2W_OFFS))
+
+#define SDRAM_OPEN_PAGES_CTRL_REG_DV SDRAM_OPEN_PAGE_EN
+
+/* DDR2 ODT default register values */
+
+/* Presence Ctrl Low Ctrl High Dunit Ctrl Ext Mode */
+/* CS0 0x84210000 0x00000000 0x0000780F 0x00000440 */
+/* CS0+CS1 0x84210000 0x00000000 0x0000780F 0x00000440 */
+/* CS0+CS2 0x030C030C 0x00000000 0x0000740F 0x00000404 */
+/* CS0+CS1+CS2 0x030C030C 0x00000000 0x0000740F 0x00000404 */
+/* CS0+CS2+CS3 0x030C030C 0x00000000 0x0000740F 0x00000404 */
+/* CS0+CS1+CS2+CS3 0x030C030C 0x00000000 0x0000740F 0x00000404 */
+
+#define DDR2_ODT_CTRL_LOW_CS0_DV 0x84210000
+#define DDR2_ODT_CTRL_HIGH_CS0_DV 0x00000000
+#define DDR2_DUNIT_ODT_CTRL_CS0_DV 0x0000780F
+#define DDR_SDRAM_EXT_MODE_CS0_DV 0x00000440
+
+#define DDR2_ODT_CTRL_LOW_CS0_CS2_DV 0x030C030C
+#define DDR2_ODT_CTRL_HIGH_CS0_CS2_DV 0x00000000
+#define DDR2_DUNIT_ODT_CTRL_CS0_CS2_DV 0x0000740F
+#define DDR_SDRAM_EXT_MODE_CS0_CS2_DV 0x00000404
+
+
+/* DDR SDRAM Address/Control and Data Pads Calibration default values */
+#define DDR1_ADDR_CTRL_PAD_STRENGTH_TYPICAL_DV \
+ (1 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+#define DDR2_ADDR_CTRL_PAD_STRENGTH_TYPICAL_DV \
+ (3 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+
+
+#define DDR1_DATA_PAD_STRENGTH_TYPICAL_DV \
+ (1 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+#define DDR2_DATA_PAD_STRENGTH_TYPICAL_DV \
+ (3 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+
+/* DDR SDRAM Mode Register default value */
+#define DDR1_MODE_REG_DV 0x00000000
+#define DDR2_MODE_REG_DV 0x00000400
+
+/* DDR SDRAM Timing parameter default values */
+#define DDR1_TIMING_LOW_DV 0x11602220
+#define DDR1_TIMING_HIGH_DV 0x0000000d
+
+#define DDR2_TIMING_LOW_DV 0x11812220
+#define DDR2_TIMING_HIGH_DV 0x0000030f
+
+/* For Guideline (GL# MEM-4) DQS Reference Delay Tuning */
+#define FTDLL_DDR1_166MHZ ((0x1 << 0) | \
+ (0x7F<< 12) | \
+ (0x1 << 22))
+
+#define FTDLL_DDR1_133MHZ FTDLL_DDR1_166MHZ
+
+#define FTDLL_DDR1_200MHZ ((0x1 << 0) | \
+ (0x1 << 12) | \
+ (0x3 << 14) | \
+ (0x1 << 18) | \
+ (0x1 << 22))
+
+
+#define FTDLL_DDR2_166MHZ ((0x1 << 0) | \
+ (0x1 << 12) | \
+ (0x1 << 14) | \
+ (0x1 << 16) | \
+ (0x1 << 19) | \
+ (0xF << 20))
+
+#define FTDLL_DDR2_133MHZ FTDLL_DDR2_166MHZ
+
+#define FTDLL_DDR2_200MHZ ((0x1 << 0) | \
+ (0x1 << 12) | \
+ (0x1 << 14) | \
+ (0x1 << 16) | \
+ (0x1 << 19) | \
+ (0xF << 20))
+
+#define FTDLL_DDR2_250MHZ 0x445001
+
+/* Orion 1 B1 and above */
+#define FTDLL_DDR1_166MHZ_5181_B1 0x45D001
+
+/* Orion nas */
+#define FTDLL_DDR2_166MHZ_5182 0x597001
+
+/* Orion 2 D0 and above */
+#define FTDLL_DDR1_166MHZ_5281_D0 0x8D0001
+#define FTDLL_DDR1_200MHZ_5281_D0 0x8D0001
+#define FTDLL_DDR2_166MHZ_5281_D0 0x485001
+#define FTDLL_DDR2_200MHZ_5281_D0 0x485001
+#define FTDLL_DDR2_250MHZ_5281_D0 0x445001
+#define FTDLL_DDR2_200MHZ_5281_D1 0x995001
+#define FTDLL_DDR2_250MHZ_5281_D1 0x984801
+
+#endif /* __INCmvDramIfConfigh */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfRegs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfRegs.h
new file mode 100644
index 000000000..e9cd7c4e5
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfRegs.h
@@ -0,0 +1,306 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDramIfRegsh
+#define __INCmvDramIfRegsh
+
+
+/* DDR SDRAM Controller Address Decode Registers */
+/* SDRAM CSn Base Address Register (SCBAR) */
+#define SDRAM_BASE_ADDR_REG(csNum) (0x1500 + (csNum * 8))
+#define SCBAR_BASE_OFFS 16
+#define SCBAR_BASE_MASK (0xffff << SCBAR_BASE_OFFS)
+#define SCBAR_BASE_ALIGNMENT 0x10000
+
+/* SDRAM CSn Size Register (SCSR) */
+#define SDRAM_SIZE_REG(csNum) (0x1504 + (csNum * 8))
+#define SCSR_WIN_EN BIT0
+#define SCSR_SIZE_OFFS 16
+#define SCSR_SIZE_MASK (0xffff << SCSR_SIZE_OFFS)
+#define SCSR_SIZE_ALIGNMENT 0x10000
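+/* Illustrative sketch (hypothetical helper, not part of the HAL API) of how
+   a CS size register value is composed from the fields above: */
+#if 0
+static inline unsigned int sdramScsrVal(unsigned int sizeBytes)
+{
+	/* (size - 1) goes into the [31:16] size field; bit 0 enables the window */
+	return ((sizeBytes - 1) & SCSR_SIZE_MASK) | SCSR_WIN_EN;
+}
+/* e.g. sdramScsrVal(0x10000000) == 0x0fff0001 for an enabled 256MB bank */
+#endif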
+
+/* configuration register */
+#define SDRAM_CONFIG_REG 0x1400
+#define SDRAM_REFRESH_OFFS 0
+#define SDRAM_REFRESH_MAX 0x3000
+#define SDRAM_REFRESH_MASK (SDRAM_REFRESH_MAX << SDRAM_REFRESH_OFFS)
+#define SDRAM_DWIDTH_OFFS 14
+#define SDRAM_DWIDTH_MASK (3 << SDRAM_DWIDTH_OFFS)
+#define SDRAM_DWIDTH_16BIT (1 << SDRAM_DWIDTH_OFFS)
+#define SDRAM_DWIDTH_32BIT (2 << SDRAM_DWIDTH_OFFS)
+#define SDRAM_DTYPE_OFFS 16
+#define SDRAM_DTYPE_MASK (1 << SDRAM_DTYPE_OFFS)
+#define SDRAM_DTYPE_DDR1 (0 << SDRAM_DTYPE_OFFS)
+#define SDRAM_DTYPE_DDR2 (1 << SDRAM_DTYPE_OFFS)
+#define SDRAM_REGISTERED (1 << 17)
+#define SDRAM_PERR_OFFS 18
+#define SDRAM_PERR_MASK (1 << SDRAM_PERR_OFFS)
+#define SDRAM_PERR_NO_WRITE (0 << SDRAM_PERR_OFFS)
+#define SDRAM_PERR_WRITE (1 << SDRAM_PERR_OFFS)
+#define SDRAM_DCFG_OFFS 20
+#define SDRAM_DCFG_MASK (0x3 << SDRAM_DCFG_OFFS)
+#define SDRAM_DCFG_X16_DEV (1 << SDRAM_DCFG_OFFS)
+#define SDRAM_DCFG_X8_DEV (2 << SDRAM_DCFG_OFFS)
+#define SDRAM_SRMODE (1 << 24)
+#define SDRAM_SRCLK_OFFS 25
+#define SDRAM_SRCLK_MASK (1 << SDRAM_SRCLK_OFFS)
+#define SDRAM_SRCLK_KEPT (0 << SDRAM_SRCLK_OFFS)
+#define SDRAM_SRCLK_GATED (1 << SDRAM_SRCLK_OFFS)
+#define SDRAM_CATTH_OFFS 26
+#define SDRAM_CATTHR_EN (1 << SDRAM_CATTH_OFFS)
+
+
+/* dunit control register */
+#define SDRAM_DUNIT_CTRL_REG 0x1404
+#define SDRAM_CTRL_POS_OFFS 6
+#define SDRAM_CTRL_POS_FALL (0 << SDRAM_CTRL_POS_OFFS)
+#define SDRAM_CTRL_POS_RISE (1 << SDRAM_CTRL_POS_OFFS)
+#define SDRAM_CLK1DRV_OFFS 12
+#define SDRAM_CLK1DRV_MASK (1 << SDRAM_CLK1DRV_OFFS)
+#define SDRAM_CLK1DRV_HIGH_Z (0 << SDRAM_CLK1DRV_OFFS)
+#define SDRAM_CLK1DRV_NORMAL (1 << SDRAM_CLK1DRV_OFFS)
+#define SDRAM_LOCKEN_OFFS 18
+#define SDRAM_LOCKEN_MASK (1 << SDRAM_LOCKEN_OFFS)
+#define SDRAM_LOCKEN_DISABLE (0 << SDRAM_LOCKEN_OFFS)
+#define SDRAM_LOCKEN_ENABLE (1 << SDRAM_LOCKEN_OFFS)
+#define SDRAM_ST_BURST_DEL_OFFS 24
+#define SDRAM_ST_BURST_DEL_MAX 0xf
+#define SDRAM_ST_BURST_DEL_MASK (SDRAM_ST_BURST_DEL_MAX<<SDRAM_ST_BURST_DEL_OFFS)
+
+/* sdram timing control low register */
+#define SDRAM_TIMING_CTRL_LOW_REG 0x1408
+#define SDRAM_TRCD_OFFS 4
+#define SDRAM_TRCD_MASK (0xF << SDRAM_TRCD_OFFS)
+#define SDRAM_TRP_OFFS 8
+#define SDRAM_TRP_MASK (0xF << SDRAM_TRP_OFFS)
+#define SDRAM_TWR_OFFS 12
+#define SDRAM_TWR_MASK (0xF << SDRAM_TWR_OFFS)
+#define SDRAM_TWTR_OFFS 16
+#define SDRAM_TWTR_MASK (0xF << SDRAM_TWTR_OFFS)
+#define SDRAM_TRAS_OFFS 20
+#define SDRAM_TRAS_MASK (0xF << SDRAM_TRAS_OFFS)
+#define SDRAM_TRRD_OFFS 24
+#define SDRAM_TRRD_MASK (0xF << SDRAM_TRRD_OFFS)
+#define SDRAM_TRTP_OFFS 28
+#define SDRAM_TRTP_MASK (0xF << SDRAM_TRTP_OFFS)
+
+/* sdram timing control high register */
+#define SDRAM_TIMING_CTRL_HIGH_REG 0x140c
+#define SDRAM_TRFC_OFFS 0
+#define SDRAM_TRFC_MASK (0xF << SDRAM_TRFC_OFFS)
+#define SDRAM_TR2R_OFFS 4
+#define SDRAM_TR2R_MASK (0x3 << SDRAM_TR2R_OFFS)
+#define SDRAM_TR2W_W2R_OFFS 6
+#define SDRAM_TR2W_W2R_MASK (0x3 << SDRAM_TR2W_W2R_OFFS)
+#define SDRAM_TRFC_EXT_OFFS 8
+#define SDRAM_TRFC_EXT_MASK (0x1 << SDRAM_TRFC_EXT_OFFS)
+#define SDRAM_TW2W_OFFS 10
+#define SDRAM_TW2W_MASK (0x1 << SDRAM_TW2W_OFFS)
+
+/* address control register */
+#define SDRAM_ADDR_CTRL_REG 0x1410
+#define SDRAM_DSIZE_OFFS 4
+#define SDRAM_DSIZE_MASK (0x3 << SDRAM_DSIZE_OFFS)
+#define SDRAM_DSIZE_128Mb (0x0 << SDRAM_DSIZE_OFFS)
+#define SDRAM_DSIZE_256Mb (0x1 << SDRAM_DSIZE_OFFS)
+#define SDRAM_DSIZE_512Mb (0x2 << SDRAM_DSIZE_OFFS)
+
+/* SDRAM Open Pages Control registers */
+#define SDRAM_OPEN_PAGE_CTRL_REG 0x1414
+#define SDRAM_OPEN_PAGE_EN (0 << 0)
+#define SDRAM_OPEN_PAGE_DIS (1 << 0)
+
+/* sdram operation register */
+#define SDRAM_OPERATION_REG 0x1418
+#define SDRAM_CMD_OFFS 0
+#define SDRAM_CMD_MASK (0x7 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_NORMAL (0x0 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_PRECHARGE_ALL (0x1 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_REFRESH_ALL (0x2 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_REG_SET_CMD (0x3 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_EXT_MODE_SET (0x4 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_NOP (0x5 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_SLF_RFRSH (0x7 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_EMRS2_CMD (0x8 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_EMRS3_CMD (0x9 << SDRAM_CMD_OFFS)
+
+/* sdram mode register */
+#define SDRAM_MODE_REG 0x141c
+#define SDRAM_BURST_LEN_OFFS 0
+#define SDRAM_BURST_LEN_MASK (0x7 << SDRAM_BURST_LEN_OFFS)
+#define SDRAM_BURST_LEN_4 (0x2 << SDRAM_BURST_LEN_OFFS)
+#define SDRAM_CL_OFFS 4
+#define SDRAM_CL_MASK (0x7 << SDRAM_CL_OFFS)
+#define SDRAM_DDR1_CL_2 (0x2 << SDRAM_CL_OFFS)
+#define SDRAM_DDR1_CL_3 (0x3 << SDRAM_CL_OFFS)
+#define SDRAM_DDR1_CL_4 (0x4 << SDRAM_CL_OFFS)
+#define SDRAM_DDR1_CL_1_5 (0x5 << SDRAM_CL_OFFS)
+#define SDRAM_DDR1_CL_2_5 (0x6 << SDRAM_CL_OFFS)
+#define SDRAM_DDR2_CL_3 (0x3 << SDRAM_CL_OFFS)
+#define SDRAM_DDR2_CL_4 (0x4 << SDRAM_CL_OFFS)
+#define SDRAM_DDR2_CL_5 (0x5 << SDRAM_CL_OFFS)
+#define SDRAM_TM_OFFS 7
+#define SDRAM_TM_MASK (1 << SDRAM_TM_OFFS)
+#define SDRAM_TM_NORMAL (0 << SDRAM_TM_OFFS)
+#define SDRAM_TM_TEST_MODE (1 << SDRAM_TM_OFFS)
+#define SDRAM_DLL_OFFS 8
+#define SDRAM_DLL_MASK (1 << SDRAM_DLL_OFFS)
+#define SDRAM_DLL_NORMAL (0 << SDRAM_DLL_OFFS)
+#define SDRAM_DLL_RESET (1 << SDRAM_DLL_OFFS)
+#define SDRAM_WR_OFFS 11
+#define SDRAM_WR_MAX 7
+#define SDRAM_WR_MASK (SDRAM_WR_MAX << SDRAM_WR_OFFS)
+#define SDRAM_PD_OFFS 12
+#define SDRAM_PD_MASK (1 << SDRAM_PD_OFFS)
+#define SDRAM_PD_FAST_EXIT (0 << SDRAM_PD_OFFS)
+#define SDRAM_PD_SLOW_EXIT (1 << SDRAM_PD_OFFS)
+
+/* DDR SDRAM Extended Mode register (DSEMR) */
+#define SDRAM_EXTENDED_MODE_REG 0x1420
+#define DSEMR_DLL_ENABLE (1 << 0)
+#define DSEMR_DS_OFFS 1
+#define DSEMR_DS_MASK (1 << DSEMR_DS_OFFS)
+#define DSEMR_DS_NORMAL (0 << DSEMR_DS_OFFS)
+#define DSEMR_DS_REDUCED (1 << DSEMR_DS_OFFS)
+#define DSEMR_RTT0_OFFS 2
+#define DSEMR_RTT1_OFFS 6
+#define DSEMR_RTT_ODT_DISABLE		((0 << DSEMR_RTT0_OFFS) | (0 << DSEMR_RTT1_OFFS))
+#define DSEMR_RTT_ODT_75_OHM		((1 << DSEMR_RTT0_OFFS) | (0 << DSEMR_RTT1_OFFS))
+#define DSEMR_RTT_ODT_150_OHM		((0 << DSEMR_RTT0_OFFS) | (1 << DSEMR_RTT1_OFFS))
+#define DSEMR_OCD_OFFS 7
+#define DSEMR_OCD_MASK (0x7 << DSEMR_OCD_OFFS)
+#define DSEMR_OCD_EXIT_CALIB (0 << DSEMR_OCD_OFFS)
+#define DSEMR_OCD_DRIVE1 (1 << DSEMR_OCD_OFFS)
+#define DSEMR_OCD_DRIVE0 (2 << DSEMR_OCD_OFFS)
+#define DSEMR_OCD_ADJUST_MODE (4 << DSEMR_OCD_OFFS)
+#define DSEMR_OCD_CALIB_DEFAULT (7 << DSEMR_OCD_OFFS)
+#define DSEMR_DQS_OFFS 10
+#define DSEMR_DQS_MASK (1 << DSEMR_DQS_OFFS)
+#define DSEMR_DQS_DIFFERENTIAL (0 << DSEMR_DQS_OFFS)
+#define DSEMR_DQS_SINGLE_ENDED		(1 << DSEMR_DQS_OFFS)
+#define DSEMR_RDQS_ENABLE (1 << 11)
+#define DSEMR_QOFF_OUTPUT_BUFF_EN (1 << 12)
+
+/* DDR SDRAM Operation Control Register */
+#define SDRAM_OPERATION_CTRL_REG 0x142c
+
+/* Dunit FTDLL Configuration Register */
+#define SDRAM_FTDLL_CONFIG_REG 0x1484
+
+/* Pads Calibration register */
+#define SDRAM_ADDR_CTRL_PADS_CAL_REG 0x14c0
+#define SDRAM_DATA_PADS_CAL_REG 0x14c4
+#define SDRAM_DRVN_OFFS 0
+#define SDRAM_DRVN_MASK (0x3F << SDRAM_DRVN_OFFS)
+#define SDRAM_DRVP_OFFS 6
+#define SDRAM_DRVP_MASK (0x3F << SDRAM_DRVP_OFFS)
+#define SDRAM_PRE_DRIVER_STRENGTH_OFFS 12
+#define SDRAM_PRE_DRIVER_STRENGTH_MASK (3 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+#define SDRAM_TUNE_EN BIT16
+#define SDRAM_LOCK_OFFS 17
+#define SDRAM_LOCK_MAKS (0x1F << SDRAM_LOCK_OFFS)
+#define SDRAM_LOCKN_OFFS 17
+#define SDRAM_LOCKN_MAKS (0x3F << SDRAM_LOCKN_OFFS)
+#define SDRAM_LOCKP_OFFS 23
+#define SDRAM_LOCKP_MAKS (0x3F << SDRAM_LOCKP_OFFS)
+#define SDRAM_WR_EN (1 << 31)
+
+/* DDR2 SDRAM ODT Control (Low) Register (DSOCLR) */
+#define DDR2_SDRAM_ODT_CTRL_LOW_REG 0x1494
+#define DSOCLR_ODT_RD_OFFS(odtNum) (odtNum * 4)
+#define DSOCLR_ODT_RD_MASK(odtNum) (0xf << DSOCLR_ODT_RD_OFFS(odtNum))
+#define DSOCLR_ODT_RD(odtNum, bank) ((1 << bank) << DSOCLR_ODT_RD_OFFS(odtNum))
+#define DSOCLR_ODT_WR_OFFS(odtNum) (16 + (odtNum * 4))
+#define DSOCLR_ODT_WR_MASK(odtNum) (0xf << DSOCLR_ODT_WR_OFFS(odtNum))
+#define DSOCLR_ODT_WD(odtNum, bank) ((1 << bank) << DSOCLR_ODT_WR_OFFS(odtNum))
+
+/* DDR2 SDRAM ODT Control (High) Register (DSOCHR) */
+#define DDR2_SDRAM_ODT_CTRL_HIGH_REG 0x1498
+/* Optional control values to DSOCHR_ODT_EN macro */
+#define DDR2_ODT_CTRL_DUNIT 0
+#define DDR2_ODT_CTRL_NEVER 1
+#define DDR2_ODT_CTRL_ALWAYS 3
+#define DSOCHR_ODT_EN_OFFS(odtNum) (odtNum * 2)
+#define DSOCHR_ODT_EN_MASK(odtNum) (0x3 << DSOCHR_ODT_EN_OFFS(odtNum))
+#define DSOCHR_ODT_EN(odtNum, ctrl)	((1 << ctrl) << DSOCHR_ODT_EN_OFFS(odtNum))
+
+/* DDR2 Dunit ODT Control Register (DDOCR)*/
+#define DDR2_DUNIT_ODT_CONTROL_REG 0x149c
+#define DDOCR_ODT_RD_OFFS 0
+#define DDOCR_ODT_RD_MASK (0xf << DDOCR_ODT_RD_OFFS)
+#define DDOCR_ODT_RD(bank) ((1 << bank) << DDOCR_ODT_RD_OFFS)
+#define DDOCR_ODT_WR_OFFS 4
+#define DDOCR_ODT_WR_MASK (0xf << DDOCR_ODT_WR_OFFS)
+#define DDOCR_ODT_WR(bank) ((1 << bank) << DDOCR_ODT_WR_OFFS)
+#define DSOCR_ODT_EN_OFFS 8
+#define DSOCR_ODT_EN_MASK (0x3 << DSOCR_ODT_EN_OFFS)
+#define DSOCR_ODT_EN(ctrl) ((1 << ctrl) << DSOCR_ODT_EN_OFFS)
+#define DSOCR_ODT_SEL_OFFS 10
+#define DSOCR_ODT_SEL_MASK (0x3 << DSOCR_ODT_SEL_OFFS)
+
+/* DDR SDRAM Initialization Control Register (DSICR) */
+#define DDR_SDRAM_INIT_CTRL_REG 0x1480
+#define DSICR_INIT_EN (1 << 0)
+
+#endif /* __INCmvDramIfRegsh */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvCompVer.txt b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvCompVer.txt
new file mode 100644
index 000000000..38a926440
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvCompVer.txt
@@ -0,0 +1,4 @@
+Global HAL Version: FEROCEON_HAL_3_1_7
+Unit HAL Version: 3.1.4
+Description: This component includes an implementation of the unit HAL drivers
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.c
new file mode 100644
index 000000000..a214c95cd
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.c
@@ -0,0 +1,1855 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/* includes */
+#include "ddr2/mvDramIf.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+#include "ddr2/mvDramIfStaticInit.h"
+
+/* #define MV_DEBUG */
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+/* DRAM bank presence encoding */
+#define BANK_PRESENT_CS0 0x1
+#define BANK_PRESENT_CS0_CS1 0x3
+#define BANK_PRESENT_CS0_CS2 0x5
+#define BANK_PRESENT_CS0_CS1_CS2 0x7
+#define BANK_PRESENT_CS0_CS2_CS3 0xd
+#define BANK_PRESENT_CS0_CS2_CS3_CS4 0xf
+
+/* locals */
+#ifndef MV_STATIC_DRAM_ON_BOARD
+static void sdramDDr2OdtConfig(MV_DRAM_BANK_INFO *pBankInfo);
+static MV_U32 dunitCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 minCas, MV_U32 busClk, MV_STATUS TTmode );
+static MV_U32 dunitCtrlHighRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk);
+static MV_U32 sdramModeRegCalc(MV_U32 minCas);
+static MV_U32 sdramExtModeRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk);
+static MV_U32 sdramAddrCtrlRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_DRAM_BANK_INFO *pBankInfoDIMM1);
+static MV_U32 sdramConfigRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_DRAM_BANK_INFO *pBankInfo2, MV_U32 busClk);
+static MV_U32 minCasCalc(MV_DRAM_BANK_INFO *pBankInfo,MV_DRAM_BANK_INFO *pBankInfo2, MV_U32 busClk, MV_U32 forcedCl);
+static MV_U32 sdramTimeCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 minCas, MV_U32 busClk);
+static MV_U32 sdramTimeCtrlHighRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk);
+static MV_U32 sdramDdr2TimeLoRegCalc(MV_U32 minCas);
+static MV_U32 sdramDdr2TimeHiRegCalc(MV_U32 minCas);
+#endif
+MV_32 DRAM_CS_Order[MV_DRAM_MAX_CS] = {N_A
+
+#ifdef MV_INCLUDE_SDRAM_CS1
+ ,N_A
+#endif
+#ifdef MV_INCLUDE_SDRAM_CS2
+ ,N_A
+#endif
+#ifdef MV_INCLUDE_SDRAM_CS3
+ ,N_A
+#endif
+ };
+/* Get DRAM size of CS num */
+MV_U32 mvDramCsSizeGet(MV_U32 csNum)
+{
+ MV_DRAM_BANK_INFO bankInfo;
+ MV_U32 size, deviceW, dimmW;
+#ifdef MV78XX0
+ MV_U32 temp;
+#endif
+
+ if(MV_OK == mvDramBankInfoGet(csNum, &bankInfo))
+ {
+ if (0 == bankInfo.size)
+ return 0;
+
+	/* Note that the DIMM width might be different from the device DRAM width */
+#ifdef MV78XX0
+ temp = MV_REG_READ(SDRAM_CONFIG_REG);
+ deviceW = ((temp & SDRAM_DWIDTH_MASK) == SDRAM_DWIDTH_32BIT )? 32 : 64;
+#else
+ deviceW = 16 /* KW family */;
+#endif
+ dimmW = bankInfo.dataWidth - (bankInfo.dataWidth % 16);
+ size = ((bankInfo.size << 20) / (dimmW/deviceW));
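+	/*
+	 * Illustrative trace of the formula above (example values only): a bank
+	 * reporting size = 512 (MB) on a 64-bit DIMM with a 16-bit device width
+	 * gives dimmW/deviceW = 4, so size = (512 << 20) / 4 = 128 MB.
+	 */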
+ return size;
+ }
+ else
+ return 0;
+}
+/*******************************************************************************
+* mvDramIfDetect - Prepare DRAM interface configuration values.
+*
+* DESCRIPTION:
+* This function implements the full DRAM detection and timing
+* configuration for best system performance.
+*		Since this routine runs from a ROM device (Boot Flash), its stack
+*		resides in RAM, which might be the system DRAM. Changing DRAM
+*		configuration values while keeping vital data in DRAM is risky. That
+*		is why the function does not perform the configuration itself but
+*		prepares the values in predefined 32-bit registers (in this case IDMA
+*		registers are used) for another routine to apply the settings.
+*		The function reads the board DRAM SPD information for each DRAM
+*		chip select, then analyzes the SPD parameters of all DRAM banks in
+*		order to decide on a DRAM configuration compatible with all of them.
+*		The function also sets the CPU DRAM address decode registers.
+*		Note: This routine prepares values that will override the configuration
+*		done by mvDramBasicAsmInit().
+*
+* INPUT:
+*		forcedCl - Forced CAS Latency. If equal to zero, do not force.
+*		eccDisable - Force the ECC off.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_STATUS mvDramIfDetect(MV_U32 forcedCl, MV_BOOL eccDisable)
+{
+ MV_32 MV_DRAM_CS_order[MV_DRAM_MAX_CS] = {
+ SDRAM_CS0
+#ifdef MV_INCLUDE_SDRAM_CS1
+ ,SDRAM_CS1
+#endif
+#ifdef MV_INCLUDE_SDRAM_CS2
+ ,SDRAM_CS2
+#endif
+#ifdef MV_INCLUDE_SDRAM_CS3
+ ,SDRAM_CS3
+#endif
+ };
+ MV_U32 busClk, deviceW, dimmW;
+ MV_U32 numOfAllDevices = 0;
+ MV_STATUS TTMode;
+#ifndef MV_STATIC_DRAM_ON_BOARD
+ MV_DRAM_BANK_INFO bankInfo[MV_DRAM_MAX_CS];
+ MV_U32 size, base = 0, i, j, temp, busClkPs;
+ MV_U8 minCas;
+ MV_CPU_DEC_WIN dramDecWin;
+ dramDecWin.addrWin.baseHigh = 0;
+#endif
+
+ busClk = mvBoardSysClkGet();
+
+ if (0 == busClk)
+ {
+ mvOsPrintf("Dram: ERR. Can't detect system clock! \n");
+ return MV_ERROR;
+ }
+
+#ifndef MV_STATIC_DRAM_ON_BOARD
+
+ busClkPs = 1000000000 / (busClk / 1000); /* in ps units */
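+	/*
+	 * Example (illustrative only): a 200 MHz bus clock gives
+	 * 1000000000 / (200000000 / 1000) = 5000 ps per cycle.
+	 */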
+	/* We will use bank 0 as the representative of all the DRAM banks,	*/
+	/* since bank 0 must exist.						*/
+ for(i = 0; i < MV_DRAM_MAX_CS; i++)
+ {
+ /* if Bank exist */
+ if(MV_OK == mvDramBankInfoGet(i, &bankInfo[i]))
+ {
+ DB(mvOsPrintf("Dram: Find bank %d\n", i));
+ /* check it isn't SDRAM */
+ if(bankInfo[i].memoryType != MEM_TYPE_DDR2)
+ {
+ mvOsOutput("Dram: ERR. SDRAM type not supported !!!\n");
+ return MV_ERROR;
+ }
+
+			/* All banks must support the Mclk frequency */
+ if(bankInfo[i].minCycleTimeAtMaxCasLatPs > busClkPs)
+ {
+ mvOsOutput("Dram: ERR. Bank %d doesn't support memory clock!!!\n", i);
+ return MV_ERROR;
+ }
+
+			/* All banks must have the same Registered setting */
+ if(bankInfo[i].registeredAddrAndControlInputs !=
+ bankInfo[0].registeredAddrAndControlInputs)
+ {
+ mvOsOutput("Dram: ERR. different Registered settings !!!\n");
+ return MV_ERROR;
+ }
+
+ /* All banks must support same ECC mode */
+ if(bankInfo[i].errorCheckType !=
+ bankInfo[0].errorCheckType)
+ {
+ mvOsOutput("Dram: ERR. different ECC settings !!!\n");
+ return MV_ERROR;
+ }
+
+ }
+ else
+ {
+ if( i == 0 ) /* bank 0 doesn't exist */
+ {
+ mvOsOutput("Dram: ERR. Fail to detect bank 0 !!!\n");
+ return MV_ERROR;
+ }
+ else
+ {
+ DB(mvOsPrintf("Dram: Could not find bank %d\n", i));
+				bankInfo[i].size = 0;     /* Mark this bank as not present */
+ }
+ }
+ }
+
+#ifdef MV_INCLUDE_SDRAM_CS2
+ if (bankInfo[SDRAM_CS0].size < bankInfo[SDRAM_CS2].size)
+ {
+ MV_DRAM_CS_order[0] = SDRAM_CS2;
+ MV_DRAM_CS_order[1] = SDRAM_CS3;
+ MV_DRAM_CS_order[2] = SDRAM_CS0;
+ MV_DRAM_CS_order[3] = SDRAM_CS1;
+ DRAM_CS_Order[0] = SDRAM_CS2;
+ DRAM_CS_Order[1] = SDRAM_CS3;
+ DRAM_CS_Order[2] = SDRAM_CS0;
+ DRAM_CS_Order[3] = SDRAM_CS1;
+
+ }
+ else
+#endif
+ {
+ MV_DRAM_CS_order[0] = SDRAM_CS0;
+ MV_DRAM_CS_order[1] = SDRAM_CS1;
+ DRAM_CS_Order[0] = SDRAM_CS0;
+ DRAM_CS_Order[1] = SDRAM_CS1;
+#ifdef MV_INCLUDE_SDRAM_CS2
+ MV_DRAM_CS_order[2] = SDRAM_CS2;
+ MV_DRAM_CS_order[3] = SDRAM_CS3;
+ DRAM_CS_Order[2] = SDRAM_CS2;
+ DRAM_CS_Order[3] = SDRAM_CS3;
+#endif
+ }
+
+ for(j = 0; j < MV_DRAM_MAX_CS; j++)
+ {
+ i = MV_DRAM_CS_order[j];
+
+ if (0 == bankInfo[i].size)
+ continue;
+
+ /* Init the CPU window decode */
+		/* Note that the DIMM width might be different from the device DRAM width */
+#ifdef MV78XX0
+ temp = MV_REG_READ(SDRAM_CONFIG_REG);
+ deviceW = ((temp & SDRAM_DWIDTH_MASK) == SDRAM_DWIDTH_32BIT )? 32 : 64;
+#else
+ deviceW = 16 /* KW family */;
+#endif
+ dimmW = bankInfo[0].dataWidth - (bankInfo[0].dataWidth % 16);
+ size = ((bankInfo[i].size << 20) / (dimmW/deviceW));
+
+		/* We cannot change DRAM window settings while executing	*/
+		/* code from it. That is why we skip DRAM CS[0], leaving	*/
+		/* it to the ROM configuration routine.				*/
+
+ numOfAllDevices += bankInfo[i].numberOfDevices;
+ if (i == MV_DRAM_CS_order[0])
+ {
+ MV_U32 sizeToReg;
+ /* Translate the given window size to register format */
+ sizeToReg = ctrlSizeToReg(size, SCSR_SIZE_ALIGNMENT);
+ /* Size parameter validity check. */
+ if (-1 == sizeToReg)
+ {
+ mvOsOutput("DRAM: mvCtrlAddrDecToReg: ERR. Win %d size invalid.\n"
+ ,i);
+ return MV_BAD_PARAM;
+ }
+
+ DB(mvOsPrintf("Dram: Bank 0 Size - %x\n",sizeToReg);)
+ sizeToReg = (sizeToReg << SCSR_SIZE_OFFS);
+ sizeToReg |= SCSR_WIN_EN;
+ MV_REG_WRITE(DRAM_BUF_REG0, sizeToReg);
+ }
+ else
+ {
+ dramDecWin.addrWin.baseLow = base;
+ dramDecWin.addrWin.size = size;
+ dramDecWin.enable = MV_TRUE;
+ DB(mvOsPrintf("Dram: Enable window %d base 0x%x, size=0x%x\n",i, base, size));
+
+			/* Set the window only if its base is below 3 GByte */
+ if (base < 0xC0000000)
+ {
+ DB(mvOsPrintf("Dram: Enable window %d base 0x%x, size=0x%x\n",i, base, size));
+ if (MV_OK != mvCpuIfTargetWinSet(i, &dramDecWin))
+ {
+ mvOsPrintf("Dram: ERR. Fail to set bank %d!!!\n", SDRAM_CS0 + i);
+ return MV_ERROR;
+ }
+ }
+ }
+
+ base += size;
+
+ /* update the suportedCasLatencies mask */
+ bankInfo[0].suportedCasLatencies &= bankInfo[i].suportedCasLatencies;
+ }
+
+ /* calculate minimum CAS */
+ minCas = minCasCalc(&bankInfo[0], &bankInfo[2], busClk, forcedCl);
+ if (0 == minCas)
+ {
+ mvOsOutput("Dram: Warn: Could not find CAS compatible to SysClk %dMhz\n",
+ (busClk / 1000000));
+
+ minCas = DDR2_CL_4; /* Continue with this CAS */
+ mvOsOutput("Set default CAS latency 4\n");
+ }
+
+ /* calc SDRAM_CONFIG_REG and save it to temp register */
+ temp = sdramConfigRegCalc(&bankInfo[0],&bankInfo[2], busClk);
+ if(-1 == temp)
+ {
+ mvOsOutput("Dram: ERR. sdramConfigRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+
+	/* check if the user disabled ECC */
+ if(eccDisable)
+ {
+ /* turn off ECC*/
+ temp &= ~BIT18;
+ }
+ DB(mvOsPrintf("Dram: sdramConfigRegCalc - %x\n",temp);)
+ MV_REG_WRITE(DRAM_BUF_REG1, temp);
+
+ /* calc SDRAM_MODE_REG and save it to temp register */
+ temp = sdramModeRegCalc(minCas);
+ if(-1 == temp)
+ {
+ mvOsOutput("Dram: ERR. sdramModeRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Dram: sdramModeRegCalc - %x\n",temp);)
+ MV_REG_WRITE(DRAM_BUF_REG2, temp);
+
+ /* calc SDRAM_EXTENDED_MODE_REG and save it to temp register */
+ temp = sdramExtModeRegCalc(&bankInfo[0], busClk);
+ if(-1 == temp)
+ {
+ mvOsOutput("Dram: ERR. sdramExtModeRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Dram: sdramExtModeRegCalc - %x\n",temp);)
+ MV_REG_WRITE(DRAM_BUF_REG10, temp);
+
+ /* calc D_UNIT_CONTROL_LOW and save it to temp register */
+ TTMode = MV_FALSE;
+ DB(mvOsPrintf("Dram: numOfAllDevices = %x\n",numOfAllDevices);)
+ if( (numOfAllDevices > 9) && (bankInfo[0].registeredAddrAndControlInputs == MV_FALSE) )
+ {
+ if ( ( (numOfAllDevices > 9) && (busClk > MV_BOARD_SYSCLK_200MHZ) ) ||
+ (numOfAllDevices > 18) )
+ {
+ mvOsOutput("Enable 2T ");
+ TTMode = MV_TRUE;
+ }
+ }
+
+ temp = dunitCtrlLowRegCalc(&bankInfo[0], minCas, busClk, TTMode );
+ if(-1 == temp)
+ {
+ mvOsOutput("Dram: ERR. dunitCtrlLowRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Dram: dunitCtrlLowRegCalc - %x\n",temp);)
+ MV_REG_WRITE(DRAM_BUF_REG3, temp);
+
+ /* calc D_UNIT_CONTROL_HIGH and save it to temp register */
+ temp = dunitCtrlHighRegCalc(&bankInfo[0], busClk);
+ if(-1 == temp)
+ {
+ mvOsOutput("Dram: ERR. dunitCtrlHighRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Dram: dunitCtrlHighRegCalc - %x\n",temp);)
+	/* check if the user disabled ECC */
+ if(eccDisable)
+ {
+ /* turn off sample stage if no ecc */
+		temp &= ~SDRAM__D2P_EN;
+ }
+ MV_REG_WRITE(DRAM_BUF_REG13, temp);
+
+ /* calc SDRAM_ADDR_CTRL_REG and save it to temp register */
+ temp = sdramAddrCtrlRegCalc(&bankInfo[0],&bankInfo[2]);
+ if(-1 == temp)
+ {
+ mvOsOutput("Dram: ERR. sdramAddrCtrlRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Dram: sdramAddrCtrlRegCalc - %x\n",temp);)
+ MV_REG_WRITE(DRAM_BUF_REG4, temp);
+
+ /* calc SDRAM_TIMING_CTRL_LOW_REG and save it to temp register */
+ temp = sdramTimeCtrlLowRegCalc(&bankInfo[0], minCas, busClk);
+ if(-1 == temp)
+ {
+ mvOsOutput("Dram: ERR. sdramTimeCtrlLowRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Dram: sdramTimeCtrlLowRegCalc - %x\n",temp);)
+ MV_REG_WRITE(DRAM_BUF_REG5, temp);
+
+ /* calc SDRAM_TIMING_CTRL_HIGH_REG and save it to temp register */
+ temp = sdramTimeCtrlHighRegCalc(&bankInfo[0], busClk);
+ if(-1 == temp)
+ {
+ mvOsOutput("Dram: ERR. sdramTimeCtrlHighRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Dram: sdramTimeCtrlHighRegCalc - %x\n",temp);)
+ MV_REG_WRITE(DRAM_BUF_REG6, temp);
+
+ sdramDDr2OdtConfig(bankInfo);
+
+ /* calc DDR2_SDRAM_TIMING_LOW_REG and save it to temp register */
+ temp = sdramDdr2TimeLoRegCalc(minCas);
+ if(-1 == temp)
+ {
+ mvOsOutput("Dram: ERR. sdramDdr2TimeLoRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Dram: sdramDdr2TimeLoRegCalc - %x\n",temp);)
+ MV_REG_WRITE(DRAM_BUF_REG11, temp);
+
+ /* calc DDR2_SDRAM_TIMING_HIGH_REG and save it to temp register */
+ temp = sdramDdr2TimeHiRegCalc(minCas);
+ if(-1 == temp)
+ {
+ mvOsOutput("Dram: ERR. sdramDdr2TimeHiRegCalc failed !!!\n");
+ return MV_ERROR;
+ }
+ DB(mvOsPrintf("Dram: sdramDdr2TimeHiRegCalc - %x\n",temp);)
+ MV_REG_WRITE(DRAM_BUF_REG12, temp);
+#endif
+
+	/* Note that DDR SDRAM Address/Control and Data pad calibration     */
+	/* settings are done in mvSdramIfConfig.s			     */
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvDramIfBankBaseGet - Get DRAM interface bank base.
+*
+* DESCRIPTION:
+* This function returns the 32 bit base address of a given DRAM bank.
+*
+* INPUT:
+* bankNum - Bank number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*	DRAM bank base address. If the bank is disabled or the parameter is
+*	invalid, the function returns -1.
+*
+*******************************************************************************/
+MV_U32 mvDramIfBankBaseGet(MV_U32 bankNum)
+{
+ DB(mvOsPrintf("Dram: mvDramIfBankBaseGet Bank %d base addr is %x \n",
+ bankNum, mvCpuIfTargetWinBaseLowGet(SDRAM_CS0 + bankNum)));
+ return mvCpuIfTargetWinBaseLowGet(SDRAM_CS0 + bankNum);
+}
+
+/*******************************************************************************
+* mvDramIfBankSizeGet - Get DRAM interface bank size.
+*
+* DESCRIPTION:
+* This function returns the size of a given DRAM bank.
+*
+* INPUT:
+* bankNum - Bank number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*	DRAM bank size. If the bank is disabled the function returns '0'. If
+*	the parameter is invalid, the function returns -1.
+*
+*******************************************************************************/
+MV_U32 mvDramIfBankSizeGet(MV_U32 bankNum)
+{
+ DB(mvOsPrintf("Dram: mvDramIfBankSizeGet Bank %d size is %x \n",
+ bankNum, mvCpuIfTargetWinSizeGet(SDRAM_CS0 + bankNum)));
+ return mvCpuIfTargetWinSizeGet(SDRAM_CS0 + bankNum);
+}
+
+
+/*******************************************************************************
+* mvDramIfSizeGet - Get DRAM interface total size.
+*
+* DESCRIPTION:
+*	This function gets the DRAM total size.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*	DRAM total size. If a parameter is invalid, the function
+*	returns -1.
+*
+*******************************************************************************/
+MV_U32 mvDramIfSizeGet(MV_VOID)
+{
+ MV_U32 size = 0, i;
+
+ for(i = 0; i < MV_DRAM_MAX_CS; i++)
+ size += mvDramIfBankSizeGet(i);
+
+ DB(mvOsPrintf("Dram: mvDramIfSizeGet size is %x \n",size));
+ return size;
+}
+
+/*******************************************************************************
+* mvDramIfSingleBitErrThresholdSet - Set single bit ECC threshold.
+*
+* DESCRIPTION:
+* The ECC single bit error threshold is the number of single bit
+* errors to happen before the Dunit generates an interrupt.
+*	This function sets the single bit ECC threshold.
+*
+* INPUT:
+* threshold - threshold.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*	MV_BAD_PARAM if the threshold is too big, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvDramIfSingleBitErrThresholdSet(MV_U32 threshold)
+{
+ MV_U32 regVal;
+
+ if (threshold > SECR_THRECC_MAX)
+ {
+ return MV_BAD_PARAM;
+ }
+
+ regVal = MV_REG_READ(SDRAM_ECC_CONTROL_REG);
+ regVal &= ~SECR_THRECC_MASK;
+ regVal |= ((SECR_THRECC(threshold) & SECR_THRECC_MASK));
+ MV_REG_WRITE(SDRAM_ECC_CONTROL_REG, regVal);
+
+ return MV_OK;
+}
+
+#ifndef MV_STATIC_DRAM_ON_BOARD
+/*******************************************************************************
+* minCasCalc - Calculate the Minimum CAS latency which can be used.
+*
+* DESCRIPTION:
+*	Calculate the minimum CAS latency that can be used, based on the DRAM
+*	parameters and the SDRAM bus clock frequency.
+*
+* INPUT:
+* busClk - the DRAM bus Clock.
+* pBankInfo - bank info parameters.
+* forcedCl - Forced CAS Latency multiplied by 10. If equal to zero, do not force.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* The minimum CAS Latency. The function returns 0 if max CAS latency
+*	supported by the banks is incompatible with the system bus clock frequency.
+*
+*******************************************************************************/
+
+static MV_U32 minCasCalc(MV_DRAM_BANK_INFO *pBankInfo,MV_DRAM_BANK_INFO *pBankInfo2, MV_U32 busClk, MV_U32 forcedCl)
+{
+ MV_U32 count = 1, j;
+ MV_U32 busClkPs = 1000000000 / (busClk / 1000); /* in ps units */
+ MV_U32 startBit, stopBit;
+ MV_U32 minCas0 = 0, minCas2 = 0;
+
+
+ /* DDR 2:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ CAS = * TBD | TBD | 5 | 4 | 3 | 2 | TBD | TBD *
+ Disco VI= * TBD | TBD | 5 | 4 | 3 | TBD | TBD | TBD *
+ Disco Duo= * TBD | 6 | 5 | 4 | 3 | TBD | TBD | TBD *
+ *********************************************************/
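+	/*
+	 * Example (illustrative only): per the DDR2 row above, a supported-CAS
+	 * mask of 0x18 (bits 3 and 4 set) means CL 3 and CL 4 are supported;
+	 * forcing CL 4 below collapses the mask to 0x10 (bit 4 only).
+	 */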
+
+
+	/* If we are asked to use a forced CL, change the supported CL mask to forcedCl only */
+ if (forcedCl)
+ {
+ mvOsOutput("DRAM: Using forced CL %d.%d\n", (forcedCl / 10), (forcedCl % 10));
+
+ if (forcedCl == 30)
+ pBankInfo->suportedCasLatencies = 0x08;
+ else if (forcedCl == 40)
+ pBankInfo->suportedCasLatencies = 0x10;
+ else if (forcedCl == 50)
+ pBankInfo->suportedCasLatencies = 0x20;
+ else if (forcedCl == 60)
+ pBankInfo->suportedCasLatencies = 0x40;
+ else
+ {
+ mvOsPrintf("Forced CL %d.%d not supported. Set default CL 4\n",
+ (forcedCl / 10), (forcedCl % 10));
+ pBankInfo->suportedCasLatencies = 0x10;
+ }
+
+ return pBankInfo->suportedCasLatencies;
+ }
+
+	/* Go over the supported CAS mask from max CAS down and check that the	*/
+	/* SysClk meets its timing requirements.				*/
+
+ DB(mvOsPrintf("Dram: minCasCalc supported mask = %x busClkPs = %x \n",
+ pBankInfo->suportedCasLatencies,busClkPs ));
+ count = 1;
+ for(j = 7; j > 0; j--)
+ {
+ if((pBankInfo->suportedCasLatencies >> j) & BIT0 )
+ {
+ /* Reset the bits for CL incompatible for the sysClk */
+ switch (count)
+ {
+ case 1:
+ if (pBankInfo->minCycleTimeAtMaxCasLatPs > busClkPs)
+ pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+ count++;
+ break;
+ case 2:
+ if (pBankInfo->minCycleTimeAtMaxCasLatMinus1Ps > busClkPs)
+ pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+ count++;
+ break;
+ case 3:
+ if (pBankInfo->minCycleTimeAtMaxCasLatMinus2Ps > busClkPs)
+ pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+ count++;
+ break;
+ default:
+ pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+ break;
+ }
+ }
+ }
+
+ DB(mvOsPrintf("Dram: minCasCalc support = %x (after SysCC calc)\n",
+ pBankInfo->suportedCasLatencies ));
+
+ count = 1;
+ DB(mvOsPrintf("Dram2: minCasCalc supported mask = %x busClkPs = %x \n",
+ pBankInfo2->suportedCasLatencies,busClkPs ));
+ for(j = 7; j > 0; j--)
+ {
+ if((pBankInfo2->suportedCasLatencies >> j) & BIT0 )
+ {
+ /* Reset the bits for CL incompatible for the sysClk */
+ switch (count)
+ {
+ case 1:
+ if (pBankInfo2->minCycleTimeAtMaxCasLatPs > busClkPs)
+ pBankInfo2->suportedCasLatencies &= ~(BIT0 << j);
+ count++;
+ break;
+ case 2:
+ if (pBankInfo2->minCycleTimeAtMaxCasLatMinus1Ps > busClkPs)
+ pBankInfo2->suportedCasLatencies &= ~(BIT0 << j);
+ count++;
+ break;
+ case 3:
+ if (pBankInfo2->minCycleTimeAtMaxCasLatMinus2Ps > busClkPs)
+ pBankInfo2->suportedCasLatencies &= ~(BIT0 << j);
+ count++;
+ break;
+ default:
+ pBankInfo2->suportedCasLatencies &= ~(BIT0 << j);
+ break;
+ }
+ }
+ }
+
+ DB(mvOsPrintf("Dram2: minCasCalc support = %x (after SysCC calc)\n",
+ pBankInfo2->suportedCasLatencies ));
+
+ startBit = 3; /* DDR2 support CL start with CL3 (bit 3) */
+ stopBit = 6; /* DDR2 support CL stops with CL6 (bit 6) */
+
+ for(j = startBit; j <= stopBit ; j++)
+ {
+ if((pBankInfo->suportedCasLatencies >> j) & BIT0 )
+ {
+ DB(mvOsPrintf("Dram: minCasCalc choose CAS %x \n",(BIT0 << j)));
+ minCas0 = (BIT0 << j);
+ break;
+ }
+ }
+
+ for(j = startBit; j <= stopBit ; j++)
+ {
+ if((pBankInfo2->suportedCasLatencies >> j) & BIT0 )
+ {
+ DB(mvOsPrintf("Dram: minCasCalc choose CAS %x \n",(BIT0 << j)));
+ minCas2 = (BIT0 << j);
+ break;
+ }
+ }
+
+ if (minCas2 > minCas0)
+ return minCas2;
+ else
+ return minCas0;
+
+ return 0;
+}
+
+/*******************************************************************************
+* sdramConfigRegCalc - Calculate sdram config register
+*
+* DESCRIPTION: Calculate sdram config register optimized value based
+* on the bank info parameters.
+*
+* INPUT:
+* busClk - the DRAM bus Clock.
+* pBankInfo - sdram bank parameters
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* sdram config reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramConfigRegCalc(MV_DRAM_BANK_INFO *pBankInfo,MV_DRAM_BANK_INFO *pBankInfo2, MV_U32 busClk)
+{
+ MV_U32 sdramConfig = 0;
+ MV_U32 refreshPeriod;
+
+ busClk /= 1000000; /* we work with busClk in MHz */
+
+ sdramConfig = MV_REG_READ(SDRAM_CONFIG_REG);
+
+	/* figure out the memory refresh interval */
+ switch (pBankInfo->refreshInterval & 0xf)
+ {
+ case 0x0: /* refresh period is 15.625 usec */
+ refreshPeriod = 15625;
+ break;
+ case 0x1: /* refresh period is 3.9 usec */
+ refreshPeriod = 3900;
+ break;
+ case 0x2: /* refresh period is 7.8 usec */
+ refreshPeriod = 7800;
+ break;
+ case 0x3: /* refresh period is 31.3 usec */
+ refreshPeriod = 31300;
+ break;
+ case 0x4: /* refresh period is 62.5 usec */
+ refreshPeriod = 62500;
+ break;
+ case 0x5: /* refresh period is 125 usec */
+ refreshPeriod = 125000;
+ break;
+ default: /* refresh period undefined */
+ mvOsPrintf("Dram: ERR. DRAM refresh period is unknown!\n");
+ return -1;
+ }
+
+ /* Now the refreshPeriod is in register format value */
+ refreshPeriod = (busClk * refreshPeriod) / 1000;
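+	/*
+	 * Example (illustrative only): a 7.8 usec refresh period at a 200 MHz
+	 * bus clock gives (200 * 7800) / 1000 = 1560 cycles, well within
+	 * SDRAM_REFRESH_MAX (0x3000).
+	 */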
+
+ DB(mvOsPrintf("Dram: sdramConfigRegCalc calculated refresh interval %0x\n",
+ refreshPeriod));
+
+ /* make sure the refresh value is only 14 bits */
+ if(refreshPeriod > SDRAM_REFRESH_MAX)
+ {
+ refreshPeriod = SDRAM_REFRESH_MAX;
+ DB(mvOsPrintf("Dram: sdramConfigRegCalc adjusted refresh interval %0x\n",
+ refreshPeriod));
+ }
+
+ /* Clear the refresh field */
+ sdramConfig &= ~SDRAM_REFRESH_MASK;
+
+ /* Set new value to refresh field */
+ sdramConfig |= (refreshPeriod & SDRAM_REFRESH_MASK);
+
+ /* registered DRAM ? */
+ if ( pBankInfo->registeredAddrAndControlInputs )
+ {
+ /* it's registered DRAM, so set the reg. DRAM bit */
+ sdramConfig |= SDRAM_REGISTERED;
+ DB(mvOsPrintf("DRAM Attribute: Registered address and control inputs.\n");)
+ }
+
+ /* ECC and IERR support */
+ sdramConfig &= ~SDRAM_ECC_MASK; /* Clear ECC field */
+ sdramConfig &= ~SDRAM_IERR_MASK; /* Clear IErr field */
+
+ if ( pBankInfo->errorCheckType )
+ {
+ sdramConfig |= SDRAM_ECC_EN;
+ sdramConfig |= SDRAM_IERR_REPORTE;
+ DB(mvOsPrintf("Dram: mvDramIfDetect Enabling ECC\n"));
+ }
+ else
+ {
+ sdramConfig |= SDRAM_ECC_DIS;
+ sdramConfig |= SDRAM_IERR_IGNORE;
+ DB(mvOsPrintf("Dram: mvDramIfDetect Disabling ECC!\n"));
+ }
+ /* Set static default settings */
+ sdramConfig |= SDRAM_CONFIG_DV;
+
+ DB(mvOsPrintf("Dram: sdramConfigRegCalc set sdramConfig to 0x%x\n",
+ sdramConfig));
+
+ return sdramConfig;
+}
+
+/*******************************************************************************
+* sdramModeRegCalc - Calculate sdram mode register
+*
+* DESCRIPTION: Calculate sdram mode register optimized value based
+* on the bank info parameters and the minCas.
+*
+* INPUT:
+* minCas - minimum CAS supported.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* sdram mode reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramModeRegCalc(MV_U32 minCas)
+{
+ MV_U32 sdramMode;
+
+ sdramMode = MV_REG_READ(SDRAM_MODE_REG);
+
+ /* Clear CAS Latency field */
+ sdramMode &= ~SDRAM_CL_MASK;
+
+ DB(mvOsPrintf("DRAM CAS Latency ");)
+
+ switch (minCas)
+ {
+ case DDR2_CL_3:
+ sdramMode |= SDRAM_DDR2_CL_3;
+ DB(mvOsPrintf("3.\n");)
+ break;
+ case DDR2_CL_4:
+ sdramMode |= SDRAM_DDR2_CL_4;
+ DB(mvOsPrintf("4.\n");)
+ break;
+ case DDR2_CL_5:
+ sdramMode |= SDRAM_DDR2_CL_5;
+ DB(mvOsPrintf("5.\n");)
+ break;
+ case DDR2_CL_6:
+ sdramMode |= SDRAM_DDR2_CL_6;
+ DB(mvOsPrintf("6.\n");)
+ break;
+ default:
+ mvOsOutput("\nsdramModeRegCalc ERROR: Max. CL out of range\n");
+ return -1;
+ }
+
+ DB(mvOsPrintf("\nsdramModeRegCalc register 0x%x\n", sdramMode ));
+
+ return sdramMode;
+}
+/*******************************************************************************
+* sdramExtModeRegCalc - Calculate sdram Extended mode register
+*
+* DESCRIPTION:
+* Return sdram Extended mode register value based
+* on the bank info parameters and bank presence.
+*
+* INPUT:
+* pBankInfo - sdram bank parameters
+* busClk - DRAM frequency
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* sdram Extended mode reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramExtModeRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk)
+{
+ MV_U32 populateBanks = 0;
+ int bankNum;
+
+ /* Represent the populate banks in binary form */
+ for(bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+ {
+ if (0 != pBankInfo[bankNum].size)
+ {
+ populateBanks |= (1 << bankNum);
+ }
+ }
+
+ switch(populateBanks)
+ {
+ case(BANK_PRESENT_CS0):
+ case(BANK_PRESENT_CS0_CS1):
+ return DDR_SDRAM_EXT_MODE_CS0_CS1_DV;
+
+ case(BANK_PRESENT_CS0_CS2):
+ case(BANK_PRESENT_CS0_CS1_CS2):
+ case(BANK_PRESENT_CS0_CS2_CS3):
+ case(BANK_PRESENT_CS0_CS2_CS3_CS4):
+ if (busClk >= MV_BOARD_SYSCLK_267MHZ)
+ return DDR_SDRAM_EXT_MODE_FAST_CS0_CS1_CS2_CS3_DV;
+ else
+ return DDR_SDRAM_EXT_MODE_CS0_CS1_CS2_CS3_DV;
+
+ default:
+ mvOsOutput("sdramExtModeRegCalc: Invalid DRAM bank presence\n");
+ return -1;
+ }
+ return 0;
+}
+
+/*******************************************************************************
+* dunitCtrlLowRegCalc - Calculate sdram dunit control low register
+*
+* DESCRIPTION: Calculate sdram dunit control low register optimized value based
+* on the bank info parameters and the minCas.
+*
+* INPUT:
+* pBankInfo - sdram bank parameters
+* minCas - minimum CAS supported.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* sdram dunit control low reg value.
+*
+*******************************************************************************/
+static MV_U32 dunitCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 minCas, MV_U32 busClk, MV_STATUS TTMode)
+{
+ MV_U32 dunitCtrlLow, cl;
+ MV_U32 sbOutR[4]={3,5,7,9} ;
+ MV_U32 sbOutU[4]={1,3,5,7} ;
+
+ dunitCtrlLow = MV_REG_READ(SDRAM_DUNIT_CTRL_REG);
+
+ DB(mvOsPrintf("Dram: dunitCtrlLowRegCalc\n"));
+
+ /* Clear StBurstOutDel field */
+ dunitCtrlLow &= ~SDRAM_SB_OUT_MASK;
+
+ /* Clear StBurstInDel field */
+ dunitCtrlLow &= ~SDRAM_SB_IN_MASK;
+
+ /* Clear CtrlPos field */
+ dunitCtrlLow &= ~SDRAM_CTRL_POS_MASK;
+
+ /* Clear 2T field */
+ dunitCtrlLow &= ~SDRAM_2T_MASK;
+ if (TTMode == MV_TRUE)
+ {
+ dunitCtrlLow |= SDRAM_2T_MODE;
+ }
+
+	/* For proper sampling of read data, set the Dunit Control register's	*/
+	/* stBurstInDel bits [27:24]:						*/
+	/* 200MHz - 267MHz, unbuffered = CL + 1 */
+	/* 200MHz - 267MHz, registered = CL + 2 */
+	/* > 267MHz, unbuffered        = CL + 2 */
+	/* > 267MHz, registered        = CL + 3 */
+
+	/* For proper sampling of read data, set the Dunit Control register's	*/
+	/* stBurstOutDel bits [23:20]:						*/
+ /********-********-********-********-
+ * CL=3 | CL=4 | CL=5 | CL=6 |
+ *********-********-********-********-
+ Not Reg. * 0001 | 0011 | 0101 | 0111 |
+ *********-********-********-********-
+ Registered * 0011 | 0101 | 0111 | 1001 |
+ *********-********-********-********/
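+	/*
+	 * Example (illustrative only): the sbOutR/sbOutU tables above encode this
+	 * matrix. For CL = 4, a registered DIMM uses sbOutR[4 - 3] = 5 (0101) and
+	 * an unbuffered DIMM uses sbOutU[4 - 3] = 3 (0011), matching the table.
+	 */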
+
+ /* Set Dunit Control low default value */
+ dunitCtrlLow |= SDRAM_DUNIT_CTRL_LOW_DDR2_DV;
+
+ switch (minCas)
+ {
+ case DDR2_CL_3: cl = 3; break;
+ case DDR2_CL_4: cl = 4; break;
+ case DDR2_CL_5: cl = 5; break;
+ case DDR2_CL_6: cl = 6; break;
+ default:
+ mvOsOutput("Dram: dunitCtrlLowRegCalc Max. CL out of range %d\n", minCas);
+ return -1;
+ }
+
+	/* registered DDR SDRAM? */
+ if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+ {
+ dunitCtrlLow |= (sbOutR[cl-3]) << SDRAM_SB_OUT_DEL_OFFS;
+ }
+ else
+ {
+ dunitCtrlLow |= (sbOutU[cl-3]) << SDRAM_SB_OUT_DEL_OFFS;
+ }
+
+ DB(mvOsPrintf("\n\ndunitCtrlLowRegCalc: CL = %d, frequencies=%d\n", cl, busClk));
+
+ if (busClk <= MV_BOARD_SYSCLK_267MHZ)
+ {
+ if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+ cl = cl + 2;
+ else
+ cl = cl + 1;
+ }
+ else
+ {
+ if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+ cl = cl + 3;
+ else
+ cl = cl + 2;
+ }
+
+ DB(mvOsPrintf("dunitCtrlLowRegCalc: SDRAM_SB_IN_DEL_OFFS = %d \n", cl));
+ dunitCtrlLow |= cl << SDRAM_SB_IN_DEL_OFFS;
+
+ DB(mvOsPrintf("Dram: Reg dunit control low = %x\n", dunitCtrlLow ));
+
+ return dunitCtrlLow;
+}
+
+/*******************************************************************************
+* dunitCtrlHighRegCalc - Calculate sdram dunit control high register
+*
+* DESCRIPTION: Calculate sdram dunit control high register optimized value based
+* on the bus clock.
+*
+* INPUT:
+*	pBankInfo - sdram bank parameters
+*	busClk - DRAM frequency.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* sdram dunit control high reg value.
+*
+*******************************************************************************/
+static MV_U32 dunitCtrlHighRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk)
+{
+ MV_U32 dunitCtrlHigh;
+ dunitCtrlHigh = MV_REG_READ(SDRAM_DUNIT_CTRL_HI_REG);
+ if(busClk > MV_BOARD_SYSCLK_300MHZ)
+ dunitCtrlHigh |= SDRAM__P2D_EN;
+ else
+ dunitCtrlHigh &= ~SDRAM__P2D_EN;
+
+ if(busClk > MV_BOARD_SYSCLK_267MHZ)
+ dunitCtrlHigh |= (SDRAM__WR_MESH_DELAY_EN | SDRAM__PUP_ZERO_SKEW_EN | SDRAM__ADD_HALF_FCC_EN);
+
+	/* If ECC is supported, turn on D2P sampling */
+ dunitCtrlHigh &= ~SDRAM__D2P_EN; /* Clear D2P bit */
+ if (( pBankInfo->errorCheckType ) && (busClk > MV_BOARD_SYSCLK_267MHZ))
+ dunitCtrlHigh |= SDRAM__D2P_EN;
+
+ return dunitCtrlHigh;
+}
+
+/*******************************************************************************
+* sdramAddrCtrlRegCalc - Calculate sdram address control register
+*
+* DESCRIPTION: Calculate sdram address control register optimized value based
+* on the bank info parameters and the minCas.
+*
+* INPUT:
+* pBankInfo - sdram bank parameters
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* sdram address control reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramAddrCtrlRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_DRAM_BANK_INFO *pBankInfoDIMM1)
+{
+ MV_U32 addrCtrl = 0;
+
+ if (pBankInfoDIMM1->size)
+ {
+ switch (pBankInfoDIMM1->sdramWidth)
+ {
+ case 4: /* memory is x4 */
+ mvOsOutput("sdramAddrCtrlRegCalc: Error - x4 not supported!\n");
+ return -1;
+ break;
+ case 8: /* memory is x8 */
+ addrCtrl |= SDRAM_ADDRSEL_X8(2) | SDRAM_ADDRSEL_X8(3);
+ DB(mvOsPrintf("sdramAddrCtrlRegCalc: sdramAddrCtrlRegCalc SDRAM device DIMM2 width x8\n"));
+ break;
+ case 16:
+ addrCtrl |= SDRAM_ADDRSEL_X16(2) | SDRAM_ADDRSEL_X16(3);
+ DB(mvOsPrintf("sdramAddrCtrlRegCalc: sdramAddrCtrlRegCalc SDRAM device DIMM2 width x16\n"));
+ break;
+ default: /* memory width unsupported */
+ mvOsOutput("sdramAddrCtrlRegCalc: ERR. DRAM chip width is unknown!\n");
+ return -1;
+ }
+ }
+
+ switch (pBankInfo->sdramWidth)
+ {
+ case 4: /* memory is x4 */
+ mvOsOutput("sdramAddrCtrlRegCalc: Error - x4 not supported!\n");
+ return -1;
+ break;
+ case 8: /* memory is x8 */
+ addrCtrl |= SDRAM_ADDRSEL_X8(0) | SDRAM_ADDRSEL_X8(1);
+ DB(mvOsPrintf("sdramAddrCtrlRegCalc: sdramAddrCtrlRegCalc SDRAM device width x8\n"));
+ break;
+ case 16:
+ addrCtrl |= SDRAM_ADDRSEL_X16(0) | SDRAM_ADDRSEL_X16(1);
+ DB(mvOsPrintf("sdramAddrCtrlRegCalc: sdramAddrCtrlRegCalc SDRAM device width x16\n"));
+ break;
+ default: /* memory width unsupported */
+ mvOsOutput("sdramAddrCtrlRegCalc: ERR. DRAM chip width is unknown!\n");
+ return -1;
+ }
+
+ /* Note that density is in MB units */
+ switch (pBankInfo->deviceDensity)
+ {
+ case 256: /* 256 Mbit */
+ DB(mvOsPrintf("DRAM Device Density 256Mbit\n"));
+ addrCtrl |= SDRAM_DSIZE_256Mb(0) | SDRAM_DSIZE_256Mb(1);
+ break;
+ case 512: /* 512 Mbit */
+ DB(mvOsPrintf("DRAM Device Density 512Mbit\n"));
+ addrCtrl |= SDRAM_DSIZE_512Mb(0) | SDRAM_DSIZE_512Mb(1);
+ break;
+ case 1024: /* 1 Gbit */
+ DB(mvOsPrintf("DRAM Device Density 1Gbit\n"));
+ addrCtrl |= SDRAM_DSIZE_1Gb(0) | SDRAM_DSIZE_1Gb(1);
+ break;
+ case 2048: /* 2 Gbit */
+ DB(mvOsPrintf("DRAM Device Density 2Gbit\n"));
+ addrCtrl |= SDRAM_DSIZE_2Gb(0) | SDRAM_DSIZE_2Gb(1);
+ break;
+ default:
+ mvOsOutput("Dram: sdramAddrCtrl unsupported RAM-Device size %d\n",
+ pBankInfo->deviceDensity);
+ return -1;
+ }
+
+ if (pBankInfoDIMM1->size)
+ {
+ switch (pBankInfoDIMM1->deviceDensity)
+ {
+ case 256: /* 256 Mbit */
+ DB(mvOsPrintf("DIMM2: DRAM Device Density 256Mbit\n"));
+ addrCtrl |= SDRAM_DSIZE_256Mb(2) | SDRAM_DSIZE_256Mb(3);
+ break;
+ case 512: /* 512 Mbit */
+ DB(mvOsPrintf("DIMM2: DRAM Device Density 512Mbit\n"));
+ addrCtrl |= SDRAM_DSIZE_512Mb(2) | SDRAM_DSIZE_512Mb(3);
+ break;
+ case 1024: /* 1 Gbit */
+ DB(mvOsPrintf("DIMM2: DRAM Device Density 1Gbit\n"));
+ addrCtrl |= SDRAM_DSIZE_1Gb(2) | SDRAM_DSIZE_1Gb(3);
+ break;
+ case 2048: /* 2 Gbit */
+ DB(mvOsPrintf("DIMM2: DRAM Device Density 2Gbit\n"));
+ addrCtrl |= SDRAM_DSIZE_2Gb(2) | SDRAM_DSIZE_2Gb(3);
+ break;
+ default:
+ mvOsOutput("DIMM2: Dram: sdramAddrCtrl unsupported RAM-Device size %d\n",
+ pBankInfoDIMM1->deviceDensity);
+ return -1;
+ }
+ }
+ /* SDRAM address control */
+ DB(mvOsPrintf("Dram: setting sdram address control with: %x \n", addrCtrl));
+
+ return addrCtrl;
+}
+
+/*******************************************************************************
+* sdramTimeCtrlLowRegCalc - Calculate sdram timing control low register
+*
+* DESCRIPTION:
+* This function calculates sdram timing control low register
+* optimized value based on the bank info parameters and the minCas.
+*
+* INPUT:
+* pBankInfo - sdram bank parameters
+* minCas - minimum CAS supported.
+* busClk - Bus clock
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* sdram timing control low reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramTimeCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 minCas, MV_U32 busClk)
+{
+ MV_U32 tRp = 0;
+ MV_U32 tRrd = 0;
+ MV_U32 tRcd = 0;
+ MV_U32 tRas = 0;
+ MV_U32 tWr = 0;
+ MV_U32 tWtr = 0;
+ MV_U32 tRtp = 0;
+ MV_U32 timeCtrlLow = 0;
+
+ MV_U32 bankNum;
+
+ busClk = busClk / 1000000; /* In MHz */
+
+ /* Scan all DRAM banks to find maximum timing values */
+ for (bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+ {
+ tRp = MV_MAX(tRp, pBankInfo[bankNum].minRowPrechargeTime);
+ tRrd = MV_MAX(tRrd, pBankInfo[bankNum].minRowActiveToRowActive);
+ tRcd = MV_MAX(tRcd, pBankInfo[bankNum].minRasToCasDelay);
+ tRas = MV_MAX(tRas, pBankInfo[bankNum].minRasPulseWidth);
+ }
+
+ /* Extract timing (in ns) from SPD value. We ignore the tenth ns part. */
+ /* by shifting the data two bits right. */
+ tRp = tRp >> 2; /* For example 0x50 -> 20ns */
+ tRrd = tRrd >> 2;
+ tRcd = tRcd >> 2;
+
+ /* Extract clock cycles from time parameter. We need to round up */
+ tRp = ((busClk * tRp) / 1000) + (((busClk * tRp) % 1000) ? 1 : 0);
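+	/*
+	 * Example (illustrative only): tRp = 20 ns at 166 MHz gives
+	 * (166 * 20) / 1000 = 3 with a remainder, so it rounds up to 4 cycles;
+	 * at 200 MHz it is exactly (200 * 20) / 1000 = 4 cycles.
+	 */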
+ DB(mvOsPrintf("Dram Timing Low: tRp = %d ", tRp));
+ tRrd = ((busClk * tRrd) / 1000) + (((busClk * tRrd) % 1000) ? 1 : 0);
+	/* JEDEC minimum requirement: tRrd = 2 */
+ if (tRrd < 2)
+ tRrd = 2;
+ DB(mvOsPrintf("tRrd = %d ", tRrd));
+ tRcd = ((busClk * tRcd) / 1000) + (((busClk * tRcd) % 1000) ? 1 : 0);
+ DB(mvOsPrintf("tRcd = %d ", tRcd));
+ tRas = ((busClk * tRas) / 1000) + (((busClk * tRas) % 1000) ? 1 : 0);
+ DB(mvOsPrintf("tRas = %d ", tRas));
+
+	/* tWr and tWtr are different for DDR1 and DDR2. tRtp is only for DDR2 */
+ /* Scan all DRAM banks to find maximum timing values */
+ for (bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+ {
+ tWr = MV_MAX(tWr, pBankInfo[bankNum].minWriteRecoveryTime);
+ tWtr = MV_MAX(tWtr, pBankInfo[bankNum].minWriteToReadCmdDelay);
+ tRtp = MV_MAX(tRtp, pBankInfo[bankNum].minReadToPrechCmdDelay);
+ }
+
+ /* Extract timing (in ns) from SPD value. We ignore the tenth ns */
+ /* part by shifting the data two bits right. */
+ tWr = tWr >> 2; /* For example 0x50 -> 20ns */
+ tWtr = tWtr >> 2;
+ tRtp = tRtp >> 2;
+ /* Extract clock cycles from time parameter. We need to round up */
+ tWr = ((busClk * tWr) / 1000) + (((busClk * tWr) % 1000) ? 1 : 0);
+ DB(mvOsPrintf("tWr = %d ", tWr));
+ tWtr = ((busClk * tWtr) / 1000) + (((busClk * tWtr) % 1000) ? 1 : 0);
+	/* JEDEC minimum requirement: tWtr = 2 */
+ if (tWtr < 2)
+ tWtr = 2;
+ DB(mvOsPrintf("tWtr = %d ", tWtr));
+ tRtp = ((busClk * tRtp) / 1000) + (((busClk * tRtp) % 1000) ? 1 : 0);
+	/* JEDEC minimum requirement: tRtp = 2 */
+ if (tRtp < 2)
+ tRtp = 2;
+ DB(mvOsPrintf("tRtp = %d ", tRtp));
+
+ /* Note: value of 0 in register means one cycle, 1 means two and so on */
+ timeCtrlLow = (((tRp - 1) << SDRAM_TRP_OFFS) |
+ ((tRrd - 1) << SDRAM_TRRD_OFFS) |
+ ((tRcd - 1) << SDRAM_TRCD_OFFS) |
+ (((tRas - 1) << SDRAM_TRAS_OFFS) & SDRAM_TRAS_MASK)|
+ ((tWr - 1) << SDRAM_TWR_OFFS) |
+ ((tWtr - 1) << SDRAM_TWTR_OFFS) |
+ ((tRtp - 1) << SDRAM_TRTP_OFFS));
+
+ /* Check extended tRas bit */
+ if ((tRas - 1) & BIT4)
+ timeCtrlLow |= (1 << SDRAM_EXT_TRAS_OFFS);
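+	/*
+	 * Example (illustrative only): tRas = 18 cycles encodes as 17 (0x11); the
+	 * low four bits (0x1) go into the SDRAM_TRAS_MASK field and bit 4 sets
+	 * the extended tRas bit handled above.
+	 */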
+
+ return timeCtrlLow;
+}
+
+/*******************************************************************************
+* sdramTimeCtrlHighRegCalc - Calculate sdram timing control high register
+*
+* DESCRIPTION:
+* This function calculates sdram timing control high register
+* optimized value based on the bank info parameters and the bus clock.
+*
+* INPUT:
+* pBankInfo - sdram bank parameters
+* busClk - Bus clock
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* sdram timing control high reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramTimeCtrlHighRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk)
+{
+ MV_U32 tRfc;
+ MV_U32 timingHigh;
+ MV_U32 timeNs = 0;
+ MV_U32 bankNum;
+
+ busClk = busClk / 1000000; /* In MHz */
+
+ /* Set DDR timing high register static configuration bits */
+ timingHigh = MV_REG_READ(SDRAM_TIMING_CTRL_HIGH_REG);
+
+ /* Set DDR timing high register default value */
+ timingHigh |= SDRAM_TIMING_CTRL_HIGH_REG_DV;
+
+ /* Clear tRfc field */
+ timingHigh &= ~SDRAM_TRFC_MASK;
+
+ /* Scan all DRAM banks to find maximum timing values */
+ for (bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+ {
+ timeNs = MV_MAX(timeNs, pBankInfo[bankNum].minRefreshToActiveCmd);
+ DB(mvOsPrintf("Dram: Timing High: minRefreshToActiveCmd = %d\n",
+ pBankInfo[bankNum].minRefreshToActiveCmd));
+ }
+ if(busClk >= 333 && mvCtrlModelGet() == MV_78XX0_A1_REV)
+ {
+ timingHigh |= 0x1 << SDRAM_TR2W_W2R_OFFS;
+ }
+
+ tRfc = ((busClk * timeNs) / 1000) + (((busClk * timeNs) % 1000) ? 1 : 0);
+ /* Note: value of 0 in register means one cycle, 1 means two and so on */
+ DB(mvOsPrintf("Dram: Timing High: tRfc = %d\n", tRfc));
+ timingHigh |= (((tRfc - 1) & SDRAM_TRFC_MASK) << SDRAM_TRFC_OFFS);
+ DB(mvOsPrintf("Dram: Timing High: tRfc = %d\n", tRfc));
+
+ /* SDRAM timing high */
+ DB(mvOsPrintf("Dram: setting timing high with: %x \n", timingHigh));
+
+ return timingHigh;
+}
+/*******************************************************************************
+* sdramDDr2OdtConfig - Set DRAM DDR2 On Die Termination registers.
+*
+* DESCRIPTION:
+*	This function configures the DDR2 On Die Termination (ODT) registers.
+*
+* INPUT:
+* pBankInfo - bank info parameters.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* None
+*******************************************************************************/
+static void sdramDDr2OdtConfig(MV_DRAM_BANK_INFO *pBankInfo)
+{
+ MV_U32 populateBanks = 0;
+ MV_U32 odtCtrlLow, odtCtrlHigh, dunitOdtCtrl;
+ int bankNum;
+
+ /* Represent the populate banks in binary form */
+ for(bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+ {
+ if (0 != pBankInfo[bankNum].size)
+ {
+ populateBanks |= (1 << bankNum);
+ }
+ }
+
+ switch(populateBanks)
+ {
+ case(BANK_PRESENT_CS0):
+ case(BANK_PRESENT_CS0_CS1):
+ odtCtrlLow = DDR2_ODT_CTRL_LOW_CS0_CS1_DV;
+ odtCtrlHigh = DDR2_ODT_CTRL_HIGH_CS0_CS1_DV;
+ dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_CS1_DV;
+ break;
+ case(BANK_PRESENT_CS0_CS2):
+ case(BANK_PRESENT_CS0_CS1_CS2):
+ case(BANK_PRESENT_CS0_CS2_CS3):
+ case(BANK_PRESENT_CS0_CS2_CS3_CS4):
+ odtCtrlLow = DDR2_ODT_CTRL_LOW_CS0_CS1_CS2_CS3_DV;
+ odtCtrlHigh = DDR2_ODT_CTRL_HIGH_CS0_CS1_CS2_CS3_DV;
+ dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_CS1_CS2_CS3_DV;
+ break;
+ default:
+ DB(mvOsPrintf("sdramDDr2OdtConfig: Invalid DRAM bank presence\n"));
+ return;
+ }
+ /* DDR2 SDRAM ODT ctrl low */
+ DB(mvOsPrintf("Dram: DDR2 setting ODT ctrl low with: %x \n", odtCtrlLow));
+ MV_REG_WRITE(DRAM_BUF_REG7, odtCtrlLow);
+
+ /* DDR2 SDRAM ODT ctrl high */
+ DB(mvOsPrintf("Dram: DDR2 setting ODT ctrl high with: %x \n", odtCtrlHigh));
+ MV_REG_WRITE(DRAM_BUF_REG8, odtCtrlHigh);
+
+ /* DDR2 DUNIT ODT ctrl */
+ if ( ((mvCtrlModelGet() == MV_78XX0_DEV_ID) && (mvCtrlRevGet() == MV_78XX0_Y0_REV)) ||
+ (mvCtrlModelGet() == MV_76100_DEV_ID) ||
+ (mvCtrlModelGet() == MV_78100_DEV_ID) ||
+ (mvCtrlModelGet() == MV_78200_DEV_ID) )
+ dunitOdtCtrl &= ~(BIT9|BIT8); /* Clear ODT always on */
+
+ DB(mvOsPrintf("DUNIT: DDR2 setting ODT ctrl with: %x \n", dunitOdtCtrl));
+ MV_REG_WRITE(DRAM_BUF_REG9, dunitOdtCtrl);
+ return;
+}
+/*******************************************************************************
+* sdramDdr2TimeLoRegCalc - Set DDR2 DRAM Timing Low registers.
+*
+* DESCRIPTION:
+*	This function configures the DDR2 DRAM Timing Low register.
+*
+* INPUT:
+* minCas - minimum CAS supported.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* DDR2 sdram timing low reg value.
+*******************************************************************************/
+static MV_U32 sdramDdr2TimeLoRegCalc(MV_U32 minCas)
+{
+ MV_U8 cl = -1;
+ MV_U32 ddr2TimeLoReg;
+
+	/* read and clear the fields we are going to set */
+ ddr2TimeLoReg = MV_REG_READ(SDRAM_DDR2_TIMING_LO_REG);
+ ddr2TimeLoReg &= ~(SD2TLR_TODT_ON_RD_MASK |
+ SD2TLR_TODT_OFF_RD_MASK |
+ SD2TLR_TODT_ON_CTRL_RD_MASK |
+ SD2TLR_TODT_OFF_CTRL_RD_MASK);
+
+ if( minCas == DDR2_CL_3 )
+ {
+ cl = 3;
+ }
+ else if( minCas == DDR2_CL_4 )
+ {
+ cl = 4;
+ }
+ else if( minCas == DDR2_CL_5 )
+ {
+ cl = 5;
+ }
+ else if( minCas == DDR2_CL_6 )
+ {
+ cl = 6;
+ }
+ else
+ {
+ DB(mvOsPrintf("sdramDdr2TimeLoRegCalc: CAS latency %d unsupported. using CAS latency 4\n",
+ minCas));
+ cl = 4;
+ }
+
+ ddr2TimeLoReg |= ((cl-3) << SD2TLR_TODT_ON_RD_OFFS);
+ ddr2TimeLoReg |= ( cl << SD2TLR_TODT_OFF_RD_OFFS);
+ ddr2TimeLoReg |= ( cl << SD2TLR_TODT_ON_CTRL_RD_OFFS);
+ ddr2TimeLoReg |= ((cl+3) << SD2TLR_TODT_OFF_CTRL_RD_OFFS);
+
+ /* DDR2 SDRAM timing low */
+ DB(mvOsPrintf("Dram: DDR2 setting timing low with: %x \n", ddr2TimeLoReg));
+
+ return ddr2TimeLoReg;
+}
+
+/*******************************************************************************
+* sdramDdr2TimeHiRegCalc - Set DDR2 DRAM Timing High registers.
+*
+* DESCRIPTION:
+*	This function configures the DDR2 DRAM Timing High register.
+*
+* INPUT:
+* minCas - minimum CAS supported.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* DDR2 sdram timing high reg value.
+*******************************************************************************/
+static MV_U32 sdramDdr2TimeHiRegCalc(MV_U32 minCas)
+{
+ MV_U8 cl = -1;
+ MV_U32 ddr2TimeHiReg;
+
+	/* read and clear the fields we are going to set */
+ ddr2TimeHiReg = MV_REG_READ(SDRAM_DDR2_TIMING_HI_REG);
+ ddr2TimeHiReg &= ~(SD2THR_TODT_ON_WR_MASK |
+ SD2THR_TODT_OFF_WR_MASK |
+ SD2THR_TODT_ON_CTRL_WR_MASK |
+ SD2THR_TODT_OFF_CTRL_WR_MASK);
+
+ if( minCas == DDR2_CL_3 )
+ {
+ cl = 3;
+ }
+ else if( minCas == DDR2_CL_4 )
+ {
+ cl = 4;
+ }
+ else if( minCas == DDR2_CL_5 )
+ {
+ cl = 5;
+ }
+ else if( minCas == DDR2_CL_6 )
+ {
+ cl = 6;
+ }
+ else
+ {
+ mvOsOutput("sdramDdr2TimeHiRegCalc: CAS latency %d unsupported. using CAS latency 4\n",
+ minCas);
+ cl = 4;
+ }
+
+ ddr2TimeHiReg |= ((cl-3) << SD2THR_TODT_ON_WR_OFFS);
+ ddr2TimeHiReg |= ( cl << SD2THR_TODT_OFF_WR_OFFS);
+ ddr2TimeHiReg |= ( cl << SD2THR_TODT_ON_CTRL_WR_OFFS);
+ ddr2TimeHiReg |= ((cl+3) << SD2THR_TODT_OFF_CTRL_WR_OFFS);
+
+	/* DDR2 SDRAM timing high */
+ DB(mvOsPrintf("Dram: DDR2 setting timing high with: %x \n", ddr2TimeHiReg));
+
+ return ddr2TimeHiReg;
+}
+#endif
+
+/*******************************************************************************
+* mvDramIfCalGet - Get CAS Latency
+*
+* DESCRIPTION:
+*       This function gets the configured CAS latency from the SDRAM Mode register.
+*
+* INPUT:
+* None
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* CAS latency times 10 (to avoid using floating point).
+*
+*******************************************************************************/
+MV_U32 mvDramIfCalGet(void)
+{
+ MV_U32 sdramCasLat, casLatMask;
+
+ casLatMask = (MV_REG_READ(SDRAM_MODE_REG) & SDRAM_CL_MASK);
+
+ switch (casLatMask)
+ {
+ case SDRAM_DDR2_CL_3:
+ sdramCasLat = 30;
+ break;
+ case SDRAM_DDR2_CL_4:
+ sdramCasLat = 40;
+ break;
+ case SDRAM_DDR2_CL_5:
+ sdramCasLat = 50;
+ break;
+ case SDRAM_DDR2_CL_6:
+ sdramCasLat = 60;
+ break;
+ default:
+ mvOsOutput("mvDramIfCalGet: Err, unknown DDR2 CAL\n");
+ return -1;
+ }
+
+ return sdramCasLat;
+}
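+
+/* Example (illustrative): with the SDRAM Mode register programmed for CL5
+ * (SDRAM_DDR2_CL_5) this returns 50, which callers such as mvDramIfShow()
+ * print as "5.0" using value/10 and value%10. */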
+
+
+/*******************************************************************************
+* mvDramIfSelfRefreshSet - Put the DRAM in self-refresh mode.
+*
+* DESCRIPTION:
+*       This function puts the DRAM into self-refresh mode, as required for
+*       power management support.
+*
+*
+* INPUT:
+* None
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* None
+*
+*******************************************************************************/
+
+MV_VOID mvDramIfSelfRefreshSet()
+{
+ MV_U32 operReg;
+
+ operReg = MV_REG_READ(SDRAM_OPERATION_REG);
+ MV_REG_WRITE(SDRAM_OPERATION_REG ,operReg |SDRAM_CMD_SLF_RFRSH);
+ /* Read until register is reset to 0 */
+ while(MV_REG_READ(SDRAM_OPERATION_REG));
+}
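+
+/* Usage sketch (illustrative only): a platform suspend path would typically
+ * quiesce DRAM traffic and then call this as one of its last steps, e.g.:
+ *
+ *     static void boardEnterLowPower(void)    // hypothetical helper, not part of this HAL
+ *     {
+ *         mvDramIfSelfRefreshSet();           // DRAM retains its content in self refresh
+ *         // ... gate clocks / enter the CPU low-power state ...
+ *     }
+ */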
+/*******************************************************************************
+* mvDramIfDimGetSPDversion - Return the DIMM SPD version.
+*
+* DESCRIPTION:
+*       This function reads the SPD version of the DIMM serving the given bank.
+*
+* INPUT:
+*       bankNum - DRAM bank number.
+*
+* OUTPUT:
+*       pMajor, pMinor - SPD version split into major and minor parts.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+static void mvDramIfDimGetSPDversion(MV_U32 *pMajor, MV_U32 *pMinor, MV_U32 bankNum)
+{
+ MV_DIMM_INFO dimmInfo;
+ if (bankNum >= MV_DRAM_MAX_CS )
+ {
+ DB(mvOsPrintf("Dram: mvDramIfDimGetSPDversion bad params \n"));
+ return ;
+ }
+ memset(&dimmInfo,0,sizeof(dimmInfo));
+ if ( MV_OK != dimmSpdGet((MV_U32)(bankNum/2), &dimmInfo))
+ {
+ DB(mvOsPrintf("Dram: ERR dimmSpdGet failed to get dimm info \n"));
+ return ;
+ }
+ *pMajor = dimmInfo.spdRawData[DIMM_SPD_VERSION]/10;
+ *pMinor = dimmInfo.spdRawData[DIMM_SPD_VERSION]%10;
+}
+/*******************************************************************************
+* mvDramIfShow - Show DRAM controller information.
+*
+* DESCRIPTION:
+* This function prints the DRAM controller information.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+void mvDramIfShow(void)
+{
+ int i, sdramCasLat, sdramCsSize;
+ MV_U32 Major=0, Minor=0;
+
+ mvOsOutput("DRAM Controller info:\n");
+
+ mvOsOutput("Total DRAM ");
+ mvSizePrint(mvDramIfSizeGet());
+ mvOsOutput("\n");
+
+ for(i = 0; i < MV_DRAM_MAX_CS; i++)
+ {
+ sdramCsSize = mvDramIfBankSizeGet(i);
+ if (sdramCsSize)
+ {
+ if (0 == (i & 1))
+ {
+ mvDramIfDimGetSPDversion(&Major, &Minor,i);
+ mvOsOutput("DIMM %d version %d.%d\n", i/2, Major, Minor);
+ }
+ mvOsOutput("\tDRAM CS[%d] ", i);
+ mvSizePrint(sdramCsSize);
+ mvOsOutput("\n");
+ }
+ }
+ sdramCasLat = mvDramIfCalGet();
+
+ if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_ECC_EN)
+ {
+ mvOsOutput("ECC enabled, ");
+ }
+ else
+ {
+ mvOsOutput("ECC Disabled, ");
+ }
+
+ if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_REGISTERED)
+ {
+ mvOsOutput("Registered DIMM\n");
+ }
+ else
+ {
+ mvOsOutput("Non registered DIMM\n");
+ }
+
+ mvOsOutput("Configured CAS Latency %d.%d\n", sdramCasLat/10, sdramCasLat%10);
+}
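+
+/* Illustrative output (the exact size strings come from mvSizePrint; the
+ * values below are only an example):
+ *
+ *     DRAM Controller info:
+ *     Total DRAM 512MB
+ *     DIMM 0 version 1.2
+ *             DRAM CS[0] 512MB
+ *     ECC Disabled, Registered DIMM
+ *     Configured CAS Latency 5.0
+ */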
+/*******************************************************************************
+* mvDramIfGetFirstCS - Find the DRAM bank mapped at the lowest address.
+*
+* DESCRIPTION:
+*       This function returns the first CS, i.e. the one mapped at address 0.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* SDRAM_CS0 or SDRAM_CS2
+*
+*******************************************************************************/
+MV_U32 mvDramIfGetFirstCS(void)
+{
+ MV_DRAM_BANK_INFO bankInfo[MV_DRAM_MAX_CS];
+
+ if (DRAM_CS_Order[0] == N_A)
+ {
+ mvDramBankInfoGet(SDRAM_CS0, &bankInfo[SDRAM_CS0]);
+#ifdef MV_INCLUDE_SDRAM_CS2
+ mvDramBankInfoGet(SDRAM_CS2, &bankInfo[SDRAM_CS2]);
+#endif
+
+#ifdef MV_INCLUDE_SDRAM_CS2
+ if (bankInfo[SDRAM_CS0].size < bankInfo[SDRAM_CS2].size)
+ {
+ DRAM_CS_Order[0] = SDRAM_CS2;
+ DRAM_CS_Order[1] = SDRAM_CS3;
+ DRAM_CS_Order[2] = SDRAM_CS0;
+ DRAM_CS_Order[3] = SDRAM_CS1;
+
+ return SDRAM_CS2;
+ }
+#endif
+ DRAM_CS_Order[0] = SDRAM_CS0;
+ DRAM_CS_Order[1] = SDRAM_CS1;
+#ifdef MV_INCLUDE_SDRAM_CS2
+ DRAM_CS_Order[2] = SDRAM_CS2;
+ DRAM_CS_Order[3] = SDRAM_CS3;
+#endif
+ return SDRAM_CS0;
+ }
+ return DRAM_CS_Order[0];
+}
+/*******************************************************************************
+* mvDramIfGetCSorder - Translate an address-order index into a CS number.
+*
+* DESCRIPTION:
+*       This function returns the CS occupying the given position in the DRAM
+*       address order established by mvDramIfGetFirstCS.
+*
+* INPUT:
+*       csOrder - position in the address order (0 = lowest address).
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       The CS number (e.g. SDRAM_CS0 or SDRAM_CS2).
+*
+* NOTE: mvDramIfGetFirstCS must be called before this subroutine.
+*******************************************************************************/
+MV_U32 mvDramIfGetCSorder(MV_U32 csOrder )
+{
+ return DRAM_CS_Order[csOrder];
+}
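+
+/* Usage sketch (illustrative): callers first establish the order, then walk it:
+ *
+ *     MV_U32 i, firstCs = mvDramIfGetFirstCS();    // must be called first
+ *     for (i = 0; i < MV_DRAM_MAX_CS; i++)
+ *         mvOsPrintf("order %u -> CS %u\n", i, mvDramIfGetCSorder(i));
+ */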
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.h
new file mode 100644
index 000000000..23f2e540c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.h
@@ -0,0 +1,172 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvDramIfh
+#define __INCmvDramIfh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* includes */
+#include "ddr2/mvDramIfRegs.h"
+#include "ddr2/mvDramIfConfig.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+/* defines */
+/* DRAM Timing parameters */
+#define SDRAM_TWR 15 /* ns tWr */
+#define SDRAM_TRFC_64_512M_AT_200MHZ 70 /* ns tRfc for dens 64-512 @ 200MHz */
+#define SDRAM_TRFC_64_512M 75 /* ns tRfc for dens 64-512 */
+#define SDRAM_TRFC_1G 120 /* ns tRfc for dens 1GB */
+#define SDRAM_TR2R_CYC 1 /* cycle for tR2r */
+
+#define CAL_AUTO_DETECT		0   /* Do not force CAS latency (mvDramIfDetect) */
+#define ECC_DISABLE 1 /* Force ECC to Disable */
+#define ECC_ENABLE 0 /* Force ECC to ENABLE */
+/* typedefs */
+
+/* enumeration for memory types */
+typedef enum _mvMemoryType
+{
+ MEM_TYPE_SDRAM,
+ MEM_TYPE_DDR1,
+ MEM_TYPE_DDR2
+}MV_MEMORY_TYPE;
+
+/* enumeration for DDR2 supported CAS Latencies */
+typedef enum _mvDimmDdr2Cas
+{
+ DDR2_CL_3 = 0x08,
+ DDR2_CL_4 = 0x10,
+ DDR2_CL_5 = 0x20,
+ DDR2_CL_6 = 0x40,
+ DDR2_CL_FAULT
+} MV_DIMM_DDR2_CAS;
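+/* Note: these values are one-hot bit masks (bit n <=> CL n), so they can be
+ * tested directly against the DIMM's supported-CL SPD byte (see _get_CAL in
+ * mvDramIfBasicInit.S); e.g. 0x30 = DDR2_CL_4 | DDR2_CL_5. */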
+
+
+typedef struct _mvDramBankInfo
+{
+ MV_MEMORY_TYPE memoryType; /* DDR1, DDR2 or SDRAM */
+
+ /* DIMM dimensions */
+ MV_U32 numOfRowAddr;
+ MV_U32 numOfColAddr;
+ MV_U32 dataWidth;
+ MV_U32 errorCheckType; /* ECC , PARITY..*/
+ MV_U32 sdramWidth; /* 4,8,16 or 32 */
+ MV_U32 errorCheckDataWidth; /* 0 - no, 1 - Yes */
+ MV_U32 burstLengthSupported;
+ MV_U32 numOfBanksOnEachDevice;
+ MV_U32 suportedCasLatencies;
+ MV_U32 refreshInterval;
+
+ /* DIMM timing parameters */
+ MV_U32 minCycleTimeAtMaxCasLatPs;
+ MV_U32 minCycleTimeAtMaxCasLatMinus1Ps;
+ MV_U32 minCycleTimeAtMaxCasLatMinus2Ps;
+ MV_U32 minRowPrechargeTime;
+ MV_U32 minRowActiveToRowActive;
+ MV_U32 minRasToCasDelay;
+ MV_U32 minRasPulseWidth;
+ MV_U32 minWriteRecoveryTime; /* DDR2 only */
+ MV_U32 minWriteToReadCmdDelay; /* DDR2 only */
+ MV_U32 minReadToPrechCmdDelay; /* DDR2 only */
+ MV_U32 minRefreshToActiveCmd; /* DDR2 only */
+
+ /* Parameters calculated from the extracted DIMM information */
+ MV_U32 size;
+ MV_U32 deviceDensity; /* 16,64,128,256 or 512 Mbit */
+ MV_U32 numberOfDevices;
+
+ /* DIMM attributes (MV_TRUE for yes) */
+ MV_BOOL registeredAddrAndControlInputs;
+ MV_BOOL registeredDQMBinputs;
+
+}MV_DRAM_BANK_INFO;
+
+#include "ddr2/spd/mvSpd.h"
+
+/* mvDramIf.h API list */
+MV_VOID mvDramIfBasicAsmInit(MV_VOID);
+MV_STATUS mvDramIfDetect(MV_U32 forcedCl, MV_BOOL eccDisable);
+MV_VOID _mvDramIfConfig(int entryNum);
+
+MV_U32 mvDramIfBankSizeGet(MV_U32 bankNum);
+MV_U32 mvDramIfBankBaseGet(MV_U32 bankNum);
+MV_U32 mvDramIfSizeGet(MV_VOID);
+MV_U32 mvDramIfCalGet(void);
+MV_STATUS mvDramIfSingleBitErrThresholdSet(MV_U32 threshold);
+MV_VOID mvDramIfSelfRefreshSet(void);
+void mvDramIfShow(void);
+MV_U32 mvDramIfGetFirstCS(void);
+MV_U32 mvDramIfGetCSorder(MV_U32 csOrder );
+MV_U32 mvDramCsSizeGet(MV_U32 csNum);
+
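+/* Usage sketch (illustrative only, not part of the original header): a typical
+ * boot flow detects the DIMMs and then queries the resulting configuration:
+ *
+ *     if (mvDramIfDetect(CAL_AUTO_DETECT, ECC_ENABLE) != MV_OK)
+ *         mvOsPrintf("DRAM detection failed\n");
+ *     mvOsPrintf("DRAM size %u bytes, CAS latency x10 = %u\n",
+ *                mvDramIfSizeGet(), mvDramIfCalGet());
+ *     mvDramIfShow();
+ */
+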
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvDramIfh */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfBasicInit.S b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfBasicInit.S
new file mode 100644
index 000000000..76723816d
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfBasicInit.S
@@ -0,0 +1,986 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#define _ASMLANGUAGE
+#define MV_ASMLANGUAGE
+#include "mvSysHwConfig.h"
+#include "mvOsAsm.h"
+#include "boardEnv/mvBoardEnvSpec.h"
+#include "ctrlEnv/sys/mvCpuIfRegs.h"
+#include "mvDramIfConfig.h"
+#include "mvDramIfRegs.h"
+#include "pex/mvPexRegs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "ctrlEnv/mvCtrlEnvAsm.h"
+#include "mvCommon.h"
+
+/* defines */
+
+#if defined(MV_STATIC_DRAM_ON_BOARD)
+.globl dramBoot1
+dramBoot1:
+ .word 0
+
+/******************************************************************************
+*
+*
+*
+*
+*******************************************************************************/
+#if defined(DB_MV78XX0) || defined(DB_MV88F632X)
+/* DDR2 boards 512MB 333MHz */
+#define STATIC_SDRAM0_BANK0_SIZE 0x1ffffff1 /* 0x1504 */
+#define STATIC_SDRAM_CONFIG 0x43048C30 /* 0x1400 */
+#define STATIC_SDRAM_MODE 0x00000652 /* 0x141c */
+#define STATIC_DUNIT_CTRL_LOW 0x38543000 /* 0x1404 */
+#define STATIC_DUNIT_CTRL_HI 0x0000FFFF /* 0x1424 */
+#define STATIC_SDRAM_ADDR_CTRL 0x00000088 /* 0x1410 */
+#define STATIC_SDRAM_TIME_CTRL_LOW 0x22125441 /* 0x1408 */
+#define STATIC_SDRAM_TIME_CTRL_HI 0x00000A29 /* 0x140c */
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x84210000 /* 0x1494 */
+#define STATIC_SDRAM_ODT_CTRL_HI 0x00000000 /* 0x1498 */
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x0000E80F /* 0x149c */
+#define STATIC_SDRAM_EXT_MODE 0x00000040 /* 0x1420 */
+#define STATIC_SDRAM_DDR2_TIMING_LO 0x00085520 /* 0x1428 */
+#define STATIC_SDRAM_DDR2_TIMING_HI 0x00008552 /* 0x147C */
+
+#elif defined(RD_MV78XX0_AMC)
+/* On board DDR2 512MB 400MHz CL5 */
+#define STATIC_SDRAM0_BANK0_SIZE 0x1ffffff1 /* 0x1504 */
+#define STATIC_SDRAM_CONFIG 0x43008C30 /* 0x1400 */
+#define STATIC_SDRAM_MODE 0x00000652 /* 0x141c */
+#define STATIC_DUNIT_CTRL_LOW 0x38543000 /* 0x1404 */
+#define STATIC_DUNIT_CTRL_HI 0x0000F07F /* 0x1424 */
+#define STATIC_SDRAM_ADDR_CTRL 0x000000DD /* 0x1410 */
+#define STATIC_SDRAM_TIME_CTRL_LOW 0x23135441 /* 0x1408 */
+#define STATIC_SDRAM_TIME_CTRL_HI 0x00000A32 /* 0x140c */
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x84210000 /* 0x1494 */
+#define STATIC_SDRAM_ODT_CTRL_HI 0x00000000 /* 0x1498 */
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x0000EB0F /* 0x149c */
+#define STATIC_SDRAM_EXT_MODE 0x00000040 /* 0x1420 */
+#define STATIC_SDRAM_DDR2_TIMING_LO 0x00085520 /* 0x1428 */
+#define STATIC_SDRAM_DDR2_TIMING_HI 0x00008552 /* 0x147C */
+
+#elif defined(RD_MV78XX0_H3C)
+/* DDR2 boards 512MB 333MHz */
+#define STATIC_SDRAM0_BANK0_SIZE 0x1ffffff1 /* 0x1504 */
+#define STATIC_SDRAM_CONFIG 0x43048a25 /* 0x1400 */
+#define STATIC_SDRAM_MODE 0x00000652 /* 0x141c */
+#define STATIC_DUNIT_CTRL_LOW 0x38543000 /* 0x1404 */
+#define STATIC_DUNIT_CTRL_HI 0x0000F07F /* 0x1424 */
+#define STATIC_SDRAM_ADDR_CTRL 0x00000088 /* 0x1410 */
+#define STATIC_SDRAM_TIME_CTRL_LOW 0x2202444e /* 0x1408 */
+#define STATIC_SDRAM_TIME_CTRL_HI 0x00000A22 /* 0x140c */
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x84210000 /* 0x1494 */
+#define STATIC_SDRAM_ODT_CTRL_HI 0x00000000 /* 0x1498 */
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x0000EB0F /* 0x149c */
+#define STATIC_SDRAM_EXT_MODE 0x00000040 /* 0x1420 */
+#define STATIC_SDRAM_DDR2_TIMING_LO 0x00085520 /* 0x1428 */
+#define STATIC_SDRAM_DDR2_TIMING_HI 0x00008552 /* 0x147C */
+
+#elif defined(RD_MV78XX0_PCAC)
+/* DDR2 boards 256MB 200MHz */
+#define STATIC_SDRAM0_BANK0_SIZE 0x0ffffff1 /* 0x1504 */
+#define STATIC_SDRAM_CONFIG 0x43000a25 /* 0x1400 */
+#define STATIC_SDRAM_MODE 0x00000652 /* 0x141c */
+#define STATIC_DUNIT_CTRL_LOW 0x38543000 /* 0x1404 */
+#define STATIC_DUNIT_CTRL_HI 0x0000F07F /* 0x1424 */
+#define STATIC_SDRAM_ADDR_CTRL 0x000000DD /* 0x1410 */
+#define STATIC_SDRAM_TIME_CTRL_LOW 0x2202444e /* 0x1408 */
+#define STATIC_SDRAM_TIME_CTRL_HI 0x00000822 /* 0x140c */
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x84210000 /* 0x1494 */
+#define STATIC_SDRAM_ODT_CTRL_HI 0x00000000 /* 0x1498 */
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x0000EB0F /* 0x149c */
+#define STATIC_SDRAM_EXT_MODE 0x00000040 /* 0x1420 */
+#define STATIC_SDRAM_DDR2_TIMING_LO 0x00085520 /* 0x1428 */
+#define STATIC_SDRAM_DDR2_TIMING_HI 0x00008552 /* 0x147C */
+
+#else
+/* DDR2 MV88F6281 boards 256MB 400MHz */
+#define STATIC_SDRAM0_BANK0_SIZE 0x0FFFFFF1 /* 0x1504 */
+#define STATIC_SDRAM_CONFIG 0x43000c30 /* 0x1400 */
+#define STATIC_SDRAM_MODE 0x00000C52 /* 0x141c */
+#define STATIC_DUNIT_CTRL_LOW 0x39543000 /* 0x1404 */
+#define STATIC_DUNIT_CTRL_HI 0x0000F1FF /* 0x1424 */
+#define STATIC_SDRAM_ADDR_CTRL 0x000000cc /* 0x1410 */
+#define STATIC_SDRAM_TIME_CTRL_LOW 0x22125451 /* 0x1408 */
+#define STATIC_SDRAM_TIME_CTRL_HI 0x00000A33 /* 0x140c */
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x003C0000 /* 0x1494 */
+#define STATIC_SDRAM_ODT_CTRL_HI 0x00000000 /* 0x1498 */
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x0000F80F /* 0x149c */
+#define STATIC_SDRAM_EXT_MODE 0x00000042 /* 0x1420 */
+#define STATIC_SDRAM_DDR2_TIMING_LO 0x00085520 /* 0x1428 */
+#define STATIC_SDRAM_DDR2_TIMING_HI 0x00008552 /* 0x147C */
+#endif /* MV78XX0 */
+
+ .globl _mvDramIfStaticInit
+_mvDramIfStaticInit:
+
+ mov r11, LR /* Save link register */
+ mov r10, r2
+
+#ifdef MV78XX0
+ MV_REG_READ_ASM (r6, r5, SDRAM_DUNIT_CTRL_REG)
+ orr r6, r6, #BIT4 /* Enable 2T mode */
+ bic r6, r6, #BIT6 /* clear ctrlPos */
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_DUNIT_CTRL_REG)
+#endif
+
+ /*DDR SDRAM Initialization Control */
+ ldr r6, =DSICR_INIT_EN
+ MV_REG_WRITE_ASM (r6, r1, DDR_SDRAM_INIT_CTRL_REG)
+2: MV_REG_READ_ASM (r6, r1, DDR_SDRAM_INIT_CTRL_REG)
+ and r6, r6, #DSICR_INIT_EN
+ cmp r6, #0
+ bne 2b
+
+ /* If we boot from NAND jump to DRAM address */
+ mov r5, #1
+ ldr r6, =dramBoot1
+ str r5, [r6] /* We started executing from DRAM */
+
+ ldr r6, dramBoot1
+ cmp r6, #0
+ bne 1f
+
+ /* set all dram windows to 0 */
+ mov r6, #0
+ MV_REG_WRITE_ASM(r6, r5, SDRAM_SIZE_REG(0,0))
+ MV_REG_WRITE_ASM(r6, r5, SDRAM_SIZE_REG(0,1))
+ MV_REG_WRITE_ASM(r6, r5, SDRAM_SIZE_REG(0,2))
+ MV_REG_WRITE_ASM(r6, r5, SDRAM_SIZE_REG(0,3))
+ ldr r6, = STATIC_SDRAM0_BANK0_SIZE
+ MV_REG_WRITE_ASM(r6, r5, SDRAM_SIZE_REG(0,0))
+
+
+ /* set all dram configuration in temp registers */
+ ldr r6, = STATIC_SDRAM0_BANK0_SIZE
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG0)
+ ldr r6, = STATIC_SDRAM_CONFIG
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG1)
+ ldr r6, = STATIC_SDRAM_MODE
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG2)
+ ldr r6, = STATIC_DUNIT_CTRL_LOW
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG3)
+ ldr r6, = STATIC_SDRAM_ADDR_CTRL
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG4)
+ ldr r6, = STATIC_SDRAM_TIME_CTRL_LOW
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG5)
+ ldr r6, = STATIC_SDRAM_TIME_CTRL_HI
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG6)
+ ldr r6, = STATIC_SDRAM_ODT_CTRL_LOW
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG7)
+ ldr r6, = STATIC_SDRAM_ODT_CTRL_HI
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG8)
+ ldr r6, = STATIC_SDRAM_DUNIT_ODT_CTRL
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG9)
+ ldr r6, = STATIC_SDRAM_EXT_MODE
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG10)
+ ldr r6, = STATIC_SDRAM_DDR2_TIMING_LO
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG11)
+ ldr r6, = STATIC_SDRAM_DDR2_TIMING_HI
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG12)
+#ifndef MV_NAND_BOOT
+ ldr r6, = STATIC_DUNIT_CTRL_HI
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG13)
+#endif
+
+ ldr sp,=0
+ bl _mvDramIfConfig
+ ldr r0, =0
+#ifdef MV78XX0
+ bl _mvDramIfEccMemInit
+#endif
+1:
+ mov r2, r10
+ mov PC, r11 /* r11 is saved link register */
+
+#else /* #if defined(MV_STATIC_DRAM_ON_BOARD) */
+
+.globl dramBoot1
+dramBoot1:
+ .word 0
+
+/*******************************************************************************
+* mvDramIfBasicInit - Basic initialization of DRAM interface
+*
+* DESCRIPTION:
+*	The function initializes the DRAM for basic usage. It uses the TWSI
+*	assembly API to extract the DIMM parameters, according to which the
+*	DRAM interface is initialized.
+*	The function refers to the following DRAM parameters:
+* 1) DIMM is registered or not.
+* 2) DIMM width detection.
+* 3) DIMM density.
+*
+* INPUT:
+* r3 - required size for initial DRAM.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+* Note:
+* r4 holds I2C EEPROM address
+* r5 holds SDRAM register base address
+* r7 holds returned values
+*	r8 holds various SDRAM configuration register values.
+* r11 holds return function address.
+*******************************************************************************/
+/* Setting the offsets of the I2C registers */
+#define DIMM_TYPE_OFFSET 2
+#define NUM_OF_ROWS_OFFSET 3
+#define NUM_OF_COLS_OFFSET 4
+#define NUM_OF_RANKS 5
+#define DIMM_CONFIG_TYPE 11
+#define SDRAM_WIDTH_OFFSET 13
+#define NUM_OF_BANKS_OFFSET 17
+#define SUPPORTED_CL_OFFSET 18
+#define DIMM_TYPE_INFO_OFFSET 20 /* DDR2 only */
+#define SDRAM_MODULES_ATTR_OFFSET 21
+#define RANK_SIZE_OFFSET 31
+
+#define DRAM_DEV_DENSITY_128M 128
+#define DRAM_DEV_DENSITY_256M 256
+#define DRAM_DEV_DENSITY_512M 512
+#define DRAM_DEV_DENSITY_1G 1024
+#define DRAM_DEV_DENSITY_2G 2048
+
+#define DRAM_RANK_DENSITY_128M 0x20
+#define DRAM_RANK_DENSITY_256M 0x40
+#define DRAM_RANK_DENSITY_512M 0x80
+#define DRAM_RANK_DENSITY_1G 0x1
+#define DRAM_RANK_DENSITY_2G 0x2
+
+ .globl _mvDramIfBasicInit
+ .extern _i2cInit
+_mvDramIfBasicInit:
+
+ mov r11, LR /* Save link register */
+
+ /* Set Dunit high control register */
+ MV_REG_READ_ASM (r6, r5, SDRAM_DUNIT_CTRL_HI_REG)
+ orr r6, r6, #BIT7 /* SDRAM__D2P_EN */
+ orr r6, r6, #BIT8 /* SDRAM__P2D_EN */
+#ifdef MV78XX0
+ orr r6, r6, #BIT9 /* SDRAM__ADD_HALF_FCC_EN */
+ orr r6, r6, #BIT10 /* SDRAM__PUP_ZERO_SKEW_EN */
+ orr r6, r6, #BIT11 /* SDRAM__WR_MASH_DELAY_EN */
+#endif
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_DUNIT_CTRL_HI_REG)
+
+#ifdef MV78XX0
+ MV_REG_READ_ASM (r6, r5, SDRAM_DUNIT_CTRL_REG)
+ orr r6, r6, #BIT4 /* Enable 2T mode */
+ bic r6, r6, #BIT6 /* clear ctrlPos */
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_DUNIT_CTRL_REG)
+#endif
+
+ /*DDR SDRAM Initialization Control */
+ ldr r6, =DSICR_INIT_EN
+ MV_REG_WRITE_ASM (r6, r1, DDR_SDRAM_INIT_CTRL_REG)
+2: MV_REG_READ_ASM (r6, r1, DDR_SDRAM_INIT_CTRL_REG)
+ and r6, r6, #DSICR_INIT_EN
+ cmp r6, #0
+ bne 2b
+
+ mov r5, #1
+ ldr r8, =dramBoot1
+ str r5, [r8] /* We started executing from DRAM */
+
+ /* If we boot from NAND jump to DRAM address */
+ ldr r8, dramBoot1
+ cmp r8, #0
+ movne pc, r11
+
+ bl _i2cInit /* Initialize TWSI master */
+
+	/* Check if we have more than one DIMM */
+ ldr r6, =0
+ MV_REG_WRITE_ASM (r6, r1, DRAM_BUF_REG14)
+#ifdef MV78XX0
+ bl _is_Second_Dimm_Exist
+ beq single_dimm
+ ldr r6, =1
+ MV_REG_WRITE_ASM (r6, r1, DRAM_BUF_REG14)
+single_dimm:
+ bl _i2cInit /* Initialize TWSI master */
+#endif
+
+ /* Get default SDRAM Config values */
+ MV_REG_READ_ASM (r8, r5, SDRAM_CONFIG_REG)
+
+ /* Get registered/non registered info from DIMM */
+ bl _is_Registered
+ beq nonRegistered
+
+setRegistered:
+ orr r8, r8, #SDRAM_REGISTERED /* Set registered bit(17) */
+nonRegistered:
+#ifdef MV78XX0
+ /* Get ECC/non ECC info from DIMM */
+ bl _is_Ecc
+ beq setConfigReg
+
+setEcc:
+ orr r8, r8, #SDRAM_ECC_EN /* Set ecc bit(18) */
+#endif
+setConfigReg:
+ MV_REG_WRITE_ASM (r8, r5, DRAM_BUF_REG1)
+
+ /* Set maximum CL supported by DIMM */
+ bl _get_CAL
+
+ /* r7 is DIMM supported CAS (e.g: 3 --> 0x1C) */
+ clz r6, r7
+ rsb r6, r6, #31 /* r6 = the bit number of MAX CAS supported */
+
+casDdr2:
+ ldr r7, =0x41 /* stBurstInDel|stBurstOutDel field value */
+ ldr r3, =0x53 /* stBurstInDel|stBurstOutDel registered value*/
+ ldr r8, =0x32 /* Assuming MAX CL = 3 */
+ cmp r6, #3 /* If CL = 3 break */
+ beq casDdr2Cont
+
+ ldr r7, =0x53 /* stBurstInDel|stBurstOutDel field value */
+ ldr r3, =0x65 /* stBurstInDel|stBurstOutDel registered value*/
+ ldr r8, =0x42 /* Assuming MAX CL = 4 */
+ cmp r6, #4 /* If CL = 4 break */
+ beq casDdr2Cont
+
+ ldr r7, =0x65 /* stBurstInDel|stBurstOutDel field value */
+ ldr r3, =0x77 /* stBurstInDel|stBurstOutDel registered value*/
+ ldr r8, =0x52 /* Assuming MAX CL = 5 */
+ cmp r6, #5 /* If CL = 5 break */
+ beq casDdr2Cont
+
+ ldr r7, =0x77 /* stBurstInDel|stBurstOutDel field value */
+ ldr r3, =0x89 /* stBurstInDel|stBurstOutDel registered value*/
+ ldr r8, =0x62 /* Assuming MAX CL = 6 */
+	cmp	r6, #6			/* If CL = 6 break */
+ beq casDdr2Cont
+
+ /* This is an error. return */
+ b exit_ddrAutoConfig /* This is an error !! */
+casDdr2Cont:
+
+ /* Get default SDRAM Mode values */
+ MV_REG_READ_ASM (r6, r5, SDRAM_MODE_REG)
+	bic	r6, r6, #(BIT6 | BIT5 | BIT4)	/* Clear CL field */
+ orr r6, r6, r8
+ MV_REG_WRITE_ASM (r6, r5, DRAM_BUF_REG2)
+
+ /* Set Dunit control register according to max CL detected */
+ MV_REG_READ_ASM (r6, r5, DRAM_BUF_REG1)
+ tst r6, #SDRAM_REGISTERED
+ beq setDunitReg
+ mov r7, r3
+
+setDunitReg:
+#ifdef MV78XX0
+ /* Set SDRAM Extended Mode register for double DIMM */
+	/* If the DRAM frequency is higher than 267MHz, set ODT Rtt to 50ohm */
+
+ MV_REG_READ_ASM (r4, r5, CPU_RESET_SAMPLE_L_REG)
+ ldr r5, =MSAR_SYSCLCK_MASK
+ and r4, r4, r5
+ ldr r5, =MSAR_SYSCLCK_333
+ cmp r4, r5
+ ble Clock333
+ add r7, r7, #0x10
+Clock333:
+#endif
+
+ MV_REG_READ_ASM (r6, r5, SDRAM_DUNIT_CTRL_REG)
+ bic r6, r6, #(0xff << 20) /* Clear SBout and SBin */
+ orr r6, r6, #BIT4 /* Enable 2T mode */
+ bic r6, r6, #BIT6 /* clear ctrlPos */
+ orr r6, r6, r7, LSL #20
+ MV_REG_WRITE_ASM (r6, r5, DRAM_BUF_REG3)
+
+ /* Set Dunit high control register */
+ MV_REG_READ_ASM (r6, r5, SDRAM_DUNIT_CTRL_HI_REG)
+ orr r6, r6, #BIT7 /* SDRAM__D2P_EN */
+ orr r6, r6, #BIT8 /* SDRAM__P2D_EN */
+#ifdef MV78XX0
+ orr r6, r6, #BIT9 /* SDRAM__ADD_HALF_FCC_EN */
+ orr r6, r6, #BIT10 /* SDRAM__PUP_ZERO_SKEW_EN */
+ orr r6, r6, #BIT11 /* SDRAM__WR_MASH_DELAY_EN */
+#endif
+ MV_REG_WRITE_ASM (r6, r5, DRAM_BUF_REG13)
+
+ /* DIMM density configuration*/
+ /* Density = (1 << (rowNum + colNum)) * dramWidth * dramBankNum */
+Density:
+ /* Get bank 0 and 1 density */
+ ldr r6, =0
+ bl _getDensity
+
+ mov r8, r7
+ mov r8, r8, LSR #20 /* Move density 20 bits to the right */
+					/* For example 0x10000000 --> 0x100 */
+
+ mov r3, #(SDRAM_DSIZE_256Mb(0) | SDRAM_DSIZE_256Mb(1))
+ cmp r8, #DRAM_DEV_DENSITY_256M
+ beq get_bank_2_density
+
+ mov r3, #(SDRAM_DSIZE_512Mb(0) | SDRAM_DSIZE_512Mb(1))
+ cmp r8, #DRAM_DEV_DENSITY_512M
+ beq get_bank_2_density
+
+ mov r3, #(SDRAM_DSIZE_1Gb(0) | SDRAM_DSIZE_1Gb(1))
+ cmp r8, #DRAM_DEV_DENSITY_1G
+ beq get_bank_2_density
+
+ mov r3, #(SDRAM_DSIZE_2Gb(0) | SDRAM_DSIZE_2Gb(1))
+ cmp r8, #DRAM_DEV_DENSITY_2G
+ beq get_bank_2_density
+
+ /* This is an error. return */
+ b exit_ddrAutoConfig
+
+get_bank_2_density:
+ /* Check for second dimm */
+ MV_REG_READ_ASM (r6, r1, DRAM_BUF_REG14)
+ cmp r6, #1
+ bne get_width
+
+ /* Get bank 2 and 3 density */
+ ldr r6, =2
+ bl _getDensity
+
+ mov r8, r7
+ mov r8, r8, LSR #20 /* Move density 20 bits to the right */
+					/* For example 0x10000000 --> 0x100 */
+
+ orr r3, r3, #(SDRAM_DSIZE_256Mb(2) | SDRAM_DSIZE_256Mb(3))
+ cmp r8, #DRAM_DEV_DENSITY_256M
+ beq get_width
+
+ and r3, r3, #~(SDRAM_DSIZE_MASK(2) | SDRAM_DSIZE_MASK(3))
+ orr r3, r3, #(SDRAM_DSIZE_512Mb(2) | SDRAM_DSIZE_512Mb(3))
+ cmp r8, #DRAM_DEV_DENSITY_512M
+ beq get_width
+
+ and r3, r3, #~(SDRAM_DSIZE_MASK(2) | SDRAM_DSIZE_MASK(3))
+ orr r3, r3, #(SDRAM_DSIZE_1Gb(2) | SDRAM_DSIZE_1Gb(3))
+ cmp r8, #DRAM_DEV_DENSITY_1G
+ beq get_width
+
+ and r3, r3, #~(SDRAM_DSIZE_MASK(2) | SDRAM_DSIZE_MASK(3))
+ orr r3, r3, #(SDRAM_DSIZE_2Gb(2) | SDRAM_DSIZE_2Gb(3))
+ cmp r8, #DRAM_DEV_DENSITY_2G
+ beq get_width
+
+ /* This is an error. return */
+ b exit_ddrAutoConfig
+
+ /* Get SDRAM width */
+get_width:
+ /* Get bank 0 and 1 width */
+ ldr r6, =0
+ bl _get_width
+
+ cmp r7, #8 /* x8 devices */
+ beq get_bank_2_width
+
+ orr r3, r3, #(SDRAM_ADDRSEL_X16(0) | SDRAM_ADDRSEL_X16(1)) /* x16 devices */
+ cmp r7, #16
+ beq get_bank_2_width
+
+ /* This is an error. return */
+ b exit_ddrAutoConfig
+
+get_bank_2_width:
+ /* Check for second dimm */
+ MV_REG_READ_ASM (r6, r1, DRAM_BUF_REG14)
+ cmp r6, #1
+ bne densCont
+
+ /* Get bank 2 and 3 width */
+ ldr r6, =2
+ bl _get_width
+
+ cmp r7, #8 /* x8 devices */
+ beq densCont
+
+ orr r3, r3, #(SDRAM_ADDRSEL_X16(2) | SDRAM_ADDRSEL_X16(3)) /* x16 devices */
+ cmp r7, #16
+ beq densCont
+
+ /* This is an error. return */
+ b exit_ddrAutoConfig
+
+densCont:
+ MV_REG_WRITE_ASM (r3, r5, DRAM_BUF_REG4)
+
+ /* Set SDRAM timing control low register */
+ ldr r4, =SDRAM_TIMING_CTRL_LOW_REG_DEFAULT
+ /* MV_REG_READ_ASM (r4, r5, SDRAM_TIMING_CTRL_LOW_REG) */
+ MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG5)
+
+ /* Set SDRAM timing control high register */
+ ldr r6, =SDRAM_TIMING_CTRL_HIGH_REG_DEFAULT
+
+ MV_REG_READ_ASM (r4, r5, CPU_RESET_SAMPLE_L_REG)
+ ldr r5, =MSAR_SYSCLCK_MASK
+ and r4, r4, r5
+ ldr r5, =MSAR_SYSCLCK_333
+ cmp r4, r5
+ blt timingHighClock333
+ orr r6, r6, #BIT9
+
+timingHighClock333:
+ /* MV_REG_READ_ASM (r6, r5, SDRAM_TIMING_CTRL_HIGH_REG) */
+ MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG6)
+
+ /* Check for second dimm */
+ MV_REG_READ_ASM (r6, r1, DRAM_BUF_REG14)
+ cmp r6, #1
+ bne single_dimm_odt
+
+ /* Set SDRAM ODT control low register for double DIMM*/
+ ldr r4, =DDR2_ODT_CTRL_LOW_CS0_CS1_CS2_CS3_DV
+ MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG7)
+
+ /* Set DUNIT ODT control register for double DIMM */
+ ldr r4, =DDR2_DUNIT_ODT_CTRL_CS0_CS1_CS2_CS3_DV
+ MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG9)
+
+#ifdef MV78XX0
+ /* Set SDRAM Extended Mode register for double DIMM */
+	/* If the DRAM frequency is higher than 267MHz, set ODT Rtt to 50ohm */
+
+ MV_REG_READ_ASM (r4, r5, CPU_RESET_SAMPLE_L_REG)
+ ldr r5, =MSAR_SYSCLCK_MASK
+ and r4, r4, r5
+ ldr r5, =MSAR_SYSCLCK_267
+ cmp r4, r5
+ beq slow_dram_clock_rtt
+ ldr r5, =MSAR_SYSCLCK_300
+ cmp r4, r5
+ beq slow_dram_clock_rtt
+ ldr r5, =MSAR_SYSCLCK_333
+ cmp r4, r5
+ beq fast_dram_clock_rtt
+ ldr r5, =MSAR_SYSCLCK_400
+ cmp r4, r5
+ beq fast_dram_clock_rtt
+
+ b slow_dram_clock_rtt
+
+fast_dram_clock_rtt:
+ ldr r4, =DDR_SDRAM_EXT_MODE_FAST_CS0_CS1_CS2_CS3_DV
+ MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG10)
+ b odt_config_end
+#endif
+slow_dram_clock_rtt:
+ ldr r4, =DDR_SDRAM_EXT_MODE_CS0_CS1_CS2_CS3_DV
+ MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG10)
+ b odt_config_end
+
+single_dimm_odt:
+ /* Set SDRAM ODT control low register */
+ ldr r4, =DDR2_ODT_CTRL_LOW_CS0_CS1_DV
+ MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG7)
+
+ /* Set DUNIT ODT control register */
+ ldr r4, =DDR2_DUNIT_ODT_CTRL_CS0_CS1_DV
+ MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG9)
+
+ /* Set SDRAM Extended Mode register */
+ ldr r4, =DDR_SDRAM_EXT_MODE_CS0_CS1_DV
+ MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG10)
+
+odt_config_end:
+ /* SDRAM ODT control high register is left as default */
+ MV_REG_READ_ASM (r4, r5, DDR2_SDRAM_ODT_CTRL_HIGH_REG)
+ MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG8)
+
+ /*Read CL and set the DDR2 registers accordingly */
+ MV_REG_READ_ASM (r6, r5, DRAM_BUF_REG2)
+ and r6, r6, #SDRAM_CL_MASK
+ mov r4, r6
+ orr r4, r4, r6, LSL #4
+ orr r4, r4, r6, LSL #8
+ orr r4, r4, r6, LSL #12
+ mov r5, #0x30000
+ add r4, r4, r5
+ sub r4, r4, #0x30
+ /* Set SDRAM Ddr2 Timing Low register */
+ MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG11)
+
+ /* Set SDRAM Ddr2 Timing High register */
+ mov r4, r4, LSR #4
+ MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG12)
+
+timeParamDone:
+ /* Close all windows */
+ MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,0))
+ and r6, r6,#~SCSR_SIZE_MASK
+ and r6, r6,#~1
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,0))
+ MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,1))
+ and r6, r6,#~SCSR_SIZE_MASK
+ and r6, r6,#~1
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,1))
+ MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,2))
+ and r6, r6,#~SCSR_SIZE_MASK
+ and r6, r6,#~1
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,2))
+ MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,3))
+ and r6, r6,#~SCSR_SIZE_MASK
+ and r6, r6,#~1
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,3))
+
+ /* Set sdram bank 0 size and enable it */
+ ldr r6, =0
+ bl _mvDramIfGetDimmSizeFromSpd
+#ifdef MV78XX0
+ /* Check DRAM width */
+ MV_REG_READ_ASM (r4, r5, SDRAM_CONFIG_REG)
+ ldr r5, =SDRAM_DWIDTH_MASK
+ and r4, r4, r5
+ ldr r5, =SDRAM_DWIDTH_64BIT
+ cmp r4, r5
+ beq dram_64bit_width
+ /* Utilize only 32bit width */
+ mov r8, r8, LSR #1
+#else
+ /* Utilize only 16bit width */
+ mov r8, r8, LSR #2
+#endif
+dram_64bit_width:
+ /* Update first dimm size return value R8 */
+ MV_REG_READ_ASM (r5, r6, SDRAM_SIZE_REG(0,0))
+ ldr r6, =~SCSR_SIZE_MASK
+ and r5, r5, r6
+ orr r5, r5, r8
+ MV_REG_WRITE_ASM(r5, r8, SDRAM_SIZE_REG(0,0))
+
+ /* Clear bank 2 size */
+ MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,2))
+ and r6, r6,#~SCSR_SIZE_MASK
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,2))
+
+ /* Check for second dimm */
+ MV_REG_READ_ASM (r6, r1, DRAM_BUF_REG14)
+ cmp r6, #1
+ bne defualt_order
+
+ /* Set sdram bank 2 size */
+ ldr r6, =2
+ bl _mvDramIfGetDimmSizeFromSpd
+#ifdef MV78XX0
+ /* Check DRAM width */
+ MV_REG_READ_ASM (r4, r5, SDRAM_CONFIG_REG)
+ ldr r5, =SDRAM_DWIDTH_MASK
+ and r4, r4, r5
+ ldr r5, =SDRAM_DWIDTH_64BIT
+ cmp r4, r5
+ beq dram_64bit_width2
+ /* Utilize only 32bit width */
+ mov r8, r8, LSR #1
+#else
+ /* Utilize only 16bit width */
+ mov r8, r8, LSR #2
+#endif
+dram_64bit_width2:
+	/* Update second DIMM size return value R8 */
+ MV_REG_READ_ASM (r5, r6, SDRAM_SIZE_REG(0,2))
+ ldr r6, =~SCSR_SIZE_MASK
+ and r5, r5, r6
+ orr r5, r5, r8
+ MV_REG_WRITE_ASM(r5, r8, SDRAM_SIZE_REG(0,2))
+
+ /* Close windows 1 and 3 */
+ MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,1))
+ and r6, r6,#~1
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,1))
+ MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,3))
+ and r6, r6,#~1
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,3))
+
+ /* Check dimm size for setting dram bank order */
+ MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,0))
+ MV_REG_READ_ASM (r4, r5, SDRAM_SIZE_REG(0,2))
+ and r6, r6,#SCSR_SIZE_MASK
+ and r4, r4,#SCSR_SIZE_MASK
+ cmp r6, r4
+ bge defualt_order
+
+	/* Bank 2 is bigger than bank 0 */
+ ldr r6,=0
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_BASE_ADDR_REG(0,2))
+
+ /* Open win 2 */
+ MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,2))
+ orr r6, r6,#1
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,2))
+
+ ldr sp,=0
+ bl _mvDramIfConfig
+#ifdef MV78XX0
+ /* Init ECC on CS 2 */
+ ldr r0, =2
+ bl _mvDramIfEccMemInit
+#endif
+ mov PC, r11 /* r11 is saved link register */
+
+defualt_order:
+
+ /* Open win 0 */
+ MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,0))
+ orr r6, r6,#1
+ MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,0))
+
+ ldr sp,=0
+ bl _mvDramIfConfig
+#ifdef MV78XX0
+ /* Init ECC on CS 0 */
+ ldr r0, =0
+ bl _mvDramIfEccMemInit
+#endif
+exit_ddrAutoConfig:
+ mov PC, r11 /* r11 is saved link register */
+
+
+/***************************************************************************************/
+/* r4 holds I2C EEPROM address
+ * r7 holds I2C EEPROM offset parameter for i2cRead and its --> returned value
+ * r8 holds SDRAM various configuration registers value.
+ * r13 holds Link register
+ */
+/**************************/
+_getDensity:
+ mov r13, LR /* Save link register */
+
+ /* Read SPD rank size from DIMM0 */
+ mov r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0 */
+
+ cmp r6, #0
+ beq 1f
+
+ /* Read SPD rank size from DIMM1 */
+ mov r4, #MV_BOARD_DIMM1_I2C_ADDR /* reading from DIMM1 */
+
+1:
+ mov r7, #NUM_OF_ROWS_OFFSET /* offset 3 */
+ bl _i2cRead
+ mov r8, r7 /* r8 save number of rows */
+
+ mov r7, #NUM_OF_COLS_OFFSET /* offset 4 */
+ bl _i2cRead
+ add r8, r8, r7 /* r8 = number of rows + number of col */
+
+ mov r7, #0x1
+ mov r8, r7, LSL r8 /* r8 = (1 << r8) */
+
+ mov r7, #SDRAM_WIDTH_OFFSET /* offset 13 */
+ bl _i2cRead
+ mul r8, r7, r8
+
+ mov r7, #NUM_OF_BANKS_OFFSET /* offset 17 */
+ bl _i2cRead
+ mul r7, r8, r7
+
+ mov PC, r13
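+
+/* Worked example (illustrative): a DIMM reporting 13 row bits, 10 column bits,
+ * x8 devices and 4 internal banks yields (1 << (13 + 10)) * 8 * 4 = 0x10000000,
+ * i.e. 256Mbit per device once the caller shifts the result right by 20
+ * (0x10000000 >> 20 = 0x100 = 256 = DRAM_DEV_DENSITY_256M). */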
+
+/**************************/
+_get_width:
+ mov r13, LR /* Save link register */
+
+ /* Read SPD rank size from DIMM0 */
+ mov r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0 */
+
+ cmp r6, #0
+ beq 1f
+
+ /* Read SPD rank size from DIMM1 */
+ mov r4, #MV_BOARD_DIMM1_I2C_ADDR /* reading from DIMM1 */
+
+1:
+ /* Get SDRAM width (SPD offset 13) */
+ mov r7, #SDRAM_WIDTH_OFFSET
+ bl _i2cRead /* result in r7 */
+
+ mov PC, r13
+
+/**************************/
+_get_CAL:
+ mov r13, LR /* Save link register */
+
+ /* Set maximum CL supported by DIMM */
+ mov r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0 */
+ mov r7, #SUPPORTED_CL_OFFSET /* offset 18 */
+ bl _i2cRead
+
+ mov PC, r13
+
+/**************************/
+/* R8 - sdram configuration register.
+ * Return value in flag if no-registered then Z-flag is set
+ */
+_is_Registered:
+ mov r13, LR /* Save link register */
+#if defined(MV645xx)
+ /* Get registered/non registered info from DIMM */
+ tst r8, #SDRAM_DTYPE_DDR2
+ bne regDdr2
+
+regDdr1:
+ mov r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0 */
+ mov r7, #SDRAM_MODULES_ATTR_OFFSET
+ bl _i2cRead /* result in r7 */
+
+ tst r7, #0x2
+ b exit
+#endif
+regDdr2:
+ mov r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0 */
+ mov r7, #DIMM_TYPE_INFO_OFFSET
+ bl _i2cRead /* result in r7 */
+
+ tst r7, #0x11 /* DIMM type = regular RDIMM (0x01) */
+ /* or Mini-RDIMM (0x10) */
+exit:
+ mov PC, r13
+
+
+/**************************/
+/* Return value in flag if no-Ecc then Z-flag is set */
+_is_Ecc:
+ mov r13, LR /* Save link register */
+
+ mov r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0 */
+ mov r7, #DIMM_CONFIG_TYPE
+ bl _i2cRead /* result in r7 */
+
+ tst r7, #0x2 /* bit 1 -> Data ECC */
+ mov PC, r13
+
+/**************************/
+/* Return value in flag if no second DIMM then Z-flag is set */
+_is_Second_Dimm_Exist:
+ mov r13, LR /* Save link register */
+
+	mov	r4, #MV_BOARD_DIMM1_I2C_ADDR	/* reading from DIMM1 */
+ mov r7, #DIMM_TYPE_OFFSET
+ bl _i2cRead /* result in r7 */
+
+ tst r7, #0x8 /* bit3 is '1' -> DDR 2 */
+ mov PC, r13
+
+/*******************************************************************************
+* _mvDramIfGetDimmSizeFromSpd - read the selected DRAM bank's size
+*
+* DESCRIPTION:
+*       The function reads the DRAM bank size from the SPD (SPD version 1.0 and above).
+*
+* INPUT:
+* r6 - dram bank number.
+*
+* OUTPUT:
+* none
+*/
+_mvDramIfGetDimmSizeFromSpd:
+
+ mov r13, LR /* Save link register */
+
+ /* Read SPD rank size from DIMM0 */
+ mov r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0 */
+
+ cmp r6, #0
+ beq 1f
+
+ /* Read SPD rank size from DIMM1 */
+ mov r4, #MV_BOARD_DIMM1_I2C_ADDR /* reading from DIMM1 */
+
+1:
+ mov r7, #RANK_SIZE_OFFSET /* offset 31 */
+ bl _i2cRead
+
+pass_read:
+ ldr r8, =(0x7 << SCSR_SIZE_OFFS)
+ cmp r7, #DRAM_RANK_DENSITY_128M
+ beq endDimmSize
+
+ ldr r8, =(0xf << SCSR_SIZE_OFFS)
+ cmp r7, #DRAM_RANK_DENSITY_256M
+ beq endDimmSize
+
+ ldr r8, =(0x1f << SCSR_SIZE_OFFS)
+ cmp r7, #DRAM_RANK_DENSITY_512M
+ beq endDimmSize
+
+ ldr r8, =(0x3f << SCSR_SIZE_OFFS)
+ cmp r7, #DRAM_RANK_DENSITY_1G
+ beq endDimmSize
+
+ ldr r8, =(0x7f << SCSR_SIZE_OFFS) /* DRAM_RANK_DENSITY_2G */
+endDimmSize:
+ mov PC, r13
+#endif
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.S b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.S
new file mode 100644
index 000000000..88527e58c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.S
@@ -0,0 +1,528 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvDramIfBasicAsm.s
+*
+* DESCRIPTION:
+*       Full memory detection and best timing configuration are done in
+*       C code. The C runtime environment requires a stack, so this module's
+*       API initializes DRAM interface chip select 0 with basic functionality
+*       to allow the use of a stack.
+*       The module API assumes DRAM information is stored in an I2C EEPROM
+*       residing at a given I2C address, MV_BOARD_DIMM0_I2C_ADDR. The EEPROM's
+*       internal data structure is assumed to be organized in the common DRAM
+*       vendor SPD format.
+* NOTE: DFCDL values are assumed to be already initialized prior to
+* this module API activity.
+*
+*
+* DEPENDENCIES:
+* None.
+*
+*******************************************************************************/
+
+/* includes */
+#define _ASMLANGUAGE
+#define MV_ASMLANGUAGE
+#include "mvOsAsm.h"
+#include "mvSysHwConfig.h"
+#include "mvDramIfRegs.h"
+#include "mvDramIfConfig.h"
+#include "ctrlEnv/sys/mvCpuIfRegs.h"
+#include "pex/mvPexRegs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "mvCommon.h"
+
+/* defines */
+
+/* locals */
+.data
+.globl _mvDramIfConfig
+.text
+.globl _mvDramIfMemInit
+
+/*******************************************************************************
+* _mvDramIfConfig - Basic DRAM interface initialization.
+*
+* DESCRIPTION:
+* The function will initialize the following DRAM parameters using the
+* values prepared by mvDramIfDetect routine. Values are located
+* in predefined registers.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+
+_mvDramIfConfig:
+
+ /* Save register on stack */
+ cmp sp, #0
+ beq no_stack_s
+save_on_stack:
+ stmdb sp!, {r1, r2, r3, r4}
+no_stack_s:
+
+ /* Dunit FTDLL Configuration Register */
+	/* 0) Write to SDRAM FTDLL configuration register */
+ ldr r4, = SDRAM_FTDLL_REG_DEFAULT_LEFT;
+ ldr r1, =(INTER_REGS_BASE + SDRAM_FTDLL_CONFIG_LEFT_REG)
+ str r4, [r1]
+ ldr r4, = SDRAM_FTDLL_REG_DEFAULT_RIGHT;
+ ldr r1, =(INTER_REGS_BASE + SDRAM_FTDLL_CONFIG_RIGHT_REG)
+ str r4, [r1]
+ ldr r4, = SDRAM_FTDLL_REG_DEFAULT_UP;
+ ldr r1, =(INTER_REGS_BASE + SDRAM_FTDLL_CONFIG_UP_REG)
+ str r4, [r1]
+
+	/* 1) Write to SDRAM configuration register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG1)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_CONFIG_REG)
+ str r4, [r1]
+
+ /* 2) Write Dunit control low register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG3)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_DUNIT_CTRL_REG)
+ str r4, [r1]
+
+ /* 2) Write Dunit control high register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG13)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_DUNIT_CTRL_HI_REG)
+ str r4, [r1]
+
+ /* 3) Write SDRAM address control register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG4)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_ADDR_CTRL_REG)
+ str r4, [r1]
+#if defined(MV_STATIC_DRAM_ON_BOARD)
+ /* 4) Write SDRAM bank 0 size register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG0)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_SIZE_REG(0,0))
+ str r4, [r1]
+#endif
+
+ /* 5) Write SDRAM open pages control register */
+ ldr r1, =(INTER_REGS_BASE + SDRAM_OPEN_PAGE_CTRL_REG)
+ ldr r4, =SDRAM_OPEN_PAGES_CTRL_REG_DV
+ str r4, [r1]
+
+ /* 6) Write SDRAM timing Low register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG5)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_TIMING_CTRL_LOW_REG)
+ str r4, [r1]
+
+ /* 7) Write SDRAM timing High register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG6)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_TIMING_CTRL_HIGH_REG)
+ str r4, [r1]
+
+ /* Config DDR2 On Die Termination (ODT) registers */
+ /* Write SDRAM DDR2 ODT control low register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG7)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + DDR2_SDRAM_ODT_CTRL_LOW_REG)
+ str r4, [r1]
+
+ /* Write SDRAM DDR2 ODT control high register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG8)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + DDR2_SDRAM_ODT_CTRL_HIGH_REG)
+ str r4, [r1]
+
+ /* Write SDRAM DDR2 Dunit ODT control register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG9)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + DDR2_DUNIT_ODT_CONTROL_REG)
+ str r4, [r1]
+
+ /* Write DDR2 SDRAM timing Low register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG11)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_DDR2_TIMING_LO_REG)
+ str r4, [r1]
+
+ /* Write DDR2 SDRAM timing High register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG12)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_DDR2_TIMING_HI_REG)
+ str r4, [r1]
+
+ /* 8) Write SDRAM mode register */
+ /* The CPU must not attempt to change the SDRAM Mode register setting */
+ /* prior to DRAM controller completion of the DRAM initialization */
+ /* sequence. To guarantee this restriction, it is recommended that */
+ /* the CPU sets the SDRAM Operation register to NOP command, performs */
+ /* read polling until the register is back in Normal operation value, */
+ /* and then sets SDRAM Mode register to its new value. */
+
+ /* 8.1 write 'nop' to SDRAM operation */
+ mov r4, #0x5 /* 'NOP' command */
+ MV_REG_WRITE_ASM(r4, r1, SDRAM_OPERATION_REG)
+
+ /* 8.2 poll SDRAM operation. Make sure its back to normal operation */
+_sdramOpPoll1:
+ ldr r4, [r1]
+ cmp r4, #0 /* '0' = Normal SDRAM Mode */
+ bne _sdramOpPoll1
+
+	/* 8.3 Now it's safe to write the new value to the SDRAM Mode register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG2)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_MODE_REG)
+ str r4, [r1]
+
+ /* 8.4 Make the Dunit write the DRAM its new mode */
+ mov r4, #0x3 /* Mode Register Set command */
+ MV_REG_WRITE_ASM (r4, r1, SDRAM_OPERATION_REG)
+
+ /* 8.5 poll SDRAM operation. Make sure its back to normal operation */
+_sdramOpPoll2:
+ ldr r4, [r1]
+ cmp r4, #0 /* '0' = Normal SDRAM Mode */
+ bne _sdramOpPoll2
+
+	/* Now it's safe to write the new value to the SDRAM Extended Mode register */
+ ldr r1, =(INTER_REGS_BASE + DRAM_BUF_REG10)
+ ldr r4, [r1]
+ ldr r1, =(INTER_REGS_BASE + SDRAM_EXTENDED_MODE_REG)
+ str r4, [r1]
+
+	/* 9) Write SDRAM Extended Mode register. This operation should be */
+ /* done for each memory bank */
+ /* write 'nop' to SDRAM operation */
+ mov r4, #0x5 /* 'NOP' command */
+ MV_REG_WRITE_ASM (r4, r1, SDRAM_OPERATION_REG)
+
+ /* poll SDRAM operation. Make sure its back to normal operation */
+_sdramOpPoll3:
+ ldr r4, [r1]
+ cmp r4, #0 /* '0' = Normal SDRAM Mode */
+ bne _sdramOpPoll3
+ /* Go over each of the Banks */
+ ldr r3, =0 /* r3 = DRAM bank Num */
+
+extModeLoop:
+ /* Set the SDRAM Operation Control to each of the DRAM banks */
+ mov r4, r3 /* Do not swap the bank counter value */
+ MV_REG_WRITE_ASM (r4, r1, SDRAM_OPERATION_CTRL_REG)
+
+ /* Make the Dunit write the DRAM its new mode */
+ mov r4, #0x4 /* Extended Mode Register Set command */
+ MV_REG_WRITE_ASM (r4, r1, SDRAM_OPERATION_REG)
+
+ /* poll SDRAM operation. Make sure its back to normal operation */
+_sdramOpPoll4:
+ ldr r4, [r1]
+ cmp r4, #0 /* '0' = Normal SDRAM Mode */
+ bne _sdramOpPoll4
+
+ add r3, r3, #1
+ cmp r3, #4 /* 4 = Number of banks */
+ bne extModeLoop
+
+extModeEnd:
+	cmp	sp, #0
+ beq no_stack_l
+ mov r1, LR /* Save link register */
+#if defined(MV78XX0)
+ bl _mvDramIfMemInit
+#endif
+ mov LR,r1 /* restore link register */
+load_from_stack:
+ /* Restore registers */
+ ldmia sp!, {r1, r2, r3, r4}
+no_stack_l:
+
+ mov pc, lr
+
+
+/*******************************************************************************
+* _mvDramIfEccMemInit - Basic DRAM ECC initialization.
+*
+* DESCRIPTION:
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+#define XOR_CHAN0 0 /* XOR channel 0 used for memory initialization */
+#define XOR_UNIT0 0 /* XOR unit 0 used for memory initialization */
+#define XOR_ADDR_DEC_WIN0 0 /* Enable DRAM access using XOR decode window 0 */
+/* XOR engine register offsets macros */
+#define XOR_CONFIG_REG(chan) (XOR_UNIT_BASE(0) + 0x10 + ((chan) * 4))
+#define XOR_ACTIVATION_REG(chan) (XOR_UNIT_BASE(0) + 0x20 + ((chan) * 4))
+#define XOR_CAUSE_REG (XOR_UNIT_BASE(0) + 0x30)
+#define XOR_ERROR_CAUSE_REG (XOR_UNIT_BASE(0) + 0x50)
+#define XOR_ERROR_ADDR_REG (XOR_UNIT_BASE(0) + 0x60)
+#define XOR_INIT_VAL_LOW_REG (XOR_UNIT_BASE(0) + 0x2E0)
+#define XOR_INIT_VAL_HIGH_REG (XOR_UNIT_BASE(0) + 0x2E4)
+#define XOR_DST_PTR_REG(chan) (XOR_UNIT_BASE(0) + 0x2B0 + ((chan) * 4))
+#define XOR_BLOCK_SIZE_REG(chan) (XOR_UNIT_BASE(0) + 0x2C0 + ((chan) * 4))
+
+/* XOR Engine Address Decoding Register Map */
+#define XOR_WINDOW_CTRL_REG(unit,chan) (XOR_UNIT_BASE(unit)+(0x240 + ((chan) * 4)))
+#define XOR_BASE_ADDR_REG(unit,winNum) (XOR_UNIT_BASE(unit)+(0x250 + ((winNum) * 4)))
+#define XOR_SIZE_MASK_REG(unit,winNum) (XOR_UNIT_BASE(unit)+(0x270 + ((winNum) * 4)))
+
+.globl _mvDramIfEccMemInit
+/*******************************************************************************
+* _mvDramIfEccMemInit - mem init for dram cs
+*
+* DESCRIPTION:
+*       This function clears the CS memory by using the XOR engine memory init.
+*
+* INPUT:
+* r0 - dram bank number.
+*
+* OUTPUT:
+* none
+*/
+_mvDramIfEccMemInit:
+
+ /* Save register on stack */
+ cmp sp, #0
+ beq no_stack_s1
+save_on_stack1:
+ stmdb sp!, {r0,r1, r2, r3, r4, r5, r6}
+no_stack_s1:
+
+ ldr r1, = 0
+
+ /* Disable all XOR address decode windows to avoid possible overlap */
+ MV_REG_WRITE_ASM (r1, r5, (XOR_WINDOW_CTRL_REG(XOR_UNIT0,XOR_CHAN0)))
+
+ /* Init r5 to first XOR_SIZE_MASK_REG */
+ mov r5, r0, LSL #3
+ add r5, r5,#0x1500
+ add r5, r5,#0x04
+ add r5, r5,#(INTER_REGS_BASE)
+ ldr r6, [r5]
+ HTOLL(r6,r5)
+ MV_REG_WRITE_ASM (r6, r5, XOR_SIZE_MASK_REG(XOR_UNIT0,XOR_ADDR_DEC_WIN0))
+
+ mov r5, r0, LSL #3
+ add r5, r5,#0x1500
+ add r5, r5,#(INTER_REGS_BASE)
+ ldr r6, [r5]
+ /* Update destination & size */
+ MV_REG_WRITE_ASM(r6, r5, XOR_DST_PTR_REG(XOR_CHAN0))
+ HTOLL(r6,r5)
+ /* Init r6 to first XOR_BASE_ADDR_REG */
+ ldr r4, = 0xf
+ ldr r5, = 0x1
+ mov r5, r5, LSL r0
+ bic r4, r4, r5
+ mov r4, r4, LSL #8
+
+ orr r6, r6, r4
+ MV_REG_WRITE_ASM (r6, r5, XOR_BASE_ADDR_REG(XOR_UNIT0,XOR_ADDR_DEC_WIN0))
+
+ ldr r6, = 0xff0001
+ MV_REG_WRITE_ASM (r6, r5, XOR_WINDOW_CTRL_REG(XOR_UNIT0,XOR_CHAN0))
+
+ /* Configure XOR engine for memory init function. */
+ MV_REG_READ_ASM (r6, r5, XOR_CONFIG_REG(XOR_CHAN0))
+ and r6, r6, #~0x7 /* Clear operation mode field */
+ orr r6, r6, #0x4 /* Set operation to memory init */
+ MV_REG_WRITE_ASM(r6, r5, XOR_CONFIG_REG(XOR_CHAN0))
+
+ /* Set initVal in the XOR Engine Initial Value Registers */
+ ldr r6, = 0xfeedfeed
+ MV_REG_WRITE_ASM(r6, r5, XOR_INIT_VAL_LOW_REG)
+ ldr r6, = 0xfeedfeed
+ MV_REG_WRITE_ASM(r6, r5, XOR_INIT_VAL_HIGH_REG)
+
+ /* Set block size using DRAM bank size */
+
+ mov r5, r0, LSL #3
+ add r5, r5,#0x1500
+ add r5, r5,#0x04
+ add r5, r5,#(INTER_REGS_BASE)
+
+ ldr r6, [r5]
+ HTOLL(r6,r5)
+ and r6, r6, #SCSR_SIZE_MASK
+ mov r5, r6, LSR #SCSR_SIZE_OFFS
+ add r5, r5, #1
+ mov r6, r5, LSL #SCSR_SIZE_OFFS
+ MV_REG_WRITE_ASM(r6, r5, XOR_BLOCK_SIZE_REG(XOR_CHAN0))
+
+ /* Clean interrupt cause*/
+ MV_REG_WRITE_ASM(r1, r5, XOR_CAUSE_REG)
+
+ /* Clean error interrupt cause*/
+ MV_REG_READ_ASM(r6, r5, XOR_ERROR_CAUSE_REG)
+ MV_REG_READ_ASM(r6, r5, XOR_ERROR_ADDR_REG)
+
+ /* Start transfer */
+ MV_REG_READ_ASM (r6, r5, XOR_ACTIVATION_REG(XOR_CHAN0))
+	orr	r6, r6, #0x1		/* Perform start command */
+ MV_REG_WRITE_ASM(r6, r5, XOR_ACTIVATION_REG(XOR_CHAN0))
+
+ /* Wait for engine to finish */
+waitForComplete:
+ MV_REG_READ_ASM(r6, r5, XOR_CAUSE_REG)
+ and r6, r6, #2
+ cmp r6, #0
+ beq waitForComplete
+
+ /* Clear all error report registers */
+ MV_REG_WRITE_ASM(r1, r5, SDRAM_SINGLE_BIT_ERR_CNTR_REG)
+ MV_REG_WRITE_ASM(r1, r5, SDRAM_DOUBLE_BIT_ERR_CNTR_REG)
+
+ MV_REG_WRITE_ASM(r1, r5, SDRAM_ERROR_CAUSE_REG)
+
+ cmp sp, #0
+ beq no_stack_l1
+load_from_stack1:
+ ldmia sp!, {r0, r1, r2, r3, r4, r5, r6}
+no_stack_l1:
+ mov pc, lr
+
+
+/*******************************************************************************
+* mvDramIfMemInit - Use XOR to clear all memory.
+*
+* DESCRIPTION:
+*	Use the assembler function _mvDramIfEccMemInit to fill all memory with the 0xFEEDFEED pattern.
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+#if defined(MV78XX0)
+
+_mvDramIfMemInit:
+ stmdb sp!, {r0,r1, r2, r3, r4, r5, r6}
+ mov r6, LR /* Save link register */
+ /* Check if dram bank 0 has to be init for ECC */
+ MV_REG_READ_ASM (r0, r5, SDRAM_SIZE_REG(0,0))
+ and r3, r0, #SCSR_WIN_EN
+ cmp r3, #0
+ beq no_bank_0
+ MV_REG_READ_ASM(r0, r5, SDRAM_BASE_ADDR_REG(0,0))
+ cmp r0, #0
+ beq no_bank_0
+ mov r0,#0
+ bl _mvDramIfEccMemInit
+
+no_bank_0:
+ /* Check if dram bank 1 has to be init for ECC */
+ MV_REG_READ_ASM (r0, r5, SDRAM_SIZE_REG(0,1))
+ and r0, r0, #SCSR_WIN_EN
+ cmp r0, #0
+ beq no_bank_1
+ mov r0,#1
+ bl _mvDramIfEccMemInit
+no_bank_1:
+ /* Check if dram bank 2 has to be init for ECC */
+ MV_REG_READ_ASM (r0, r5, SDRAM_SIZE_REG(0,2))
+ and r0, r0, #SCSR_WIN_EN
+ cmp r0, #0
+ beq no_bank_2
+ MV_REG_READ_ASM(r0, r5, SDRAM_BASE_ADDR_REG(0,2))
+ cmp r0, #0
+ beq no_bank_2
+ mov r0,#2
+ bl _mvDramIfEccMemInit
+
+no_bank_2:
+ /* Check if dram bank 3 has to be init for ECC */
+ MV_REG_READ_ASM (r0, r5, SDRAM_SIZE_REG(0,3))
+ and r0, r0, #SCSR_WIN_EN
+ cmp r0, #0
+ beq no_bank_3
+ mov r0,#3
+ bl _mvDramIfEccMemInit
+no_bank_3:
+ mov LR ,r6 /* restore link register */
+ ldmia sp!, {r0, r1, r2, r3, r4, r5, r6}
+ mov pc, lr
+#endif
+
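Purely as an aid to reading the assembly above (this is not part of the patch), the register sequence that _mvDramIfEccMemInit drives can be sketched in C. MV_REG_READ()/MV_REG_WRITE() stand in for the MV_REG_*_ASM macros and are assumed to be plain 32-bit register accessors; the real routine runs before a stack or DRAM is usable and also applies HTOLL byte swapping, both of which the sketch omits.

    /* Illustrative sketch only, not from the Marvell sources: scrub one DRAM
     * chip select (cs) with the XOR engine's memory-init operation. */
    static void dram_cs_scrub_sketch(unsigned int cs)
    {
        unsigned int base, size, ctrl;

        /* Disable XOR decode window 0 to avoid an overlap while reprogramming */
        MV_REG_WRITE(XOR_WINDOW_CTRL_REG(XOR_UNIT0, XOR_CHAN0), 0);

        /* Mirror the DRAM CS base/size registers into XOR decode window 0
         * (the real code also folds the CS enable mask into the base value) */
        size = MV_REG_READ(SDRAM_SIZE_REG(0, cs));
        base = MV_REG_READ(SDRAM_BASE_ADDR_REG(0, cs));
        MV_REG_WRITE(XOR_SIZE_MASK_REG(XOR_UNIT0, XOR_ADDR_DEC_WIN0), size);
        MV_REG_WRITE(XOR_BASE_ADDR_REG(XOR_UNIT0, XOR_ADDR_DEC_WIN0), base);
        MV_REG_WRITE(XOR_DST_PTR_REG(XOR_CHAN0), base);
        MV_REG_WRITE(XOR_WINDOW_CTRL_REG(XOR_UNIT0, XOR_CHAN0), 0xff0001);

        /* Select the "memory init" operation (0x4) in the channel config */
        ctrl = MV_REG_READ(XOR_CONFIG_REG(XOR_CHAN0));
        ctrl = (ctrl & ~0x7) | 0x4;
        MV_REG_WRITE(XOR_CONFIG_REG(XOR_CHAN0), ctrl);

        /* 64-bit pattern written to every word of the chip select */
        MV_REG_WRITE(XOR_INIT_VAL_LOW_REG,  0xfeedfeed);
        MV_REG_WRITE(XOR_INIT_VAL_HIGH_REG, 0xfeedfeed);

        /* Block size = CS size: SCSR size field + 1, shifted back to bytes */
        size = ((size & SCSR_SIZE_MASK) >> SCSR_SIZE_OFFS) + 1;
        MV_REG_WRITE(XOR_BLOCK_SIZE_REG(XOR_CHAN0), size << SCSR_SIZE_OFFS);

        /* Clear the cause register, start the engine, poll the "done" bit */
        MV_REG_WRITE(XOR_CAUSE_REG, 0);
        MV_REG_WRITE(XOR_ACTIVATION_REG(XOR_CHAN0),
                     MV_REG_READ(XOR_ACTIVATION_REG(XOR_CHAN0)) | 0x1);
        while (!(MV_REG_READ(XOR_CAUSE_REG) & 0x2))
            ;
    }

_mvDramIfMemInit then simply walks chip selects 0..3 and invokes the routine for every CS whose SCSR window-enable bit is set.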
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.h
new file mode 100644
index 000000000..6141c46a0
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.h
@@ -0,0 +1,157 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvDramIfConfigh
+#define __INCmvDramIfConfigh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* includes */
+
+/* defines */
+
+/* registers defaults values */
+
+#define SDRAM_CONFIG_DV (SDRAM_SRMODE_DRAM | BIT25 | BIT30)
+
+#define SDRAM_DUNIT_CTRL_LOW_DDR2_DV \
+ (SDRAM_SRCLK_KEPT | \
+ SDRAM_CLK1DRV_NORMAL | \
+ (BIT28 | BIT29))
+
+#define SDRAM_ADDR_CTRL_DV 2
+
+#define SDRAM_TIMING_CTRL_LOW_REG_DV \
+ ((0x2 << SDRAM_TRCD_OFFS) | \
+ (0x2 << SDRAM_TRP_OFFS) | \
+ (0x1 << SDRAM_TWR_OFFS) | \
+ (0x0 << SDRAM_TWTR_OFFS) | \
+ (0x5 << SDRAM_TRAS_OFFS) | \
+ (0x1 << SDRAM_TRRD_OFFS))
+
+/* Note: value of 0 in register means one cycle, 1 means two and so on */
+#define SDRAM_TIMING_CTRL_HIGH_REG_DV \
+ ((0x0 << SDRAM_TR2R_OFFS) | \
+ (0x0 << SDRAM_TR2W_W2R_OFFS) | \
+ (0x1 << SDRAM_TW2W_OFFS))
+
+#define SDRAM_OPEN_PAGES_CTRL_REG_DV SDRAM_OPEN_PAGE_EN
+
+/* Presence Ctrl Low Ctrl High Dunit Ctrl Ext Mode */
+/* CS0 0x84210000 0x00000000 0x0000780F 0x00000440 */
+/* CS0+CS1 0x84210000 0x00000000 0x0000780F 0x00000440 */
+/* CS0+CS2 0x030C030C 0x00000000 0x0000740F 0x00000404 */
+/* CS0+CS1+CS2 0x030C030C 0x00000000 0x0000740F 0x00000404 */
+/* CS0+CS2+CS3 0x030C030C 0x00000000 0x0000740F 0x00000404 */
+/* CS0+CS1+CS2+CS3 0x030C030C 0x00000000 0x0000740F 0x00000404 */
+
+#define DDR2_ODT_CTRL_LOW_CS0_CS1_DV 0x84210000
+#define DDR2_ODT_CTRL_HIGH_CS0_CS1_DV 0x00000000
+#define DDR2_DUNIT_ODT_CTRL_CS0_CS1_DV 0x0000E80F
+#ifdef MV78XX0
+#define DDR_SDRAM_EXT_MODE_CS0_CS1_DV 0x00000040
+#else
+#define DDR_SDRAM_EXT_MODE_CS0_CS1_DV 0x00000440
+#endif
+
+#define DDR2_ODT_CTRL_LOW_CS0_CS1_CS2_CS3_DV 0x030C030C
+#define DDR2_ODT_CTRL_HIGH_CS0_CS1_CS2_CS3_DV 0x00000000
+#define DDR2_DUNIT_ODT_CTRL_CS0_CS1_CS2_CS3_DV 0x0000F40F
+#ifdef MV78XX0
+#define DDR_SDRAM_EXT_MODE_CS0_CS1_CS2_CS3_DV 0x00000004
+#define DDR_SDRAM_EXT_MODE_FAST_CS0_CS1_CS2_CS3_DV 0x00000044
+#else
+#define DDR_SDRAM_EXT_MODE_CS0_CS1_CS2_CS3_DV 0x00000404
+#define DDR_SDRAM_EXT_MODE_FAST_CS0_CS1_CS2_CS3_DV 0x00000444
+#endif
+
+/* DDR SDRAM Address/Control and Data Pads Calibration default values */
+#define DDR2_ADDR_CTRL_PAD_STRENGTH_TYPICAL_DV \
+ (3 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+
+#define DDR2_DATA_PAD_STRENGTH_TYPICAL_DV \
+ (3 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+
+/* DDR SDRAM Mode Register default value */
+#define DDR2_MODE_REG_DV (SDRAM_BURST_LEN_4 | SDRAM_WR_3_CYC)
+/* DDR SDRAM Timing parameter default values */
+#define SDRAM_TIMING_CTRL_LOW_REG_DEFAULT 0x33136552
+#define SDRAM_TRFC_DEFAULT_VALUE 0x34
+#define SDRAM_TRFC_DEFAULT SDRAM_TRFC_DEFAULT_VALUE
+#define SDRAM_TW2W_DEFALT (0x1 << SDRAM_TW2W_OFFS)
+
+#define SDRAM_TIMING_CTRL_HIGH_REG_DEFAULT (SDRAM_TRFC_DEFAULT | SDRAM_TW2W_DEFALT)
+
+#define SDRAM_FTDLL_REG_DEFAULT_LEFT 0x88C800
+#define SDRAM_FTDLL_REG_DEFAULT_RIGHT 0x88C800
+#define SDRAM_FTDLL_REG_DEFAULT_UP 0x88C800
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvDramIfConfigh */
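As a quick worked example of how these default-value macros compose (keeping in mind the note above that a register field value of 0 means one cycle), SDRAM_TIMING_CTRL_HIGH_REG_DV expands as follows once the field offsets defined in mvDramIfRegs.h below are substituted (tR2R at bit 7, tR2W/tW2R at bit 9, tW2W at bit 11); the macro name here is only for illustration:

    /* Illustration only: numeric expansion of SDRAM_TIMING_CTRL_HIGH_REG_DV */
    #define EXAMPLE_SDRAM_TIMING_CTRL_HIGH_DV \
            ((0x0 << 7)  |  /* tR2R       = 0 -> 1 cycle  */ \
             (0x0 << 9)  |  /* tR2W/tW2R  = 0 -> 1 cycle  */ \
             (0x1 << 11))   /* tW2W       = 1 -> 2 cycles */
    /* i.e. EXAMPLE_SDRAM_TIMING_CTRL_HIGH_DV == 0x800 */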
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfRegs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfRegs.h
new file mode 100644
index 000000000..369eda692
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfRegs.h
@@ -0,0 +1,423 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDramIfRegsh
+#define __INCmvDramIfRegsh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* DDR SDRAM Controller Address Decode Registers */
+ /* SDRAM CSn Base Address Register (SCBAR) */
+#define SDRAM_BASE_ADDR_REG(cpu,csNum) (0x1500 + ((csNum) * 8) + ((cpu) * 0x70))
+#define SCBAR_BASE_OFFS 16
+#define SCBAR_BASE_MASK (0xffff << SCBAR_BASE_OFFS)
+#define SCBAR_BASE_ALIGNMENT 0x10000
+
+/* SDRAM CSn Size Register (SCSR) */
+#define SDRAM_SIZE_REG(cpu,csNum) (0x1504 + ((csNum) * 8) + ((cpu) * 0x70))
+#define SCSR_SIZE_OFFS 24
+#define SCSR_SIZE_MASK (0xff << SCSR_SIZE_OFFS)
+#define SCSR_SIZE_ALIGNMENT 0x1000000
+#define SCSR_WIN_EN BIT0
+
+/* configuration register */
+#define SDRAM_CONFIG_REG (DRAM_BASE + 0x1400)
+#define SDRAM_REFRESH_OFFS 0
+#define SDRAM_REFRESH_MAX 0x3FFF
+#define SDRAM_REFRESH_MASK (SDRAM_REFRESH_MAX << SDRAM_REFRESH_OFFS)
+#define SDRAM_DWIDTH_OFFS 15
+#define SDRAM_DWIDTH_MASK (1 << SDRAM_DWIDTH_OFFS)
+#define SDRAM_DWIDTH_32BIT (0 << SDRAM_DWIDTH_OFFS)
+#define SDRAM_DWIDTH_64BIT (1 << SDRAM_DWIDTH_OFFS)
+#define SDRAM_REGISTERED (1 << 17)
+#define SDRAM_ECC_OFFS 18
+#define SDRAM_ECC_MASK (1 << SDRAM_ECC_OFFS)
+#define SDRAM_ECC_DIS (0 << SDRAM_ECC_OFFS)
+#define SDRAM_ECC_EN (1 << SDRAM_ECC_OFFS)
+#define SDRAM_IERR_OFFS 19
+#define SDRAM_IERR_MASK (1 << SDRAM_IERR_OFFS)
+#define SDRAM_IERR_REPORTE (0 << SDRAM_IERR_OFFS)
+#define SDRAM_IERR_IGNORE (1 << SDRAM_IERR_OFFS)
+#define SDRAM_SRMODE_OFFS 24
+#define SDRAM_SRMODE_MASK (1 << SDRAM_SRMODE_OFFS)
+#define SDRAM_SRMODE_POWER (0 << SDRAM_SRMODE_OFFS)
+#define SDRAM_SRMODE_DRAM (1 << SDRAM_SRMODE_OFFS)
+
+/* dunit control low register */
+#define SDRAM_DUNIT_CTRL_REG (DRAM_BASE + 0x1404)
+#define SDRAM_2T_OFFS 4
+#define SDRAM_2T_MASK (1 << SDRAM_2T_OFFS)
+#define SDRAM_2T_MODE (1 << SDRAM_2T_OFFS)
+
+#define SDRAM_SRCLK_OFFS 5
+#define SDRAM_SRCLK_MASK (1 << SDRAM_SRCLK_OFFS)
+#define SDRAM_SRCLK_KEPT (0 << SDRAM_SRCLK_OFFS)
+#define SDRAM_SRCLK_GATED (1 << SDRAM_SRCLK_OFFS)
+#define SDRAM_CTRL_POS_OFFS 6
+#define SDRAM_CTRL_POS_MASK (1 << SDRAM_CTRL_POS_OFFS)
+#define SDRAM_CTRL_POS_FALL (0 << SDRAM_CTRL_POS_OFFS)
+#define SDRAM_CTRL_POS_RISE (1 << SDRAM_CTRL_POS_OFFS)
+#define SDRAM_CLK1DRV_OFFS 12
+#define SDRAM_CLK1DRV_MASK (1 << SDRAM_CLK1DRV_OFFS)
+#define SDRAM_CLK1DRV_HIGH_Z (0 << SDRAM_CLK1DRV_OFFS)
+#define SDRAM_CLK1DRV_NORMAL (1 << SDRAM_CLK1DRV_OFFS)
+#define SDRAM_CLK2DRV_OFFS 13
+#define SDRAM_CLK2DRV_MASK (1 << SDRAM_CLK2DRV_OFFS)
+#define SDRAM_CLK2DRV_HIGH_Z (0 << SDRAM_CLK2DRV_OFFS)
+#define SDRAM_CLK2DRV_NORMAL (1 << SDRAM_CLK2DRV_OFFS)
+#define SDRAM_SB_OUT_DEL_OFFS 20
+#define SDRAM_SB_OUT_DEL_MAX 0xf
+#define SDRAM_SB_OUT_MASK (SDRAM_SB_OUT_DEL_MAX<<SDRAM_SB_OUT_DEL_OFFS)
+#define SDRAM_SB_IN_DEL_OFFS 24
+#define SDRAM_SB_IN_DEL_MAX 0xf
+#define SDRAM_SB_IN_MASK (SDRAM_SB_IN_DEL_MAX<<SDRAM_SB_IN_DEL_OFFS)
+
+/* dunit control high register */
+#define SDRAM_DUNIT_CTRL_HI_REG (DRAM_BASE + 0x1424)
+#define SDRAM__D2P_OFFS 7
+#define SDRAM__D2P_EN (1 << SDRAM__D2P_OFFS)
+#define SDRAM__P2D_OFFS 8
+#define SDRAM__P2D_EN (1 << SDRAM__P2D_OFFS)
+#define SDRAM__ADD_HALF_FCC_OFFS 9
+#define SDRAM__ADD_HALF_FCC_EN (1 << SDRAM__ADD_HALF_FCC_OFFS)
+#define SDRAM__PUP_ZERO_SKEW_OFFS 10
+#define SDRAM__PUP_ZERO_SKEW_EN (1 << SDRAM__PUP_ZERO_SKEW_OFFS)
+#define SDRAM__WR_MESH_DELAY_OFFS 11
+#define SDRAM__WR_MESH_DELAY_EN (1 << SDRAM__WR_MESH_DELAY_OFFS)
+
+/* sdram timing control low register */
+#define SDRAM_TIMING_CTRL_LOW_REG (DRAM_BASE + 0x1408)
+#define SDRAM_TRCD_OFFS 4
+#define SDRAM_TRCD_MASK (0xF << SDRAM_TRCD_OFFS)
+#define SDRAM_TRP_OFFS 8
+#define SDRAM_TRP_MASK (0xF << SDRAM_TRP_OFFS)
+#define SDRAM_TWR_OFFS 12
+#define SDRAM_TWR_MASK (0xF << SDRAM_TWR_OFFS)
+#define SDRAM_TWTR_OFFS 16
+#define SDRAM_TWTR_MASK (0xF << SDRAM_TWTR_OFFS)
+#define SDRAM_TRAS_OFFS 0
+#define SDRAM_TRAS_MASK (0xF << SDRAM_TRAS_OFFS)
+#define SDRAM_EXT_TRAS_OFFS 20
+#define SDRAM_EXT_TRAS_MASK (0x1 << SDRAM_EXT_TRAS_OFFS)
+#define SDRAM_TRRD_OFFS 24
+#define SDRAM_TRRD_MASK (0xF << SDRAM_TRRD_OFFS)
+#define SDRAM_TRTP_OFFS 28
+#define SDRAM_TRTP_MASK (0xF << SDRAM_TRTP_OFFS)
+#define SDRAM_TRTP_DDR1 (0x1 << SDRAM_TRTP_OFFS)
+
+/* sdram timing control high register */
+#define SDRAM_TIMING_CTRL_HIGH_REG (DRAM_BASE + 0x140c)
+#define SDRAM_TRFC_OFFS 0
+#define SDRAM_TRFC_MASK (0x3F << SDRAM_TRFC_OFFS)
+#define SDRAM_TR2R_OFFS 7
+#define SDRAM_TR2R_MASK (0x3 << SDRAM_TR2R_OFFS)
+#define SDRAM_TR2W_W2R_OFFS 9
+#define SDRAM_TR2W_W2R_MASK (0x3 << SDRAM_TR2W_W2R_OFFS)
+#define SDRAM_TW2W_OFFS 11
+#define SDRAM_TW2W_MASK (0x3 << SDRAM_TW2W_OFFS)
+
+/* sdram DDR2 timing low register (SD2TLR) */
+#define SDRAM_DDR2_TIMING_LO_REG (DRAM_BASE + 0x1428)
+#define SD2TLR_TODT_ON_RD_OFFS 4
+#define SD2TLR_TODT_ON_RD_MASK (0xF << SD2TLR_TODT_ON_RD_OFFS)
+#define SD2TLR_TODT_OFF_RD_OFFS 8
+#define SD2TLR_TODT_OFF_RD_MASK (0xF << SD2TLR_TODT_OFF_RD_OFFS)
+#define SD2TLR_TODT_ON_CTRL_RD_OFFS 12
+#define SD2TLR_TODT_ON_CTRL_RD_MASK (0xF << SD2TLR_TODT_ON_CTRL_RD_OFFS)
+#define SD2TLR_TODT_OFF_CTRL_RD_OFFS 16
+#define SD2TLR_TODT_OFF_CTRL_RD_MASK (0xF << SD2TLR_TODT_OFF_CTRL_RD_OFFS)
+
+/* sdram DDR2 timing high register (SD2TLR) */
+#define SDRAM_DDR2_TIMING_HI_REG (DRAM_BASE + 0x147C)
+#define SD2THR_TODT_ON_WR_OFFS 0
+#define SD2THR_TODT_ON_WR_MASK (0xF << SD2THR_TODT_ON_WR_OFFS)
+#define SD2THR_TODT_OFF_WR_OFFS 4
+#define SD2THR_TODT_OFF_WR_MASK (0xF << SD2THR_TODT_OFF_WR_OFFS)
+#define SD2THR_TODT_ON_CTRL_WR_OFFS 8
+#define SD2THR_TODT_ON_CTRL_WR_MASK (0xF << SD2THR_TODT_ON_CTRL_WR_OFFS)
+#define SD2THR_TODT_OFF_CTRL_WR_OFFS 12
+#define SD2THR_TODT_OFF_CTRL_WR_MASK (0xF << SD2THR_TODT_OFF_CTRL_WR_OFFS)
+
+/* address control register */
+#define SDRAM_ADDR_CTRL_REG (DRAM_BASE + 0x1410)
+#define SDRAM_ADDRSEL_OFFS(cs) (4 * (cs))
+#define SDRAM_ADDRSEL_MASK(cs) (0x3 << SDRAM_ADDRSEL_OFFS(cs))
+#define SDRAM_ADDRSEL_X8(cs) (0x0 << SDRAM_ADDRSEL_OFFS(cs))
+#define SDRAM_ADDRSEL_X16(cs) (0x1 << SDRAM_ADDRSEL_OFFS(cs))
+#define SDRAM_DSIZE_OFFS(cs) (2 + 4 * (cs))
+#define SDRAM_DSIZE_MASK(cs) (0x3 << SDRAM_DSIZE_OFFS(cs))
+#define SDRAM_DSIZE_256Mb(cs) (0x1 << SDRAM_DSIZE_OFFS(cs))
+#define SDRAM_DSIZE_512Mb(cs) (0x2 << SDRAM_DSIZE_OFFS(cs))
+#define SDRAM_DSIZE_1Gb(cs) (0x3 << SDRAM_DSIZE_OFFS(cs))
+#define SDRAM_DSIZE_2Gb(cs) (0x0 << SDRAM_DSIZE_OFFS(cs))
+
+/* SDRAM Open Pages Control registers */
+#define SDRAM_OPEN_PAGE_CTRL_REG (DRAM_BASE + 0x1414)
+#define SDRAM_OPEN_PAGE_EN (0 << 0)
+#define SDRAM_OPEN_PAGE_DIS (1 << 0)
+
+/* sdram operation register */
+#define SDRAM_OPERATION_REG (DRAM_BASE + 0x1418)
+#define SDRAM_CMD_OFFS 0
+#define SDRAM_CMD_MASK (0xF << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_NORMAL (0x0 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_PRECHARGE_ALL (0x1 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_REFRESH_ALL (0x2 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_REG_SET_CMD (0x3 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_EXT_MODE_SET (0x4 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_NOP (0x5 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_SLF_RFRSH (0x7 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_EMRS2_CMD (0x8 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_EMRS3_CMD (0x9 << SDRAM_CMD_OFFS)
+
+/* sdram mode register */
+#define SDRAM_MODE_REG (DRAM_BASE + 0x141c)
+#define SDRAM_BURST_LEN_OFFS 0
+#define SDRAM_BURST_LEN_MASK (0x7 << SDRAM_BURST_LEN_OFFS)
+#define SDRAM_BURST_LEN_4 (0x2 << SDRAM_BURST_LEN_OFFS)
+#define SDRAM_CL_OFFS 4
+#define SDRAM_CL_MASK (0x7 << SDRAM_CL_OFFS)
+#define SDRAM_DDR2_CL_3 (0x3 << SDRAM_CL_OFFS)
+#define SDRAM_DDR2_CL_4 (0x4 << SDRAM_CL_OFFS)
+#define SDRAM_DDR2_CL_5 (0x5 << SDRAM_CL_OFFS)
+#define SDRAM_DDR2_CL_6 (0x6 << SDRAM_CL_OFFS)
+
+#define SDRAM_TM_OFFS 7
+#define SDRAM_TM_MASK (1 << SDRAM_TM_OFFS)
+#define SDRAM_TM_NORMAL (0 << SDRAM_TM_OFFS)
+#define SDRAM_TM_TEST_MODE (1 << SDRAM_TM_OFFS)
+#define SDRAM_DLL_OFFS 8
+#define SDRAM_DLL_MASK (1 << SDRAM_DLL_OFFS)
+#define SDRAM_DLL_NORMAL (0 << SDRAM_DLL_OFFS)
+#define SDRAM_DLL_RESET (1 << SDRAM_DLL_OFFS)
+#define SDRAM_WR_OFFS 9
+#define SDRAM_WR_MAX 7
+#define SDRAM_WR_MASK (SDRAM_WR_MAX << SDRAM_WR_OFFS)
+#define SDRAM_WR_2_CYC (1 << SDRAM_WR_OFFS)
+#define SDRAM_WR_3_CYC (2 << SDRAM_WR_OFFS)
+#define SDRAM_WR_4_CYC (3 << SDRAM_WR_OFFS)
+#define SDRAM_WR_5_CYC (4 << SDRAM_WR_OFFS)
+#define SDRAM_WR_6_CYC (5 << SDRAM_WR_OFFS)
+#define SDRAM_PD_OFFS 12
+#define SDRAM_PD_MASK (1 << SDRAM_PD_OFFS)
+#define SDRAM_PD_FAST_EXIT (0 << SDRAM_PD_OFFS)
+#define SDRAM_PD_SLOW_EXIT (1 << SDRAM_PD_OFFS)
+
+/* DDR SDRAM Extended Mode register (DSEMR) */
+#define SDRAM_EXTENDED_MODE_REG (DRAM_BASE + 0x1420)
+#define DSEMR_DLL_ENABLE 0
+#define DSEMR_DLL_DISABLE 1
+#define DSEMR_DS_OFFS 1
+#define DSEMR_DS_MASK (1 << DSEMR_DS_OFFS)
+#define DSEMR_DS_NORMAL (0 << DSEMR_DS_OFFS)
+#define DSEMR_DS_REDUCED (1 << DSEMR_DS_OFFS)
+#define DSEMR_RTT0_OFFS 2
+#define DSEMR_RTT1_OFFS 6
+#define DSEMR_RTT_ODT_DISABLE		((0 << DSEMR_RTT0_OFFS) | (0 << DSEMR_RTT1_OFFS))
+#define DSEMR_RTT_ODT_75_OHM		((1 << DSEMR_RTT0_OFFS) | (0 << DSEMR_RTT1_OFFS))
+#define DSEMR_RTT_ODT_150_OHM		((0 << DSEMR_RTT0_OFFS) | (1 << DSEMR_RTT1_OFFS))
+#define DSEMR_RTT_ODT_50_OHM		((1 << DSEMR_RTT0_OFFS) | (1 << DSEMR_RTT1_OFFS))
+#define DSEMR_DQS_OFFS 10
+#define DSEMR_DQS_MASK (1 << DSEMR_DQS_OFFS)
+#define DSEMR_DQS_DIFFERENTIAL (0 << DSEMR_DQS_OFFS)
+#define DSEMR_DQS_SINGLE_ENDED (1 << DSEMR_DQS_OFFS)
+#define DSEMR_RDQS_ENABLE (1 << 11)
+#define DSEMR_QOFF_OUTPUT_BUFF_EN (0 << 12)
+#define DSEMR_QOFF_OUTPUT_BUFF_DIS (1 << 12)
+
+/* DDR SDRAM Operation Control Register */
+#define SDRAM_OPERATION_CTRL_REG (DRAM_BASE + 0x142c)
+
+/* Dunit FTDLL Configuration Register */
+#define SDRAM_FTDLL_CONFIG_LEFT_REG (DRAM_BASE + 0x1484)
+#define SDRAM_FTDLL_CONFIG_RIGHT_REG (DRAM_BASE + 0x161C)
+#define SDRAM_FTDLL_CONFIG_UP_REG (DRAM_BASE + 0x1620)
+
+/* Pads Calibration register */
+#define SDRAM_ADDR_CTRL_PADS_CAL_REG (DRAM_BASE + 0x14c0)
+#define SDRAM_DATA_PADS_CAL_REG (DRAM_BASE + 0x14c4)
+#define SDRAM_DRVN_OFFS 0
+#define SDRAM_DRVN_MASK (0x3F << SDRAM_DRVN_OFFS)
+#define SDRAM_DRVP_OFFS 6
+#define SDRAM_DRVP_MASK (0x3F << SDRAM_DRVP_OFFS)
+#define SDRAM_PRE_DRIVER_STRENGTH_OFFS 12
+#define SDRAM_PRE_DRIVER_STRENGTH_MASK (3 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+#define SDRAM_TUNE_EN BIT16
+#define SDRAM_LOCKN_OFFS 17
+#define SDRAM_LOCKN_MAKS (0x3F << SDRAM_LOCKN_OFFS)
+#define SDRAM_LOCKP_OFFS 23
+#define SDRAM_LOCKP_MAKS (0x3F << SDRAM_LOCKP_OFFS)
+#define SDRAM_WR_EN (1 << 31)
+
+/* DDR2 SDRAM ODT Control (Low) Register (DSOCLR) */
+#define DDR2_SDRAM_ODT_CTRL_LOW_REG (DRAM_BASE + 0x1494)
+#define DSOCLR_ODT_RD_OFFS(odtNum) (odtNum * 4)
+#define DSOCLR_ODT_RD_MASK(odtNum) (0xf << DSOCLR_ODT_RD_OFFS(odtNum))
+#define DSOCLR_ODT_RD(odtNum, bank) ((1 << bank) << DSOCLR_ODT_RD_OFFS(odtNum))
+#define DSOCLR_ODT_WR_OFFS(odtNum) (16 + (odtNum * 4))
+#define DSOCLR_ODT_WR_MASK(odtNum) (0xf << DSOCLR_ODT_WR_OFFS(odtNum))
+#define DSOCLR_ODT_WR(odtNum, bank) ((1 << bank) << DSOCLR_ODT_WR_OFFS(odtNum))
+
+/* DDR2 SDRAM ODT Control (High) Register (DSOCHR) */
+#define DDR2_SDRAM_ODT_CTRL_HIGH_REG (DRAM_BASE + 0x1498)
+/* Optional control values to DSOCHR_ODT_EN macro */
+#define DDR2_ODT_CTRL_DUNIT 0
+#define DDR2_ODT_CTRL_NEVER 1
+#define DDR2_ODT_CTRL_ALWAYS 3
+#define DSOCHR_ODT_EN_OFFS(odtNum) (odtNum * 2)
+#define DSOCHR_ODT_EN_MASK(odtNum) (0x3 << DSOCHR_ODT_EN_OFFS(odtNum))
+#define DSOCHR_ODT_EN(odtNum, ctrl) (ctrl << DSOCHR_ODT_EN_OFFS(odtNum))
+
+/* DDR2 Dunit ODT Control Register (DDOCR)*/
+#define DDR2_DUNIT_ODT_CONTROL_REG (DRAM_BASE + 0x149c)
+#define DDOCR_ODT_RD_OFFS 0
+#define DDOCR_ODT_RD_MASK (0xf << DDOCR_ODT_RD_OFFS)
+#define DDOCR_ODT_RD(bank) ((1 << bank) << DDOCR_ODT_RD_OFFS)
+#define DDOCR_ODT_WR_OFFS 4
+#define DDOCR_ODT_WR_MASK (0xf << DDOCR_ODT_WR_OFFS)
+#define DDOCR_ODT_WR(bank) ((1 << bank) << DDOCR_ODT_WR_OFFS)
+#define DSOCR_ODT_EN_OFFS 8
+#define DSOCR_ODT_EN_MASK (0x3 << DSOCR_ODT_EN_OFFS)
+/* For ctrl parameters see DDR2 SDRAM ODT Control (High) Register (0x1498) above. */
+#define DSOCR_ODT_EN(ctrl) (ctrl << DSOCR_ODT_EN_OFFS)
+#define DSOCR_ODT_SEL_DISABLE 0
+#define DSOCR_ODT_SEL_75_OHM 2
+#define DSOCR_ODT_SEL_150_OHM 1
+#define DSOCR_ODT_SEL_50_OHM 3
+#define DSOCR_DQ_ODT_SEL_OFFS 10
+#define DSOCR_DQ_ODT_SEL_MASK (0x3 << DSOCR_DQ_ODT_SEL_OFFS)
+#define DSOCR_DQ_ODT_SEL(odtSel) (odtSel << DSOCR_DQ_ODT_SEL_OFFS)
+#define DSOCR_ST_ODT_SEL_OFFS 12
+#define DSOCR_ST_ODT_SEL_MASK (0x3 << DSOCR_ST_ODT_SEL_OFFS)
+#define DSOCR_ST_ODT_SEL(odtSel) (odtSel << DSOCR_ST_ODT_SEL_OFFS)
+#define DSOCR_ST_ODT_EN (1 << 14)
+
+/* DDR SDRAM Initialization Control Register (DSICR) */
+#define DDR_SDRAM_INIT_CTRL_REG (DRAM_BASE + 0x1480)
+#define DSICR_INIT_EN (1 << 0)
+#define DSICR_T200_SET (1 << 8)
+
+/* sdram extended mode2 register (SEM2R) */
+#define SDRAM_EXTENDED_MODE2_REG (DRAM_BASE + 0x148C)
+#define SEM2R_EMRS2_DDR2_OFFS 0
+#define SEM2R_EMRS2_DDR2_MASK (0x7FFF << SEM2R_EMRS2_DDR2_OFFS)
+
+/* sdram extended mode3 register (SEM3R) */
+#define SDRAM_EXTENDED_MODE3_REG (DRAM_BASE + 0x1490)
+#define SEM3R_EMRS3_DDR2_OFFS 0
+#define SEM3R_EMRS3_DDR2_MASK (0x7FFF << SEM3R_EMRS3_DDR2_OFFS)
+
+/* sdram error registers */
+#define SDRAM_ERROR_CAUSE_REG (DRAM_BASE + 0x14d0)
+#define SDRAM_ERROR_MASK_REG (DRAM_BASE + 0x14d4)
+#define SDRAM_ERROR_DATA_LOW_REG (DRAM_BASE + 0x1444)
+#define SDRAM_ERROR_DATA_HIGH_REG (DRAM_BASE + 0x1440)
+#define SDRAM_ERROR_ADDR_REG (DRAM_BASE + 0x1450)
+#define SDRAM_ERROR_ECC_REG (DRAM_BASE + 0x1448)
+#define SDRAM_CALC_ECC_REG (DRAM_BASE + 0x144c)
+#define SDRAM_ECC_CONTROL_REG (DRAM_BASE + 0x1454)
+#define SDRAM_SINGLE_BIT_ERR_CNTR_REG (DRAM_BASE + 0x1458)
+#define SDRAM_DOUBLE_BIT_ERR_CNTR_REG (DRAM_BASE + 0x145c)
+
+/* SDRAM Error Cause Register (SECR) */
+#define SECR_SINGLE_BIT_ERR BIT0
+#define SECR_DOUBLE_BIT_ERR BIT1
+#define SECR_DATA_PATH_PARITY_ERR BIT2
+/* SDRAM Error Address Register (SEAR) */
+#define SEAR_ERR_TYPE_OFFS 0
+#define SEAR_ERR_TYPE_MASK (1 << SEAR_ERR_TYPE_OFFS)
+#define SEAR_ERR_TYPE_SINGLE 0
+#define SEAR_ERR_TYPE_DOUBLE (1 << SEAR_ERR_TYPE_OFFS)
+#define SEAR_ERR_CS_OFFS 1
+#define SEAR_ERR_CS_MASK (3 << SEAR_ERR_CS_OFFS)
+#define SEAR_ERR_CS(csNum) (csNum << SEAR_ERR_CS_OFFS)
+#define SEAR_ERR_ADDR_OFFS 3
+#define SEAR_ERR_ADDR_MASK (0x1FFFFFFF << SEAR_ERR_ADDR_OFFS)
+
+/* SDRAM ECC Control Register (SECR) */
+#define SECR_FORCEECC_OFFS 0
+#define SECR_FORCEECC_MASK (0xFF << SECR_FORCEECC_OFFS)
+#define SECR_FORCEEN_OFFS 8
+#define SECR_FORCEEN_MASK (1 << SECR_FORCEEN_OFFS)
+#define SECR_ECC_CALC_MASK (0 << SECR_FORCEEN_OFFS)
+#define SECR_ECC_USER_MASK (1 << SECR_FORCEEN_OFFS)
+#define SECR_PERRPROP_EN BIT9
+#define SECR_CNTMODE_OFFS 10
+#define SECR_CNTMODE_MASK (1 << SECR_CNTMODE_OFFS)
+#define SECR_ALL_IN_CS0 (0 << SECR_CNTMODE_OFFS)
+#define SECR_NORMAL_COUNTER (1 << SECR_CNTMODE_OFFS)
+#define SECR_THRECC_OFFS 16
+#define SECR_THRECC_MAX 0xFF
+#define SECR_THRECC_MASK (SECR_THRECC_MAX << SECR_THRECC_OFFS)
+#define SECR_THRECC(threshold) (threshold << SECR_THRECC_OFFS)
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvDramIfRegsh */
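To show how the SCBAR/SCSR fields defined at the top of this file are meant to be combined (it is the same arithmetic the boot-time assembly uses when it programs the XOR block size), here is a minimal sketch; MV_REG_READ is assumed to be a plain 32-bit register accessor and the helper itself is not part of the Marvell sources:

    /* Sketch: decode one chip-select window from SCBAR/SCSR.
     * Returns -1 if the window is disabled. */
    static int dram_cs_window_get(unsigned int cpu, unsigned int cs,
                                  unsigned int *base, unsigned int *size)
    {
        unsigned int scbar = MV_REG_READ(SDRAM_BASE_ADDR_REG(cpu, cs));
        unsigned int scsr  = MV_REG_READ(SDRAM_SIZE_REG(cpu, cs));

        if (!(scsr & SCSR_WIN_EN))
            return -1;

        /* Base is 64KB aligned; the size field encodes (size / 16MB) - 1 */
        *base = scbar & SCBAR_BASE_MASK;
        *size = (((scsr & SCSR_SIZE_MASK) >> SCSR_SIZE_OFFS) + 1)
                << SCSR_SIZE_OFFS;
        return 0;
    }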
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfStaticInit.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfStaticInit.h
new file mode 100644
index 000000000..f3bf83b4e
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfStaticInit.h
@@ -0,0 +1,179 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvDramIfStaticInith
+#define __INCmvDramIfStaticInith
+
+#ifdef MV_STATIC_DRAM_ON_BOARD
+#define STATIC_DRAM_BANK_1
+#undef STATIC_DRAM_BANK_2
+#undef STATIC_DRAM_BANK_3
+#undef STATIC_DRAM_BANK_4
+
+
+#ifdef MV_DIMM_TS256MLQ72V5U
+#define STATIC_DRAM_BANK_2
+#define STATIC_DRAM_BANK_3
+#undef STATIC_DRAM_BANK_4
+
+#define STATIC_SDRAM_CONFIG_REG 0x4724481A /* offset 0x1400 - DMA reg-0xf1000814 */
+#define STATIC_SDRAM_DUNIT_CTRL_REG 0x37707450 /* offset 0x1404 - DMA reg-0xf100081c */
+#define STATIC_SDRAM_TIMING_CTRL_LOW_REG 0x11A13330 /* offset 0x1408 - DMA reg-0xf1000824 */
+#define STATIC_SDRAM_TIMING_CTRL_HIGH_REG 0x00000601 /* offset 0x140c - DMA reg-0xf1000828 */
+#define STATIC_SDRAM_ADDR_CTRL_REG 0x00001CB2 /* offset 0x1410 - DMA reg-0xf1000820 */
+#define STATIC_SDRAM_MODE_REG 0x00000642 /* offset 0x141c - DMA reg-0xf1000818 */
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x030C030C /* 0x1494 */
+#define STATIC_SDRAM_ODT_CTRL_HI 0x00000000 /* 0x1498 */
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x0000740F /* 0x149c */
+#define STATIC_SDRAM_EXT_MODE 0x00000404 /* 0x1420 */
+#define STATIC_SDRAM_DDR2_TIMING_LO 0x00074410 /* 0x1428 */
+#define STATIC_SDRAM_DDR2_TIMING_HI 0x00007441 /* 0x147C */
+
+#define STATIC_SDRAM_RANK0_SIZE_DIMM0 0x3FFF /* size bank0 dimm0 - DMA reg-0xf1000810 */
+#define STATIC_SDRAM_RANK1_SIZE_DIMM0 0x3FFF /* size bank1 dimm0 */
+#define STATIC_SDRAM_RANK0_SIZE_DIMM1 0x3FFF /* size bank0 dimm1 */
+#define STATIC_SDRAM_RANK1_SIZE_DIMM1 0x0 /* size bank1 dimm1 */
+
+#endif /* TS256MLQ72V5U */
+
+
+#ifdef MV_MT9VDDT3272AG
+/* one DIMM 256M */
+#define STATIC_SDRAM_CONFIG_REG 0x5820040d /* offset 0x1400 - DMA reg-0xf1000814 */
+#define STATIC_SDRAM_DUNIT_CTRL_REG 0xC4000540 /* offset 0x1404 - DMA reg-0xf100081c */
+#define STATIC_SDRAM_TIMING_CTRL_LOW_REG 0x01602220 /* offset 0x1408 - DMA reg-0xf1000824 */
+#define STATIC_SDRAM_TIMING_CTRL_HIGH_REG 0x0000000b /* offset 0x140c - DMA reg-0xf1000828 */
+#define STATIC_SDRAM_ADDR_CTRL_REG 0x00000012 /* offset 0x1410 - DMA reg-0xf1000820 */
+#define STATIC_SDRAM_MODE_REG 0x00000062 /* offset 0x141c - DMA reg-0xf1000818 */
+#define STATIC_SDRAM_RANK0_SIZE_DIMM0 0x0fff /* size bank0 dimm0 - DMA reg-0xf1000810 */
+#define STATIC_SDRAM_RANK0_SIZE_DIMM1 0x0 /* size bank0 dimm1 */
+
+#endif /* MV_MT9VDDT3272AG */
+
+
+
+#ifdef MV_D27RB12P
+/*
+Two DIMM 512M + ECC enabled, Registered DIMM CAS Latency 2.5
+*/
+
+#define STATIC_SDRAM_CONFIG_REG 0x6826081E /* offset 0x1400 - DMA reg-0xf1000814 */
+#define STATIC_SDRAM_DUNIT_CTRL_REG 0xC5000540 /* offset 0x1404 - DMA reg-0xf100081c */
+#define STATIC_SDRAM_TIMING_CTRL_LOW_REG 0x01501220 /* offset 0x1408 - DMA reg-0xf1000824 */
+#define STATIC_SDRAM_TIMING_CTRL_HIGH_REG 0x00000009 /* offset 0x140c - DMA reg-0xf1000828 */
+#define STATIC_SDRAM_ADDR_CTRL_REG 0x00000012 /* offset 0x1410 - DMA reg-0xf1000820 */
+#define STATIC_SDRAM_MODE_REG 0x00000062 /* offset 0x141c - DMA reg-0xf1000818 */
+#define STATIC_SDRAM_RANK0_SIZE_DIMM0 0x0FFF /* size bank0 dimm0 - DMA reg-0xf1000810 */
+#define STATIC_SDRAM_RANK0_SIZE_DIMM1 0x0FFF /* size bank0 dimm1 */
+
+#define STATIC_DRAM_BANK_2
+
+#define STATIC_DRAM_BANK_3
+#define STATIC_DRAM_BANK_4
+
+#endif /* mv_D27RB12P */
+
+#ifdef RD_MV645XX
+
+#define STATIC_MEM_TYPE MEM_TYPE_DDR2
+#define STATIC_DIMM_INFO_BANK0_SIZE 256
+/* DDR2 boards 256 MB*/
+
+#define STATIC_SDRAM_RANK0_SIZE_DIMM0 0x00000fff /* size bank0 dimm0 - DMA reg-0xf1000810 */
+#define STATIC_SDRAM_CONFIG_REG 0x07190618
+#define STATIC_SDRAM_MODE_REG 0x00000432
+#define STATIC_SDRAM_DUNIT_CTRL_REG 0xf4a03440
+#define STATIC_SDRAM_ADDR_CTRL_REG 0x00000022
+#define STATIC_SDRAM_TIMING_CTRL_LOW_REG 0x11712220
+#define STATIC_SDRAM_TIMING_CTRL_HIGH_REG 0x00000504
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x84210000
+#define STATIC_SDRAM_ODT_CTRL_HI 0x00000000
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x0000780f
+#define STATIC_SDRAM_EXT_MODE 0x00000440
+#define STATIC_SDRAM_DDR2_TIMING_LO 0x00063300
+#define STATIC_SDRAM_DDR2_TIMING_HI 0x00006330
+#endif /* RD_MV645XX */
+
+#ifdef MV_DIMM_M3783354CZ3_CE6
+
+#define STATIC_SDRAM_RANK0_SIZE_DIMM0 0x00000FFF /* 0x2010 size bank0 dimm0 - DMA reg-0xf1000810 */
+#define STATIC_SDRAM_CONFIG_REG 0x07190618 /* 0x1400 */
+#define STATIC_SDRAM_MODE_REG 0x00000432 /* 0x141c */
+#define STATIC_SDRAM_DUNIT_CTRL_REG 0xf4a03440 /* 0x1404 */
+#define STATIC_SDRAM_ADDR_CTRL_REG 0x00000022 /* 0x1410 */
+#define STATIC_SDRAM_TIMING_CTRL_LOW_REG 0x11712220 /* 0x1408 */
+#define STATIC_SDRAM_TIMING_CTRL_HIGH_REG 0x00000504 /* 0x140c */
+#define STATIC_SDRAM_ODT_CTRL_LOW 0x84210000 /* 0x1494 */
+#define STATIC_SDRAM_ODT_CTRL_HI 0x00000000 /* 0x1498 */
+#define STATIC_SDRAM_DUNIT_ODT_CTRL 0x0000780f /* 0x149c */
+#define STATIC_SDRAM_EXT_MODE 0x00000440 /* 0x1420 */
+#define STATIC_SDRAM_DDR2_TIMING_LO 0x00063300 /* 0x1428 */
+#define STATIC_SDRAM_DDR2_TIMING_HI 0x00006330 /* 0x147C */
+
+#endif /* MV_DIMM_M3783354CZ3_CE6 */
+
+#endif /* MV_STATIC_DRAM_ON_BOARD */
+#endif /* __INCmvDramIfStaticInith */
+
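The STATIC_SDRAM_* values above are raw register images, and the comment next to each one names the Dunit register offset it belongs to. A board that takes this static path would, in effect, write them out roughly as below; the helper name is hypothetical and the real programming sequence lives in the low-level board init code, so treat this only as a reading aid:

    /* Sketch only: apply a static DRAM configuration to the documented offsets.
     * write_dunit_reg() is a hypothetical 32-bit write to INTER_REGS_BASE + offset. */
    static void static_dram_config_apply_sketch(void)
    {
    #ifdef MV_STATIC_DRAM_ON_BOARD
        write_dunit_reg(0x1400, STATIC_SDRAM_CONFIG_REG);
        write_dunit_reg(0x1404, STATIC_SDRAM_DUNIT_CTRL_REG);
        write_dunit_reg(0x1408, STATIC_SDRAM_TIMING_CTRL_LOW_REG);
        write_dunit_reg(0x140c, STATIC_SDRAM_TIMING_CTRL_HIGH_REG);
        write_dunit_reg(0x1410, STATIC_SDRAM_ADDR_CTRL_REG);
        write_dunit_reg(0x141c, STATIC_SDRAM_MODE_REG);
        /* ODT, extended-mode and DDR2 timing values follow the same pattern
         * where the selected DIMM profile defines them. */
    #endif
    }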
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.c
new file mode 100644
index 000000000..7a26f9059
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.c
@@ -0,0 +1,1474 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "ddr2/spd/mvSpd.h"
+#include "boardEnv/mvBoardEnvLib.h"
+
+/* #define MV_DEBUG */
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+static MV_VOID cpyDimm2BankInfo(MV_DIMM_INFO *pDimmInfo,
+ MV_DRAM_BANK_INFO *pBankInfo);
+static MV_U32 cas2ps(MV_U8 spd_byte);
+/*******************************************************************************
+* mvDramBankInfoGet - Get the DRAM bank parameters.
+*
+* DESCRIPTION:
+*       This function retrieves the DRAM bank parameters of the controller
+*       DRAM unit, as described by the MV_DRAM_BANK_INFO struct. If the board
+*       carries its DRAM on DIMMs, the DIMM EEPROM is read to extract the SPD
+*       data. Otherwise, if the DRAM is soldered on board, the function fills
+*       the MV_DRAM_BANK_INFO struct with the board's bank information.
+*
+* INPUT:
+* bankNum - Board DRAM bank number.
+*
+* OUTPUT:
+* pBankInfo - DRAM bank information struct.
+*
+* RETURN:
+*       MV_OK on success, MV_BAD_PARAM on invalid parameters,
+*       MV_FAIL if the bank parameters could not be read.
+*
+*******************************************************************************/
+MV_STATUS mvDramBankInfoGet(MV_U32 bankNum, MV_DRAM_BANK_INFO *pBankInfo)
+{
+ MV_DIMM_INFO dimmInfo;
+
+ DB(mvOsPrintf("Dram: mvDramBankInfoGet bank %d\n", bankNum));
+ /* zero pBankInfo structure */
+
+ if((NULL == pBankInfo) || (bankNum >= MV_DRAM_MAX_CS ))
+ {
+ DB(mvOsPrintf("Dram: mvDramBankInfoGet bad params \n"));
+ return MV_BAD_PARAM;
+ }
+ memset(pBankInfo, 0, sizeof(*pBankInfo));
+
+ if ( MV_OK != dimmSpdGet((MV_U32)(bankNum/2), &dimmInfo))
+ {
+ DB(mvOsPrintf("Dram: ERR dimmSpdGet failed to get dimm info \n"));
+ return MV_FAIL;
+ }
+ if ((dimmInfo.numOfModuleBanks == 1) && ((bankNum % 2) == 1))
+ {
+ DB(mvOsPrintf("Dram: ERR dimmSpdGet. Can't find DIMM bank 2 \n"));
+ return MV_FAIL;
+ }
+ /* convert Dimm info to Bank info */
+ cpyDimm2BankInfo(&dimmInfo, pBankInfo);
+ return MV_OK;
+}
+
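A caller typically walks all chip selects and collects the per-bank information. A minimal usage sketch (illustration only; it assumes the MV_DRAM_MAX_CS limit and the mvOsPrintf helper used elsewhere in this file):

    /* Sketch: dump the populated DRAM banks via mvDramBankInfoGet(). */
    static void dramBanksDumpSketch(MV_VOID)
    {
        MV_DRAM_BANK_INFO bankInfo;
        MV_U32 cs;

        for (cs = 0; cs < MV_DRAM_MAX_CS; cs++)
        {
            if (mvDramBankInfoGet(cs, &bankInfo) != MV_OK)
                continue;   /* bank not populated or SPD read failed */

            mvOsPrintf("CS %d: %d MB, %d-bit wide\n",
                       cs, bankInfo.size, bankInfo.dataWidth);
        }
    }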
+/*******************************************************************************
+* cpyDimm2BankInfo - Convert a Dimm info struct into a bank info struct.
+*
+* DESCRIPTION:
+* Convert a Dimm info struct into a bank info struct.
+*
+* INPUT:
+* pDimmInfo - DIMM information structure.
+*
+* OUTPUT:
+* pBankInfo - DRAM bank information struct.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+static MV_VOID cpyDimm2BankInfo(MV_DIMM_INFO *pDimmInfo,
+ MV_DRAM_BANK_INFO *pBankInfo)
+{
+ pBankInfo->memoryType = pDimmInfo->memoryType;
+
+ /* DIMM dimensions */
+ pBankInfo->numOfRowAddr = pDimmInfo->numOfRowAddr;
+ pBankInfo->numOfColAddr = pDimmInfo->numOfColAddr;
+ pBankInfo->dataWidth = pDimmInfo->dataWidth;
+ pBankInfo->errorCheckType = pDimmInfo->errorCheckType;
+ pBankInfo->sdramWidth = pDimmInfo->sdramWidth;
+ pBankInfo->errorCheckDataWidth = pDimmInfo->errorCheckDataWidth;
+ pBankInfo->numOfBanksOnEachDevice = pDimmInfo->numOfBanksOnEachDevice;
+ pBankInfo->suportedCasLatencies = pDimmInfo->suportedCasLatencies;
+ pBankInfo->refreshInterval = pDimmInfo->refreshInterval;
+
+ /* DIMM timing parameters */
+ pBankInfo->minCycleTimeAtMaxCasLatPs = pDimmInfo->minCycleTimeAtMaxCasLatPs;
+ pBankInfo->minCycleTimeAtMaxCasLatMinus1Ps =
+ pDimmInfo->minCycleTimeAtMaxCasLatMinus1Ps;
+ pBankInfo->minCycleTimeAtMaxCasLatMinus2Ps =
+ pDimmInfo->minCycleTimeAtMaxCasLatMinus2Ps;
+
+ pBankInfo->minRowPrechargeTime = pDimmInfo->minRowPrechargeTime;
+ pBankInfo->minRowActiveToRowActive = pDimmInfo->minRowActiveToRowActive;
+ pBankInfo->minRasToCasDelay = pDimmInfo->minRasToCasDelay;
+ pBankInfo->minRasPulseWidth = pDimmInfo->minRasPulseWidth;
+ pBankInfo->minWriteRecoveryTime = pDimmInfo->minWriteRecoveryTime;
+ pBankInfo->minWriteToReadCmdDelay = pDimmInfo->minWriteToReadCmdDelay;
+ pBankInfo->minReadToPrechCmdDelay = pDimmInfo->minReadToPrechCmdDelay;
+ pBankInfo->minRefreshToActiveCmd = pDimmInfo->minRefreshToActiveCmd;
+
+ /* Parameters calculated from the extracted DIMM information */
+ pBankInfo->size = pDimmInfo->size/pDimmInfo->numOfModuleBanks;
+ pBankInfo->deviceDensity = pDimmInfo->deviceDensity;
+ pBankInfo->numberOfDevices = pDimmInfo->numberOfDevices /
+ pDimmInfo->numOfModuleBanks;
+
+ /* DIMM attributes (MV_TRUE for yes) */
+
+ if ((pDimmInfo->memoryType == MEM_TYPE_SDRAM) ||
+ (pDimmInfo->memoryType == MEM_TYPE_DDR1) )
+ {
+ if (pDimmInfo->dimmAttributes & BIT1)
+ pBankInfo->registeredAddrAndControlInputs = MV_TRUE;
+ else
+ pBankInfo->registeredAddrAndControlInputs = MV_FALSE;
+ }
+ else /* pDimmInfo->memoryType == MEM_TYPE_DDR2 */
+ {
+ if (pDimmInfo->dimmTypeInfo & (BIT0 | BIT4))
+ pBankInfo->registeredAddrAndControlInputs = MV_TRUE;
+ else
+ pBankInfo->registeredAddrAndControlInputs = MV_FALSE;
+ }
+
+ return;
+}
+/*******************************************************************************
+* dimmSpdCpy - Copy SPD parameters from DIMM 0 to DIMM 1.
+*
+* DESCRIPTION:
+*       Read the SPD contents of DIMM 0 and write them into the SPD EEPROM of DIMM 1.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       MV_OK if the SPD contents were copied successfully, MV_FAIL otherwise.
+*
+*******************************************************************************/
+MV_STATUS dimmSpdCpy(MV_VOID)
+{
+ MV_U32 i;
+ MV_U32 spdChecksum;
+
+ MV_TWSI_SLAVE twsiSlave;
+ MV_U8 data[SPD_SIZE];
+
+ /* zero dimmInfo structure */
+ memset(data, 0, SPD_SIZE);
+
+ /* read the dimm eeprom */
+ DB(mvOsPrintf("DRAM: Read Dimm eeprom\n"));
+ twsiSlave.slaveAddr.address = MV_BOARD_DIMM0_I2C_ADDR;
+ twsiSlave.slaveAddr.type = ADDR7_BIT;
+ twsiSlave.validOffset = MV_TRUE;
+ twsiSlave.offset = 0;
+ twsiSlave.moreThen256 = MV_FALSE;
+
+ if( MV_OK != mvTwsiRead (MV_BOARD_DIMM_I2C_CHANNEL, &twsiSlave, data, SPD_SIZE) )
+ {
+ DB(mvOsPrintf("DRAM: ERR. no DIMM in dimmNum 0\n"));
+ return MV_FAIL;
+ }
+	DB(puts("DRAM: Reading dimm info succeeded.\n"));
+
+ /* calculate SPD checksum */
+ spdChecksum = 0;
+
+ for(i = 0 ; i <= 62 ; i++)
+ {
+ spdChecksum += data[i];
+ }
+
+ if ((spdChecksum & 0xff) != data[63])
+ {
+ DB(mvOsPrintf("DRAM: Warning. Wrong SPD Checksum %2x, expValue=%2x\n",
+ (MV_U32)(spdChecksum & 0xff), data[63]));
+ }
+ else
+ {
+ DB(mvOsPrintf("DRAM: SPD Checksum ok!\n"));
+ }
+
+ /* copy the SPD content 1:1 into the DIMM 1 SPD */
+ twsiSlave.slaveAddr.address = MV_BOARD_DIMM1_I2C_ADDR;
+ twsiSlave.slaveAddr.type = ADDR7_BIT;
+ twsiSlave.validOffset = MV_TRUE;
+ twsiSlave.offset = 0;
+ twsiSlave.moreThen256 = MV_FALSE;
+
+ for(i = 0 ; i < SPD_SIZE ; i++)
+ {
+ twsiSlave.offset = i;
+ if( MV_OK != mvTwsiWrite (MV_BOARD_DIMM_I2C_CHANNEL, &twsiSlave, &data[i], 1) )
+ {
+ mvOsPrintf("DRAM: ERR. no DIMM in dimmNum 1 byte %d \n",i);
+ return MV_FAIL;
+ }
+ mvOsDelay(5);
+ }
+
+	DB(puts("DRAM: Copying dimm info succeeded.\n"));
+ return MV_OK;
+}
+
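Both dimmSpdCpy() and dimmSpdGet() apply the same JEDEC SPD checksum rule: bytes 0..62 are summed and the low 8 bits must equal byte 63. Factored out as a small self-contained sketch (not a helper that exists in the Marvell sources; MV_BOOL/MV_TRUE/MV_FALSE are the mvHal types already used in this file):

    /* Sketch: verify the SPD checksum (sum of bytes 0..62 vs. byte 63). */
    static MV_BOOL spdChecksumOk(const MV_U8 *spd)
    {
        MV_U32 i, sum = 0;

        for (i = 0; i <= 62; i++)
            sum += spd[i];

        return ((sum & 0xff) == spd[63]) ? MV_TRUE : MV_FALSE;
    }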
+/*******************************************************************************
+* dimmSpdGet - Get the SPD parameters.
+*
+* DESCRIPTION:
+* Read the DIMM SPD parameters into given struct parameter.
+*
+* INPUT:
+* dimmNum - DIMM number. See MV_BOARD_DIMM_NUM enumerator.
+*
+* OUTPUT:
+* pDimmInfo - DIMM information structure.
+*
+* RETURN:
+*       MV_OK if the DIMM parameters were read successfully, MV_FAIL otherwise.
+*
+*******************************************************************************/
+MV_STATUS dimmSpdGet(MV_U32 dimmNum, MV_DIMM_INFO *pDimmInfo)
+{
+ MV_U32 i;
+ MV_U32 density = 1;
+ MV_U32 spdChecksum;
+
+ MV_TWSI_SLAVE twsiSlave;
+ MV_U8 data[SPD_SIZE];
+
+ if((NULL == pDimmInfo)|| (dimmNum >= MAX_DIMM_NUM))
+ {
+		DB(mvOsPrintf("Dram: dimmSpdGet bad params\n"));
+ return MV_BAD_PARAM;
+ }
+
+ /* zero dimmInfo structure */
+ memset(data, 0, SPD_SIZE);
+
+ /* read the dimm eeprom */
+ DB(mvOsPrintf("DRAM: Read Dimm eeprom\n"));
+ twsiSlave.slaveAddr.address = (dimmNum == 0) ?
+ MV_BOARD_DIMM0_I2C_ADDR : MV_BOARD_DIMM1_I2C_ADDR;
+ twsiSlave.slaveAddr.type = ADDR7_BIT;
+ twsiSlave.validOffset = MV_TRUE;
+ twsiSlave.offset = 0;
+ twsiSlave.moreThen256 = MV_FALSE;
+
+ if( MV_OK != mvTwsiRead (MV_BOARD_DIMM_I2C_CHANNEL, &twsiSlave, data, SPD_SIZE) )
+ {
+ DB(mvOsPrintf("DRAM: ERR. no DIMM in dimmNum %d \n", dimmNum));
+ return MV_FAIL;
+ }
+	DB(puts("DRAM: Reading dimm info succeeded.\n"));
+
+ /* calculate SPD checksum */
+ spdChecksum = 0;
+
+ for(i = 0 ; i <= 62 ; i++)
+ {
+ spdChecksum += data[i];
+ }
+
+ if ((spdChecksum & 0xff) != data[63])
+ {
+ DB(mvOsPrintf("DRAM: Warning. Wrong SPD Checksum %2x, expValue=%2x\n",
+ (MV_U32)(spdChecksum & 0xff), data[63]));
+ }
+ else
+ {
+ DB(mvOsPrintf("DRAM: SPD Checksum ok!\n"));
+ }
+
+ /* copy the SPD content 1:1 into the dimmInfo structure*/
+ for(i = 0 ; i < SPD_SIZE ; i++)
+ {
+ pDimmInfo->spdRawData[i] = data[i];
+ DB(mvOsPrintf("SPD-EEPROM Byte %3d = %3x (%3d)\n",i, data[i], data[i]));
+ }
+
+ DB(mvOsPrintf("DRAM SPD Information:\n"));
+
+ /* Memory type (DDR / SDRAM) */
+ switch (data[DIMM_MEM_TYPE])
+ {
+ case (DIMM_MEM_TYPE_SDRAM):
+ pDimmInfo->memoryType = MEM_TYPE_SDRAM;
+			DB(mvOsPrintf("DRAM Memory type SDRAM\n"));
+ break;
+ case (DIMM_MEM_TYPE_DDR1):
+ pDimmInfo->memoryType = MEM_TYPE_DDR1;
+			DB(mvOsPrintf("DRAM Memory type DDR1\n"));
+ break;
+ case (DIMM_MEM_TYPE_DDR2):
+ pDimmInfo->memoryType = MEM_TYPE_DDR2;
+			DB(mvOsPrintf("DRAM Memory type DDR2\n"));
+ break;
+ default:
+ mvOsPrintf("ERROR: Undefined memory type!\n");
+ return MV_ERROR;
+ }
+
+
+ /* Number Of Row Addresses */
+ pDimmInfo->numOfRowAddr = data[DIMM_ROW_NUM];
+ DB(mvOsPrintf("DRAM numOfRowAddr[3] %d\n",pDimmInfo->numOfRowAddr));
+
+ /* Number Of Column Addresses */
+ pDimmInfo->numOfColAddr = data[DIMM_COL_NUM];
+ DB(mvOsPrintf("DRAM numOfColAddr[4] %d\n",pDimmInfo->numOfColAddr));
+
+ /* Number Of Module Banks */
+ pDimmInfo->numOfModuleBanks = data[DIMM_MODULE_BANK_NUM];
+ DB(mvOsPrintf("DRAM numOfModuleBanks[5] 0x%x\n",
+ pDimmInfo->numOfModuleBanks));
+
+ /* Number of module banks encoded differently for DDR2 */
+ if (pDimmInfo->memoryType == MEM_TYPE_DDR2)
+ pDimmInfo->numOfModuleBanks = (pDimmInfo->numOfModuleBanks & 0x7)+1;
+
+ /* Data Width */
+ pDimmInfo->dataWidth = data[DIMM_DATA_WIDTH];
+ DB(mvOsPrintf("DRAM dataWidth[6] 0x%x\n", pDimmInfo->dataWidth));
+
+	/* Minimum Cycle Time At Max CAS Latency */
+ pDimmInfo->minCycleTimeAtMaxCasLatPs = cas2ps(data[DIMM_MIN_CC_AT_MAX_CAS]);
+
+ /* Error Check Type */
+ pDimmInfo->errorCheckType = data[DIMM_ERR_CHECK_TYPE];
+ DB(mvOsPrintf("DRAM errorCheckType[11] 0x%x\n",
+ pDimmInfo->errorCheckType));
+
+ /* Refresh Interval */
+ pDimmInfo->refreshInterval = data[DIMM_REFRESH_INTERVAL];
+ DB(mvOsPrintf("DRAM refreshInterval[12] 0x%x\n",
+ pDimmInfo->refreshInterval));
+
+ /* Sdram Width */
+ pDimmInfo->sdramWidth = data[DIMM_SDRAM_WIDTH];
+ DB(mvOsPrintf("DRAM sdramWidth[13] 0x%x\n",pDimmInfo->sdramWidth));
+
+ /* Error Check Data Width */
+ pDimmInfo->errorCheckDataWidth = data[DIMM_ERR_CHECK_DATA_WIDTH];
+ DB(mvOsPrintf("DRAM errorCheckDataWidth[14] 0x%x\n",
+ pDimmInfo->errorCheckDataWidth));
+
+ /* Burst Length Supported */
+ /* SDRAM/DDR1:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ burst length = * Page | TBD | TBD | TBD | 8 | 4 | 2 | 1 *
+ *********************************************************/
+ /* DDR2:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ burst length = * Page | TBD | TBD | TBD | 8 | 4 | TBD | TBD *
+ *********************************************************/
+
+ pDimmInfo->burstLengthSupported = data[DIMM_BURST_LEN_SUP];
+ DB(mvOsPrintf("DRAM burstLengthSupported[16] 0x%x\n",
+ pDimmInfo->burstLengthSupported));
+
+ /* Number Of Banks On Each Device */
+ pDimmInfo->numOfBanksOnEachDevice = data[DIMM_DEV_BANK_NUM];
+ DB(mvOsPrintf("DRAM numOfBanksOnEachDevice[17] 0x%x\n",
+ pDimmInfo->numOfBanksOnEachDevice));
+
+	/* Supported CAS Latencies */
+
+ /* SDRAM:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ CAS = * TBD | 7 | 6 | 5 | 4 | 3 | 2 | 1 *
+ ********************************************************/
+
+ /* DDR 1:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ CAS = * TBD | 4 | 3.5 | 3 | 2.5 | 2 | 1.5 | 1 *
+ *********************************************************/
+
+ /* DDR 2:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ CAS = * TBD | TBD | 5 | 4 | 3 | 2 | TBD | TBD *
+ *********************************************************/
+
+ pDimmInfo->suportedCasLatencies = data[DIMM_SUP_CAL];
+ DB(mvOsPrintf("DRAM suportedCasLatencies[18] 0x%x\n",
+ pDimmInfo->suportedCasLatencies));
+
+ /* For DDR2 only, get the DIMM type information */
+ if (pDimmInfo->memoryType == MEM_TYPE_DDR2)
+ {
+ pDimmInfo->dimmTypeInfo = data[DIMM_DDR2_TYPE_INFORMATION];
+ DB(mvOsPrintf("DRAM dimmTypeInfo[20] (DDR2) 0x%x\n",
+ pDimmInfo->dimmTypeInfo));
+ }
+
+ /* SDRAM Modules Attributes */
+ pDimmInfo->dimmAttributes = data[DIMM_BUF_ADDR_CONT_IN];
+ DB(mvOsPrintf("DRAM dimmAttributes[21] 0x%x\n",
+ pDimmInfo->dimmAttributes));
+
+	/* Minimum Cycle Time At Max CAS Latency Minus 1 */
+ pDimmInfo->minCycleTimeAtMaxCasLatMinus1Ps =
+ cas2ps(data[DIMM_MIN_CC_AT_MAX_CAS_MINUS1]);
+
+	/* Minimum Cycle Time At Max CAS Latency Minus 2 */
+ pDimmInfo->minCycleTimeAtMaxCasLatMinus2Ps =
+ cas2ps(data[DIMM_MIN_CC_AT_MAX_CAS_MINUS2]);
+
+ pDimmInfo->minRowPrechargeTime = data[DIMM_MIN_ROW_PRECHARGE_TIME];
+ DB(mvOsPrintf("DRAM minRowPrechargeTime[27] 0x%x\n",
+ pDimmInfo->minRowPrechargeTime));
+ pDimmInfo->minRowActiveToRowActive = data[DIMM_MIN_ROW_ACTIVE_TO_ROW_ACTIVE];
+ DB(mvOsPrintf("DRAM minRowActiveToRowActive[28] 0x%x\n",
+ pDimmInfo->minRowActiveToRowActive));
+ pDimmInfo->minRasToCasDelay = data[DIMM_MIN_RAS_TO_CAS_DELAY];
+ DB(mvOsPrintf("DRAM minRasToCasDelay[29] 0x%x\n",
+ pDimmInfo->minRasToCasDelay));
+ pDimmInfo->minRasPulseWidth = data[DIMM_MIN_RAS_PULSE_WIDTH];
+ DB(mvOsPrintf("DRAM minRasPulseWidth[30] 0x%x\n",
+ pDimmInfo->minRasPulseWidth));
+
+ /* DIMM Bank Density */
+ pDimmInfo->dimmBankDensity = data[DIMM_BANK_DENSITY];
+ DB(mvOsPrintf("DRAM dimmBankDensity[31] 0x%x\n",
+ pDimmInfo->dimmBankDensity));
+
+    /* Only DDR2 includes the Write Recovery Time field; other SDRAM types ignore it. */
+ pDimmInfo->minWriteRecoveryTime = data[DIMM_MIN_WRITE_RECOVERY_TIME];
+ DB(mvOsPrintf("DRAM minWriteRecoveryTime[36] 0x%x\n",
+ pDimmInfo->minWriteRecoveryTime));
+
+ /* Only DDR2 includes Internal Write To Read Command Delay field. */
+ pDimmInfo->minWriteToReadCmdDelay = data[DIMM_MIN_WRITE_TO_READ_CMD_DELAY];
+ DB(mvOsPrintf("DRAM minWriteToReadCmdDelay[37] 0x%x\n",
+ pDimmInfo->minWriteToReadCmdDelay));
+
+ /* Only DDR2 includes Internal Read To Precharge Command Delay field. */
+ pDimmInfo->minReadToPrechCmdDelay = data[DIMM_MIN_READ_TO_PRECH_CMD_DELAY];
+ DB(mvOsPrintf("DRAM minReadToPrechCmdDelay[38] 0x%x\n",
+ pDimmInfo->minReadToPrechCmdDelay));
+
+ /* Only DDR2 includes Minimum Refresh to Activate/Refresh Command field */
+ pDimmInfo->minRefreshToActiveCmd = data[DIMM_MIN_REFRESH_TO_ACTIVATE_CMD];
+ DB(mvOsPrintf("DRAM minRefreshToActiveCmd[42] 0x%x\n",
+ pDimmInfo->minRefreshToActiveCmd));
+
+    /* Calculate the SDRAM device density. The device density is           */
+    /* represented from bit 20 up, to allow representation of 4GB and above. */
+    /* For example, a density of 512Mbit (0x20000000) is represented in    */
+    /* deviceDensity by 0x20000000 >> 20 --> 0x00000200. Another example   */
+    /* is a density of 8GB: 0x200000000 >> 20 --> 0x00002000.              */
+ density = (1 << ((pDimmInfo->numOfRowAddr + pDimmInfo->numOfColAddr) - 20));
+ pDimmInfo->deviceDensity = density *
+ pDimmInfo->numOfBanksOnEachDevice *
+ pDimmInfo->sdramWidth;
+ DB(mvOsPrintf("DRAM deviceDensity %d\n",pDimmInfo->deviceDensity));
+
+    /* Number of devices, including error correction devices */
+ pDimmInfo->numberOfDevices = (pDimmInfo->dataWidth/pDimmInfo->sdramWidth) *
+ pDimmInfo->numOfModuleBanks;
+ DB(mvOsPrintf("DRAM numberOfDevices %d\n",
+ pDimmInfo->numberOfDevices));
+
+ pDimmInfo->size = 0;
+
+ /* Note that pDimmInfo->size is in MB units */
+ if (pDimmInfo->memoryType == MEM_TYPE_SDRAM)
+ {
+ if (pDimmInfo->dimmBankDensity & BIT0)
+ pDimmInfo->size += 1024; /* Equal to 1GB */
+ else if (pDimmInfo->dimmBankDensity & BIT1)
+ pDimmInfo->size += 8; /* Equal to 8MB */
+ else if (pDimmInfo->dimmBankDensity & BIT2)
+ pDimmInfo->size += 16; /* Equal to 16MB */
+ else if (pDimmInfo->dimmBankDensity & BIT3)
+ pDimmInfo->size += 32; /* Equal to 32MB */
+ else if (pDimmInfo->dimmBankDensity & BIT4)
+ pDimmInfo->size += 64; /* Equal to 64MB */
+ else if (pDimmInfo->dimmBankDensity & BIT5)
+ pDimmInfo->size += 128; /* Equal to 128MB */
+ else if (pDimmInfo->dimmBankDensity & BIT6)
+ pDimmInfo->size += 256; /* Equal to 256MB */
+ else if (pDimmInfo->dimmBankDensity & BIT7)
+ pDimmInfo->size += 512; /* Equal to 512MB */
+ }
+ else if (pDimmInfo->memoryType == MEM_TYPE_DDR1)
+ {
+ if (pDimmInfo->dimmBankDensity & BIT0)
+ pDimmInfo->size += 1024; /* Equal to 1GB */
+ else if (pDimmInfo->dimmBankDensity & BIT1)
+ pDimmInfo->size += 2048; /* Equal to 2GB */
+ else if (pDimmInfo->dimmBankDensity & BIT2)
+ pDimmInfo->size += 16; /* Equal to 16MB */
+ else if (pDimmInfo->dimmBankDensity & BIT3)
+ pDimmInfo->size += 32; /* Equal to 32MB */
+ else if (pDimmInfo->dimmBankDensity & BIT4)
+ pDimmInfo->size += 64; /* Equal to 64MB */
+ else if (pDimmInfo->dimmBankDensity & BIT5)
+ pDimmInfo->size += 128; /* Equal to 128MB */
+ else if (pDimmInfo->dimmBankDensity & BIT6)
+ pDimmInfo->size += 256; /* Equal to 256MB */
+ else if (pDimmInfo->dimmBankDensity & BIT7)
+ pDimmInfo->size += 512; /* Equal to 512MB */
+ }
+ else /* if (dimmInfo.memoryType == MEM_TYPE_DDR2) */
+ {
+ if (pDimmInfo->dimmBankDensity & BIT0)
+ pDimmInfo->size += 1024; /* Equal to 1GB */
+ else if (pDimmInfo->dimmBankDensity & BIT1)
+ pDimmInfo->size += 2048; /* Equal to 2GB */
+ else if (pDimmInfo->dimmBankDensity & BIT2)
+ pDimmInfo->size += 4096; /* Equal to 4GB */
+ else if (pDimmInfo->dimmBankDensity & BIT3)
+ pDimmInfo->size += 8192; /* Equal to 8GB */
+ else if (pDimmInfo->dimmBankDensity & BIT4)
+ pDimmInfo->size += 16384; /* Equal to 16GB */
+ else if (pDimmInfo->dimmBankDensity & BIT5)
+ pDimmInfo->size += 128; /* Equal to 128MB */
+ else if (pDimmInfo->dimmBankDensity & BIT6)
+ pDimmInfo->size += 256; /* Equal to 256MB */
+ else if (pDimmInfo->dimmBankDensity & BIT7)
+ pDimmInfo->size += 512; /* Equal to 512MB */
+ }
+
+ pDimmInfo->size *= pDimmInfo->numOfModuleBanks;
+
+ DB(mvOsPrintf("Dram: dimm size %dMB \n",pDimmInfo->size));
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* dimmSpdPrint - Print the SPD parameters.
+*
+* DESCRIPTION:
+* Print the Dimm SPD parameters.
+*
+* INPUT:
+*       dimmNum - DIMM slot number to print.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_VOID dimmSpdPrint(MV_U32 dimmNum)
+{
+ MV_DIMM_INFO dimmInfo;
+ MV_U32 i, temp = 0;
+ MV_U32 k, maskLeftOfPoint = 0, maskRightOfPoint = 0;
+ MV_U32 rightOfPoint = 0,leftOfPoint = 0, div, time_tmp, shift;
+ MV_U32 busClkPs;
+ MV_U8 trp_clocks=0, trcd_clocks, tras_clocks, trrd_clocks,
+ temp_buf[40], *spdRawData;
+
+ busClkPs = 1000000000 / (mvBoardSysClkGet() / 100); /* in 10 ps units */
+
+ spdRawData = dimmInfo.spdRawData;
+
+ if(MV_OK != dimmSpdGet(dimmNum, &dimmInfo))
+ {
+ mvOsOutput("ERROR: Could not read SPD information!\n");
+ return;
+ }
+
+    /* Find the manufacturer of the DIMM module */
+ mvOsOutput("\nManufacturer's JEDEC ID Code: ");
+ for(i = 0 ; i < DIMM_MODULE_MANU_SIZE ; i++)
+ {
+ mvOsOutput("%x",spdRawData[DIMM_MODULE_MANU_OFFS + i]);
+ }
+ mvOsOutput("\n");
+
+ /* Manufacturer's Specific Data */
+ for(i = 0 ; i < DIMM_MODULE_ID_SIZE ; i++)
+ {
+ temp_buf[i] = spdRawData[DIMM_MODULE_ID_OFFS + i];
+        temp_buf[i] = spdRawData[DIMM_MODULE_ID_OFFS + i];
+    }
+    temp_buf[i] = '\0';    /* make sure the buffer is NUL terminated before %s */
+    mvOsOutput("Manufacturer's Specific Data: %s\n", temp_buf);
+
+ /* Module Part Number */
+ for(i = 0 ; i < DIMM_MODULE_VEN_SIZE ; i++)
+ {
+ temp_buf[i] = spdRawData[DIMM_MODULE_VEN_OFFS + i];
+    }
+    temp_buf[i] = '\0';    /* NUL terminate before printing with %s */
+    mvOsOutput("Module Part Number: %s\n", temp_buf);
+
+ /* Module Serial Number */
+ for(i = 0; i < sizeof(MV_U32); i++)
+ {
+ temp |= spdRawData[95+i] << 8*i;
+ }
+ mvOsOutput("DIMM Serial No. %ld (%lx)\n", (long)temp,
+ (long)temp);
+
+    /* Find the manufacturing date of the DIMM module */
+    mvOsOutput("Manufacturing Date: Year 20%d%d/ ww %d%d\n",
+ ((spdRawData[93] & 0xf0) >> 4), (spdRawData[93] & 0xf),
+ ((spdRawData[94] & 0xf0) >> 4), (spdRawData[94] & 0xf));
+    /* Find the module revision of the DIMM module */
+ mvOsOutput("Module Revision: %d.%d\n",
+ spdRawData[62]/10, spdRawData[62]%10);
+
+    /* Find the manufacturing location of the DIMM module */
+    mvOsOutput("Manufacturing Location: %d\n", spdRawData[72]);
+
+    /* Walk over SPD bytes 2 through 35 */
+ for(i = 2 ; i <= 35 ; i++)
+ switch(i)
+ {
+ case 2: /* Memory type (DDR1/2 / SDRAM) */
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ mvOsOutput("Dram Type is: SDRAM\n");
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+ mvOsOutput("Dram Type is: SDRAM DDR1\n");
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+ mvOsOutput("Dram Type is: SDRAM DDR2\n");
+ else
+ mvOsOutput("Dram Type unknown\n");
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 3: /* Number Of Row Addresses */
+ mvOsOutput("Module Number of row addresses: %d\n",
+ dimmInfo.numOfRowAddr);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 4: /* Number Of Column Addresses */
+ mvOsOutput("Module Number of col addresses: %d\n",
+ dimmInfo.numOfColAddr);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 5: /* Number Of Module Banks */
+ mvOsOutput("Number of Banks on Mod.: %d\n",
+ dimmInfo.numOfModuleBanks);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 6: /* Data Width */
+ mvOsOutput("Module Data Width: %d bit\n",
+ dimmInfo.dataWidth);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 8: /* Voltage Interface */
+ switch(spdRawData[i])
+ {
+ case 0x0:
+ mvOsOutput("Module is TTL_5V_TOLERANT\n");
+ break;
+ case 0x1:
+ mvOsOutput("Module is LVTTL\n");
+ break;
+ case 0x2:
+ mvOsOutput("Module is HSTL_1_5V\n");
+ break;
+ case 0x3:
+ mvOsOutput("Module is SSTL_3_3V\n");
+ break;
+ case 0x4:
+ mvOsOutput("Module is SSTL_2_5V\n");
+ break;
+ case 0x5:
+ if (dimmInfo.memoryType != MEM_TYPE_SDRAM)
+ {
+ mvOsOutput("Module is SSTL_1_8V\n");
+ break;
+ }
+ default:
+ mvOsOutput("Module is VOLTAGE_UNKNOWN\n");
+ break;
+ }
+ break;
+/*----------------------------------------------------------------------------*/
+
+        case 9:   /* Minimum Cycle Time At Max CAS Latency */
+ leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+ rightOfPoint = (spdRawData[i] & 0x0f) * 10;
+
+ /* DDR2 addition of right of point */
+ if ((spdRawData[i] & 0x0f) == 0xA)
+ {
+ rightOfPoint = 25;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xB)
+ {
+ rightOfPoint = 33;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xC)
+ {
+ rightOfPoint = 66;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xD)
+ {
+ rightOfPoint = 75;
+ }
+ mvOsOutput("Minimum Cycle Time At Max CL: %d.%d [ns]\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 10: /* Clock To Data Out */
+ div = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 10:100;
+ time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+ ((spdRawData[i] & 0x0f));
+ leftOfPoint = time_tmp / div;
+ rightOfPoint = time_tmp % div;
+ mvOsOutput("Clock To Data Out: %d.%d [ns]\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 11: /* Error Check Type */
+ mvOsOutput("Error Check Type (0=NONE): %d\n",
+ dimmInfo.errorCheckType);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 12: /* Refresh Interval */
+ mvOsOutput("Refresh Rate: %x\n",
+ dimmInfo.refreshInterval);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 13: /* Sdram Width */
+ mvOsOutput("Sdram Width: %d bits\n",
+ dimmInfo.sdramWidth);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 14: /* Error Check Data Width */
+ mvOsOutput("Error Check Data Width: %d bits\n",
+ dimmInfo.errorCheckDataWidth);
+ break;
+/*----------------------------------------------------------------------------*/
+
+        case 15: /* Minimum Clock Delay, back to back */
+ if ((dimmInfo.memoryType == MEM_TYPE_SDRAM) ||
+ (dimmInfo.memoryType == MEM_TYPE_DDR1))
+ {
+ mvOsOutput("Minimum Clk Delay back to back: %d\n",
+ spdRawData[i]);
+ }
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 16: /* Burst Length Supported */
+ /* SDRAM/DDR1:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ burst length = * Page | TBD | TBD | TBD | 8 | 4 | 2 | 1 *
+ *********************************************************/
+ /* DDR2:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ burst length = * Page | TBD | TBD | TBD | 8 | 4 | TBD | TBD *
+ *********************************************************/
+ mvOsOutput("Burst Length Supported: ");
+ if ((dimmInfo.memoryType == MEM_TYPE_SDRAM) ||
+ (dimmInfo.memoryType == MEM_TYPE_DDR1))
+ {
+ if (dimmInfo.burstLengthSupported & BIT0)
+ mvOsOutput("1, ");
+ if (dimmInfo.burstLengthSupported & BIT1)
+ mvOsOutput("2, ");
+ }
+ if (dimmInfo.burstLengthSupported & BIT2)
+ mvOsOutput("4, ");
+ if (dimmInfo.burstLengthSupported & BIT3)
+ mvOsOutput("8, ");
+
+ mvOsOutput(" Bit \n");
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 17: /* Number Of Banks On Each Device */
+ mvOsOutput("Number Of Banks On Each Chip: %d\n",
+ dimmInfo.numOfBanksOnEachDevice);
+ break;
+/*----------------------------------------------------------------------------*/
+
+        case 18: /* Supported CAS Latencies */
+
+ /* SDRAM:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ CAS = * TBD | 7 | 6 | 5 | 4 | 3 | 2 | 1 *
+ ********************************************************/
+
+ /* DDR 1:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ CAS = * TBD | 4 | 3.5 | 3 | 2.5 | 2 | 1.5 | 1 *
+ *********************************************************/
+
+ /* DDR 2:
+ *******-******-******-******-******-******-******-*******
+ * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+ *******-******-******-******-******-******-******-*******
+ CAS = * TBD | TBD | 5 | 4 | 3 | 2 | TBD | TBD *
+ *********************************************************/
+
+            mvOsOutput("Supported CAS Latencies: (CL) ");
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ for (k = 0; k <=7; k++)
+ {
+ if (dimmInfo.suportedCasLatencies & (1 << k))
+ mvOsOutput("%d, ", k+1);
+ }
+ }
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+ {
+ if (dimmInfo.suportedCasLatencies & BIT0)
+ mvOsOutput("1, ");
+ if (dimmInfo.suportedCasLatencies & BIT1)
+ mvOsOutput("1.5, ");
+ if (dimmInfo.suportedCasLatencies & BIT2)
+ mvOsOutput("2, ");
+ if (dimmInfo.suportedCasLatencies & BIT3)
+ mvOsOutput("2.5, ");
+ if (dimmInfo.suportedCasLatencies & BIT4)
+ mvOsOutput("3, ");
+ if (dimmInfo.suportedCasLatencies & BIT5)
+ mvOsOutput("3.5, ");
+ }
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+ {
+ if (dimmInfo.suportedCasLatencies & BIT2)
+ mvOsOutput("2, ");
+ if (dimmInfo.suportedCasLatencies & BIT3)
+ mvOsOutput("3, ");
+ if (dimmInfo.suportedCasLatencies & BIT4)
+ mvOsOutput("4, ");
+ if (dimmInfo.suportedCasLatencies & BIT5)
+ mvOsOutput("5, ");
+ }
+ else
+ mvOsOutput("?.?, ");
+ mvOsOutput("\n");
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 20: /* DDR2 DIMM type info */
+ if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+ {
+ if (dimmInfo.dimmTypeInfo & (BIT0 | BIT4))
+ mvOsOutput("Registered DIMM (RDIMM)\n");
+ else if (dimmInfo.dimmTypeInfo & (BIT1 | BIT5))
+ mvOsOutput("Unbuffered DIMM (UDIMM)\n");
+ else
+ mvOsOutput("Unknown DIMM type.\n");
+ }
+
+ break;
+/*----------------------------------------------------------------------------*/
+
+        case 21: /* SDRAM Module Attributes */
+ mvOsOutput("\nModule Attributes (SPD Byte 21): \n");
+
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ if (dimmInfo.dimmAttributes & BIT0)
+ mvOsOutput(" Buffered Addr/Control Input: Yes\n");
+ else
+ mvOsOutput(" Buffered Addr/Control Input: No\n");
+
+ if (dimmInfo.dimmAttributes & BIT1)
+ mvOsOutput(" Registered Addr/Control Input: Yes\n");
+ else
+ mvOsOutput(" Registered Addr/Control Input: No\n");
+
+ if (dimmInfo.dimmAttributes & BIT2)
+ mvOsOutput(" On-Card PLL (clock): Yes \n");
+ else
+ mvOsOutput(" On-Card PLL (clock): No \n");
+
+                if (dimmInfo.dimmAttributes & BIT3)
+                    mvOsOutput(" Buffered DQMB Inputs: Yes \n");
+                else
+                    mvOsOutput(" Buffered DQMB Inputs: No \n");
+
+ if (dimmInfo.dimmAttributes & BIT4)
+ mvOsOutput(" Registered DQMB Inputs: Yes \n");
+ else
+ mvOsOutput(" Registered DQMB Inputs: No \n");
+
+ if (dimmInfo.dimmAttributes & BIT5)
+ mvOsOutput(" Differential Clock Input: Yes \n");
+ else
+ mvOsOutput(" Differential Clock Input: No \n");
+
+                if (dimmInfo.dimmAttributes & BIT6)
+                    mvOsOutput(" Redundant Row Addressing: Yes \n");
+                else
+                    mvOsOutput(" Redundant Row Addressing: No \n");
+ }
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+ {
+ if (dimmInfo.dimmAttributes & BIT0)
+ mvOsOutput(" Buffered Addr/Control Input: Yes\n");
+ else
+ mvOsOutput(" Buffered Addr/Control Input: No\n");
+
+ if (dimmInfo.dimmAttributes & BIT1)
+ mvOsOutput(" Registered Addr/Control Input: Yes\n");
+ else
+ mvOsOutput(" Registered Addr/Control Input: No\n");
+
+ if (dimmInfo.dimmAttributes & BIT2)
+ mvOsOutput(" On-Card PLL (clock): Yes \n");
+ else
+ mvOsOutput(" On-Card PLL (clock): No \n");
+
+ if (dimmInfo.dimmAttributes & BIT3)
+ mvOsOutput(" FET Switch On-Card Enabled: Yes \n");
+ else
+ mvOsOutput(" FET Switch On-Card Enabled: No \n");
+
+ if (dimmInfo.dimmAttributes & BIT4)
+ mvOsOutput(" FET Switch External Enabled: Yes \n");
+ else
+ mvOsOutput(" FET Switch External Enabled: No \n");
+
+ if (dimmInfo.dimmAttributes & BIT5)
+ mvOsOutput(" Differential Clock Input: Yes \n");
+ else
+ mvOsOutput(" Differential Clock Input: No \n");
+ }
+ else /* if (dimmInfo.memoryType == MEM_TYPE_DDR2) */
+ {
+ mvOsOutput(" Number of Active Registers on the DIMM: %d\n",
+ (dimmInfo.dimmAttributes & 0x3) + 1);
+
+ mvOsOutput(" Number of PLLs on the DIMM: %d\n",
+ ((dimmInfo.dimmAttributes) >> 2) & 0x3);
+
+ if (dimmInfo.dimmAttributes & BIT4)
+ mvOsOutput(" FET Switch External Enabled: Yes \n");
+ else
+ mvOsOutput(" FET Switch External Enabled: No \n");
+
+ if (dimmInfo.dimmAttributes & BIT6)
+ mvOsOutput(" Analysis probe installed: Yes \n");
+ else
+ mvOsOutput(" Analysis probe installed: No \n");
+ }
+
+ break;
+/*----------------------------------------------------------------------------*/
+
+        case 22: /* Supported Auto Precharge */
+            mvOsOutput("\nModule Attributes (SPD Byte 22): \n");
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ if ( spdRawData[i] & BIT0 )
+ mvOsOutput(" Early Ras Precharge: Yes \n");
+ else
+ mvOsOutput(" Early Ras Precharge: No \n");
+
+ if ( spdRawData[i] & BIT1 )
+ mvOsOutput(" AutoPreCharge: Yes \n");
+ else
+ mvOsOutput(" AutoPreCharge: No \n");
+
+ if ( spdRawData[i] & BIT2 )
+ mvOsOutput(" Precharge All: Yes \n");
+ else
+ mvOsOutput(" Precharge All: No \n");
+
+ if ( spdRawData[i] & BIT3 )
+ mvOsOutput(" Write 1/ReadBurst: Yes \n");
+ else
+ mvOsOutput(" Write 1/ReadBurst: No \n");
+
+ if ( spdRawData[i] & BIT4 )
+ mvOsOutput(" lower VCC tolerance: 5%%\n");
+ else
+ mvOsOutput(" lower VCC tolerance: 10%%\n");
+
+ if ( spdRawData[i] & BIT5 )
+ mvOsOutput(" upper VCC tolerance: 5%%\n");
+ else
+ mvOsOutput(" upper VCC tolerance: 10%%\n");
+ }
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+ {
+ if ( spdRawData[i] & BIT0 )
+ mvOsOutput(" Supports Weak Driver: Yes \n");
+ else
+ mvOsOutput(" Supports Weak Driver: No \n");
+
+ if ( !(spdRawData[i] & BIT4) )
+ mvOsOutput(" lower VCC tolerance: 0.2V\n");
+
+ if ( !(spdRawData[i] & BIT5) )
+ mvOsOutput(" upper VCC tolerance: 0.2V\n");
+
+                if ( spdRawData[i] & BIT6 )
+                    mvOsOutput(" Concurrent Auto Precharge: Yes \n");
+                else
+                    mvOsOutput(" Concurrent Auto Precharge: No \n");
+
+ if ( spdRawData[i] & BIT7 )
+ mvOsOutput(" Supports Fast AP: Yes \n");
+ else
+ mvOsOutput(" Supports Fast AP: No \n");
+ }
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+ {
+ if ( spdRawData[i] & BIT0 )
+ mvOsOutput(" Supports Weak Driver: Yes \n");
+ else
+ mvOsOutput(" Supports Weak Driver: No \n");
+ }
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 23:
+            /* Minimum Cycle Time At Maximum CAS Latency Minus 1 (2nd highest CL) */
+ leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+ rightOfPoint = (spdRawData[i] & 0x0f) * 10;
+
+ /* DDR2 addition of right of point */
+ if ((spdRawData[i] & 0x0f) == 0xA)
+ {
+ rightOfPoint = 25;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xB)
+ {
+ rightOfPoint = 33;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xC)
+ {
+ rightOfPoint = 66;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xD)
+ {
+ rightOfPoint = 75;
+ }
+
+            mvOsOutput("Minimum Cycle Time At 2nd highest CAS Latency "
+                       "(0 = Not supported): %d.%d [ns]\n",
+                       leftOfPoint, rightOfPoint );
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 24: /* Clock To Data Out 2nd highest Cas Latency Value*/
+ div = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ? 10:100;
+ time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+ ((spdRawData[i] & 0x0f));
+ leftOfPoint = time_tmp / div;
+ rightOfPoint = time_tmp % div;
+ mvOsOutput("Clock To Data Out (2nd CL value): %d.%d [ns]\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 25:
+            /* Minimum Cycle Time At Maximum CAS Latency Minus 2 (3rd highest CL) */
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ leftOfPoint = (spdRawData[i] & 0xfc) >> 2;
+ rightOfPoint = (spdRawData[i] & 0x3) * 25;
+ }
+ else /* DDR1 or DDR2 */
+ {
+ leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+ rightOfPoint = (spdRawData[i] & 0x0f) * 10;
+
+ /* DDR2 addition of right of point */
+ if ((spdRawData[i] & 0x0f) == 0xA)
+ {
+ rightOfPoint = 25;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xB)
+ {
+ rightOfPoint = 33;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xC)
+ {
+ rightOfPoint = 66;
+ }
+ if ((spdRawData[i] & 0x0f) == 0xD)
+ {
+ rightOfPoint = 75;
+ }
+ }
+            mvOsOutput("Minimum Cycle Time At 3rd highest CAS Latency "
+                       "(0 = Not supported): %d.%d [ns]\n",
+                       leftOfPoint, rightOfPoint );
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 26: /* Clock To Data Out 3rd highest Cas Latency Value*/
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ leftOfPoint = (spdRawData[i] & 0xfc) >> 2;
+ rightOfPoint = (spdRawData[i] & 0x3) * 25;
+ }
+ else /* DDR1 or DDR2 */
+ {
+ time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+ ((spdRawData[i] & 0x0f));
+ leftOfPoint = 0;
+ rightOfPoint = time_tmp;
+ }
+ mvOsOutput("Clock To Data Out (3rd CL value): %d.%2d[ns]\n",
+ leftOfPoint, rightOfPoint );
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 27: /* Minimum Row Precharge Time */
+ shift = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 0:2;
+ maskLeftOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+ 0xff : 0xfc;
+ maskRightOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+ 0x00 : 0x03;
+ leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> shift);
+ rightOfPoint = (spdRawData[i] & maskRightOfPoint)*25;
+ temp = ((leftOfPoint*100) + rightOfPoint);/* in 10ps Intervals*/
+ trp_clocks = (temp + (busClkPs-1)) / busClkPs;
+ mvOsOutput("Minimum Row Precharge Time [ns]: %d.%d = "
+ "in Clk cycles %d\n",
+ leftOfPoint, rightOfPoint, trp_clocks);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 28: /* Minimum Row Active to Row Active Time */
+ shift = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 0:2;
+ maskLeftOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+ 0xff : 0xfc;
+ maskRightOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+ 0x00 : 0x03;
+ leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> shift);
+ rightOfPoint = (spdRawData[i] & maskRightOfPoint)*25;
+            temp = ((leftOfPoint*100) + rightOfPoint); /* in 10ps units */
+            trrd_clocks = (temp + (busClkPs-1)) / busClkPs;
+            mvOsOutput("Minimum Row Active -To- Row Active Delay [ns]: "
+                       "%d.%d = in Clk cycles %d\n",
+                       leftOfPoint, rightOfPoint, trrd_clocks);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 29: /* Minimum Ras-To-Cas Delay */
+ shift = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 0:2;
+ maskLeftOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+ 0xff : 0xfc;
+ maskRightOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+ 0x00 : 0x03;
+ leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> shift);
+ rightOfPoint = (spdRawData[i] & maskRightOfPoint)*25;
+            temp = ((leftOfPoint*100) + rightOfPoint); /* in 10ps units */
+            trcd_clocks = (temp + (busClkPs-1) )/ busClkPs;
+            mvOsOutput("Minimum Ras-To-Cas Delay [ns]: %d.%d = "
+                       "in Clk cycles %d\n",
+                       leftOfPoint, rightOfPoint, trcd_clocks);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 30: /* Minimum Ras Pulse Width */
+ tras_clocks = (cas2ps(spdRawData[i])+(busClkPs-1)) / busClkPs;
+ mvOsOutput("Minimum Ras Pulse Width [ns]: %d = "
+ "in Clk cycles %d\n", spdRawData[i], tras_clocks);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 31: /* Module Bank Density */
+            mvOsOutput("Module Bank Density (more than 1 = multi-size module):");
+
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ if (dimmInfo.dimmBankDensity & BIT0)
+ mvOsOutput("1GB, ");
+ if (dimmInfo.dimmBankDensity & BIT1)
+ mvOsOutput("8MB, ");
+ if (dimmInfo.dimmBankDensity & BIT2)
+ mvOsOutput("16MB, ");
+ if (dimmInfo.dimmBankDensity & BIT3)
+ mvOsOutput("32MB, ");
+ if (dimmInfo.dimmBankDensity & BIT4)
+ mvOsOutput("64MB, ");
+ if (dimmInfo.dimmBankDensity & BIT5)
+ mvOsOutput("128MB, ");
+ if (dimmInfo.dimmBankDensity & BIT6)
+ mvOsOutput("256MB, ");
+ if (dimmInfo.dimmBankDensity & BIT7)
+ mvOsOutput("512MB, ");
+ }
+ else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+ {
+ if (dimmInfo.dimmBankDensity & BIT0)
+ mvOsOutput("1GB, ");
+ if (dimmInfo.dimmBankDensity & BIT1)
+ mvOsOutput("2GB, ");
+ if (dimmInfo.dimmBankDensity & BIT2)
+ mvOsOutput("16MB, ");
+ if (dimmInfo.dimmBankDensity & BIT3)
+ mvOsOutput("32MB, ");
+ if (dimmInfo.dimmBankDensity & BIT4)
+ mvOsOutput("64MB, ");
+ if (dimmInfo.dimmBankDensity & BIT5)
+ mvOsOutput("128MB, ");
+ if (dimmInfo.dimmBankDensity & BIT6)
+ mvOsOutput("256MB, ");
+ if (dimmInfo.dimmBankDensity & BIT7)
+ mvOsOutput("512MB, ");
+ }
+ else /* if (dimmInfo.memoryType == MEM_TYPE_DDR2) */
+ {
+ if (dimmInfo.dimmBankDensity & BIT0)
+ mvOsOutput("1GB, ");
+ if (dimmInfo.dimmBankDensity & BIT1)
+ mvOsOutput("2GB, ");
+ if (dimmInfo.dimmBankDensity & BIT2)
+ mvOsOutput("4GB, ");
+ if (dimmInfo.dimmBankDensity & BIT3)
+ mvOsOutput("8GB, ");
+ if (dimmInfo.dimmBankDensity & BIT4)
+ mvOsOutput("16GB, ");
+ if (dimmInfo.dimmBankDensity & BIT5)
+ mvOsOutput("128MB, ");
+ if (dimmInfo.dimmBankDensity & BIT6)
+ mvOsOutput("256MB, ");
+ if (dimmInfo.dimmBankDensity & BIT7)
+ mvOsOutput("512MB, ");
+ }
+ mvOsOutput("\n");
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 32: /* Address And Command Setup Time (measured in ns/1000) */
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ rightOfPoint = (spdRawData[i] & 0x0f);
+ leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+ if(leftOfPoint > 7)
+ {
+ leftOfPoint *= -1;
+ }
+ }
+ else /* DDR1 or DDR2 */
+ {
+ time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+ ((spdRawData[i] & 0x0f));
+ leftOfPoint = time_tmp / 100;
+ rightOfPoint = time_tmp % 100;
+ }
+ mvOsOutput("Address And Command Setup Time [ns]: %d.%d\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 33: /* Address And Command Hold Time */
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ rightOfPoint = (spdRawData[i] & 0x0f);
+ leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+ if(leftOfPoint > 7)
+ {
+ leftOfPoint *= -1;
+ }
+ }
+ else /* DDR1 or DDR2 */
+ {
+ time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+ ((spdRawData[i] & 0x0f));
+ leftOfPoint = time_tmp / 100;
+ rightOfPoint = time_tmp % 100;
+ }
+ mvOsOutput("Address And Command Hold Time [ns]: %d.%d\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 34: /* Data Input Setup Time */
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ rightOfPoint = (spdRawData[i] & 0x0f);
+ leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+ if(leftOfPoint > 7)
+ {
+ leftOfPoint *= -1;
+ }
+ }
+ else /* DDR1 or DDR2 */
+ {
+ time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+ ((spdRawData[i] & 0x0f));
+ leftOfPoint = time_tmp / 100;
+ rightOfPoint = time_tmp % 100;
+ }
+ mvOsOutput("Data Input Setup Time [ns]: %d.%d\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 35: /* Data Input Hold Time */
+ if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+ {
+ rightOfPoint = (spdRawData[i] & 0x0f);
+ leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+ if(leftOfPoint > 7)
+ {
+ leftOfPoint *= -1;
+ }
+ }
+ else /* DDR1 or DDR2 */
+ {
+ time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+ ((spdRawData[i] & 0x0f));
+ leftOfPoint = time_tmp / 100;
+ rightOfPoint = time_tmp % 100;
+ }
+ mvOsOutput("Data Input Hold Time [ns]: %d.%d\n\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+
+ case 36: /* Relevant for DDR2 only: Write Recovery Time */
+ leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> 2);
+ rightOfPoint = (spdRawData[i] & maskRightOfPoint) * 25;
+ mvOsOutput("Write Recovery Time [ns]: %d.%d\n",
+ leftOfPoint, rightOfPoint);
+ break;
+/*----------------------------------------------------------------------------*/
+ }
+
+}
+
+
+/*
+ * translate ns.ns/10 coding of SPD timing values
+ * into ps unit values
+ */
+/*******************************************************************************
+* cas2ps - Translate x.y ns parameter to pico-seconds values
+*
+* DESCRIPTION:
+*       This function translates x.y nanoseconds to its value in picoseconds.
+* For example 3.75ns will return 3750.
+*
+* INPUT:
+* spd_byte - DIMM SPD byte.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* value in pico seconds.
+*
+*******************************************************************************/
+static MV_U32 cas2ps(MV_U8 spd_byte)
+{
+ MV_U32 ns, ns10;
+
+ /* isolate upper nibble */
+ ns = (spd_byte >> 4) & 0x0F;
+ /* isolate lower nibble */
+ ns10 = (spd_byte & 0x0F);
+
+ if( ns10 < 10 ) {
+ ns10 *= 10;
+ }
+ else if( ns10 == 10 )
+ ns10 = 25;
+ else if( ns10 == 11 )
+ ns10 = 33;
+ else if( ns10 == 12 )
+ ns10 = 66;
+ else if( ns10 == 13 )
+ ns10 = 75;
+ else
+ {
+ mvOsOutput("cas2ps Err. unsupported cycle time.\n");
+ }
+
+ return (ns*1000 + ns10*10);
+}
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.h
new file mode 100644
index 000000000..f95546624
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.h
@@ -0,0 +1,192 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDram
+#define __INCmvDram
+
+#include "ddr2/mvDramIf.h"
+#include "twsi/mvTwsi.h"
+
+#define MAX_DIMM_NUM 2
+#define SPD_SIZE 128
+
+/* Dimm spd offsets */
+#define DIMM_MEM_TYPE 2
+#define DIMM_ROW_NUM 3
+#define DIMM_COL_NUM 4
+#define DIMM_MODULE_BANK_NUM 5
+#define DIMM_DATA_WIDTH 6
+#define DIMM_VOLT_IF 8
+#define DIMM_MIN_CC_AT_MAX_CAS 9
+#define DIMM_ERR_CHECK_TYPE 11
+#define DIMM_REFRESH_INTERVAL 12
+#define DIMM_SDRAM_WIDTH 13
+#define DIMM_ERR_CHECK_DATA_WIDTH 14
+#define DIMM_MIN_CLK_DEL 15
+#define DIMM_BURST_LEN_SUP 16
+#define DIMM_DEV_BANK_NUM 17
+#define DIMM_SUP_CAL 18
+#define DIMM_DDR2_TYPE_INFORMATION 20 /* DDR2 only */
+#define DIMM_BUF_ADDR_CONT_IN 21
+#define DIMM_MIN_CC_AT_MAX_CAS_MINUS1 23
+#define DIMM_MIN_CC_AT_MAX_CAS_MINUS2 25
+#define DIMM_MIN_ROW_PRECHARGE_TIME 27
+#define DIMM_MIN_ROW_ACTIVE_TO_ROW_ACTIVE 28
+#define DIMM_MIN_RAS_TO_CAS_DELAY 29
+#define DIMM_MIN_RAS_PULSE_WIDTH 30
+#define DIMM_BANK_DENSITY 31
+#define DIMM_MIN_WRITE_RECOVERY_TIME 36
+#define DIMM_MIN_WRITE_TO_READ_CMD_DELAY 37
+#define DIMM_MIN_READ_TO_PRECH_CMD_DELAY 38
+#define DIMM_MIN_REFRESH_TO_ACTIVATE_CMD 42
+#define DIMM_SPD_VERSION 62
+
+/* Dimm Memory Type values */
+#define DIMM_MEM_TYPE_SDRAM 0x4
+#define DIMM_MEM_TYPE_DDR1 0x7
+#define DIMM_MEM_TYPE_DDR2 0x8
+
+#define DIMM_MODULE_MANU_OFFS 64
+#define DIMM_MODULE_MANU_SIZE 8
+#define DIMM_MODULE_VEN_OFFS 73
+#define DIMM_MODULE_VEN_SIZE 25
+#define DIMM_MODULE_ID_OFFS 99
+#define DIMM_MODULE_ID_SIZE 18
+
+/* enumeration for voltage levels. */
+typedef enum _mvDimmVoltageIf
+{
+ TTL_5V_TOLERANT,
+ LVTTL,
+ HSTL_1_5V,
+ SSTL_3_3V,
+ SSTL_2_5V,
+ VOLTAGE_UNKNOWN,
+} MV_DIMM_VOLTAGE_IF;
+
+
+/* enumeration for SDRAM CAS Latencies. */
+typedef enum _mvDimmSdramCas
+{
+ SD_CL_1 =1,
+ SD_CL_2,
+ SD_CL_3,
+ SD_CL_4,
+ SD_CL_5,
+ SD_CL_6,
+ SD_CL_7,
+ SD_FAULT
+}MV_DIMM_SDRAM_CAS;
+
+
+/* DIMM information structure */
+typedef struct _mvDimmInfo
+{
+ MV_MEMORY_TYPE memoryType; /* DDR or SDRAM */
+
+ MV_U8 spdRawData[SPD_SIZE]; /* Content of SPD-EEPROM copied 1:1 */
+
+ /* DIMM dimensions */
+ MV_U32 numOfRowAddr;
+ MV_U32 numOfColAddr;
+ MV_U32 numOfModuleBanks;
+ MV_U32 dataWidth;
+ MV_U32 errorCheckType; /* ECC , PARITY..*/
+ MV_U32 sdramWidth; /* 4,8,16 or 32 */
+ MV_U32 errorCheckDataWidth; /* 0 - no, 1 - Yes */
+ MV_U32 burstLengthSupported;
+ MV_U32 numOfBanksOnEachDevice;
+ MV_U32 suportedCasLatencies;
+ MV_U32 refreshInterval;
+ MV_U32 dimmBankDensity;
+ MV_U32 dimmTypeInfo; /* DDR2 only */
+ MV_U32 dimmAttributes;
+
+ /* DIMM timing parameters */
+ MV_U32 minCycleTimeAtMaxCasLatPs;
+ MV_U32 minCycleTimeAtMaxCasLatMinus1Ps;
+ MV_U32 minCycleTimeAtMaxCasLatMinus2Ps;
+ MV_U32 minRowPrechargeTime;
+ MV_U32 minRowActiveToRowActive;
+ MV_U32 minRasToCasDelay;
+ MV_U32 minRasPulseWidth;
+ MV_U32 minWriteRecoveryTime; /* DDR2 only */
+ MV_U32 minWriteToReadCmdDelay; /* DDR2 only */
+ MV_U32 minReadToPrechCmdDelay; /* DDR2 only */
+ MV_U32 minRefreshToActiveCmd; /* DDR2 only */
+
+ /* Parameters calculated from the extracted DIMM information */
+    MV_U32  size;            /* DIMM size in MB units (e.g. 16, 64, 128, 256 or 512) */
+    MV_U32  deviceDensity;   /* Single device density in Mbit units (e.g. 256 or 512) */
+ MV_U32 numberOfDevices;
+
+} MV_DIMM_INFO;
+
+
+MV_STATUS mvDramBankInfoGet(MV_U32 bankNum, MV_DRAM_BANK_INFO *pBankInfo);
+MV_STATUS dimmSpdGet(MV_U32 dimmNum, MV_DIMM_INFO *pDimmInfo);
+MV_VOID dimmSpdPrint(MV_U32 dimmNum);
+MV_STATUS dimmSpdCpy(MV_VOID);
+
+#endif /* __INCmvDram */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEth.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEth.c
new file mode 100644
index 000000000..d24e788fc
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEth.c
@@ -0,0 +1,2952 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvEth.c - Marvell's Gigabit Ethernet controller low level driver
+*
+* DESCRIPTION:
+*       This file introduces OS-independent APIs to Marvell's Gigabit Ethernet
+*       controller. This Gigabit Ethernet Controller driver API controls
+*       1) Operations (i.e. port Init, Finish, Up, Down, PhyReset etc.).
+* 2) Data flow (i.e. port Send, Receive etc').
+* 3) MAC Filtering functions (ethSetMcastAddr, ethSetRxFilterMode, etc.)
+* 4) MIB counters support (ethReadMibCounter)
+* 5) Debug functions (ethPortRegs, ethPortCounters, ethPortQueues, etc.)
+* Each Gigabit Ethernet port is controlled via ETH_PORT_CTRL struct.
+* This struct includes configuration information as well as driver
+* internal data needed for its operations.
+*
+* Supported Features:
+* - OS independent. All required OS services are implemented via external
+* OS dependent components (like osLayer or ethOsg)
+*       - The user is free from Rx/Tx queue management.
+* - Simple Gigabit Ethernet port operation API.
+* - Simple Gigabit Ethernet port data flow API.
+* - Data flow and operation API support per queue functionality.
+* - Support cached descriptors for better performance.
+* - PHY access and control API.
+* - Port Configuration API.
+* - Full control over Special and Other Multicast MAC tables.
+*
+*******************************************************************************/
+/* includes */
+#include "mvTypes.h"
+#include "mv802_3.h"
+#include "mvDebug.h"
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "eth-phy/mvEthPhy.h"
+#include "eth/mvEth.h"
+#include "eth/gbe/mvEthGbe.h"
+#include "cpu/mvCpu.h"
+
+#ifdef INCLUDE_SYNC_BARR
+#include "sys/mvCpuIf.h"
+#endif
+
+#ifdef MV_RT_DEBUG
+# define ETH_DEBUG
+#endif
+
+
+/* locals */
+MV_BOOL ethDescInSram;
+MV_BOOL ethDescSwCoher;
+
+/* This array holds the control structure of each port */
+ETH_PORT_CTRL* ethPortCtrl[MV_ETH_MAX_PORTS];
+
+/* Ethernet Port Local routines */
+
+static void ethInitRxDescRing(ETH_PORT_CTRL* pPortCtrl, int queue);
+
+static void ethInitTxDescRing(ETH_PORT_CTRL* pPortCtrl, int queue);
+
+static void ethSetUcastTable(int portNo, int queue);
+
+static MV_BOOL ethSetUcastAddr (int ethPortNum, MV_U8 lastNibble, int queue);
+static MV_BOOL ethSetSpecialMcastAddr(int ethPortNum, MV_U8 lastByte, int queue);
+static MV_BOOL ethSetOtherMcastAddr(int ethPortNum, MV_U8 crc8, int queue);
+
+static void ethFreeDescrMemory(ETH_PORT_CTRL* pEthPortCtrl, MV_BUF_INFO* pDescBuf);
+static MV_U8* ethAllocDescrMemory(ETH_PORT_CTRL* pEthPortCtrl, int size,
+ MV_ULONG* pPhysAddr, MV_U32 *memHandle);
+
+static MV_U32 mvEthMruGet(MV_U32 maxRxPktSize);
+
+static void mvEthPortSgmiiConfig(int port);
+
+
+
+/******************************************************************************/
+/* EthDrv Initialization functions */
+/******************************************************************************/
+
+/*******************************************************************************
+* mvEthHalInit - Initialize the Giga Ethernet unit
+*
+* DESCRIPTION:
+*       This function initializes the Giga Ethernet unit.
+* 1) Configure Address decode windows of the unit
+* 2) Set registers to HW default values.
+* 3) Clear and Disable interrupts
+*
+* INPUT: NONE
+*
+* RETURN: NONE
+*
+* NOTE: this function is called once in the boot process.
+*******************************************************************************/
+void mvEthHalInit(void)
+{
+ int port;
+
+ /* Init static data structures */
+ for (port=0; port<MV_ETH_MAX_PORTS; port++)
+ {
+ ethPortCtrl[port] = NULL;
+ }
+ /* Power down all existing ports */
+ for(port=0; port<mvCtrlEthMaxPortGet(); port++)
+ {
+
+#if defined (MV78200)
+ /* Skip ports mapped to another CPU*/
+ if (MV_FALSE == mvSocUnitIsMappedToThisCpu(GIGA0+port))
+ {
+ continue;
+ }
+#endif
+
+ /* Skip power down ports */
+ if (MV_FALSE == mvCtrlPwrClckGet(ETH_GIG_UNIT_ID, port)) continue;
+
+ /* Disable Giga Ethernet Unit interrupts */
+ MV_REG_WRITE(ETH_UNIT_INTR_MASK_REG(port), 0);
+
+ /* Clear ETH_UNIT_INTR_CAUSE_REG register */
+ MV_REG_WRITE(ETH_UNIT_INTR_CAUSE_REG(port), 0);
+
+ }
+
+ mvEthMemAttrGet(&ethDescInSram, &ethDescSwCoher);
+
+#if defined(ETH_DESCR_IN_SRAM)
+ if(ethDescInSram == MV_FALSE)
+ {
+ mvOsPrintf("ethDrv: WARNING! Descriptors will be allocated in DRAM instead of SRAM.\n");
+ }
+#endif /* ETH_DESCR_IN_SRAM */
+}
+
+/*******************************************************************************
+* mvEthMemAttrGet - Define properties (SRAM/DRAM, SW_COHER / HW_COHER / UNCACHED)
+*                   of the memory location for RX and TX descriptors.
+*
+* DESCRIPTION:
+*       This function determines the placement of RX and TX descriptors.
+*       - If ETH_DESCR_IN_SRAM is defined, descriptors are placed in SRAM memory.
+*       - If ETH_DESCR_IN_SDRAM is defined, descriptors are placed in SDRAM memory.
+*
+* INPUT:
+* MV_BOOL* pIsSram - place of descriptors:
+* MV_TRUE - in SRAM
+* MV_FALSE - in DRAM
+* MV_BOOL* pIsSwCoher - cache coherency of descriptors:
+* MV_TRUE - driver is responsible for cache coherency
+* MV_FALSE - driver is not responsible for cache coherency
+*
+* RETURN:
+*
+*******************************************************************************/
+void mvEthMemAttrGet(MV_BOOL* pIsSram, MV_BOOL* pIsSwCoher)
+{
+ MV_BOOL isSram, isSwCoher;
+
+ isSram = MV_FALSE;
+#if (ETHER_DRAM_COHER == MV_CACHE_COHER_SW)
+ isSwCoher = MV_TRUE;
+#else
+ isSwCoher = MV_FALSE;
+#endif
+
+#if defined(ETH_DESCR_IN_SRAM)
+ if( mvCtrlSramSizeGet() > 0)
+ {
+ isSram = MV_TRUE;
+ #if (INTEG_SRAM_COHER == MV_CACHE_COHER_SW)
+ isSwCoher = MV_TRUE;
+ #else
+ isSwCoher = MV_FALSE;
+ #endif
+ }
+#endif /* ETH_DESCR_IN_SRAM */
+
+ if(pIsSram != NULL)
+ *pIsSram = isSram;
+
+ if(pIsSwCoher != NULL)
+ *pIsSwCoher = isSwCoher;
+}
+
+
+
+/******************************************************************************/
+/* Port Initialization functions */
+/******************************************************************************/
+
+/*******************************************************************************
+* mvEthPortInit - Initialize the Ethernet port driver
+*
+* DESCRIPTION:
+*       This function initializes the Ethernet port.
+* 1) Allocate and initialize internal port Control structure.
+* 2) Create RX and TX descriptor rings for default RX and TX queues
+* 3) Disable RX and TX operations, clear cause registers and
+* mask all interrupts.
+* 4) Set all registers to default values and clean all MAC tables.
+*
+* INPUT:
+* int portNo - Ethernet port number
+* ETH_PORT_INIT *pEthPortInit - Ethernet port init structure
+*
+* RETURN:
+*       void* - Ethernet port handle, which should be passed to most other
+*               functions dealing with this port.
+*
+* NOTE: This function is called once per port when loading the eth module.
+*******************************************************************************/
+void* mvEthPortInit(int portNo, MV_ETH_PORT_INIT *pEthPortInit)
+{
+ int queue, descSize;
+ ETH_PORT_CTRL* pPortCtrl;
+
+ /* Check validity of parameters */
+ if( (portNo >= (int)mvCtrlEthMaxPortGet()) ||
+ (pEthPortInit->rxDefQ >= MV_ETH_RX_Q_NUM) ||
+ (pEthPortInit->maxRxPktSize < 1518) )
+ {
+ mvOsPrintf("EthPort #%d: Bad initialization parameters\n", portNo);
+ return NULL;
+ }
+ if( (pEthPortInit->rxDescrNum[pEthPortInit->rxDefQ]) == 0)
+ {
+ mvOsPrintf("EthPort #%d: rxDefQ (%d) must be created\n",
+ portNo, pEthPortInit->rxDefQ);
+ return NULL;
+ }
+
+ pPortCtrl = (ETH_PORT_CTRL*)mvOsMalloc( sizeof(ETH_PORT_CTRL) );
+ if(pPortCtrl == NULL)
+ {
+ mvOsPrintf("EthDrv: Can't allocate %dB for port #%d control structure!\n",
+ (int)sizeof(ETH_PORT_CTRL), portNo);
+ return NULL;
+ }
+
+ memset(pPortCtrl, 0, sizeof(ETH_PORT_CTRL) );
+ ethPortCtrl[portNo] = pPortCtrl;
+
+ pPortCtrl->portState = MV_UNDEFINED_STATE;
+
+ pPortCtrl->portNo = portNo;
+
+ pPortCtrl->osHandle = pEthPortInit->osHandle;
+
+ /* Copy Configuration parameters */
+ pPortCtrl->portConfig.maxRxPktSize = pEthPortInit->maxRxPktSize;
+ pPortCtrl->portConfig.rxDefQ = pEthPortInit->rxDefQ;
+ pPortCtrl->portConfig.ejpMode = 0;
+
+ for( queue=0; queue<MV_ETH_RX_Q_NUM; queue++ )
+ {
+ pPortCtrl->rxQueueConfig[queue].descrNum = pEthPortInit->rxDescrNum[queue];
+ }
+ for( queue=0; queue<MV_ETH_TX_Q_NUM; queue++ )
+ {
+ pPortCtrl->txQueueConfig[queue].descrNum = pEthPortInit->txDescrNum[queue];
+ }
+
+ mvEthPortDisable(pPortCtrl);
+
+ /* Set the board information regarding PHY address */
+ mvEthPhyAddrSet(pPortCtrl, mvBoardPhyAddrGet(portNo) );
+
+ /* Create all requested RX queues */
+ for(queue=0; queue<MV_ETH_RX_Q_NUM; queue++)
+ {
+ if(pPortCtrl->rxQueueConfig[queue].descrNum == 0)
+ continue;
+
+ /* Allocate memory for RX descriptors */
+ descSize = ((pPortCtrl->rxQueueConfig[queue].descrNum * ETH_RX_DESC_ALIGNED_SIZE) +
+ CPU_D_CACHE_LINE_SIZE);
+
+ pPortCtrl->rxQueue[queue].descBuf.bufVirtPtr =
+ ethAllocDescrMemory(pPortCtrl, descSize,
+ &pPortCtrl->rxQueue[queue].descBuf.bufPhysAddr,
+ &pPortCtrl->rxQueue[queue].descBuf.memHandle);
+ pPortCtrl->rxQueue[queue].descBuf.bufSize = descSize;
+ if(pPortCtrl->rxQueue[queue].descBuf.bufVirtPtr == NULL)
+ {
+ mvOsPrintf("EthPort #%d, rxQ=%d: Can't allocate %d bytes in %s for %d RX descr\n",
+ pPortCtrl->portNo, queue, descSize,
+ ethDescInSram ? "SRAM" : "DRAM",
+ pPortCtrl->rxQueueConfig[queue].descrNum);
+ return NULL;
+ }
+
+ ethInitRxDescRing(pPortCtrl, queue);
+ }
+ /* Create TX queues */
+ for(queue=0; queue<MV_ETH_TX_Q_NUM; queue++)
+ {
+ if(pPortCtrl->txQueueConfig[queue].descrNum == 0)
+ continue;
+
+ /* Allocate memory for TX descriptors */
+ descSize = ((pPortCtrl->txQueueConfig[queue].descrNum * ETH_TX_DESC_ALIGNED_SIZE) +
+ CPU_D_CACHE_LINE_SIZE);
+
+ pPortCtrl->txQueue[queue].descBuf.bufVirtPtr =
+ ethAllocDescrMemory(pPortCtrl, descSize,
+ &pPortCtrl->txQueue[queue].descBuf.bufPhysAddr,
+ &pPortCtrl->txQueue[queue].descBuf.memHandle);
+ pPortCtrl->txQueue[queue].descBuf.bufSize = descSize;
+ if(pPortCtrl->txQueue[queue].descBuf.bufVirtPtr == NULL)
+ {
+ mvOsPrintf("EthPort #%d, txQ=%d: Can't allocate %d bytes in %s for %d TX descr\n",
+ pPortCtrl->portNo, queue, descSize, ethDescInSram ? "SRAM" : "DRAM",
+ pPortCtrl->txQueueConfig[queue].descrNum);
+ return NULL;
+ }
+
+ ethInitTxDescRing(pPortCtrl, queue);
+ }
+ mvEthDefaultsSet(pPortCtrl);
+
+ pPortCtrl->portState = MV_IDLE;
+ return pPortCtrl;
+}
+
+/*******************************************************************************
+* ethPortFinish - Finish the Ethernet port driver
+*
+* DESCRIPTION:
+*       This function shuts down the Ethernet port.
+*       1) Bring the Ethernet port down if needed.
+* 2) Delete RX and TX descriptor rings for all created RX and TX queues
+* 3) Free internal port Control structure.
+*
+* INPUT:
+* void* pEthPortHndl - Ethernet port handler
+*
+* RETURN: NONE.
+*
+*******************************************************************************/
+void mvEthPortFinish(void* pPortHndl)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+ int queue, portNo = pPortCtrl->portNo;
+
+ if(pPortCtrl->portState == MV_ACTIVE)
+ {
+ mvOsPrintf("ethPort #%d: Warning !!! Finish port in Active state\n",
+ portNo);
+ mvEthPortDisable(pPortHndl);
+ }
+
+ /* Free all allocated RX queues */
+ for(queue=0; queue<MV_ETH_RX_Q_NUM; queue++)
+ {
+ ethFreeDescrMemory(pPortCtrl, &pPortCtrl->rxQueue[queue].descBuf);
+ }
+
+ /* Free all allocated TX queues */
+ for(queue=0; queue<MV_ETH_TX_Q_NUM; queue++)
+ {
+ ethFreeDescrMemory(pPortCtrl, &pPortCtrl->txQueue[queue].descBuf);
+ }
+
+ /* Free port control structure */
+ mvOsFree(pPortCtrl);
+
+ ethPortCtrl[portNo] = NULL;
+}
+
+/*******************************************************************************
+* mvEthDefaultsSet - Set defaults to the ethernet port
+*
+* DESCRIPTION:
+*       This function sets default values for the Ethernet port.
+* 1) Clear Cause registers and Mask all interrupts
+* 2) Clear all MAC tables
+* 3) Set defaults to all registers
+* 4) Reset all created RX and TX descriptors ring
+* 5) Reset PHY
+*
+* INPUT:
+* void* pEthPortHndl - Ethernet port handler
+*
+* RETURN: MV_STATUS
+* MV_OK - Success, Others - Failure
+* NOTE:
+*       This function updates all port configuration except the settings
+*       supplied initially by the OS glue via MV_ETH_PORT_INIT.
+*       It can be called after portDown to return the port settings
+*       to defaults.
+*******************************************************************************/
+MV_STATUS mvEthDefaultsSet(void* pPortHndl)
+{
+ int ethPortNo, queue;
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+ ETH_QUEUE_CTRL* pQueueCtrl;
+ MV_U32 txPrio;
+ MV_U32 portCfgReg, portCfgExtReg, portSerialCtrlReg, portSerialCtrl1Reg, portSdmaCfgReg;
+ MV_BOARD_MAC_SPEED boardMacCfg;
+
+ ethPortNo = pPortCtrl->portNo;
+
+ /* Clear Cause registers */
+ MV_REG_WRITE(ETH_INTR_CAUSE_REG(ethPortNo),0);
+ MV_REG_WRITE(ETH_INTR_CAUSE_EXT_REG(ethPortNo),0);
+
+ /* Mask all interrupts */
+ MV_REG_WRITE(ETH_INTR_MASK_REG(ethPortNo),0);
+ MV_REG_WRITE(ETH_INTR_MASK_EXT_REG(ethPortNo),0);
+
+ portCfgReg = PORT_CONFIG_VALUE;
+ portCfgExtReg = PORT_CONFIG_EXTEND_VALUE;
+
+ boardMacCfg = mvBoardMacSpeedGet(ethPortNo);
+
+ if(boardMacCfg == BOARD_MAC_SPEED_100M)
+ {
+ portSerialCtrlReg = PORT_SERIAL_CONTROL_100MB_FORCE_VALUE;
+ }
+ else if(boardMacCfg == BOARD_MAC_SPEED_1000M)
+ {
+ portSerialCtrlReg = PORT_SERIAL_CONTROL_1000MB_FORCE_VALUE;
+ }
+ else
+ {
+ portSerialCtrlReg = PORT_SERIAL_CONTROL_VALUE;
+ }
+
+ /* build PORT_SDMA_CONFIG_REG */
+ portSdmaCfgReg = ETH_TX_INTR_COAL_MASK(0);
+ portSdmaCfgReg |= ETH_TX_BURST_SIZE_MASK(ETH_BURST_SIZE_16_64BIT_VALUE);
+
+#if ( (ETHER_DRAM_COHER == MV_CACHE_COHER_HW_WB) || \
+ (ETHER_DRAM_COHER == MV_CACHE_COHER_HW_WT) )
+ /* some devices have restricted RX burst size when using HW coherency */
+ portSdmaCfgReg |= ETH_RX_BURST_SIZE_MASK(ETH_BURST_SIZE_4_64BIT_VALUE);
+#else
+ portSdmaCfgReg |= ETH_RX_BURST_SIZE_MASK(ETH_BURST_SIZE_16_64BIT_VALUE);
+#endif
+
+#if defined(MV_CPU_BE)
+ /* big endian */
+# if defined(MV_ARM)
+ portSdmaCfgReg |= (ETH_RX_NO_DATA_SWAP_MASK |
+ ETH_TX_NO_DATA_SWAP_MASK |
+ ETH_DESC_SWAP_MASK);
+# elif defined(MV_PPC)
+ portSdmaCfgReg |= (ETH_RX_DATA_SWAP_MASK |
+ ETH_TX_DATA_SWAP_MASK |
+ ETH_NO_DESC_SWAP_MASK);
+# else
+# error "Giga Ethernet Swap policy is not defined for the CPU_ARCH"
+# endif /* MV_ARM / MV_PPC */
+
+#else /* MV_CPU_LE */
+ /* little endian */
+ portSdmaCfgReg |= (ETH_RX_NO_DATA_SWAP_MASK |
+ ETH_TX_NO_DATA_SWAP_MASK |
+ ETH_NO_DESC_SWAP_MASK);
+#endif /* MV_CPU_BE / MV_CPU_LE */
+
+ pPortCtrl->portRxQueueCmdReg = 0;
+ pPortCtrl->portTxQueueCmdReg = 0;
+
+#if (MV_ETH_VERSION >= 4)
+ if(pPortCtrl->portConfig.ejpMode == MV_TRUE)
+ {
+ MV_REG_WRITE(ETH_TXQ_CMD_1_REG(ethPortNo), ETH_TX_EJP_ENABLE_MASK);
+ }
+ else
+ {
+        MV_REG_WRITE(ETH_TXQ_CMD_1_REG(ethPortNo), 0);
+ }
+#endif /* (MV_ETH_VERSION >= 4) */
+
+ ethSetUcastTable(ethPortNo, -1);
+ mvEthSetSpecialMcastTable(ethPortNo, -1);
+ mvEthSetOtherMcastTable(ethPortNo, -1);
+
+ portSerialCtrlReg &= ~ETH_MAX_RX_PACKET_SIZE_MASK;
+
+ portSerialCtrlReg |= mvEthMruGet(pPortCtrl->portConfig.maxRxPktSize);
+
+ MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(ethPortNo), portSerialCtrlReg);
+
+ /* Update value of PortConfig register accordingly with all RxQueue types */
+ pPortCtrl->portConfig.rxArpQ = pPortCtrl->portConfig.rxDefQ;
+ pPortCtrl->portConfig.rxBpduQ = pPortCtrl->portConfig.rxDefQ;
+ pPortCtrl->portConfig.rxTcpQ = pPortCtrl->portConfig.rxDefQ;
+ pPortCtrl->portConfig.rxUdpQ = pPortCtrl->portConfig.rxDefQ;
+
+ portCfgReg &= ~ETH_DEF_RX_QUEUE_ALL_MASK;
+ portCfgReg |= ETH_DEF_RX_QUEUE_MASK(pPortCtrl->portConfig.rxDefQ);
+
+ portCfgReg &= ~ETH_DEF_RX_ARP_QUEUE_ALL_MASK;
+ portCfgReg |= ETH_DEF_RX_ARP_QUEUE_MASK(pPortCtrl->portConfig.rxArpQ);
+
+ portCfgReg &= ~ETH_DEF_RX_BPDU_QUEUE_ALL_MASK;
+ portCfgReg |= ETH_DEF_RX_BPDU_QUEUE_MASK(pPortCtrl->portConfig.rxBpduQ);
+
+ portCfgReg &= ~ETH_DEF_RX_TCP_QUEUE_ALL_MASK;
+ portCfgReg |= ETH_DEF_RX_TCP_QUEUE_MASK(pPortCtrl->portConfig.rxTcpQ);
+
+ portCfgReg &= ~ETH_DEF_RX_UDP_QUEUE_ALL_MASK;
+ portCfgReg |= ETH_DEF_RX_UDP_QUEUE_MASK(pPortCtrl->portConfig.rxUdpQ);
+
+ /* Assignment of Tx CTRP of given queue */
+ txPrio = 0;
+
+ for(queue=0; queue<MV_ETH_TX_Q_NUM; queue++)
+ {
+ pQueueCtrl = &pPortCtrl->txQueue[queue];
+
+ if(pQueueCtrl->pFirstDescr != NULL)
+ {
+ ethResetTxDescRing(pPortCtrl, queue);
+
+ MV_REG_WRITE(ETH_TXQ_TOKEN_COUNT_REG(ethPortNo, queue),
+ 0x3fffffff);
+ MV_REG_WRITE(ETH_TXQ_TOKEN_CFG_REG(ethPortNo, queue),
+ 0x03ffffff);
+ }
+ else
+ {
+ MV_REG_WRITE(ETH_TXQ_TOKEN_COUNT_REG(ethPortNo, queue), 0x0);
+ MV_REG_WRITE(ETH_TXQ_TOKEN_CFG_REG(ethPortNo, queue), 0x0);
+ }
+ }
+
+ /* Assignment of Rx CRDP of given queue */
+ for(queue=0; queue<MV_ETH_RX_Q_NUM; queue++)
+ {
+ ethResetRxDescRing(pPortCtrl, queue);
+ }
+
+ /* Allow receiving packets with odd number of preamble nibbles */
+ portSerialCtrl1Reg = MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(ethPortNo));
+ portSerialCtrl1Reg |= ETH_EN_MII_ODD_PRE_MASK;
+ MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_1_REG(ethPortNo), portSerialCtrl1Reg);
+
+ /* Assign port configuration and command. */
+ MV_REG_WRITE(ETH_PORT_CONFIG_REG(ethPortNo), portCfgReg);
+
+ MV_REG_WRITE(ETH_PORT_CONFIG_EXTEND_REG(ethPortNo), portCfgExtReg);
+
+ /* Assign port SDMA configuration */
+ MV_REG_WRITE(ETH_SDMA_CONFIG_REG(ethPortNo), portSdmaCfgReg);
+
+ /* Turn off the port/queue bandwidth limitation */
+ MV_REG_WRITE(ETH_MAX_TRANSMIT_UNIT_REG(ethPortNo), 0x0);
+
+ return MV_OK;
+}
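+
+/*
+ * Example (per the NOTE above): the defaults can be re-applied after a link
+ * down to bring the port registers back to a known state, e.g.
+ *
+ *     mvEthPortDown(pPortHndl);
+ *     mvEthDefaultsSet(pPortHndl);
+ *     mvEthPortUp(pPortHndl);
+ */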
+
+/*******************************************************************************
+* ethPortUp - Start the Ethernet port RX and TX activity.
+*
+* DESCRIPTION:
+* This routine starts Rx and Tx activity.
+*
+* Note: Each Rx and Tx queue descriptor's list must be initialized prior
+* to calling this function (use etherInitTxDescRing for Tx queues and
+* etherInitRxDescRing for Rx queues).
+*
+* INPUT:
+* void* pEthPortHndl - Ethernet port handler
+*
+* RETURN: MV_STATUS
+* MV_OK - Success, Others - Failure.
+*
+* NOTE : used for port link up.
+*******************************************************************************/
+MV_STATUS mvEthPortUp(void* pEthPortHndl)
+{
+ int ethPortNo;
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+
+ ethPortNo = pPortCtrl->portNo;
+
+ if( (pPortCtrl->portState != MV_ACTIVE) &&
+ (pPortCtrl->portState != MV_PAUSED) )
+ {
+ mvOsPrintf("ethDrv port%d: Unexpected port state %d\n",
+ ethPortNo, pPortCtrl->portState);
+ return MV_BAD_STATE;
+ }
+
+ ethPortNo = pPortCtrl->portNo;
+
+ /* Enable port RX. */
+ MV_REG_WRITE(ETH_RX_QUEUE_COMMAND_REG(ethPortNo), pPortCtrl->portRxQueueCmdReg);
+
+ /* Enable port TX. */
+ MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(ethPortNo)) = pPortCtrl->portTxQueueCmdReg;
+
+ pPortCtrl->portState = MV_ACTIVE;
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* ethPortDown - Stop the Ethernet port activity.
+*
+* DESCRIPTION:
+*
+* INPUT:
+* void* pEthPortHndl - Ethernet port handler
+*
+* RETURN: MV_STATUS
+* MV_OK - Success, Others - Failure.
+*
+* NOTE : used for port link down.
+*******************************************************************************/
+MV_STATUS mvEthPortDown(void* pEthPortHndl)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+ int ethPortNum = pPortCtrl->portNo;
+ unsigned int regData;
+ volatile int uDelay, mDelay;
+
+ /* Stop Rx port activity. Check port Rx activity. */
+ regData = (MV_REG_READ(ETH_RX_QUEUE_COMMAND_REG(ethPortNum))) & ETH_RXQ_ENABLE_MASK;
+ if(regData != 0)
+ {
+ /* Issue stop command for active channels only */
+ MV_REG_WRITE(ETH_RX_QUEUE_COMMAND_REG(ethPortNum), (regData << ETH_RXQ_DISABLE_OFFSET));
+ }
+
+ /* Stop Tx port activity. Check port Tx activity. */
+ regData = (MV_REG_READ(ETH_TX_QUEUE_COMMAND_REG(ethPortNum))) & ETH_TXQ_ENABLE_MASK;
+ if(regData != 0)
+ {
+ /* Issue stop command for active channels only */
+ MV_REG_WRITE(ETH_TX_QUEUE_COMMAND_REG(ethPortNum),
+ (regData << ETH_TXQ_DISABLE_OFFSET) );
+ }
+
+ /* Force link down */
+/*
+ regData = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(ethPortNum));
+ regData &= ~(ETH_DO_NOT_FORCE_LINK_FAIL_MASK);
+ MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(ethPortNum), regData);
+*/
+ /* Wait for all Rx activity to terminate. */
+ mDelay = 0;
+ do
+ {
+ if(mDelay >= RX_DISABLE_TIMEOUT_MSEC)
+ {
+ mvOsPrintf("ethPort_%d: TIMEOUT for RX stopped !!! rxQueueCmd - 0x%08x\n",
+ ethPortNum, regData);
+ break;
+ }
+ mvOsDelay(1);
+ mDelay++;
+
+ /* Check port RX Command register that all Rx queues are stopped */
+ regData = MV_REG_READ(ETH_RX_QUEUE_COMMAND_REG(ethPortNum));
+ }
+ while(regData & 0xFF);
+
+ /* Wait for all Tx activity to terminate. */
+ mDelay = 0;
+ do
+ {
+ if(mDelay >= TX_DISABLE_TIMEOUT_MSEC)
+ {
+ mvOsPrintf("ethPort_%d: TIMEOUT for TX stopped !!! txQueueCmd - 0x%08x\n",
+ ethPortNum, regData);
+ break;
+ }
+ mvOsDelay(1);
+ mDelay++;
+
+ /* Check port TX Command register that all Tx queues are stopped */
+ regData = MV_REG_READ(ETH_TX_QUEUE_COMMAND_REG(ethPortNum));
+ }
+ while(regData & 0xFF);
+
+ /* Double check to Verify that TX FIFO is Empty */
+ mDelay = 0;
+ while(MV_TRUE)
+ {
+ do
+ {
+ if(mDelay >= TX_FIFO_EMPTY_TIMEOUT_MSEC)
+ {
+ mvOsPrintf("\n ethPort_%d: TIMEOUT for TX FIFO empty !!! portStatus - 0x%08x\n",
+ ethPortNum, regData);
+ break;
+ }
+ mvOsDelay(1);
+ mDelay++;
+
+ regData = MV_REG_READ(ETH_PORT_STATUS_REG(ethPortNum));
+ }
+ while( ((regData & ETH_TX_FIFO_EMPTY_MASK) == 0) ||
+ ((regData & ETH_TX_IN_PROGRESS_MASK) != 0) );
+
+ if(mDelay >= TX_FIFO_EMPTY_TIMEOUT_MSEC)
+ break;
+
+ /* Double check */
+ regData = MV_REG_READ(ETH_PORT_STATUS_REG(ethPortNum));
+ if( ((regData & ETH_TX_FIFO_EMPTY_MASK) != 0) &&
+ ((regData & ETH_TX_IN_PROGRESS_MASK) == 0) )
+ {
+ break;
+ }
+ else
+ mvOsPrintf("ethPort_%d: TX FIFO Empty double check failed. %d msec, portStatus=0x%x\n",
+ ethPortNum, mDelay, regData);
+ }
+
+ /* Do NOT force link down */
+/*
+ regData = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(ethPortNum));
+ regData |= (ETH_DO_NOT_FORCE_LINK_FAIL_MASK);
+ MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(ethPortNum), regData);
+*/
+ /* Wait about 2500 tclk cycles */
+ uDelay = (PORT_DISABLE_WAIT_TCLOCKS/(mvBoardTclkGet()/1000000));
+ mvOsUDelay(uDelay);
+
+ pPortCtrl->portState = MV_PAUSED;
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* ethPortEnable - Enable the Ethernet port and Start RX and TX.
+*
+* DESCRIPTION:
+* This routine enables the Ethernet port and starts Rx and Tx activity.
+*
+* Note: Each Rx and Tx queue descriptor's list must be initialized prior
+* to calling this function (use etherInitTxDescRing for Tx queues and
+* etherInitRxDescRing for Rx queues).
+*
+* INPUT:
+* void* pEthPortHndl - Ethernet port handler
+*
+* RETURN: MV_STATUS
+* MV_OK - Success, Others - Failure.
+*
+* NOTE: main usage is to enable the port after ifconfig up.
+*******************************************************************************/
+MV_STATUS mvEthPortEnable(void* pEthPortHndl)
+{
+ int ethPortNo;
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+ MV_U32 portSerialCtrlReg;
+
+ ethPortNo = pPortCtrl->portNo;
+
+ /* Enable port */
+ portSerialCtrlReg = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(ethPortNo));
+ portSerialCtrlReg |= (ETH_DO_NOT_FORCE_LINK_FAIL_MASK | ETH_PORT_ENABLE_MASK);
+
+ MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(ethPortNo), portSerialCtrlReg);
+
+ mvEthMibCountersClear(pEthPortHndl);
+
+ pPortCtrl->portState = MV_PAUSED;
+
+ /* If Link is UP, Start RX and TX traffic */
+ if( MV_REG_READ( ETH_PORT_STATUS_REG(ethPortNo) ) & ETH_LINK_UP_MASK)
+ return( mvEthPortUp(pEthPortHndl) );
+
+ return MV_NOT_READY;
+}
+
+
+/*******************************************************************************
+* mvEthPortDisable - Stop RX and TX activities and Disable the Ethernet port.
+*
+* DESCRIPTION:
+*
+* INPUT:
+* void* pEthPortHndl - Ethernet port handler
+*
+* RETURN: MV_STATUS
+* MV_OK - Success, Others - Failure.
+*
+* NOTE: main usage is to disable the port after ifconfig down.
+*******************************************************************************/
+MV_STATUS mvEthPortDisable(void* pEthPortHndl)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+ int ethPortNum = pPortCtrl->portNo;
+ unsigned int regData;
+ volatile int mvDelay;
+
+ if(pPortCtrl->portState == MV_ACTIVE)
+ {
+ /* Stop RX and TX activities */
+ mvEthPortDown(pEthPortHndl);
+ }
+
+ /* Reset the Enable bit in the Serial Control Register */
+ regData = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(ethPortNum));
+ regData &= ~(ETH_PORT_ENABLE_MASK);
+ MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(ethPortNum), regData);
+
+ /* Wait about 2500 tclk cycles */
+ mvDelay = (PORT_DISABLE_WAIT_TCLOCKS*(mvCpuPclkGet()/mvBoardTclkGet()));
+ for(mvDelay; mvDelay>0; mvDelay--);
+
+ pPortCtrl->portState = MV_IDLE;
+ return MV_OK;
+}
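+
+/*
+ * Port state summary (as implemented by the routines above):
+ *
+ *     MV_IDLE   --mvEthPortEnable()-->  MV_PAUSED --mvEthPortUp()-->      MV_ACTIVE
+ *     MV_ACTIVE --mvEthPortDown()---->  MV_PAUSED --mvEthPortDisable()--> MV_IDLE
+ *
+ * mvEthPortEnable() calls mvEthPortUp() by itself when the link is already up.
+ */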
+
+/*******************************************************************************
+* mvEthPortForceTxDone - Get next buffer from TX queue in spite of buffer ownership.
+*
+* DESCRIPTION:
+* This routine is used to free buffers attached to the Tx ring and should
+* be called only when the Giga Ethernet port is down.
+*
+* INPUT:
+* void* pEthPortHndl - Ethernet Port handler.
+* int txQueue - Number of TX queue.
+*
+* OUTPUT:
+* MV_PKT_INFO *pPktInfo - Pointer to the packet that was sent.
+*
+* RETURN:
+* NULL - There are no more buffers in this queue.
+* Otherwise - Pointer to the detached MV_PKT_INFO structure, with its
+* status field filled with relevant information.
+*
+*******************************************************************************/
+MV_PKT_INFO* mvEthPortForceTxDone(void* pEthPortHndl, int txQueue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+ ETH_QUEUE_CTRL* pQueueCtrl;
+ MV_PKT_INFO* pPktInfo;
+ ETH_TX_DESC* pTxDesc;
+ int port = pPortCtrl->portNo;
+
+ pQueueCtrl = &pPortCtrl->txQueue[txQueue];
+
+ while( (pQueueCtrl->pUsedDescr != pQueueCtrl->pCurrentDescr) ||
+ (pQueueCtrl->resource == 0) )
+ {
+ /* Free next descriptor */
+ pQueueCtrl->resource++;
+ pTxDesc = (ETH_TX_DESC*)pQueueCtrl->pUsedDescr;
+
+ /* pPktInfo is available only in descriptors which are last descriptors */
+ pPktInfo = (MV_PKT_INFO*)pTxDesc->returnInfo;
+ if (pPktInfo)
+ pPktInfo->status = pTxDesc->cmdSts;
+
+ pTxDesc->cmdSts = 0x0;
+ pTxDesc->returnInfo = 0x0;
+ ETH_DESCR_FLUSH_INV(pPortCtrl, pTxDesc);
+
+ pQueueCtrl->pUsedDescr = TX_NEXT_DESC_PTR(pTxDesc, pQueueCtrl);
+
+ if (pPktInfo)
+ if (pPktInfo->status & ETH_TX_LAST_DESC_MASK)
+ return pPktInfo;
+ }
+ MV_REG_WRITE( ETH_TX_CUR_DESC_PTR_REG(port, txQueue),
+ (MV_U32)ethDescVirtToPhy(pQueueCtrl, pQueueCtrl->pCurrentDescr) );
+ return NULL;
+}
+
+
+
+/*******************************************************************************
+* mvEthPortForceRx - Get next buffer from RX queue in spite of buffer ownership.
+*
+* DESCRIPTION:
+* This routine is used to free buffers attached to the Rx ring and should
+* be called only when the Giga Ethernet port is down.
+*
+* INPUT:
+* void* pEthPortHndl - Ethernet Port handler.
+* int rxQueue - Number of Rx queue.
+*
+* OUTPUT:
+* MV_PKT_INFO *pPktInfo - Pointer to received packet.
+*
+* RETURN:
+* NULL - There are no more buffers in this queue.
+* Otherwise - Pointer to the detached MV_PKT_INFO structure, with its
+* status field filled with relevant information.
+*
+*******************************************************************************/
+MV_PKT_INFO* mvEthPortForceRx(void* pEthPortHndl, int rxQueue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+ ETH_QUEUE_CTRL* pQueueCtrl;
+ ETH_RX_DESC* pRxDesc;
+ MV_PKT_INFO* pPktInfo;
+ int port = pPortCtrl->portNo;
+
+ pQueueCtrl = &pPortCtrl->rxQueue[rxQueue];
+
+ if(pQueueCtrl->resource == 0)
+ {
+ MV_REG_WRITE( ETH_RX_CUR_DESC_PTR_REG(port, rxQueue),
+ (MV_U32)ethDescVirtToPhy(pQueueCtrl, pQueueCtrl->pCurrentDescr) );
+
+ return NULL;
+ }
+ /* Free next descriptor */
+ pQueueCtrl->resource--;
+ pRxDesc = (ETH_RX_DESC*)pQueueCtrl->pCurrentDescr;
+ pPktInfo = (MV_PKT_INFO*)pRxDesc->returnInfo;
+
+ pPktInfo->status = pRxDesc->cmdSts;
+ pRxDesc->cmdSts = 0x0;
+ pRxDesc->returnInfo = 0x0;
+ ETH_DESCR_FLUSH_INV(pPortCtrl, pRxDesc);
+
+ pQueueCtrl->pCurrentDescr = RX_NEXT_DESC_PTR(pRxDesc, pQueueCtrl);
+ return pPktInfo;
+}
+
+
+/******************************************************************************/
+/* Port Configuration functions */
+/******************************************************************************/
+/*******************************************************************************
+* mvEthMruGet - Get MRU configuration for Max Rx packet size.
+*
+* INPUT:
+* MV_U32 maxRxPktSize - max packet size.
+*
+* RETURN: MV_U32 - MRU configuration.
+*
+*******************************************************************************/
+static MV_U32 mvEthMruGet(MV_U32 maxRxPktSize)
+{
+ MV_U32 portSerialCtrlReg = 0;
+
+ if(maxRxPktSize > 9192)
+ portSerialCtrlReg |= ETH_MAX_RX_PACKET_9700BYTE;
+ else if(maxRxPktSize > 9022)
+ portSerialCtrlReg |= ETH_MAX_RX_PACKET_9192BYTE;
+ else if(maxRxPktSize > 1552)
+ portSerialCtrlReg |= ETH_MAX_RX_PACKET_9022BYTE;
+ else if(maxRxPktSize > 1522)
+ portSerialCtrlReg |= ETH_MAX_RX_PACKET_1552BYTE;
+ else if(maxRxPktSize > 1518)
+ portSerialCtrlReg |= ETH_MAX_RX_PACKET_1522BYTE;
+ else
+ portSerialCtrlReg |= ETH_MAX_RX_PACKET_1518BYTE;
+
+ return portSerialCtrlReg;
+}
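+
+/*
+ * Example: a maxRxPktSize of 1536 bytes falls into the (1522, 1552] range
+ * above, so mvEthMruGet() returns ETH_MAX_RX_PACKET_1552BYTE and the port
+ * MRU is rounded up to 1552 bytes.
+ */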
+
+/*******************************************************************************
+* mvEthRxCoalSet - Sets coalescing interrupt mechanism on RX path
+*
+* DESCRIPTION:
+* This routine sets the RX coalescing interrupt mechanism parameter.
+* This parameter is a timeout counter that counts in chunks of 64 tClk
+* cycles; when the timeout expires, a maskable interrupt is asserted.
+* The parameter is calculated from the tCLK frequency of the
+* MV-64xxx chip, and the required value is given in microseconds.
+*
+* INPUT:
+* void* pPortHndl - Ethernet Port handler.
+* MV_U32 uSec - Number of microseconds between
+* RX interrupts
+*
+* RETURN:
+* MV_U32 - Coalescing value written to the register (in units of 64 tClk).
+*
+* COMMENT:
+* 1 sec - TCLK_RATE clocks
+* 1 uSec - TCLK_RATE / 1,000,000 clocks
+*
+* Register Value for N microseconds = (N * (TCLK_RATE / 1,000,000)) / 64
+*
+*******************************************************************************/
+MV_U32 mvEthRxCoalSet (void* pPortHndl, MV_U32 uSec)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+ MV_U32 coal = ((uSec * (mvBoardTclkGet() / 1000000)) / 64);
+ MV_U32 portSdmaCfgReg;
+
+ portSdmaCfgReg = MV_REG_READ(ETH_SDMA_CONFIG_REG(pPortCtrl->portNo));
+ portSdmaCfgReg &= ~ETH_RX_INTR_COAL_ALL_MASK;
+
+ portSdmaCfgReg |= ETH_RX_INTR_COAL_MASK(coal);
+
+#if (MV_ETH_VERSION >= 2)
+ /* Set additional bit if needed ETH_RX_INTR_COAL_MSB_BIT (25) */
+ if(ETH_RX_INTR_COAL_MASK(coal) > ETH_RX_INTR_COAL_ALL_MASK)
+ portSdmaCfgReg |= ETH_RX_INTR_COAL_MSB_MASK;
+#endif /* MV_ETH_VERSION >= 2 */
+
+ MV_REG_WRITE (ETH_SDMA_CONFIG_REG(pPortCtrl->portNo), portSdmaCfgReg);
+ return coal;
+}
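+
+/*
+ * Example (assuming a 166MHz TCLK, i.e. mvBoardTclkGet() == 166666667):
+ * mvEthRxCoalSet(pPortHndl, 100) yields
+ *     coal = (100 * (166666667 / 1000000)) / 64 = (100 * 166) / 64 = 259,
+ * so an RX interrupt is delayed by about 259 * 64 tClk cycles (~100us).
+ */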
+
+/*******************************************************************************
+* mvEthTxCoalSet - Sets coalescing interrupt mechanism on TX path
+*
+* DESCRIPTION:
+* This routine sets the TX coalescing interrupt mechanism parameter.
+* This parameter is a timeout counter that counts in chunks of 64 tClk
+* cycles; when the timeout expires, a maskable interrupt is asserted.
+* The parameter is calculated from the tCLK frequency of the
+* MV-64xxx chip, and the required value is given in microseconds.
+*
+* INPUT:
+* void* pPortHndl - Ethernet Port handler.
+* MV_U32 uSec - Number of microseconds between
+* TX interrupts
+*
+* RETURN:
+* MV_U32 - Coalescing value written to the register (in units of 64 tClk).
+*
+* COMMENT:
+* 1 sec - TCLK_RATE clocks
+* 1 uSec - TCLK_RATE / 1,000,000 clocks
+*
+* Register Value for N microseconds = (N * (TCLK_RATE / 1,000,000)) / 64
+*
+*******************************************************************************/
+MV_U32 mvEthTxCoalSet(void* pPortHndl, MV_U32 uSec)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+ MV_U32 coal = ((uSec * (mvBoardTclkGet() / 1000000)) / 64);
+ MV_U32 regVal;
+
+ regVal = MV_REG_READ(ETH_TX_FIFO_URGENT_THRESH_REG(pPortCtrl->portNo));
+ regVal &= ~ETH_TX_INTR_COAL_ALL_MASK;
+ regVal |= ETH_TX_INTR_COAL_MASK(coal);
+
+ /* Set TX Coalescing mechanism */
+ MV_REG_WRITE (ETH_TX_FIFO_URGENT_THRESH_REG(pPortCtrl->portNo), regVal);
+ return coal;
+}
+
+/*******************************************************************************
+* mvEthCoalGet - Gets RX and TX coalescing values in micro seconds
+*
+* DESCRIPTION:
+* This routine gets the RX and TX coalescing interrupt values.
+* The parameter is calculated using the tCLK frequency of the
+* MV-64xxx chip, and the returned numbers are in microseconds.
+*
+* INPUTs:
+* void* pPortHndl - Ethernet Port handler.
+*
+* OUTPUTs:
+* MV_U32* pRxCoal - Number of microseconds between RX interrupts
+* MV_U32* pTxCoal - Number of microseconds between TX interrupts
+*
+* RETURN:
+* MV_STATUS MV_OK - success
+* Others - failure.
+*
+* COMMENT:
+* 1 sec - TCLK_RATE clocks
+* 1 uSec - TCLK_RATE / 1,000,000 clocks
+*
+* Register Value for N microseconds = (N * (TCLK_RATE / 1,000,000)) / 64
+*
+*******************************************************************************/
+MV_STATUS mvEthCoalGet(void* pPortHndl, MV_U32* pRxCoal, MV_U32* pTxCoal)
+{
+ MV_U32 regVal, coal, usec;
+
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+
+ /* get TX Coalescing */
+ regVal = MV_REG_READ (ETH_TX_FIFO_URGENT_THRESH_REG(pPortCtrl->portNo));
+ coal = ((regVal & ETH_TX_INTR_COAL_ALL_MASK) >> ETH_TX_INTR_COAL_OFFSET);
+
+ usec = (coal * 64) / (mvBoardTclkGet() / 1000000);
+ if(pTxCoal != NULL)
+ *pTxCoal = usec;
+
+ /* Get RX Coalescing */
+ regVal = MV_REG_READ(ETH_SDMA_CONFIG_REG(pPortCtrl->portNo));
+ coal = ((regVal & ETH_RX_INTR_COAL_ALL_MASK) >> ETH_RX_INTR_COAL_OFFSET);
+
+#if (MV_ETH_VERSION >= 2)
+ if(regVal & ETH_RX_INTR_COAL_MSB_MASK)
+ {
+ /* Add MSB */
+ coal |= (ETH_RX_INTR_COAL_ALL_MASK + 1);
+ }
+#endif /* MV_ETH_VERSION >= 2 */
+
+ usec = (coal * 64) / (mvBoardTclkGet() / 1000000);
+ if(pRxCoal != NULL)
+ *pRxCoal = usec;
+
+ return MV_OK;
+}
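+
+/*
+ * Note (same 166MHz TCLK assumption as the example above): reading back a
+ * coal value of 259 gives usec = (259 * 64) / 166 = 99, so a round trip
+ * through mvEthRxCoalSet()/mvEthCoalGet() may lose a microsecond to the
+ * integer arithmetic.
+ */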
+
+/*******************************************************************************
+* mvEthMaxRxSizeSet -
+*
+* DESCRIPTION:
+* Change the maximum receive size of the port. The new size is written to the
+* Port Serial Control register immediately and is also reapplied by the next
+* call to mvEthDefaultsSet().
+*
+* INPUT:
+*
+* RETURN:
+*******************************************************************************/
+MV_STATUS mvEthMaxRxSizeSet(void* pPortHndl, int maxRxSize)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+ MV_U32 portSerialCtrlReg;
+
+ if((maxRxSize < 1518) || (maxRxSize & ~ETH_RX_BUFFER_MASK))
+ return MV_BAD_PARAM;
+
+ pPortCtrl->portConfig.maxRxPktSize = maxRxSize;
+
+ portSerialCtrlReg = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(pPortCtrl->portNo));
+ portSerialCtrlReg &= ~ETH_MAX_RX_PACKET_SIZE_MASK;
+ portSerialCtrlReg |= mvEthMruGet(pPortCtrl->portConfig.maxRxPktSize);
+ MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(pPortCtrl->portNo), portSerialCtrlReg);
+
+ return MV_OK;
+}
+
+
+/******************************************************************************/
+/* MAC Filtering functions */
+/******************************************************************************/
+
+/*******************************************************************************
+* mvEthRxFilterModeSet - Configure Filtering mode of Ethernet port
+*
+* DESCRIPTION:
+* This routine configures the RX filtering mode of the Ethernet port:
+* either promiscuous or filtered by the MAC address tables.
+*
+* INPUT:
+* void* pEthPortHndl - Ethernet Port handler.
+* MV_BOOL isPromisc - Promiscuous mode
+* MV_TRUE - accept all Broadcast, Multicast
+* and Unicast packets
+* MV_FALSE - accept all Broadcast,
+* specially added Multicast and
+* single Unicast packets
+*
+* RETURN: MV_STATUS MV_OK - Success, Other - Failure
+*
+*******************************************************************************/
+MV_STATUS mvEthRxFilterModeSet(void* pEthPortHndl, MV_BOOL isPromisc)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+ int queue;
+ MV_U32 portCfgReg;
+
+ portCfgReg = MV_REG_READ(ETH_PORT_CONFIG_REG(pPortCtrl->portNo));
+ /* Set / Clear UPM bit in port configuration register */
+ if(isPromisc)
+ {
+ /* Accept all multicast packets to RX default queue */
+ queue = pPortCtrl->portConfig.rxDefQ;
+ portCfgReg |= ETH_UNICAST_PROMISCUOUS_MODE_MASK;
+ memset(pPortCtrl->mcastCount, 1, sizeof(pPortCtrl->mcastCount));
+ MV_REG_WRITE(ETH_MAC_ADDR_LOW_REG(pPortCtrl->portNo),0xFFFF);
+ MV_REG_WRITE(ETH_MAC_ADDR_HIGH_REG(pPortCtrl->portNo),0xFFFFFFFF);
+ }
+ else
+ {
+ /* Reject all Multicast addresses */
+ queue = -1;
+ portCfgReg &= ~ETH_UNICAST_PROMISCUOUS_MODE_MASK;
+ /* Clear all mcastCount */
+ memset(pPortCtrl->mcastCount, 0, sizeof(pPortCtrl->mcastCount));
+ }
+ MV_REG_WRITE(ETH_PORT_CONFIG_REG(pPortCtrl->portNo), portCfgReg);
+
+ /* Set Special Multicast and Other Multicast tables */
+ mvEthSetSpecialMcastTable(pPortCtrl->portNo, queue);
+ mvEthSetOtherMcastTable(pPortCtrl->portNo, queue);
+ ethSetUcastTable(pPortCtrl->portNo, queue);
+
+ return MV_OK;
+}
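+
+/*
+ * Usage summary: mvEthRxFilterModeSet(pPortHndl, MV_TRUE) sets the UPM bit,
+ * programs the MAC address registers to ff:ff:ff:ff:ff:ff and points every
+ * entry of the Unicast, Special Multicast and Other Multicast tables at
+ * rxDefQ; mvEthRxFilterModeSet(pPortHndl, MV_FALSE) clears the UPM bit and
+ * rejects all table entries again (queue == -1), so the port unicast address
+ * must be re-added with mvEthMacAddrSet() afterwards.
+ */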
+
+/*******************************************************************************
+* mvEthMacAddrSet - This function sets the port Unicast address.
+*
+* DESCRIPTION:
+* This function sets the port Ethernet MAC address. This address
+* will be used to send Pause frames if enabled. Packets with this
+* address will be accepted and dispatched to the specified RX queue.
+*
+* INPUT:
+* void* pPortHndl - Ethernet port handler.
+* unsigned char* pAddr - Address to be set
+* int queue - RX queue to accept frames with this address
+* ("-1" means only remove the address from the table)
+*
+* RETURN: MV_STATUS
+* MV_OK - Success, Other - Failure
+*
+*******************************************************************************/
+MV_STATUS mvEthMacAddrSet(void* pPortHndl, unsigned char *pAddr, int queue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+ unsigned int macH;
+ unsigned int macL;
+
+ if(queue >= MV_ETH_RX_Q_NUM)
+ {
+ mvOsPrintf("ethDrv: RX queue #%d is out of range\n", queue);
+ return MV_BAD_PARAM;
+ }
+
+ if(queue != -1)
+ {
+ macL = (pAddr[4] << 8) | (pAddr[5]);
+ macH = (pAddr[0] << 24)| (pAddr[1] << 16) |
+ (pAddr[2] << 8) | (pAddr[3] << 0);
+
+ MV_REG_WRITE(ETH_MAC_ADDR_LOW_REG(pPortCtrl->portNo), macL);
+ MV_REG_WRITE(ETH_MAC_ADDR_HIGH_REG(pPortCtrl->portNo), macH);
+ }
+
+ /* Accept frames of this address */
+ ethSetUcastAddr(pPortCtrl->portNo, pAddr[5], queue);
+
+ return MV_OK;
+}
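+
+/*
+ * Example: for the address 00:50:43:12:34:56 and queue 0 this writes
+ * macH = 0x00504312 and macL = 0x3456, then enables the Unicast DA-Filter
+ * entry selected by the last nibble (0x6), so frames to this address are
+ * accepted on RX queue 0.
+ */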
+
+/*******************************************************************************
+* mvEthMacAddrGet - This function returns the port Unicast address.
+*
+* DESCRIPTION:
+* This function returns the port Ethernet MAC address.
+*
+* INPUT:
+* int portNo - Ethernet port number.
+* char* pAddr - Pointer where address will be written to
+*
+* RETURN: MV_STATUS
+* MV_OK - Success, Other - Failure
+*
+*******************************************************************************/
+MV_STATUS mvEthMacAddrGet(int portNo, unsigned char *pAddr)
+{
+ unsigned int macH;
+ unsigned int macL;
+
+ if(pAddr == NULL)
+ {
+ mvOsPrintf("mvEthMacAddrGet: NULL pointer.\n");
+ return MV_BAD_PARAM;
+ }
+
+ macH = MV_REG_READ(ETH_MAC_ADDR_HIGH_REG(portNo));
+ macL = MV_REG_READ(ETH_MAC_ADDR_LOW_REG(portNo));
+ pAddr[0] = (macH >> 24) & 0xff;
+ pAddr[1] = (macH >> 16) & 0xff;
+ pAddr[2] = (macH >> 8) & 0xff;
+ pAddr[3] = macH & 0xff;
+ pAddr[4] = (macL >> 8) & 0xff;
+ pAddr[5] = macL & 0xff;
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthMcastCrc8Get - Calculate CRC8 of MAC address.
+*
+* DESCRIPTION:
+*
+* INPUT:
+* MV_U8* pAddr - Address to calculate CRC-8
+*
+* RETURN: MV_U8 - CRC-8 of this MAC address
+*
+*******************************************************************************/
+MV_U8 mvEthMcastCrc8Get(MV_U8* pAddr)
+{
+ unsigned int macH;
+ unsigned int macL;
+ int macArray[48];
+ int crc[8];
+ int i;
+ unsigned char crcResult = 0;
+
+ /* Calculate CRC-8 out of the given address */
+ macH = (pAddr[0] << 8) | (pAddr[1]);
+ macL = (pAddr[2] << 24)| (pAddr[3] << 16) |
+ (pAddr[4] << 8) | (pAddr[5] << 0);
+
+ for(i=0; i<32; i++)
+ macArray[i] = (macL >> i) & 0x1;
+
+ for(i=32; i<48; i++)
+ macArray[i] = (macH >> (i - 32)) & 0x1;
+
+ crc[0] = macArray[45] ^ macArray[43] ^ macArray[40] ^ macArray[39] ^
+ macArray[35] ^ macArray[34] ^ macArray[31] ^ macArray[30] ^
+ macArray[28] ^ macArray[23] ^ macArray[21] ^ macArray[19] ^
+ macArray[18] ^ macArray[16] ^ macArray[14] ^ macArray[12] ^
+ macArray[8] ^ macArray[7] ^ macArray[6] ^ macArray[0];
+
+ crc[1] = macArray[46] ^ macArray[45] ^ macArray[44] ^ macArray[43] ^
+ macArray[41] ^ macArray[39] ^ macArray[36] ^ macArray[34] ^
+ macArray[32] ^ macArray[30] ^ macArray[29] ^ macArray[28] ^
+ macArray[24] ^ macArray[23] ^ macArray[22] ^ macArray[21] ^
+ macArray[20] ^ macArray[18] ^ macArray[17] ^ macArray[16] ^
+ macArray[15] ^ macArray[14] ^ macArray[13] ^ macArray[12] ^
+ macArray[9] ^ macArray[6] ^ macArray[1] ^ macArray[0];
+
+ crc[2] = macArray[47] ^ macArray[46] ^ macArray[44] ^ macArray[43] ^
+ macArray[42] ^ macArray[39] ^ macArray[37] ^ macArray[34] ^
+ macArray[33] ^ macArray[29] ^ macArray[28] ^ macArray[25] ^
+ macArray[24] ^ macArray[22] ^ macArray[17] ^ macArray[15] ^
+ macArray[13] ^ macArray[12] ^ macArray[10] ^ macArray[8] ^
+ macArray[6] ^ macArray[2] ^ macArray[1] ^ macArray[0];
+
+ crc[3] = macArray[47] ^ macArray[45] ^ macArray[44] ^ macArray[43] ^
+ macArray[40] ^ macArray[38] ^ macArray[35] ^ macArray[34] ^
+ macArray[30] ^ macArray[29] ^ macArray[26] ^ macArray[25] ^
+ macArray[23] ^ macArray[18] ^ macArray[16] ^ macArray[14] ^
+ macArray[13] ^ macArray[11] ^ macArray[9] ^ macArray[7] ^
+ macArray[3] ^ macArray[2] ^ macArray[1];
+
+ crc[4] = macArray[46] ^ macArray[45] ^ macArray[44] ^ macArray[41] ^
+ macArray[39] ^ macArray[36] ^ macArray[35] ^ macArray[31] ^
+ macArray[30] ^ macArray[27] ^ macArray[26] ^ macArray[24] ^
+ macArray[19] ^ macArray[17] ^ macArray[15] ^ macArray[14] ^
+ macArray[12] ^ macArray[10] ^ macArray[8] ^ macArray[4] ^
+ macArray[3] ^ macArray[2];
+
+ crc[5] = macArray[47] ^ macArray[46] ^ macArray[45] ^ macArray[42] ^
+ macArray[40] ^ macArray[37] ^ macArray[36] ^ macArray[32] ^
+ macArray[31] ^ macArray[28] ^ macArray[27] ^ macArray[25] ^
+ macArray[20] ^ macArray[18] ^ macArray[16] ^ macArray[15] ^
+ macArray[13] ^ macArray[11] ^ macArray[9] ^ macArray[5] ^
+ macArray[4] ^ macArray[3];
+
+ crc[6] = macArray[47] ^ macArray[46] ^ macArray[43] ^ macArray[41] ^
+ macArray[38] ^ macArray[37] ^ macArray[33] ^ macArray[32] ^
+ macArray[29] ^ macArray[28] ^ macArray[26] ^ macArray[21] ^
+ macArray[19] ^ macArray[17] ^ macArray[16] ^ macArray[14] ^
+ macArray[12] ^ macArray[10] ^ macArray[6] ^ macArray[5] ^
+ macArray[4];
+
+ crc[7] = macArray[47] ^ macArray[44] ^ macArray[42] ^ macArray[39] ^
+ macArray[38] ^ macArray[34] ^ macArray[33] ^ macArray[30] ^
+ macArray[29] ^ macArray[27] ^ macArray[22] ^ macArray[20] ^
+ macArray[18] ^ macArray[17] ^ macArray[15] ^ macArray[13] ^
+ macArray[11] ^ macArray[7] ^ macArray[6] ^ macArray[5];
+
+ for(i=0; i<8; i++)
+ crcResult = crcResult | (crc[i] << i);
+
+ return crcResult;
+}
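+
+/*
+ * The XOR network above computes the 8-bit CRC (polynomial x^8+x^2+x^1+1,
+ * see ethSetOtherMcastAddr() below) that is used as the index into the
+ * Other Multicast DA-Filter table.
+ */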
+/*******************************************************************************
+* mvEthMcastAddrSet - Multicast address settings.
+*
+* DESCRIPTION:
+* This API controls the MV device MAC multicast support.
+* The MV device supports multicast using two tables:
+* 1) Special Multicast Table for MAC addresses of the form
+* 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+* The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+* Table entries in the DA-Filter table.
+* In this case, the function calls ethPortSmcAddr() routine to set the
+* Special Multicast Table.
+* 2) Other Multicast Table for multicast of another type. A CRC-8bit
+* is used as an index to the Other Multicast Table entries in the
+* DA-Filter table.
+* In this case, the function calculates the CRC-8bit value and calls
+* ethPortOmcAddr() routine to set the Other Multicast Table.
+*
+* INPUT:
+* void* pEthPortHndl - Ethernet port handler.
+* MV_U8* pAddr - Address to be set
+* int queue - RX queue to capture all packets with this
+* Multicast MAC address.
+* -1 means delete this Multicast address.
+*
+* RETURN: MV_STATUS
+* MV_OK - Success, Other - Failure
+*
+*******************************************************************************/
+MV_STATUS mvEthMcastAddrSet(void* pPortHndl, MV_U8 *pAddr, int queue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+ unsigned char crcResult = 0;
+
+ if(queue >= MV_ETH_RX_Q_NUM)
+ {
+ mvOsPrintf("ethPort %d: RX queue #%d is out of range\n",
+ pPortCtrl->portNo, queue);
+ return MV_BAD_PARAM;
+ }
+
+ if((pAddr[0] == 0x01) &&
+ (pAddr[1] == 0x00) &&
+ (pAddr[2] == 0x5E) &&
+ (pAddr[3] == 0x00) &&
+ (pAddr[4] == 0x00))
+ {
+ ethSetSpecialMcastAddr(pPortCtrl->portNo, pAddr[5], queue);
+ }
+ else
+ {
+ crcResult = mvEthMcastCrc8Get(pAddr);
+
+ /* Check Add counter for this CRC value */
+ if(queue == -1)
+ {
+ if(pPortCtrl->mcastCount[crcResult] == 0)
+ {
+ mvOsPrintf("ethPort #%d: No valid Mcast for crc8=0x%02x\n",
+ pPortCtrl->portNo, (unsigned)crcResult);
+ return MV_NO_SUCH;
+ }
+
+ pPortCtrl->mcastCount[crcResult]--;
+ if(pPortCtrl->mcastCount[crcResult] != 0)
+ {
+ mvOsPrintf("ethPort #%d: After delete there are %d valid Mcast for crc8=0x%02x\n",
+ pPortCtrl->portNo, pPortCtrl->mcastCount[crcResult],
+ (unsigned)crcResult);
+ return MV_NO_CHANGE;
+ }
+ }
+ else
+ {
+ pPortCtrl->mcastCount[crcResult]++;
+ if(pPortCtrl->mcastCount[crcResult] > 1)
+ {
+ mvOsPrintf("ethPort #%d: Valid Mcast for crc8=0x%02x already exists\n",
+ pPortCtrl->portNo, (unsigned)crcResult);
+ return MV_NO_CHANGE;
+ }
+ }
+ ethSetOtherMcastAddr(pPortCtrl->portNo, crcResult, queue);
+ }
+ return MV_OK;
+}
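+
+/*
+ * Example: adding 01:00:5E:00:00:01 matches the 01-00-5E-00-00-XX pattern and
+ * lands in Special Multicast table entry 0x01, while 01:00:5E:01:02:03 is
+ * hashed with mvEthMcastCrc8Get() into the Other Multicast table
+ * (reference-counted via mcastCount[]); calling the function again with
+ * queue == -1 removes the address.
+ */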
+
+/*******************************************************************************
+* ethSetUcastTable - Unicast address settings.
+*
+* DESCRIPTION:
+* Set all entries in the Unicast MAC Table; queue==-1 means reject all.
+* INPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+static void ethSetUcastTable(int portNo, int queue)
+{
+ int offset;
+ MV_U32 regValue;
+
+ if(queue == -1)
+ {
+ regValue = 0;
+ }
+ else
+ {
+ regValue = (((0x01 | (queue<<1)) << 0) |
+ ((0x01 | (queue<<1)) << 8) |
+ ((0x01 | (queue<<1)) << 16) |
+ ((0x01 | (queue<<1)) << 24));
+ }
+
+ for (offset=0; offset<=0xC; offset+=4)
+ MV_REG_WRITE((ETH_DA_FILTER_UCAST_BASE(portNo) + offset), regValue);
+}
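+
+/*
+ * Example: for queue == 2 each table byte becomes 0x01 | (2 << 1) = 0x05
+ * ("accept to queue 2"), so regValue == 0x05050505 and the loop writes it to
+ * the four 32-bit registers that cover all 16 unicast nibble entries.
+ */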
+
+/*******************************************************************************
+* mvEthSetSpecialMcastTable - Special Multicast address settings.
+*
+* DESCRIPTION:
+* Set all entries to the Special Multicast MAC Table. queue==-1 means reject all
+* INPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID mvEthSetSpecialMcastTable(int portNo, int queue)
+{
+ int offset;
+ MV_U32 regValue;
+
+ if(queue == -1)
+ {
+ regValue = 0;
+ }
+ else
+ {
+ regValue = (((0x01 | (queue<<1)) << 0) |
+ ((0x01 | (queue<<1)) << 8) |
+ ((0x01 | (queue<<1)) << 16) |
+ ((0x01 | (queue<<1)) << 24));
+ }
+
+ for (offset=0; offset<=0xFC; offset+=4)
+ {
+ MV_REG_WRITE((ETH_DA_FILTER_SPEC_MCAST_BASE(portNo) +
+ offset), regValue);
+ }
+}
+
+/*******************************************************************************
+* mvEthSetOtherMcastTable - Other Multicast address settings.
+*
+* DESCRIPTION:
+* Set all entries to the Other Multicast MAC Table. queue==-1 means reject all
+* INPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID mvEthSetOtherMcastTable(int portNo, int queue)
+{
+ int offset;
+ MV_U32 regValue;
+
+ if(queue == -1)
+ {
+ regValue = 0;
+ }
+ else
+ {
+ regValue = (((0x01 | (queue<<1)) << 0) |
+ ((0x01 | (queue<<1)) << 8) |
+ ((0x01 | (queue<<1)) << 16) |
+ ((0x01 | (queue<<1)) << 24));
+ }
+
+ for (offset=0; offset<=0xFC; offset+=4)
+ {
+ MV_REG_WRITE((ETH_DA_FILTER_OTH_MCAST_BASE(portNo) +
+ offset), regValue);
+ }
+}
+
+/*******************************************************************************
+* ethSetUcastAddr - This function sets the port unicast address table
+*
+* DESCRIPTION:
+* This function locates the proper entry in the Unicast table for the
+* specified MAC nibble and sets its properties according to function
+* parameters.
+*
+* INPUT:
+* int ethPortNum - Port number.
+* MV_U8 lastNibble - Unicast MAC Address last nibble.
+* int queue - Rx queue number for this MAC address.
+* value "-1" means remove address
+*
+* OUTPUT:
+* This function add/removes MAC addresses from the port unicast address
+* table.
+*
+* RETURN:
+* MV_TRUE if output succeeded.
+* MV_FALSE if option parameter is invalid.
+*
+*******************************************************************************/
+static MV_BOOL ethSetUcastAddr(int portNo, MV_U8 lastNibble, int queue)
+{
+ unsigned int unicastReg;
+ unsigned int tblOffset;
+ unsigned int regOffset;
+
+ /* Locate the Unicast table entry */
+ lastNibble = (0xf & lastNibble);
+ tblOffset = (lastNibble / 4) * 4; /* Register offset from unicast table base*/
+ regOffset = lastNibble % 4; /* Entry offset within the above register */
+
+
+ unicastReg = MV_REG_READ( (ETH_DA_FILTER_UCAST_BASE(portNo) +
+ tblOffset));
+
+
+ if(queue == -1)
+ {
+ /* Clear accepts frame bit at specified unicast DA table entry */
+ unicastReg &= ~(0xFF << (8*regOffset));
+ }
+ else
+ {
+ unicastReg &= ~(0xFF << (8*regOffset));
+ unicastReg |= ((0x01 | (queue<<1)) << (8*regOffset));
+ }
+ MV_REG_WRITE( (ETH_DA_FILTER_UCAST_BASE(portNo) + tblOffset),
+ unicastReg);
+
+ return MV_TRUE;
+}
+
+/*******************************************************************************
+* ethSetSpecialMcastAddr - Special Multicast address settings.
+*
+* DESCRIPTION:
+* This routine controls the MV device special MAC multicast support.
+* The Special Multicast Table for MAC addresses supports MAC of the form
+* 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+* The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+* Table entries in the DA-Filter table.
+* This function sets the appropriate Special Multicast Table entry
+* according to the argument given.
+*
+* INPUT:
+* int ethPortNum Port number.
+* MV_U8 lastByte Multicast addr last byte (MAC DA[7:0] bits).
+* int queue Rx queue number for this MAC address.
+* value "-1" means remove address.
+*
+* OUTPUT:
+* See description.
+*
+* RETURN:
+* MV_TRUE if output succeeded.
+* MV_FALSE if option parameter is invalid.
+*
+*******************************************************************************/
+static MV_BOOL ethSetSpecialMcastAddr(int ethPortNum, MV_U8 lastByte, int queue)
+{
+ unsigned int smcTableReg;
+ unsigned int tblOffset;
+ unsigned int regOffset;
+
+ /* Locate the SMC table entry */
+ tblOffset = (lastByte / 4); /* Register offset from SMC table base */
+ regOffset = lastByte % 4; /* Entry offset within the above register */
+
+ smcTableReg = MV_REG_READ((ETH_DA_FILTER_SPEC_MCAST_BASE(ethPortNum) + tblOffset*4));
+
+ if(queue == -1)
+ {
+ /* Clear accepts frame bit at specified Special DA table entry */
+ smcTableReg &= ~(0xFF << (8 * regOffset));
+ }
+ else
+ {
+ smcTableReg &= ~(0xFF << (8 * regOffset));
+ smcTableReg |= ((0x01 | (queue<<1)) << (8 * regOffset));
+ }
+ MV_REG_WRITE((ETH_DA_FILTER_SPEC_MCAST_BASE(ethPortNum) +
+ tblOffset*4), smcTableReg);
+
+ return MV_TRUE;
+}
+
+/*******************************************************************************
+* ethSetOtherMcastAddr - Multicast address settings.
+*
+* DESCRIPTION:
+* This routine controls the MV device Other MAC multicast support.
+* The Other Multicast Table is used for multicast of another type.
+* A CRC-8bit is used as an index to the Other Multicast Table entries
+* in the DA-Filter table.
+* The function gets the CRC-8bit value from the calling routine and
+* sets the appropriate Other Multicast Table entry according to the
+* CRC-8 argument given.
+*
+* INPUT:
+* int ethPortNum Port number.
+* MV_U8 crc8 A CRC-8bit (Polynomial: x^8+x^2+x^1+1).
+* int queue Rx queue number for this MAC address.
+*
+* OUTPUT:
+* See description.
+*
+* RETURN:
+* MV_TRUE if output succeeded.
+* MV_FALSE if option parameter is invalid.
+*
+*******************************************************************************/
+static MV_BOOL ethSetOtherMcastAddr(int ethPortNum, MV_U8 crc8, int queue)
+{
+ unsigned int omcTableReg;
+ unsigned int tblOffset;
+ unsigned int regOffset;
+
+ /* Locate the OMC table entry */
+ tblOffset = (crc8 / 4) * 4; /* Register offset from OMC table base */
+ regOffset = crc8 % 4; /* Entry offset within the above register */
+
+ omcTableReg = MV_REG_READ(
+ (ETH_DA_FILTER_OTH_MCAST_BASE(ethPortNum) + tblOffset));
+
+ if(queue == -1)
+ {
+ /* Clear accepts frame bit at specified Other DA table entry */
+ omcTableReg &= ~(0xFF << (8 * regOffset));
+ }
+ else
+ {
+ omcTableReg &= ~(0xFF << (8 * regOffset));
+ omcTableReg |= ((0x01 | (queue<<1)) << (8 * regOffset));
+ }
+
+ MV_REG_WRITE((ETH_DA_FILTER_OTH_MCAST_BASE(ethPortNum) + tblOffset),
+ omcTableReg);
+
+ return MV_TRUE;
+}
+
+
+/******************************************************************************/
+/* MIB Counters functions */
+/******************************************************************************/
+
+
+/*******************************************************************************
+* mvEthMibCounterRead - Read a MIB counter
+*
+* DESCRIPTION:
+* This function reads a MIB counter of a specific ethernet port.
+* NOTE - A read from the ETH_MIB_GOOD_OCTETS_RECEIVED_LOW or
+* ETH_MIB_GOOD_OCTETS_SENT_LOW counters returns a 64-bit value,
+* so the pHigh32 pointer should not be NULL in this case.
+*
+* INPUT:
+* int ethPortNum - Ethernet Port number.
+* unsigned int mibOffset - MIB counter offset.
+*
+* OUTPUT:
+* MV_U32* pHigh32 - pointer to place where 32 most significant bits
+* of the counter will be stored.
+*
+* RETURN:
+* 32 least significant bits of the MIB counter value.
+*
+*******************************************************************************/
+MV_U32 mvEthMibCounterRead(void* pPortHandle, unsigned int mibOffset,
+ MV_U32* pHigh32)
+{
+ int portNo;
+ MV_U32 valLow32, valHigh32;
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+
+ portNo = pPortCtrl->portNo;
+
+ valLow32 = MV_REG_READ(ETH_MIB_COUNTERS_BASE(portNo) + mibOffset);
+
+ /* Implement FEr ETH. Erroneous Value when Reading the Upper 32-bits */
+ /* of a 64-bit MIB Counter. */
+ if( (mibOffset == ETH_MIB_GOOD_OCTETS_RECEIVED_LOW) ||
+ (mibOffset == ETH_MIB_GOOD_OCTETS_SENT_LOW) )
+ {
+ valHigh32 = MV_REG_READ(ETH_MIB_COUNTERS_BASE(portNo) + mibOffset + 4);
+ if(pHigh32 != NULL)
+ *pHigh32 = valHigh32;
+ }
+ return valLow32;
+}
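+
+/*
+ * Usage example: the 64-bit good-octets counters need both halves, e.g.
+ *
+ *     MV_U32 hi, lo;
+ *     lo = mvEthMibCounterRead(pPortHndl, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW, &hi);
+ *
+ * while the other (32-bit) counters may be read with pHigh32 == NULL.
+ */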
+
+/*******************************************************************************
+* mvEthMibCountersClear - Clear all MIB counters
+*
+* DESCRIPTION:
+* This function clears all MIB counters
+*
+* INPUT:
+* int ethPortNum - Ethernet Port number.
+*
+*
+* RETURN: void
+*
+*******************************************************************************/
+void mvEthMibCountersClear(void* pPortHandle)
+{
+ int i, portNo;
+ unsigned int dummy;
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+
+ portNo = pPortCtrl->portNo;
+
+ /* Perform dummy reads from MIB counters */
+ for(i=ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i<ETH_MIB_LATE_COLLISION; i+=4)
+ dummy = MV_REG_READ((ETH_MIB_COUNTERS_BASE(portNo) + i));
+}
+
+
+/******************************************************************************/
+/* RX Dispatching configuration routines */
+/******************************************************************************/
+
+int mvEthTosToRxqGet(void* pPortHandle, int tos)
+{
+ MV_U32 regValue;
+ int regIdx, regOffs, rxq;
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+
+ if(tos > 0xFF)
+ {
+ mvOsPrintf("eth_%d: tos=0x%x is out of range\n", pPortCtrl->portNo, tos);
+ return -1;
+ }
+ regIdx = mvOsDivide(tos>>2, 10);
+ regOffs = mvOsReminder(tos>>2, 10);
+
+ regValue = MV_REG_READ(ETH_DIFF_SERV_PRIO_REG(pPortCtrl->portNo, regIdx) );
+ rxq = (regValue >> (regOffs*3));
+ rxq &= 0x7;
+
+ return rxq;
+}
+
+/*******************************************************************************
+* mvEthTosToRxqSet - Map packets with special TOS value to special RX queue
+*
+* DESCRIPTION:
+*
+* INPUT:
+* void* pPortHandle - Pointer to port specific handler;
+* int tos - TOS value in the IP header of the packet
+* int rxq - RX Queue for packets with the configured TOS value
+* Negative value (-1) means no special processing for these packets,
+* so they will be processed as regular packets.
+*
+* RETURN: MV_STATUS
+*******************************************************************************/
+MV_STATUS mvEthTosToRxqSet(void* pPortHandle, int tos, int rxq)
+{
+ MV_U32 regValue;
+ int regIdx, regOffs;
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+
+ if( (rxq < 0) || (rxq >= MV_ETH_RX_Q_NUM) )
+ {
+ mvOsPrintf("eth_%d: RX queue #%d is out of range\n", pPortCtrl->portNo, rxq);
+ return MV_BAD_PARAM;
+ }
+ if(tos > 0xFF)
+ {
+ mvOsPrintf("eth_%d: tos=0x%x is out of range\n", pPortCtrl->portNo, tos);
+ return MV_BAD_PARAM;
+ }
+ regIdx = mvOsDivide(tos>>2, 10);
+ regOffs = mvOsReminder(tos>>2, 10);
+
+ regValue = MV_REG_READ(ETH_DIFF_SERV_PRIO_REG(pPortCtrl->portNo, regIdx) );
+ regValue &= ~(0x7 << (regOffs*3));
+ regValue |= (rxq << (regOffs*3));
+
+ MV_REG_WRITE(ETH_DIFF_SERV_PRIO_REG(pPortCtrl->portNo, regIdx), regValue);
+ return MV_OK;
+}
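+
+/*
+ * Example: for tos == 0xB8 (DSCP 46, "EF") the DSCP is tos >> 2 == 46, so
+ * regIdx == 4 and regOffs == 6; the chosen rxq is written to bits [20:18] of
+ * ETH_DIFF_SERV_PRIO_REG(port, 4), and mvEthTosToRxqGet() reads it back from
+ * the same field.
+ */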
+
+/*******************************************************************************
+* mvEthVlanPrioRxQueue - Configure RX queue to capture VLAN tagged packets with
+* special priority bits [0-2]
+*
+* DESCRIPTION:
+*
+* INPUT:
+* void* pPortHandle - Pointer to port specific handler;
+* int vlanPrio - VLAN tag priority field value [0..7];
+* int vlanPrioQueue - Special queue to capture VLAN tagged packets with this
+* priority.
+* Negative value (-1) means no special processing for these packets,
+* so they will be processed as regular packets.
+*
+* RETURN: MV_STATUS
+* MV_OK - Success
+* MV_FAIL - Failed.
+*
+*******************************************************************************/
+MV_STATUS mvEthVlanPrioRxQueue(void* pPortHandle, int vlanPrio, int vlanPrioQueue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+ MV_U32 vlanPrioReg;
+
+ if(vlanPrioQueue >= MV_ETH_RX_Q_NUM)
+ {
+ mvOsPrintf("ethDrv: RX queue #%d is out of range\n", vlanPrioQueue);
+ return MV_BAD_PARAM;
+ }
+ if(vlanPrio >= 8)
+ {
+ mvOsPrintf("ethDrv: vlanPrio=%d is out of range\n", vlanPrio);
+ return MV_BAD_PARAM;
+ }
+
+ vlanPrioReg = MV_REG_READ(ETH_VLAN_TAG_TO_PRIO_REG(pPortCtrl->portNo));
+ vlanPrioReg &= ~(0x7 << (vlanPrio*3));
+ vlanPrioReg |= (vlanPrioQueue << (vlanPrio*3));
+ MV_REG_WRITE(ETH_VLAN_TAG_TO_PRIO_REG(pPortCtrl->portNo), vlanPrioReg);
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvEthBpduRxQueue - Configure RX queue to capture BPDU packets.
+*
+* DESCRIPTION:
+* This function defines processing of BPDU packets.
+* BPDU packets can be accepted and captured to one of the RX queues
+* or can be processed as regular Multicast packets.
+*
+* INPUT:
+* void* pPortHandle - Pointer to port specific handler;
+* int bpduQueue - Special queue to capture BPDU packets (DA is equal to
+* 01-80-C2-00-00-00 through 01-80-C2-00-00-FF,
+* except for the Flow-Control Pause packets).
+* Negative value (-1) means no special processing for BPDU,
+* packets so they will be processed as regular Multicast packets.
+*
+* RETURN: MV_STATUS
+* MV_OK - Success
+* MV_FAIL - Failed.
+*
+*******************************************************************************/
+MV_STATUS mvEthBpduRxQueue(void* pPortHandle, int bpduQueue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+ MV_U32 portCfgReg;
+ MV_U32 portCfgExtReg;
+
+ if(bpduQueue >= MV_ETH_RX_Q_NUM)
+ {
+ mvOsPrintf("ethDrv: RX queue #%d is out of range\n", bpduQueue);
+ return MV_BAD_PARAM;
+ }
+
+ portCfgExtReg = MV_REG_READ(ETH_PORT_CONFIG_EXTEND_REG(pPortCtrl->portNo));
+
+ portCfgReg = MV_REG_READ(ETH_PORT_CONFIG_REG(pPortCtrl->portNo));
+ if(bpduQueue >= 0)
+ {
+ pPortCtrl->portConfig.rxBpduQ = bpduQueue;
+
+ portCfgReg &= ~ETH_DEF_RX_BPDU_QUEUE_ALL_MASK;
+ portCfgReg |= ETH_DEF_RX_BPDU_QUEUE_MASK(pPortCtrl->portConfig.rxBpduQ);
+
+ MV_REG_WRITE(ETH_PORT_CONFIG_REG(pPortCtrl->portNo), portCfgReg);
+
+ portCfgExtReg |= ETH_CAPTURE_SPAN_BPDU_ENABLE_MASK;
+ }
+ else
+ {
+ pPortCtrl->portConfig.rxBpduQ = -1;
+ /* no special processing for BPDU packets */
+ portCfgExtReg &= (~ETH_CAPTURE_SPAN_BPDU_ENABLE_MASK);
+ }
+
+ MV_REG_WRITE(ETH_PORT_CONFIG_EXTEND_REG(pPortCtrl->portNo), portCfgExtReg);
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvEthArpRxQueue - Configure RX queue to capture ARP packets.
+*
+* DESCRIPTION:
+* This function defines processing of ARP (type=0x0806) packets.
+* ARP packets can be accepted and captured to one of RX queues
+* or can be processed as other Broadcast packets.
+*
+* INPUT:
+* void* pPortHandle - Pointer to port specific handler;
+* int arpQueue - Special queue to capture ARP packets (type=0x806).
+* Negative value (-1) means discard ARP packets
+*
+* RETURN: MV_STATUS
+* MV_OK - Success
+* MV_FAIL - Failed.
+*
+*******************************************************************************/
+MV_STATUS mvEthArpRxQueue(void* pPortHandle, int arpQueue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+ MV_U32 portCfgReg;
+
+ if(arpQueue >= MV_ETH_RX_Q_NUM)
+ {
+ mvOsPrintf("ethDrv: RX queue #%d is out of range\n", arpQueue);
+ return MV_BAD_PARAM;
+ }
+
+ portCfgReg = MV_REG_READ(ETH_PORT_CONFIG_REG(pPortCtrl->portNo));
+
+ if(arpQueue >= 0)
+ {
+ pPortCtrl->portConfig.rxArpQ = arpQueue;
+ portCfgReg &= ~ETH_DEF_RX_ARP_QUEUE_ALL_MASK;
+ portCfgReg |= ETH_DEF_RX_ARP_QUEUE_MASK(pPortCtrl->portConfig.rxArpQ);
+
+ portCfgReg &= (~ETH_REJECT_ARP_BCAST_MASK);
+ }
+ else
+ {
+ pPortCtrl->portConfig.rxArpQ = -1;
+ portCfgReg |= ETH_REJECT_ARP_BCAST_MASK;
+ }
+
+ MV_REG_WRITE(ETH_PORT_CONFIG_REG(pPortCtrl->portNo), portCfgReg);
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvEthTcpRxQueue - Configure RX queue to capture TCP packets.
+*
+* DESCRIPTION:
+* This function defines processing of TCP packets.
+* TCP packets can be accepted and captured to one of RX queues
+* or can be processed as regular Unicast packets.
+*
+* INPUT:
+* void* pPortHandle - Pointer to port specific handler;
+* int tcpQueue - Special queue to capture TCP packets. Value "-1"
+* means no special processing for TCP packets,
+* so they will be processed as regular Unicast packets.
+*
+* RETURN: MV_STATUS
+* MV_OK - Success
+* MV_FAIL - Failed.
+*
+*******************************************************************************/
+MV_STATUS mvEthTcpRxQueue(void* pPortHandle, int tcpQueue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+ MV_U32 portCfgReg;
+
+ if(tcpQueue >= MV_ETH_RX_Q_NUM)
+ {
+ mvOsPrintf("ethDrv: RX queue #%d is out of range\n", tcpQueue);
+ return MV_BAD_PARAM;
+ }
+ portCfgReg = MV_REG_READ(ETH_PORT_CONFIG_REG(pPortCtrl->portNo));
+
+ if(tcpQueue >= 0)
+ {
+ pPortCtrl->portConfig.rxTcpQ = tcpQueue;
+ portCfgReg &= ~ETH_DEF_RX_TCP_QUEUE_ALL_MASK;
+ portCfgReg |= ETH_DEF_RX_TCP_QUEUE_MASK(pPortCtrl->portConfig.rxTcpQ);
+
+ portCfgReg |= ETH_CAPTURE_TCP_FRAMES_ENABLE_MASK;
+ }
+ else
+ {
+ pPortCtrl->portConfig.rxTcpQ = -1;
+ portCfgReg &= (~ETH_CAPTURE_TCP_FRAMES_ENABLE_MASK);
+ }
+
+ MV_REG_WRITE(ETH_PORT_CONFIG_REG(pPortCtrl->portNo), portCfgReg);
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvEthUdpRxQueue - Configure RX queue to capture UDP packets.
+*
+* DESCRIPTION:
+* This function defines processing of UDP packets.
+* UDP packets can be accepted and captured to one of the RX queues
+* or can be processed as regular Unicast packets.
+*
+* INPUT:
+* void* pPortHandle - Pointer to port specific handler;
+* int udpQueue - Special queue to capture UDP packets. Value "-1"
+* means no special processing for UDP packets,
+* so they will be processed as regular Unicast packets.
+*
+* RETURN: MV_STATUS
+* MV_OK - Success
+* MV_FAIL - Failed.
+*
+*******************************************************************************/
+MV_STATUS mvEthUdpRxQueue(void* pPortHandle, int udpQueue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+ MV_U32 portCfgReg;
+
+ if(udpQueue >= MV_ETH_RX_Q_NUM)
+ {
+ mvOsPrintf("ethDrv: RX queue #%d is out of range\n", udpQueue);
+ return MV_BAD_PARAM;
+ }
+
+ portCfgReg = MV_REG_READ(ETH_PORT_CONFIG_REG(pPortCtrl->portNo));
+
+ if(udpQueue >= 0)
+ {
+ pPortCtrl->portConfig.rxUdpQ = udpQueue;
+ portCfgReg &= ~ETH_DEF_RX_UDP_QUEUE_ALL_MASK;
+ portCfgReg |= ETH_DEF_RX_UDP_QUEUE_MASK(pPortCtrl->portConfig.rxUdpQ);
+
+ portCfgReg |= ETH_CAPTURE_UDP_FRAMES_ENABLE_MASK;
+ }
+ else
+ {
+ pPortCtrl->portConfig.rxUdpQ = -1;
+ portCfgReg &= ~ETH_CAPTURE_UDP_FRAMES_ENABLE_MASK;
+ }
+
+ MV_REG_WRITE(ETH_PORT_CONFIG_REG(pPortCtrl->portNo), portCfgReg);
+
+ return MV_OK;
+}
+
+
+/******************************************************************************/
+/* Speed, Duplex, FlowControl routines */
+/******************************************************************************/
+
+/*******************************************************************************
+* mvEthSpeedDuplexSet - Set Speed and Duplex of the port.
+*
+* DESCRIPTION:
+* This function configures the port to work with the desired Duplex and Speed.
+* Changing these parameters is allowed only when the port is disabled.
+* This function disables the port if it was enabled, changes the duplex and
+* speed, and re-enables the port if needed.
+*
+* INPUT:
+* void* pPortHandle - Pointer to port specific handler;
+* MV_ETH_PORT_SPEED speed - Speed of the port.
+* MV_ETH_PORT_DUPLEX duplex - Duplex of the port.
+*
+* RETURN: MV_STATUS
+* MV_OK - Success
+* MV_OUT_OF_RANGE - Failed. Port is out of valid range
+* MV_NOT_FOUND - Failed. Port is not initialized.
+* MV_BAD_PARAM - Input parameters (speed/duplex) in conflict.
+* MV_BAD_VALUE - Value of one of input parameters (speed, duplex)
+* is not valid
+*
+*******************************************************************************/
+MV_STATUS mvEthSpeedDuplexSet(void* pPortHandle, MV_ETH_PORT_SPEED speed,
+ MV_ETH_PORT_DUPLEX duplex)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+ int port = pPortCtrl->portNo;
+ MV_U32 portSerialCtrlReg;
+
+ if( (port < 0) || (port >= (int)mvCtrlEthMaxPortGet()) )
+ return MV_OUT_OF_RANGE;
+
+ pPortCtrl = ethPortCtrl[port];
+ if(pPortCtrl == NULL)
+ return MV_NOT_FOUND;
+
+ /* Check validity */
+ if( (speed == MV_ETH_SPEED_1000) && (duplex == MV_ETH_DUPLEX_HALF) )
+ return MV_BAD_PARAM;
+
+ portSerialCtrlReg = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(port));
+ /* Set Speed */
+ switch(speed)
+ {
+ case MV_ETH_SPEED_AN:
+ portSerialCtrlReg &= ~ETH_DISABLE_SPEED_AUTO_NEG_MASK;
+ break;
+
+ case MV_ETH_SPEED_10:
+ portSerialCtrlReg |= ETH_DISABLE_SPEED_AUTO_NEG_MASK;
+ portSerialCtrlReg &= ~ETH_SET_GMII_SPEED_1000_MASK;
+ portSerialCtrlReg &= ~ETH_SET_MII_SPEED_100_MASK;
+ break;
+
+ case MV_ETH_SPEED_100:
+ portSerialCtrlReg |= ETH_DISABLE_SPEED_AUTO_NEG_MASK;
+ portSerialCtrlReg &= ~ETH_SET_GMII_SPEED_1000_MASK;
+ portSerialCtrlReg |= ETH_SET_MII_SPEED_100_MASK;
+ break;
+
+ case MV_ETH_SPEED_1000:
+ portSerialCtrlReg |= ETH_DISABLE_SPEED_AUTO_NEG_MASK;
+ portSerialCtrlReg |= ETH_SET_GMII_SPEED_1000_MASK;
+ break;
+
+ default:
+ mvOsPrintf("ethDrv: Unexpected Speed value %d\n", speed);
+ return MV_BAD_VALUE;
+ }
+ /* Set duplex */
+ switch(duplex)
+ {
+ case MV_ETH_DUPLEX_AN:
+ portSerialCtrlReg &= ~ETH_DISABLE_DUPLEX_AUTO_NEG_MASK;
+ break;
+
+ case MV_ETH_DUPLEX_HALF:
+ portSerialCtrlReg |= ETH_DISABLE_DUPLEX_AUTO_NEG_MASK;
+ portSerialCtrlReg &= ~ETH_SET_FULL_DUPLEX_MASK;
+ break;
+
+ case MV_ETH_DUPLEX_FULL:
+ portSerialCtrlReg |= ETH_DISABLE_DUPLEX_AUTO_NEG_MASK;
+ portSerialCtrlReg |= ETH_SET_FULL_DUPLEX_MASK;
+ break;
+
+ default:
+ mvOsPrintf("ethDrv: Unexpected Duplex value %d\n", duplex);
+ return MV_BAD_VALUE;
+ }
+ MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(port), portSerialCtrlReg);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthFlowCtrlSet - Set Flow Control of the port.
+*
+* DESCRIPTION:
+* This function configures the Flow Control mode of the port.
+* Changing this parameter is allowed only when the port is disabled.
+*
+* INPUT:
+* void* pPortHandle - Pointer to port specific handler;
+* MV_ETH_PORT_FC flowControl - Flow control of the port.
+*
+* RETURN: MV_STATUS
+* MV_OK - Success
+* MV_OUT_OF_RANGE - Failed. Port is out of valid range
+* MV_NOT_FOUND - Failed. Port is not initialized.
+* MV_BAD_VALUE - Value of the flowControl parameter is not valid
+*
+*******************************************************************************/
+MV_STATUS mvEthFlowCtrlSet(void* pPortHandle, MV_ETH_PORT_FC flowControl)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+ int port = pPortCtrl->portNo;
+ MV_U32 portSerialCtrlReg;
+
+ if( (port < 0) || (port >= (int)mvCtrlEthMaxPortGet() ) )
+ return MV_OUT_OF_RANGE;
+
+ pPortCtrl = ethPortCtrl[port];
+ if(pPortCtrl == NULL)
+ return MV_NOT_FOUND;
+
+ portSerialCtrlReg = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(port));
+ switch(flowControl)
+ {
+ case MV_ETH_FC_AN_ADV_DIS:
+ portSerialCtrlReg &= ~ETH_DISABLE_FC_AUTO_NEG_MASK;
+ portSerialCtrlReg &= ~ETH_ADVERTISE_SYM_FC_MASK;
+ break;
+
+ case MV_ETH_FC_AN_ADV_SYM:
+ portSerialCtrlReg &= ~ETH_DISABLE_FC_AUTO_NEG_MASK;
+ portSerialCtrlReg |= ETH_ADVERTISE_SYM_FC_MASK;
+ break;
+
+ case MV_ETH_FC_DISABLE:
+ portSerialCtrlReg |= ETH_DISABLE_FC_AUTO_NEG_MASK;
+ portSerialCtrlReg &= ~ETH_SET_FLOW_CTRL_MASK;
+ break;
+
+ case MV_ETH_FC_ENABLE:
+ portSerialCtrlReg |= ETH_DISABLE_FC_AUTO_NEG_MASK;
+ portSerialCtrlReg |= ETH_SET_FLOW_CTRL_MASK;
+ break;
+
+ default:
+ mvOsPrintf("ethDrv: Unexpected FlowControl value %d\n", flowControl);
+ return MV_BAD_VALUE;
+ }
+ MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(port), portSerialCtrlReg);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthHeaderModeSet - Set port header mode.
+*
+* DESCRIPTION:
+* This function configures the port to work in Marvell-Header mode.
+*
+* INPUT:
+* void* pPortHandle - Pointer to port specific handler;
+* MV_ETH_HEADER_MODE headerMode - The header mode to set the port in.
+*
+* RETURN: MV_STATUS
+* MV_OK - Success
+* MV_NOT_SUPPORTED- Feature not supported.
+* MV_OUT_OF_RANGE - Failed. Port is out of valid range
+* MV_NOT_FOUND - Failed. Port is not initialized.
+* MV_BAD_VALUE - Value of headerMode or numRxQueue parameter is not valid.
+*
+*******************************************************************************/
+MV_STATUS mvEthHeaderModeSet(void* pPortHandle, MV_ETH_HEADER_MODE headerMode)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+ int port = pPortCtrl->portNo;
+ MV_U32 mvHeaderReg;
+ MV_U32 numRxQ = MV_ETH_RX_Q_NUM;
+
+ if((port < 0) || (port >= mvCtrlEthMaxPortGet()))
+ return MV_OUT_OF_RANGE;
+
+ pPortCtrl = ethPortCtrl[port];
+ if(pPortCtrl == NULL)
+ return MV_NOT_FOUND;
+
+ mvHeaderReg = MV_REG_READ(ETH_PORT_MARVELL_HEADER_REG(port));
+ /* Disable header mode. */
+ mvHeaderReg &= ~ETH_MVHDR_EN_MASK;
+
+ if(headerMode != MV_ETH_DISABLE_HEADER_MODE)
+ {
+ /* Enable Header mode. */
+ mvHeaderReg |= ETH_MVHDR_EN_MASK;
+
+ /* Clear DA-Prefix & MHMask fields.*/
+ mvHeaderReg &= ~(ETH_MVHDR_DAPREFIX_MASK | ETH_MVHDR_MHMASK_MASK);
+
+ if(numRxQ > 1)
+ {
+ switch (headerMode)
+ {
+ case(MV_ETH_ENABLE_HEADER_MODE_PRI_2_1):
+ mvHeaderReg |= ETH_MVHDR_DAPREFIX_PRI_1_2;
+ break;
+ case(MV_ETH_ENABLE_HEADER_MODE_PRI_DBNUM):
+ mvHeaderReg |= ETH_MVHDR_DAPREFIX_DBNUM_PRI;
+ break;
+ case(MV_ETH_ENABLE_HEADER_MODE_PRI_SPID):
+ mvHeaderReg |= ETH_MVHDR_DAPREFIX_SPID_PRI;
+ break;
+ default:
+ break;
+ }
+
+ switch (numRxQ)
+ {
+ case (4):
+ mvHeaderReg |= ETH_MVHDR_MHMASK_4_QUEUE;
+ break;
+ case (8):
+ mvHeaderReg |= ETH_MVHDR_MHMASK_8_QUEUE;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ MV_REG_WRITE(ETH_PORT_MARVELL_HEADER_REG(port), mvHeaderReg);
+
+ return MV_OK;
+}
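+
+/* Illustrative sketch (added for clarity, not part of the original file):
+ * enabling the Marvell Header mode with the priority taken from the DA
+ * prefix could look like the following, assuming pHndl is a valid port
+ * handle obtained from mvEthPortHndlGet():
+ *
+ *     if (mvEthHeaderModeSet(pHndl, MV_ETH_ENABLE_HEADER_MODE_PRI_2_1) != MV_OK)
+ *         mvOsPrintf("ethDrv: failed to enable Marvell Header mode\n");
+ *
+ * Passing MV_ETH_DISABLE_HEADER_MODE turns the feature off again.
+ */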
+
+#if (MV_ETH_VERSION >= 4)
+/*******************************************************************************
+* mvEthEjpModeSet - Enable / Disable EJP policy for TX.
+*
+* DESCRIPTION:
+* This function enables or disables the EJP policy for TX on the given port.
+*
+* INPUT:
+* void* pPortHandle - Pointer to port specific handler;
+* int mode - MV_TRUE (non-zero) - enable EJP mode;
+*            MV_FALSE (zero)    - disable EJP mode.
+*
+* OUTPUT: None.
+*
+* RETURN: MV_STATUS
+*   MV_OK - Success
+*   Other - Failure
+*
+*******************************************************************************/
+MV_STATUS mvEthEjpModeSet(void* pPortHandle, int mode)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+ int port = pPortCtrl->portNo;
+
+ if((port < 0) || (port >= mvCtrlEthMaxPortGet()))
+ return MV_OUT_OF_RANGE;
+
+ pPortCtrl = ethPortCtrl[port];
+ if(pPortCtrl == NULL)
+ return MV_NOT_FOUND;
+
+ pPortCtrl->portConfig.ejpMode = mode;
+ if(mode)
+ {
+ /* EJP enabled */
+ MV_REG_WRITE(ETH_TXQ_CMD_1_REG(port), ETH_TX_EJP_ENABLE_MASK);
+ }
+ else
+ {
+ /* EJP disabled */
+ MV_REG_WRITE(ETH_TXQ_CMD_1_REG(port), 0);
+ }
+ mvOsPrintf("eth_%d: EJP %s - ETH_TXQ_CMD_1_REG: 0x%x = 0x%08x\n",
+ port, mode ? "Enabled" : "Disabled", ETH_TXQ_CMD_1_REG(port),
+ MV_REG_READ(ETH_TXQ_CMD_1_REG(port)));
+
+ return MV_OK;
+}
+#endif /* MV_ETH_VERSION >= 4 */
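+
+/* Illustrative sketch (added for clarity, not part of the original file):
+ * on controllers where MV_ETH_VERSION >= 4, a caller could toggle the EJP
+ * TX policy like this, assuming pHndl is a valid port handle:
+ *
+ *     mvEthEjpModeSet(pHndl, 1);    (enable EJP)
+ *     mvEthEjpModeSet(pHndl, 0);    (disable EJP)
+ */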
+
+/*******************************************************************************
+* mvEthStatusGet - Get major properties of the port.
+*
+* DESCRIPTION:
+* This function gets the major properties of the port (link, speed, duplex,
+* flowControl, etc.) and returns them in a single structure.
+*
+* INPUT:
+* void* pPortHandle - Pointer to port specific handler;
+*
+* OUTPUT:
+* MV_ETH_PORT_STATUS* pStatus - Pointer to the structure where the port
+* status will be placed.
+*
+* RETURN: None.
+*
+*******************************************************************************/
+void mvEthStatusGet(void* pPortHandle, MV_ETH_PORT_STATUS* pStatus)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+ int port = pPortCtrl->portNo;
+
+ MV_U32 regValue;
+
+ regValue = MV_REG_READ( ETH_PORT_STATUS_REG(port) );
+
+ if(regValue & ETH_GMII_SPEED_1000_MASK)
+ pStatus->speed = MV_ETH_SPEED_1000;
+ else if(regValue & ETH_MII_SPEED_100_MASK)
+ pStatus->speed = MV_ETH_SPEED_100;
+ else
+ pStatus->speed = MV_ETH_SPEED_10;
+
+ if(regValue & ETH_LINK_UP_MASK)
+ pStatus->isLinkUp = MV_TRUE;
+ else
+ pStatus->isLinkUp = MV_FALSE;
+
+ if(regValue & ETH_FULL_DUPLEX_MASK)
+ pStatus->duplex = MV_ETH_DUPLEX_FULL;
+ else
+ pStatus->duplex = MV_ETH_DUPLEX_HALF;
+
+
+ if(regValue & ETH_ENABLE_RCV_FLOW_CTRL_MASK)
+ pStatus->flowControl = MV_ETH_FC_ENABLE;
+ else
+ pStatus->flowControl = MV_ETH_FC_DISABLE;
+}
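+
+/* Illustrative sketch (added for clarity, not part of the original file):
+ * reading the port status into a local structure and reporting the link
+ * state, assuming pHndl is a valid port handle:
+ *
+ *     MV_ETH_PORT_STATUS portStatus;
+ *
+ *     mvEthStatusGet(pHndl, &portStatus);
+ *     mvOsPrintf("link is %s\n", portStatus.isLinkUp ? "up" : "down");
+ */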
+
+
+/******************************************************************************/
+/* PHY Control Functions */
+/******************************************************************************/
+
+
+/*******************************************************************************
+* mvEthPhyAddrSet - Set the ethernet port PHY address.
+*
+* DESCRIPTION:
+* This routine sets the ethernet port PHY address according to the given
+* parameter.
+*
+* INPUT:
+* void* pPortHandle - Pointer to port specific handler;
+* int phyAddr - PHY address
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+void mvEthPhyAddrSet(void* pPortHandle, int phyAddr)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+ int port = pPortCtrl->portNo;
+ unsigned int regData;
+
+ regData = MV_REG_READ(ETH_PHY_ADDR_REG(port));
+
+ regData &= ~ETH_PHY_ADDR_MASK;
+ regData |= phyAddr;
+
+ MV_REG_WRITE(ETH_PHY_ADDR_REG(port), regData);
+
+ return;
+}
+
+/*******************************************************************************
+* mvEthPhyAddrGet - Get the ethernet port PHY address.
+*
+* DESCRIPTION:
+* This routine returns the given ethernet port PHY address.
+*
+* INPUT:
+* void* pPortHandle - Pointer to port specific handler;
+*
+*
+* RETURN: int - PHY address.
+*
+*******************************************************************************/
+int mvEthPhyAddrGet(void* pPortHandle)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+ int port = pPortCtrl->portNo;
+ unsigned int regData;
+
+ regData = MV_REG_READ(ETH_PHY_ADDR_REG(port));
+
+ return ((regData >> (5 * port)) & 0x1f);
+}
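+
+/* Illustrative sketch (added for clarity, not part of the original file):
+ * the PHY address of a port can be programmed and read back through the two
+ * routines above; e.g. assuming the board wires this port's PHY at address 8:
+ *
+ *     mvEthPhyAddrSet(pHndl, 8);
+ *     mvOsPrintf("PHY address = %d\n", mvEthPhyAddrGet(pHndl));
+ */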
+
+/******************************************************************************/
+/* Descriptor handling Functions */
+/******************************************************************************/
+
+/*******************************************************************************
+* ethInitRxDescRing - Carve a Rx chain desc list and buffer in memory.
+*
+* DESCRIPTION:
+* This function prepares a Rx chained list of descriptors and packet
+* buffers in a form of a ring. The routine must be called after port
+* initialization routine and before port start routine.
+* The Ethernet SDMA engine uses CPU bus addresses to access the various
+* devices in the system (i.e. DRAM). This function uses the ethernet
+* struct 'virtual to physical' routine (set by the user) to set the ring
+* with physical addresses.
+*
+* INPUT:
+* ETH_PORT_CTRL *pEthPortCtrl Ethernet Port Control struct.
+* int rxQueue Number of Rx queue.
+* int rxDescNum Number of Rx descriptors
+* MV_U8* rxDescBaseAddr Rx descriptors memory area base addr.
+*
+* OUTPUT:
+* The routine updates the Ethernet port control struct with information
+* regarding the Rx descriptors and buffers.
+*
+* RETURN: None
+*
+*******************************************************************************/
+static void ethInitRxDescRing(ETH_PORT_CTRL* pPortCtrl, int queue)
+{
+ ETH_RX_DESC *pRxDescBase, *pRxDesc, *pRxPrevDesc;
+ int ix, rxDescNum = pPortCtrl->rxQueueConfig[queue].descrNum;
+ ETH_QUEUE_CTRL *pQueueCtrl = &pPortCtrl->rxQueue[queue];
+
+ /* Make sure descriptor address is cache line size aligned */
+ pRxDescBase = (ETH_RX_DESC*)MV_ALIGN_UP((MV_ULONG)pQueueCtrl->descBuf.bufVirtPtr,
+ CPU_D_CACHE_LINE_SIZE);
+
+ pRxDesc = (ETH_RX_DESC*)pRxDescBase;
+ pRxPrevDesc = pRxDesc;
+
+ /* initialize the Rx descriptors ring */
+ for (ix=0; ix<rxDescNum; ix++)
+ {
+ pRxDesc->bufSize = 0x0;
+ pRxDesc->byteCnt = 0x0;
+ pRxDesc->cmdSts = ETH_BUFFER_OWNED_BY_HOST;
+ pRxDesc->bufPtr = 0x0;
+ pRxDesc->returnInfo = 0x0;
+ pRxPrevDesc = pRxDesc;
+ if(ix == (rxDescNum-1))
+ {
+ /* Closing Rx descriptors ring */
+ pRxPrevDesc->nextDescPtr = (MV_U32)ethDescVirtToPhy(pQueueCtrl, (void*)pRxDescBase);
+ }
+ else
+ {
+ pRxDesc = (ETH_RX_DESC*)((MV_ULONG)pRxDesc + ETH_RX_DESC_ALIGNED_SIZE);
+ pRxPrevDesc->nextDescPtr = (MV_U32)ethDescVirtToPhy(pQueueCtrl, (void*)pRxDesc);
+ }
+ ETH_DESCR_FLUSH_INV(pPortCtrl, pRxPrevDesc);
+ }
+
+ pQueueCtrl->pCurrentDescr = pRxDescBase;
+ pQueueCtrl->pUsedDescr = pRxDescBase;
+
+ pQueueCtrl->pFirstDescr = pRxDescBase;
+ pQueueCtrl->pLastDescr = pRxDesc;
+ pQueueCtrl->resource = 0;
+}
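+
+/* Note (added for clarity, not part of the original file): after
+ * ethInitRxDescRing() completes, the descriptors form a closed ring through
+ * their nextDescPtr fields (stored as physical addresses) and are all owned
+ * by the host:
+ *
+ *     pFirstDescr -> ... -> pLastDescr -> back to pFirstDescr
+ *
+ * pCurrentDescr and pUsedDescr both start at pFirstDescr and the resource
+ * counter starts at 0; receive buffers are attached to the ring afterwards.
+ */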
+
+void ethResetRxDescRing(void* pPortHndl, int queue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+ ETH_QUEUE_CTRL* pQueueCtrl = &pPortCtrl->rxQueue[queue];
+ ETH_RX_DESC* pRxDesc = (ETH_RX_DESC*)pQueueCtrl->pFirstDescr;
+
+ pQueueCtrl->resource = 0;
+ if(pQueueCtrl->pFirstDescr != NULL)
+ {
+ while(MV_TRUE)
+ {
+ pRxDesc->bufSize = 0x0;
+ pRxDesc->byteCnt = 0x0;
+ pRxDesc->cmdSts = ETH_BUFFER_OWNED_BY_HOST;
+ pRxDesc->bufPtr = 0x0;
+ pRxDesc->returnInfo = 0x0;
+ ETH_DESCR_FLUSH_INV(pPortCtrl, pRxDesc);
+ if( (void*)pRxDesc == pQueueCtrl->pLastDescr)
+ break;
+ pRxDesc = RX_NEXT_DESC_PTR(pRxDesc, pQueueCtrl);
+ }
+ pQueueCtrl->pCurrentDescr = pQueueCtrl->pFirstDescr;
+ pQueueCtrl->pUsedDescr = pQueueCtrl->pFirstDescr;
+
+ /* Update RX Command register */
+ pPortCtrl->portRxQueueCmdReg |= (1 << queue);
+
+ /* update HW */
+ MV_REG_WRITE( ETH_RX_CUR_DESC_PTR_REG(pPortCtrl->portNo, queue),
+ (MV_U32)ethDescVirtToPhy(pQueueCtrl, pQueueCtrl->pCurrentDescr) );
+ }
+ else
+ {
+ /* Update RX Command register */
+ pPortCtrl->portRxQueueCmdReg &= ~(1 << queue);
+
+ /* update HW */
+ MV_REG_WRITE( ETH_RX_CUR_DESC_PTR_REG(pPortCtrl->portNo, queue), 0);
+ }
+}
+
+/*******************************************************************************
+* ethInitTxDescRing - Carve a Tx chain desc list and buffer in memory.
+*
+* DESCRIPTION:
+* This function prepares a Tx chained list of descriptors and packet
+* buffers in a form of a ring. The routine must be called after port
+* initialization routine and before port start routine.
+* The Ethernet SDMA engine uses CPU bus addresses to access the various
+* devices in the system (i.e. DRAM). This function uses the ethernet
+* struct 'virtual to physical' routine (set by the user) to set the ring
+* with physical addresses.
+*
+* INPUT:
+* ETH_PORT_CTRL *pEthPortCtrl Ethernet Port Control struct.
+* int txQueue Number of Tx queue.
+* int txDescNum Number of Tx descriptors
+* int txBuffSize Size of Tx buffer
+* MV_U8* pTxDescBase Tx descriptors memory area base addr.
+*
+* OUTPUT:
+* The routine updates the Ethernet port control struct with information
+* regarding the Tx descriptors and buffers.
+*
+* RETURN: None.
+*
+*******************************************************************************/
+static void ethInitTxDescRing(ETH_PORT_CTRL* pPortCtrl, int queue)
+{
+ ETH_TX_DESC *pTxDescBase, *pTxDesc, *pTxPrevDesc;
+ int ix, txDescNum = pPortCtrl->txQueueConfig[queue].descrNum;
+ ETH_QUEUE_CTRL *pQueueCtrl = &pPortCtrl->txQueue[queue];
+
+ /* Make sure descriptor address is cache line size aligned */
+ pTxDescBase = (ETH_TX_DESC*)MV_ALIGN_UP((MV_ULONG)pQueueCtrl->descBuf.bufVirtPtr,
+ CPU_D_CACHE_LINE_SIZE);
+
+ pTxDesc = (ETH_TX_DESC*)pTxDescBase;
+ pTxPrevDesc = pTxDesc;
+
+ /* initialize the Tx descriptors ring */
+ for (ix=0; ix<txDescNum; ix++)
+ {
+ pTxDesc->byteCnt = 0x0000;
+ pTxDesc->L4iChk = 0x0000;
+ pTxDesc->cmdSts = ETH_BUFFER_OWNED_BY_HOST;
+ pTxDesc->bufPtr = 0x0;
+ pTxDesc->returnInfo = 0x0;
+
+ pTxPrevDesc = pTxDesc;
+
+ if(ix == (txDescNum-1))
+ {
+ /* Closing Tx descriptors ring */
+ pTxPrevDesc->nextDescPtr = (MV_U32)ethDescVirtToPhy(pQueueCtrl, (void*)pTxDescBase);
+ }
+ else
+ {
+ pTxDesc = (ETH_TX_DESC*)((MV_ULONG)pTxDesc + ETH_TX_DESC_ALIGNED_SIZE);
+ pTxPrevDesc->nextDescPtr = (MV_U32)ethDescVirtToPhy(pQueueCtrl, (void*)pTxDesc);
+ }
+ ETH_DESCR_FLUSH_INV(pPortCtrl, pTxPrevDesc);
+ }
+
+ pQueueCtrl->pCurrentDescr = pTxDescBase;
+ pQueueCtrl->pUsedDescr = pTxDescBase;
+
+ pQueueCtrl->pFirstDescr = pTxDescBase;
+ pQueueCtrl->pLastDescr = pTxDesc;
+ /* Leave one TX descriptor out of use */
+ pQueueCtrl->resource = txDescNum - 1;
+}
+
+void ethResetTxDescRing(void* pPortHndl, int queue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+ ETH_QUEUE_CTRL* pQueueCtrl = &pPortCtrl->txQueue[queue];
+ ETH_TX_DESC* pTxDesc = (ETH_TX_DESC*)pQueueCtrl->pFirstDescr;
+
+ pQueueCtrl->resource = 0;
+ if(pQueueCtrl->pFirstDescr != NULL)
+ {
+ while(MV_TRUE)
+ {
+ pTxDesc->byteCnt = 0x0000;
+ pTxDesc->L4iChk = 0x0000;
+ pTxDesc->cmdSts = ETH_BUFFER_OWNED_BY_HOST;
+ pTxDesc->bufPtr = 0x0;
+ pTxDesc->returnInfo = 0x0;
+ ETH_DESCR_FLUSH_INV(pPortCtrl, pTxDesc);
+ pQueueCtrl->resource++;
+ if( (void*)pTxDesc == pQueueCtrl->pLastDescr)
+ break;
+ pTxDesc = TX_NEXT_DESC_PTR(pTxDesc, pQueueCtrl);
+ }
+ /* Leave one TX descriptor out of use */
+ pQueueCtrl->resource--;
+ pQueueCtrl->pCurrentDescr = pQueueCtrl->pFirstDescr;
+ pQueueCtrl->pUsedDescr = pQueueCtrl->pFirstDescr;
+
+ /* Update TX Command register */
+ pPortCtrl->portTxQueueCmdReg |= MV_32BIT_LE_FAST(1 << queue);
+ /* update HW */
+ MV_REG_WRITE( ETH_TX_CUR_DESC_PTR_REG(pPortCtrl->portNo, queue),
+ (MV_U32)ethDescVirtToPhy(pQueueCtrl, pQueueCtrl->pCurrentDescr) );
+ }
+ else
+ {
+ /* Update TX Command register */
+ pPortCtrl->portTxQueueCmdReg &= MV_32BIT_LE_FAST(~(1 << queue));
+ /* update HW */
+ MV_REG_WRITE( ETH_TX_CUR_DESC_PTR_REG(pPortCtrl->portNo, queue), 0 );
+ }
+}
+
+/*******************************************************************************
+* ethAllocDescrMemory - Allocate memory for RX and TX descriptors.
+*
+* DESCRIPTION:
+* This function allocates memory for RX and TX descriptors.
+* - If ETH_DESCR_IN_SRAM defined, allocate memory from SRAM.
+* - If ETH_DESCR_IN_SDRAM defined, allocate memory in SDRAM.
+*
+* INPUT:
+* int descSize - size of memory to be allocated.
+*
+* RETURN: MV_U8* - virtual pointer to the allocated descriptor memory.
+*
+*******************************************************************************/
+static MV_U8* ethAllocDescrMemory(ETH_PORT_CTRL* pPortCtrl, int descSize,
+ MV_ULONG* pPhysAddr, MV_U32 *memHandle)
+{
+ MV_U8* pVirt;
+
+#if defined(ETH_DESCR_IN_SRAM)
+ if(ethDescInSram == MV_TRUE)
+ pVirt = (char*)mvSramMalloc(descSize, pPhysAddr);
+ else
+#endif /* ETH_DESCR_IN_SRAM */
+ {
+#ifdef ETH_DESCR_UNCACHED
+ pVirt = (MV_U8*)mvOsIoUncachedMalloc(pPortCtrl->osHandle, descSize,
+ pPhysAddr,memHandle);
+#else
+ pVirt = (MV_U8*)mvOsIoCachedMalloc(pPortCtrl->osHandle, descSize,
+ pPhysAddr, memHandle);
+#endif /* ETH_DESCR_UNCACHED */
+ }
+ memset(pVirt, 0, descSize);
+
+ return pVirt;
+}
+
+/*******************************************************************************
+* ethFreeDescrMemory - Free memory allocated for RX and TX descriptors.
+*
+* DESCRIPTION:
+* This function frees memory allocated for RX and TX descriptors.
+* - If ETH_DESCR_IN_SRAM defined, free memory using mvSramFree() function.
+* - If ETH_DESCR_IN_SDRAM defined, free memory using mvOsIoCachedFree()/mvOsIoUncachedFree().
+*
+* INPUT:
+* void* pVirtAddr - virtual pointer to memory allocated for RX and TX
+* descriptors.
+*
+* RETURN: None
+*
+*******************************************************************************/
+void ethFreeDescrMemory(ETH_PORT_CTRL* pPortCtrl, MV_BUF_INFO* pDescBuf)
+{
+ if( (pDescBuf == NULL) || (pDescBuf->bufVirtPtr == NULL) )
+ return;
+
+#if defined(ETH_DESCR_IN_SRAM)
+ if( ethDescInSram )
+ {
+ mvSramFree(pDescBuf->bufSize, pDescBuf->bufPhysAddr, pDescBuf->bufVirtPtr);
+ return;
+ }
+#endif /* ETH_DESCR_IN_SRAM */
+
+#ifdef ETH_DESCR_UNCACHED
+ mvOsIoUncachedFree(pPortCtrl->osHandle, pDescBuf->bufSize, pDescBuf->bufPhysAddr,
+ pDescBuf->bufVirtPtr,pDescBuf->memHandle);
+#else
+ mvOsIoCachedFree(pPortCtrl->osHandle, pDescBuf->bufSize, pDescBuf->bufPhysAddr,
+ pDescBuf->bufVirtPtr,pDescBuf->memHandle);
+#endif /* ETH_DESCR_UNCACHED */
+}
+
+/******************************************************************************/
+/* Other Functions */
+/******************************************************************************/
+
+void mvEthPortPowerUp(int port)
+{
+ MV_U32 regVal;
+
+ /* MAC Cause register should be cleared */
+ MV_REG_WRITE(ETH_UNIT_INTR_CAUSE_REG(port), 0);
+
+ if (mvBoardIsPortInSgmii(port))
+ mvEthPortSgmiiConfig(port);
+
+ /* Cancel Port Reset */
+ regVal = MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(port));
+ regVal &= (~ETH_PORT_RESET_MASK);
+ MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_1_REG(port), regVal);
+ while( (MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(port)) & ETH_PORT_RESET_MASK) != 0);
+}
+
+void mvEthPortPowerDown(int port)
+{
+ MV_U32 regVal;
+
+ /* Port must be DISABLED */
+ regVal = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(port));
+ if( (regVal & ETH_PORT_ENABLE_MASK) != 0)
+ {
+ mvOsPrintf("ethPort #%d: PowerDown - port must be Disabled (PSC=0x%x)\n",
+ port, regVal);
+ return;
+ }
+
+ /* Port Reset (Read after write the register as a precaution) */
+ regVal = MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(port));
+ MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_1_REG(port), regVal | ETH_PORT_RESET_MASK);
+ while((MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(port)) & ETH_PORT_RESET_MASK) == 0);
+}
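+
+/* Note (added for clarity, not part of the original file): mvEthPortPowerUp()
+ * clears the unit interrupt cause, applies the SGMII configuration when the
+ * board reports the port as SGMII, and takes the port out of reset;
+ * mvEthPortPowerDown() puts the port back into reset but refuses to do so
+ * while the port is still enabled, so callers are expected to disable the
+ * port first.
+ */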
+
+static void mvEthPortSgmiiConfig(int port)
+{
+ MV_U32 regVal;
+
+ regVal = MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(port));
+
+ regVal |= (ETH_SGMII_MODE_MASK /*| ETH_INBAND_AUTO_NEG_ENABLE_MASK */);
+ regVal &= (~ETH_INBAND_AUTO_NEG_BYPASS_MASK);
+
+ MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_1_REG(port), regVal);
+}
+
+
+
+
+
+
+
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.c
new file mode 100644
index 000000000..f53347599
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.c
@@ -0,0 +1,748 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvEthDebug.c - Source file for user friendly debug functions
+*
+* DESCRIPTION:
+*
+* DEPENDENCIES:
+* None.
+*
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "mvTypes.h"
+#include "mv802_3.h"
+#include "mvDebug.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "eth-phy/mvEthPhy.h"
+#include "eth/mvEth.h"
+#include "eth/gbe/mvEthDebug.h"
+
+/* #define mvOsPrintf printf */
+
+void mvEthPortShow(void* pHndl);
+void mvEthQueuesShow(void* pHndl, int rxQueue, int txQueue, int mode);
+
+/******************************************************************************/
+/* Debug functions */
+/******************************************************************************/
+void ethRxCoal(int port, int usec)
+{
+ void* pHndl;
+
+ pHndl = mvEthPortHndlGet(port);
+ if(pHndl != NULL)
+ {
+ mvEthRxCoalSet(pHndl, usec);
+ }
+}
+
+void ethTxCoal(int port, int usec)
+{
+ void* pHndl;
+
+ pHndl = mvEthPortHndlGet(port);
+ if(pHndl != NULL)
+ {
+ mvEthTxCoalSet(pHndl, usec);
+ }
+}
+
+#if (MV_ETH_VERSION >= 4)
+void ethEjpModeSet(int port, int mode)
+{
+ void* pHndl;
+
+ pHndl = mvEthPortHndlGet(port);
+ if(pHndl != NULL)
+ {
+ mvEthEjpModeSet(pHndl, mode);
+ }
+}
+#endif /* (MV_ETH_VERSION >= 4) */
+
+void ethBpduRxQ(int port, int bpduQueue)
+{
+ void* pHndl;
+
+ pHndl = mvEthPortHndlGet(port);
+ if(pHndl != NULL)
+ {
+ mvEthBpduRxQueue(pHndl, bpduQueue);
+ }
+}
+
+void ethArpRxQ(int port, int arpQueue)
+{
+ void* pHndl;
+
+ pHndl = mvEthPortHndlGet(port);
+ if(pHndl != NULL)
+ {
+ mvEthArpRxQueue(pHndl, arpQueue);
+ }
+}
+
+void ethTcpRxQ(int port, int tcpQueue)
+{
+ void* pHndl;
+
+ pHndl = mvEthPortHndlGet(port);
+ if(pHndl != NULL)
+ {
+ mvEthTcpRxQueue(pHndl, tcpQueue);
+ }
+}
+
+void ethUdpRxQ(int port, int udpQueue)
+{
+ void* pHndl;
+
+ pHndl = mvEthPortHndlGet(port);
+ if(pHndl != NULL)
+ {
+ mvEthUdpRxQueue(pHndl, udpQueue);
+ }
+}
+
+void ethTxPolicyRegs(int port)
+{
+ int queue;
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)mvEthPortHndlGet(port);
+
+ if(pPortCtrl == NULL)
+ {
+ return;
+ }
+ mvOsPrintf("Port #%d TX Policy: EJP=%d, TXQs: ",
+ port, pPortCtrl->portConfig.ejpMode);
+ for(queue=0; queue<MV_ETH_TX_Q_NUM; queue++)
+ {
+ if(pPortCtrl->txQueueConfig[queue].descrNum > 0)
+ mvOsPrintf("%d, ", queue);
+ }
+ mvOsPrintf("\n");
+
+ mvOsPrintf("\n\t TX policy Port #%d configuration registers\n", port);
+
+ mvOsPrintf("ETH_TX_QUEUE_COMMAND_REG : 0x%X = 0x%08x\n",
+ ETH_TX_QUEUE_COMMAND_REG(port),
+ MV_REG_READ( ETH_TX_QUEUE_COMMAND_REG(port) ) );
+
+ mvOsPrintf("ETH_TX_FIXED_PRIO_CFG_REG : 0x%X = 0x%08x\n",
+ ETH_TX_FIXED_PRIO_CFG_REG(port),
+ MV_REG_READ( ETH_TX_FIXED_PRIO_CFG_REG(port) ) );
+
+ mvOsPrintf("ETH_TX_TOKEN_RATE_CFG_REG : 0x%X = 0x%08x\n",
+ ETH_TX_TOKEN_RATE_CFG_REG(port),
+ MV_REG_READ( ETH_TX_TOKEN_RATE_CFG_REG(port) ) );
+
+ mvOsPrintf("ETH_MAX_TRANSMIT_UNIT_REG : 0x%X = 0x%08x\n",
+ ETH_MAX_TRANSMIT_UNIT_REG(port),
+ MV_REG_READ( ETH_MAX_TRANSMIT_UNIT_REG(port) ) );
+
+ mvOsPrintf("ETH_TX_TOKEN_BUCKET_SIZE_REG : 0x%X = 0x%08x\n",
+ ETH_TX_TOKEN_BUCKET_SIZE_REG(port),
+ MV_REG_READ( ETH_TX_TOKEN_BUCKET_SIZE_REG(port) ) );
+
+ mvOsPrintf("ETH_TX_TOKEN_BUCKET_COUNT_REG : 0x%X = 0x%08x\n",
+ ETH_TX_TOKEN_BUCKET_COUNT_REG(port),
+ MV_REG_READ( ETH_TX_TOKEN_BUCKET_COUNT_REG(port) ) );
+
+ for(queue=0; queue<MV_ETH_MAX_TXQ; queue++)
+ {
+ mvOsPrintf("\n\t TX policy Port #%d, Queue #%d configuration registers\n", port, queue);
+
+ mvOsPrintf("ETH_TXQ_TOKEN_COUNT_REG : 0x%X = 0x%08x\n",
+ ETH_TXQ_TOKEN_COUNT_REG(port, queue),
+ MV_REG_READ( ETH_TXQ_TOKEN_COUNT_REG(port, queue) ) );
+
+ mvOsPrintf("ETH_TXQ_TOKEN_CFG_REG : 0x%X = 0x%08x\n",
+ ETH_TXQ_TOKEN_CFG_REG(port, queue),
+ MV_REG_READ( ETH_TXQ_TOKEN_CFG_REG(port, queue) ) );
+
+ mvOsPrintf("ETH_TXQ_ARBITER_CFG_REG : 0x%X = 0x%08x\n",
+ ETH_TXQ_ARBITER_CFG_REG(port, queue),
+ MV_REG_READ( ETH_TXQ_ARBITER_CFG_REG(port, queue) ) );
+ }
+ mvOsPrintf("\n");
+}
+
+/* Print important registers of Ethernet port */
+void ethPortRegs(int port)
+{
+ mvOsPrintf("\n\t ethGiga #%d port Registers:\n", port);
+
+ mvOsPrintf("ETH_PORT_STATUS_REG : 0x%X = 0x%08x\n",
+ ETH_PORT_STATUS_REG(port),
+ MV_REG_READ( ETH_PORT_STATUS_REG(port) ) );
+
+ mvOsPrintf("ETH_PORT_SERIAL_CTRL_REG : 0x%X = 0x%08x\n",
+ ETH_PORT_SERIAL_CTRL_REG(port),
+ MV_REG_READ( ETH_PORT_SERIAL_CTRL_REG(port) ) );
+
+ mvOsPrintf("ETH_PORT_CONFIG_REG : 0x%X = 0x%08x\n",
+ ETH_PORT_CONFIG_REG(port),
+ MV_REG_READ( ETH_PORT_CONFIG_REG(port) ) );
+
+ mvOsPrintf("ETH_PORT_CONFIG_EXTEND_REG : 0x%X = 0x%08x\n",
+ ETH_PORT_CONFIG_EXTEND_REG(port),
+ MV_REG_READ( ETH_PORT_CONFIG_EXTEND_REG(port) ) );
+
+ mvOsPrintf("ETH_SDMA_CONFIG_REG : 0x%X = 0x%08x\n",
+ ETH_SDMA_CONFIG_REG(port),
+ MV_REG_READ( ETH_SDMA_CONFIG_REG(port) ) );
+
+ mvOsPrintf("ETH_TX_FIFO_URGENT_THRESH_REG : 0x%X = 0x%08x\n",
+ ETH_TX_FIFO_URGENT_THRESH_REG(port),
+ MV_REG_READ( ETH_TX_FIFO_URGENT_THRESH_REG(port) ) );
+
+ mvOsPrintf("ETH_RX_QUEUE_COMMAND_REG : 0x%X = 0x%08x\n",
+ ETH_RX_QUEUE_COMMAND_REG(port),
+ MV_REG_READ( ETH_RX_QUEUE_COMMAND_REG(port) ) );
+
+ mvOsPrintf("ETH_TX_QUEUE_COMMAND_REG : 0x%X = 0x%08x\n",
+ ETH_TX_QUEUE_COMMAND_REG(port),
+ MV_REG_READ( ETH_TX_QUEUE_COMMAND_REG(port) ) );
+
+ mvOsPrintf("ETH_INTR_CAUSE_REG : 0x%X = 0x%08x\n",
+ ETH_INTR_CAUSE_REG(port),
+ MV_REG_READ( ETH_INTR_CAUSE_REG(port) ) );
+
+ mvOsPrintf("ETH_INTR_EXTEND_CAUSE_REG : 0x%X = 0x%08x\n",
+ ETH_INTR_CAUSE_EXT_REG(port),
+ MV_REG_READ( ETH_INTR_CAUSE_EXT_REG(port) ) );
+
+ mvOsPrintf("ETH_INTR_MASK_REG : 0x%X = 0x%08x\n",
+ ETH_INTR_MASK_REG(port),
+ MV_REG_READ( ETH_INTR_MASK_REG(port) ) );
+
+ mvOsPrintf("ETH_INTR_EXTEND_MASK_REG : 0x%X = 0x%08x\n",
+ ETH_INTR_MASK_EXT_REG(port),
+ MV_REG_READ( ETH_INTR_MASK_EXT_REG(port) ) );
+
+ mvOsPrintf("ETH_RX_DESCR_STAT_CMD_REG : 0x%X = 0x%08x\n",
+ ETH_RX_DESCR_STAT_CMD_REG(port, 0),
+ MV_REG_READ( ETH_RX_DESCR_STAT_CMD_REG(port, 0) ) );
+
+ mvOsPrintf("ETH_RX_BYTE_COUNT_REG : 0x%X = 0x%08x\n",
+ ETH_RX_BYTE_COUNT_REG(port, 0),
+ MV_REG_READ( ETH_RX_BYTE_COUNT_REG(port, 0) ) );
+
+ mvOsPrintf("ETH_RX_BUF_PTR_REG : 0x%X = 0x%08x\n",
+ ETH_RX_BUF_PTR_REG(port, 0),
+ MV_REG_READ( ETH_RX_BUF_PTR_REG(port, 0) ) );
+
+ mvOsPrintf("ETH_RX_CUR_DESC_PTR_REG : 0x%X = 0x%08x\n",
+ ETH_RX_CUR_DESC_PTR_REG(port, 0),
+ MV_REG_READ( ETH_RX_CUR_DESC_PTR_REG(port, 0) ) );
+}
+
+
+/* Print Giga Ethernet UNIT registers */
+void ethRegs(int port)
+{
+ mvOsPrintf("ETH_PHY_ADDR_REG : 0x%X = 0x%08x\n",
+ ETH_PHY_ADDR_REG(port),
+ MV_REG_READ(ETH_PHY_ADDR_REG(port)) );
+
+ mvOsPrintf("ETH_UNIT_INTR_CAUSE_REG : 0x%X = 0x%08x\n",
+ ETH_UNIT_INTR_CAUSE_REG(port),
+ MV_REG_READ( ETH_UNIT_INTR_CAUSE_REG(port)) );
+
+ mvOsPrintf("ETH_UNIT_INTR_MASK_REG : 0x%X = 0x%08x\n",
+ ETH_UNIT_INTR_MASK_REG(port),
+ MV_REG_READ( ETH_UNIT_INTR_MASK_REG(port)) );
+
+ mvOsPrintf("ETH_UNIT_ERROR_ADDR_REG : 0x%X = 0x%08x\n",
+ ETH_UNIT_ERROR_ADDR_REG(port),
+ MV_REG_READ(ETH_UNIT_ERROR_ADDR_REG(port)) );
+
+ mvOsPrintf("ETH_UNIT_INT_ADDR_ERROR_REG : 0x%X = 0x%08x\n",
+ ETH_UNIT_INT_ADDR_ERROR_REG(port),
+ MV_REG_READ(ETH_UNIT_INT_ADDR_ERROR_REG(port)) );
+
+}
+
+/******************************************************************************/
+/* MIB Counters functions */
+/******************************************************************************/
+
+/*******************************************************************************
+* ethClearMibCounters - Clear all MIB counters
+*
+* DESCRIPTION:
+* This function clears all MIB counters of a specific ethernet port.
+* A read from the MIB counter will reset the counter.
+*
+* INPUT:
+* int port - Ethernet Port number.
+*
+* RETURN: None
+*
+*******************************************************************************/
+void ethClearCounters(int port)
+{
+ void* pHndl;
+
+ pHndl = mvEthPortHndlGet(port);
+ if(pHndl != NULL)
+ mvEthMibCountersClear(pHndl);
+
+ return;
+}
+
+
+/* Print counters of the Ethernet port */
+void ethPortCounters(int port)
+{
+ MV_U32 regValue, regValHigh;
+ void* pHndl;
+
+ pHndl = mvEthPortHndlGet(port);
+ if(pHndl == NULL)
+ return;
+
+ mvOsPrintf("\n\t Port #%d MIB Counters\n\n", port);
+
+ mvOsPrintf("GoodFramesReceived = %u\n",
+ mvEthMibCounterRead(pHndl, ETH_MIB_GOOD_FRAMES_RECEIVED, NULL));
+ mvOsPrintf("BadFramesReceived = %u\n",
+ mvEthMibCounterRead(pHndl, ETH_MIB_BAD_FRAMES_RECEIVED, NULL));
+ mvOsPrintf("BroadcastFramesReceived = %u\n",
+ mvEthMibCounterRead(pHndl, ETH_MIB_BROADCAST_FRAMES_RECEIVED, NULL));
+ mvOsPrintf("MulticastFramesReceived = %u\n",
+ mvEthMibCounterRead(pHndl, ETH_MIB_MULTICAST_FRAMES_RECEIVED, NULL));
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW,
+ &regValHigh);
+ mvOsPrintf("GoodOctetsReceived = 0x%08x%08x\n",
+ regValHigh, regValue);
+
+ mvOsPrintf("\n");
+ mvOsPrintf("GoodFramesSent = %u\n",
+ mvEthMibCounterRead(pHndl, ETH_MIB_GOOD_FRAMES_SENT, NULL));
+ mvOsPrintf("BroadcastFramesSent = %u\n",
+ mvEthMibCounterRead(pHndl, ETH_MIB_BROADCAST_FRAMES_SENT, NULL));
+ mvOsPrintf("MulticastFramesSent = %u\n",
+ mvEthMibCounterRead(pHndl, ETH_MIB_MULTICAST_FRAMES_SENT, NULL));
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_GOOD_OCTETS_SENT_LOW,
+ &regValHigh);
+ mvOsPrintf("GoodOctetsSent = 0x%08x%08x\n", regValHigh, regValue);
+
+
+ mvOsPrintf("\n\t FC Control Counters\n");
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_UNREC_MAC_CONTROL_RECEIVED, NULL);
+ mvOsPrintf("UnrecogMacControlReceived = %u\n", regValue);
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_GOOD_FC_RECEIVED, NULL);
+ mvOsPrintf("GoodFCFramesReceived = %u\n", regValue);
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_BAD_FC_RECEIVED, NULL);
+ mvOsPrintf("BadFCFramesReceived = %u\n", regValue);
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_FC_SENT, NULL);
+ mvOsPrintf("FCFramesSent = %u\n", regValue);
+
+
+ mvOsPrintf("\n\t RX Errors\n");
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_BAD_OCTETS_RECEIVED, NULL);
+ mvOsPrintf("BadOctetsReceived = %u\n", regValue);
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_UNDERSIZE_RECEIVED, NULL);
+ mvOsPrintf("UndersizeFramesReceived = %u\n", regValue);
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_FRAGMENTS_RECEIVED, NULL);
+ mvOsPrintf("FragmentsReceived = %u\n", regValue);
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_OVERSIZE_RECEIVED, NULL);
+ mvOsPrintf("OversizeFramesReceived = %u\n", regValue);
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_JABBER_RECEIVED, NULL);
+ mvOsPrintf("JabbersReceived = %u\n", regValue);
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_MAC_RECEIVE_ERROR, NULL);
+ mvOsPrintf("MacReceiveErrors = %u\n", regValue);
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_BAD_CRC_EVENT, NULL);
+ mvOsPrintf("BadCrcReceived = %u\n", regValue);
+
+ mvOsPrintf("\n\t TX Errors\n");
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR, NULL);
+ mvOsPrintf("TxMacErrors = %u\n", regValue);
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_EXCESSIVE_COLLISION, NULL);
+ mvOsPrintf("TxExcessiveCollisions = %u\n", regValue);
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_COLLISION, NULL);
+ mvOsPrintf("TxCollisions = %u\n", regValue);
+
+ regValue = mvEthMibCounterRead(pHndl, ETH_MIB_LATE_COLLISION, NULL);
+ mvOsPrintf("TxLateCollisions = %u\n", regValue);
+
+
+ mvOsPrintf("\n");
+ regValue = MV_REG_READ( ETH_RX_DISCARD_PKTS_CNTR_REG(port));
+ mvOsPrintf("Rx Discard packets counter = %u\n", regValue);
+
+ regValue = MV_REG_READ(ETH_RX_OVERRUN_PKTS_CNTR_REG(port));
+ mvOsPrintf("Rx Overrun packets counter = %u\n", regValue);
+}
+
+/* Print RMON counters of the Ethernet port */
+void ethPortRmonCounters(int port)
+{
+ void* pHndl;
+
+ pHndl = mvEthPortHndlGet(port);
+ if(pHndl == NULL)
+ return;
+
+ mvOsPrintf("\n\t Port #%d RMON MIB Counters\n\n", port);
+
+ mvOsPrintf("64 ByteFramesReceived = %u\n",
+ mvEthMibCounterRead(pHndl, ETH_MIB_FRAMES_64_OCTETS, NULL));
+ mvOsPrintf("65...127 ByteFramesReceived = %u\n",
+ mvEthMibCounterRead(pHndl, ETH_MIB_FRAMES_65_TO_127_OCTETS, NULL));
+ mvOsPrintf("128...255 ByteFramesReceived = %u\n",
+ mvEthMibCounterRead(pHndl, ETH_MIB_FRAMES_128_TO_255_OCTETS, NULL));
+ mvOsPrintf("256...511 ByteFramesReceived = %u\n",
+ mvEthMibCounterRead(pHndl, ETH_MIB_FRAMES_256_TO_511_OCTETS, NULL));
+ mvOsPrintf("512...1023 ByteFramesReceived = %u\n",
+ mvEthMibCounterRead(pHndl, ETH_MIB_FRAMES_512_TO_1023_OCTETS, NULL));
+ mvOsPrintf("1024...Max ByteFramesReceived = %u\n",
+ mvEthMibCounterRead(pHndl, ETH_MIB_FRAMES_1024_TO_MAX_OCTETS, NULL));
+}
+
+/* Print port information */
+void ethPortStatus(int port)
+{
+ void* pHndl;
+
+ pHndl = mvEthPortHndlGet(port);
+ if(pHndl != NULL)
+ {
+ mvEthPortShow(pHndl);
+ }
+}
+
+/* Print port queues information */
+void ethPortQueues(int port, int rxQueue, int txQueue, int mode)
+{
+ void* pHndl;
+
+ pHndl = mvEthPortHndlGet(port);
+ if(pHndl != NULL)
+ {
+ mvEthQueuesShow(pHndl, rxQueue, txQueue, mode);
+ }
+}
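+
+/* Illustrative sketch (added for clarity, not part of the original file):
+ * these helpers are intended to be called from a debug shell or from driver
+ * debug code; e.g. dumping the state of port 0 and its first RX/TX queues:
+ *
+ *     ethPortStatus(0);
+ *     ethPortRegs(0);
+ *     ethPortCounters(0);
+ *     ethPortQueues(0, 0, 0, 1);    (mode 1 also dumps the descriptors)
+ */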
+
+void ethUcastSet(int port, char* macStr, int queue)
+{
+ void* pHndl;
+ MV_U8 macAddr[MV_MAC_ADDR_SIZE];
+
+ pHndl = mvEthPortHndlGet(port);
+ if(pHndl != NULL)
+ {
+ mvMacStrToHex(macStr, macAddr);
+ mvEthMacAddrSet(pHndl, macAddr, queue);
+ }
+}
+
+
+void ethPortUcastShow(int port)
+{
+ MV_U32 unicastReg, macL, macH;
+ int i, j;
+
+ macL = MV_REG_READ(ETH_MAC_ADDR_LOW_REG(port));
+ macH = MV_REG_READ(ETH_MAC_ADDR_HIGH_REG(port));
+
+ mvOsPrintf("\n\t Port #%d Unicast MAC table: %02x:%02x:%02x:%02x:%02x:%02x\n\n",
+ port, ((macH >> 24) & 0xff), ((macH >> 16) & 0xff),
+ ((macH >> 8) & 0xff), (macH & 0xff),
+ ((macL >> 8) & 0xff), (macL & 0xff) );
+
+ for (i=0; i<4; i++)
+ {
+ unicastReg = MV_REG_READ( (ETH_DA_FILTER_UCAST_BASE(port) + i*4));
+ for(j=0; j<4; j++)
+ {
+ MV_U8 macEntry = (unicastReg >> (8*j)) & 0xFF;
+
+ mvOsPrintf("%X: %8s, Q = %d\n", i*4+j,
+ (macEntry & BIT0) ? "Accept" : "Reject", (macEntry >> 1) & 0x7);
+ }
+ }
+}
+
+void ethMcastAdd(int port, char* macStr, int queue)
+{
+ void* pHndl;
+ MV_U8 macAddr[MV_MAC_ADDR_SIZE];
+
+ pHndl = mvEthPortHndlGet(port);
+ if(pHndl != NULL)
+ {
+ mvMacStrToHex(macStr, macAddr);
+ mvEthMcastAddrSet(pHndl, macAddr, queue);
+ }
+}
+
+void ethPortMcast(int port)
+{
+ int tblIdx, regIdx;
+ MV_U32 regVal;
+
+ mvOsPrintf("\n\t Port #%d Special (IP) Multicast table: 01:00:5E:00:00:XX\n\n",
+ port);
+
+ for(tblIdx=0; tblIdx<(256/4); tblIdx++)
+ {
+ regVal = MV_REG_READ((ETH_DA_FILTER_SPEC_MCAST_BASE(port) + tblIdx*4));
+ for(regIdx=0; regIdx<4; regIdx++)
+ {
+ if((regVal & (0x01 << (regIdx*8))) != 0)
+ {
+ mvOsPrintf("0x%02X: Accepted, rxQ = %d\n",
+ tblIdx*4+regIdx, ((regVal >> (regIdx*8+1)) & 0x07));
+ }
+ }
+ }
+ mvOsPrintf("\n\t Port #%d Other Multicast table\n\n", port);
+ for(tblIdx=0; tblIdx<(256/4); tblIdx++)
+ {
+ regVal = MV_REG_READ((ETH_DA_FILTER_OTH_MCAST_BASE(port) + tblIdx*4));
+ for(regIdx=0; regIdx<4; regIdx++)
+ {
+ if((regVal & (0x01 << (regIdx*8))) != 0)
+ {
+ mvOsPrintf("Crc8=0x%02X: Accepted, rxQ = %d\n",
+ tblIdx*4+regIdx, ((regVal >> (regIdx*8+1)) & 0x07));
+ }
+ }
+ }
+}
+
+
+/* Print status of Ethernet port */
+void mvEthPortShow(void* pHndl)
+{
+ MV_U32 regValue, rxCoal, txCoal;
+ int speed, queue, port;
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pHndl;
+
+ port = pPortCtrl->portNo;
+
+ regValue = MV_REG_READ( ETH_PORT_STATUS_REG(port) );
+
+ mvOsPrintf("\n\t ethGiga #%d port Status: 0x%04x = 0x%08x\n\n",
+ port, ETH_PORT_STATUS_REG(port), regValue);
+
+ mvOsPrintf("descInSram=%d, descSwCoher=%d\n",
+ ethDescInSram, ethDescSwCoher);
+
+ if(regValue & ETH_GMII_SPEED_1000_MASK)
+ speed = 1000;
+ else if(regValue & ETH_MII_SPEED_100_MASK)
+ speed = 100;
+ else
+ speed = 10;
+
+ mvEthCoalGet(pPortCtrl, &rxCoal, &txCoal);
+
+ /* Link, Speed, Duplex, FlowControl */
+ mvOsPrintf("Link=%s, Speed=%d, Duplex=%s, RxFlowControl=%s",
+ (regValue & ETH_LINK_UP_MASK) ? "UP" : "DOWN",
+ speed,
+ (regValue & ETH_FULL_DUPLEX_MASK) ? "FULL" : "HALF",
+ (regValue & ETH_ENABLE_RCV_FLOW_CTRL_MASK) ? "ENABLE" : "DISABLE");
+
+ mvOsPrintf("\n");
+
+ mvOsPrintf("RxCoal = %d usec, TxCoal = %d usec\n",
+ rxCoal, txCoal);
+
+ mvOsPrintf("rxDefQ=%d, arpQ=%d, bpduQ=%d, tcpQ=%d, udpQ=%d\n\n",
+ pPortCtrl->portConfig.rxDefQ, pPortCtrl->portConfig.rxArpQ,
+ pPortCtrl->portConfig.rxBpduQ,
+ pPortCtrl->portConfig.rxTcpQ, pPortCtrl->portConfig.rxUdpQ);
+
+ /* Print all RX and TX queues */
+ for(queue=0; queue<MV_ETH_RX_Q_NUM; queue++)
+ {
+ mvOsPrintf("RX Queue #%d: base=0x%lx, free=%d\n",
+ queue, (MV_ULONG)pPortCtrl->rxQueue[queue].pFirstDescr,
+ mvEthRxResourceGet(pPortCtrl, queue) );
+ }
+ mvOsPrintf("\n");
+ for(queue=0; queue<MV_ETH_TX_Q_NUM; queue++)
+ {
+ mvOsPrintf("TX Queue #%d: base=0x%lx, free=%d\n",
+ queue, (MV_ULONG)pPortCtrl->txQueue[queue].pFirstDescr,
+ mvEthTxResourceGet(pPortCtrl, queue) );
+ }
+}
+
+/* Print RX and TX queue of the Ethernet port */
+void mvEthQueuesShow(void* pHndl, int rxQueue, int txQueue, int mode)
+{
+ ETH_PORT_CTRL *pPortCtrl = (ETH_PORT_CTRL*)pHndl;
+ ETH_QUEUE_CTRL *pQueueCtrl;
+ MV_U32 regValue;
+ ETH_RX_DESC *pRxDescr;
+ ETH_TX_DESC *pTxDescr;
+ int i, port = pPortCtrl->portNo;
+
+ if( (rxQueue >=0) && (rxQueue < MV_ETH_RX_Q_NUM) )
+ {
+ pQueueCtrl = &(pPortCtrl->rxQueue[rxQueue]);
+ mvOsPrintf("Port #%d, RX Queue #%d\n\n", port, rxQueue);
+
+ mvOsPrintf("CURR_RX_DESC_PTR : 0x%X = 0x%08x\n",
+ ETH_RX_CUR_DESC_PTR_REG(port, rxQueue),
+ MV_REG_READ( ETH_RX_CUR_DESC_PTR_REG(port, rxQueue)));
+
+
+ if(pQueueCtrl->pFirstDescr != NULL)
+ {
+ mvOsPrintf("pFirstDescr=0x%lx, pLastDescr=0x%lx, numOfResources=%d\n",
+ (MV_ULONG)pQueueCtrl->pFirstDescr, (MV_ULONG)pQueueCtrl->pLastDescr,
+ pQueueCtrl->resource);
+ mvOsPrintf("pCurrDescr: 0x%lx, pUsedDescr: 0x%lx\n",
+ (MV_ULONG)pQueueCtrl->pCurrentDescr,
+ (MV_ULONG)pQueueCtrl->pUsedDescr);
+
+ if(mode == 1)
+ {
+ pRxDescr = (ETH_RX_DESC*)pQueueCtrl->pFirstDescr;
+ i = 0;
+ do
+ {
+ mvOsPrintf("%3d. desc=%08x (%08x), cmd=%08x, data=%4d, buf=%4d, buf=%08x, pkt=%lx, os=%lx\n",
+ i, (MV_U32)pRxDescr, (MV_U32)ethDescVirtToPhy(pQueueCtrl, (MV_U8*)pRxDescr),
+ pRxDescr->cmdSts, pRxDescr->byteCnt, (MV_U32)pRxDescr->bufSize,
+ (unsigned int)pRxDescr->bufPtr, (MV_ULONG)pRxDescr->returnInfo,
+ ((MV_PKT_INFO*)pRxDescr->returnInfo)->osInfo);
+
+ ETH_DESCR_INV(pPortCtrl, pRxDescr);
+ pRxDescr = RX_NEXT_DESC_PTR(pRxDescr, pQueueCtrl);
+ i++;
+ } while (pRxDescr != pQueueCtrl->pFirstDescr);
+ }
+ }
+ else
+ mvOsPrintf("RX Queue #%d is NOT CREATED\n", rxQueue);
+ }
+
+ if( (txQueue >=0) && (txQueue < MV_ETH_TX_Q_NUM) )
+ {
+ pQueueCtrl = &(pPortCtrl->txQueue[txQueue]);
+ mvOsPrintf("Port #%d, TX Queue #%d\n\n", port, txQueue);
+
+ regValue = MV_REG_READ( ETH_TX_CUR_DESC_PTR_REG(port, txQueue));
+ mvOsPrintf("CURR_TX_DESC_PTR : 0x%X = 0x%08x\n",
+ ETH_TX_CUR_DESC_PTR_REG(port, txQueue), regValue);
+
+ if(pQueueCtrl->pFirstDescr != NULL)
+ {
+ mvOsPrintf("pFirstDescr=0x%lx, pLastDescr=0x%lx, numOfResources=%d\n",
+ (MV_ULONG)pQueueCtrl->pFirstDescr,
+ (MV_ULONG)pQueueCtrl->pLastDescr,
+ pQueueCtrl->resource);
+ mvOsPrintf("pCurrDescr: 0x%lx, pUsedDescr: 0x%lx\n",
+ (MV_ULONG)pQueueCtrl->pCurrentDescr,
+ (MV_ULONG)pQueueCtrl->pUsedDescr);
+
+ if(mode == 1)
+ {
+ pTxDescr = (ETH_TX_DESC*)pQueueCtrl->pFirstDescr;
+ i = 0;
+ do
+ {
+ mvOsPrintf("%3d. desc=%08x (%08x), cmd=%08x, data=%4d, buf=%08x, pkt=%lx, os=%lx\n",
+ i, (MV_U32)pTxDescr, (MV_U32)ethDescVirtToPhy(pQueueCtrl, (MV_U8*)pTxDescr),
+ pTxDescr->cmdSts, pTxDescr->byteCnt,
+ (MV_U32)pTxDescr->bufPtr, (MV_ULONG)pTxDescr->returnInfo,
+ pTxDescr->returnInfo ? (((MV_PKT_INFO*)pTxDescr->returnInfo)->osInfo) : 0x0);
+
+ ETH_DESCR_INV(pPortCtrl, pTxDescr);
+ pTxDescr = TX_NEXT_DESC_PTR(pTxDescr, pQueueCtrl);
+ i++;
+ } while (pTxDescr != pQueueCtrl->pFirstDescr);
+ }
+ }
+ else
+ mvOsPrintf("TX Queue #%d is NOT CREATED\n", txQueue);
+ }
+}
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.h
new file mode 100644
index 000000000..f026f966d
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.h
@@ -0,0 +1,146 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __MV_ETH_DEBUG_H__
+#define __MV_ETH_DEBUG_H__
+
+#if 0
+/*
+ ** Externs
+ */
+void ethBpduRxQ(int port, int bpduQueue);
+void ethArpRxQ(int port, int bpduQueue);
+void ethTcpRxQ(int port, int bpduQueue);
+void ethUdpRxQ(int port, int bpduQueue);
+void ethMcastAdd(int port, char* macStr, int queue);
+
+#ifdef INCLUDE_MULTI_QUEUE
+void ethRxPolicy( int port);
+void ethTxPolicy( int port);
+void ethTxPolDA(int port, char* macStr, int txQ, char* headerHexStr);
+void ethRxPolMode(int port, MV_ETH_PRIO_MODE prioMode);
+void ethRxPolQ(int port, int rxQueue, int rxQuota);
+#endif /* INCLUDE_MULTI_QUEUE */
+
+void print_egiga_stat(void *sc, unsigned int port);
+void ethPortStatus (int port);
+void ethPortQueues( int port, int rxQueue, int txQueue, int mode);
+void ethPortMcast(int port);
+void ethPortRegs(int port);
+void ethPortCounters(int port);
+void ethPortRmonCounters(int port);
+void ethRxCoal(int port, int usec);
+void ethTxCoal(int port, int usec);
+
+void ethRegs(int port);
+void ethClearCounters(int port);
+void ethUcastSet(int port, char* macStr, int queue);
+void ethPortUcastShow(int port);
+
+#ifdef CONFIG_MV_ETH_HEADER
+void run_com_header(const char *buffer);
+#endif
+
+#ifdef INCLUDE_MULTI_QUEUE
+void ethRxPolMode(int port, MV_ETH_PRIO_MODE prioMode);
+void ethRxPolQ(int port, int queue, int quota);
+void ethRxPolicy(int port);
+void ethTxPolDef(int port, int txQ, char* headerHexStr);
+void ethTxPolDA(int port, char* macStr, int txQ, char* headerHexStr);
+void ethTxPolicy(int port);
+#endif /* INCLUDE_MULTI_QUEUE */
+
+#if (MV_ETH_VERSION >= 4)
+void ethEjpModeSet(int port, int mode);
+#endif
+#endif /* 0 */
+
+
+
+
+void ethRxCoal(int port, int usec);
+void ethTxCoal(int port, int usec);
+#if (MV_ETH_VERSION >= 4)
+void ethEjpModeSet(int port, int mode);
+#endif /* (MV_ETH_VERSION >= 4) */
+
+void ethBpduRxQ(int port, int bpduQueue);
+void ethArpRxQ(int port, int arpQueue);
+void ethTcpRxQ(int port, int tcpQueue);
+void ethUdpRxQ(int port, int udpQueue);
+void ethTxPolicyRegs(int port);
+void ethPortRegs(int port);
+void ethRegs(int port);
+void ethClearCounters(int port);
+void ethPortCounters(int port);
+void ethPortRmonCounters(int port);
+void ethPortStatus(int port);
+void ethPortQueues(int port, int rxQueue, int txQueue, int mode);
+void ethUcastSet(int port, char* macStr, int queue);
+void ethPortUcastShow(int port);
+void ethMcastAdd(int port, char* macStr, int queue);
+void ethPortMcast(int port);
+void mvEthPortShow(void* pHndl);
+void mvEthQueuesShow(void* pHndl, int rxQueue, int txQueue, int mode);
+
+#endif
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthGbe.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthGbe.h
new file mode 100644
index 000000000..f4cae5078
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthGbe.h
@@ -0,0 +1,751 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvEth.h - Header File for : Marvell Gigabit Ethernet Controller
+*
+* DESCRIPTION:
+* This header file contains macros typedefs and function declaration specific to
+* the Marvell Gigabit Ethernet Controller.
+*
+* DEPENDENCIES:
+* None.
+*
+*******************************************************************************/
+
+#ifndef __mvEthGbe_h__
+#define __mvEthGbe_h__
+
+extern MV_BOOL ethDescInSram;
+extern MV_BOOL ethDescSwCoher;
+extern ETH_PORT_CTRL* ethPortCtrl[];
+
+static INLINE MV_ULONG ethDescVirtToPhy(ETH_QUEUE_CTRL* pQueueCtrl, MV_U8* pDesc)
+{
+#if defined (ETH_DESCR_IN_SRAM)
+ if( ethDescInSram )
+ return mvSramVirtToPhy(pDesc);
+ else
+#endif /* ETH_DESCR_IN_SRAM */
+ return (pQueueCtrl->descBuf.bufPhysAddr + (pDesc - pQueueCtrl->descBuf.bufVirtPtr));
+}
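+
+/* Note (added for clarity, not part of the original header): when descriptors
+ * live in regular DRAM, the physical address of a descriptor is the queue
+ * buffer's physical base plus the descriptor's offset from the virtual base:
+ *
+ *     phys(pDesc) = descBuf.bufPhysAddr + (pDesc - descBuf.bufVirtPtr)
+ *
+ * The SRAM case is handled separately through mvSramVirtToPhy() when
+ * ETH_DESCR_IN_SRAM is defined.
+ */
+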
+/* Return port handler */
+#define mvEthPortHndlGet(port) ethPortCtrl[port]
+
+/* Used as WA for HW/SW race on TX */
+static INLINE int mvEthPortTxEnable(void* pPortHndl, int queue, int max_deep)
+{
+ int deep = 0;
+ MV_U32 txCurrReg, txEnReg;
+ ETH_TX_DESC* pTxLastDesc;
+ ETH_QUEUE_CTRL* pQueueCtrl;
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+
+ txEnReg = MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo));
+ if( (txEnReg & MV_32BIT_LE_FAST(ETH_TXQ_ENABLE_MASK)) == 0)
+ {
+ MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo)) = pPortCtrl->portTxQueueCmdReg;
+ return 0;
+ }
+
+ pQueueCtrl = &pPortCtrl->txQueue[queue];
+ pTxLastDesc = pQueueCtrl->pCurrentDescr;
+ txCurrReg = MV_REG_READ(ETH_TX_CUR_DESC_PTR_REG(pPortCtrl->portNo, queue));
+ if(ethDescVirtToPhy(pQueueCtrl, (MV_U8*)pTxLastDesc) == txCurrReg)
+ {
+ /* All descriptors are processed, no chance for race */
+ return 0;
+ }
+
+    /* Check distance between HW and SW location: */
+ /* If distance between HW and SW pointers is less than max_deep descriptors */
+ /* Race condition is possible, so wait end of TX and restart TXQ */
+ while(deep < max_deep)
+ {
+ pTxLastDesc = TX_PREV_DESC_PTR(pTxLastDesc, pQueueCtrl);
+ if(ethDescVirtToPhy(pQueueCtrl, (MV_U8*)pTxLastDesc) == txCurrReg)
+ {
+ int count = 0;
+
+ while( (txEnReg & MV_32BIT_LE_FAST(ETH_TXQ_ENABLE_MASK)) != 0)
+ {
+ count++;
+ if(count > 10000)
+ {
+ mvOsPrintf("mvEthPortTxEnable: timeout - TXQ_CMD=0x%08x\n",
+ MV_REG_READ(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo)) );
+ break;
+ }
+ txEnReg = MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo));
+ }
+
+ MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo)) = pPortCtrl->portTxQueueCmdReg;
+ return count;
+ }
+ deep++;
+ }
+ /* Distance between HW and SW pointers is more than max_deep descriptors, */
+ /* So NO race condition - do nothing */
+ return -1;
+}
+
+
+/* defines */
+#define ETH_CSUM_MIN_BYTE_COUNT 72
+
+/* Tailgate and Kirkwood have only 2K TX FIFO */
+#if (MV_ETH_VERSION == 2) || (MV_ETH_VERSION == 4)
+#define ETH_CSUM_MAX_BYTE_COUNT 1600
+#else
+#define ETH_CSUM_MAX_BYTE_COUNT 9*1024
+#endif /* MV_ETH_VERSION */
+
+#define ETH_MV_HEADER_SIZE 2
+#define ETH_MV_TX_EN
+
+/* An offset in Tx descriptors to store data for buffers less than 8 Bytes */
+#define MIN_TX_BUFF_LOAD 8
+#define TX_BUF_OFFSET_IN_DESC (ETH_TX_DESC_ALIGNED_SIZE - MIN_TX_BUFF_LOAD)
+
+/* Default port configuration value */
+#define PORT_CONFIG_VALUE \
+ ETH_DEF_RX_QUEUE_MASK(0) | \
+ ETH_DEF_RX_ARP_QUEUE_MASK(0) | \
+ ETH_DEF_RX_TCP_QUEUE_MASK(0) | \
+ ETH_DEF_RX_UDP_QUEUE_MASK(0) | \
+ ETH_DEF_RX_BPDU_QUEUE_MASK(0) | \
+ ETH_RX_CHECKSUM_WITH_PSEUDO_HDR
+
+/* Default port extend configuration value */
+#define PORT_CONFIG_EXTEND_VALUE 0
+
+#define PORT_SERIAL_CONTROL_VALUE \
+ ETH_DISABLE_FC_AUTO_NEG_MASK | \
+ BIT9 | \
+ ETH_DO_NOT_FORCE_LINK_FAIL_MASK | \
+ ETH_MAX_RX_PACKET_1552BYTE | \
+ ETH_SET_FULL_DUPLEX_MASK
+
+#define PORT_SERIAL_CONTROL_100MB_FORCE_VALUE \
+ ETH_FORCE_LINK_PASS_MASK | \
+ ETH_DISABLE_DUPLEX_AUTO_NEG_MASK | \
+ ETH_DISABLE_FC_AUTO_NEG_MASK | \
+ BIT9 | \
+ ETH_DO_NOT_FORCE_LINK_FAIL_MASK | \
+ ETH_DISABLE_SPEED_AUTO_NEG_MASK | \
+ ETH_SET_FULL_DUPLEX_MASK | \
+ ETH_SET_MII_SPEED_100_MASK | \
+ ETH_MAX_RX_PACKET_1552BYTE
+
+
+#define PORT_SERIAL_CONTROL_1000MB_FORCE_VALUE \
+ ETH_FORCE_LINK_PASS_MASK | \
+ ETH_DISABLE_DUPLEX_AUTO_NEG_MASK | \
+ ETH_DISABLE_FC_AUTO_NEG_MASK | \
+ BIT9 | \
+ ETH_DO_NOT_FORCE_LINK_FAIL_MASK | \
+ ETH_DISABLE_SPEED_AUTO_NEG_MASK | \
+ ETH_SET_FULL_DUPLEX_MASK | \
+ ETH_SET_GMII_SPEED_1000_MASK | \
+ ETH_MAX_RX_PACKET_1552BYTE
+
+#define PORT_SERIAL_CONTROL_SGMII_IBAN_VALUE \
+ ETH_DISABLE_FC_AUTO_NEG_MASK | \
+ BIT9 | \
+ ETH_IN_BAND_AN_EN_MASK | \
+ ETH_DO_NOT_FORCE_LINK_FAIL_MASK | \
+ ETH_MAX_RX_PACKET_1552BYTE
+
+/* Function headers: */
+MV_VOID mvEthSetSpecialMcastTable(int portNo, int queue);
+MV_STATUS mvEthArpRxQueue(void* pPortHandle, int arpQueue);
+MV_STATUS mvEthUdpRxQueue(void* pPortHandle, int udpQueue);
+MV_STATUS mvEthTcpRxQueue(void* pPortHandle, int tcpQueue);
+MV_STATUS mvEthMacAddrGet(int portNo, unsigned char *pAddr);
+MV_VOID mvEthSetOtherMcastTable(int portNo, int queue);
+MV_STATUS mvEthHeaderModeSet(void* pPortHandle, MV_ETH_HEADER_MODE headerMode);
+/* Interrupt Coalesting functions */
+MV_U32 mvEthRxCoalSet(void* pPortHndl, MV_U32 uSec);
+MV_U32 mvEthTxCoalSet(void* pPortHndl, MV_U32 uSec);
+MV_STATUS mvEthCoalGet(void* pPortHndl, MV_U32* pRxCoal, MV_U32* pTxCoal);
+
+/******************************************************************************/
+/* Data Flow functions */
+/******************************************************************************/
+static INLINE void mvEthPortTxRestart(void* pPortHndl)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+
+ MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo)) = pPortCtrl->portTxQueueCmdReg;
+}
+
+/* Get number of Free resources in specific TX queue */
+static INLINE int mvEthTxResourceGet(void* pPortHndl, int txQueue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+
+ return (pPortCtrl->txQueue[txQueue].resource);
+}
+
+/* Get number of Free resources in specific RX queue */
+static INLINE int mvEthRxResourceGet(void* pPortHndl, int rxQueue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+
+ return (pPortCtrl->rxQueue[rxQueue].resource);
+}
+
+static INLINE int mvEthTxQueueIsFull(void* pPortHndl, int txQueue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+
+ if(pPortCtrl->txQueue[txQueue].resource == 0)
+ return MV_TRUE;
+
+ return MV_FALSE;
+}
+
+/* Get number of Free resources in specific RX queue */
+static INLINE int mvEthRxQueueIsFull(void* pPortHndl, int rxQueue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+ ETH_QUEUE_CTRL* pQueueCtrl = &pPortCtrl->rxQueue[rxQueue];
+
+ if( (pQueueCtrl->pUsedDescr == pQueueCtrl->pCurrentDescr) &&
+ (pQueueCtrl->resource != 0) )
+ return MV_TRUE;
+
+ return MV_FALSE;
+}
+
+static INLINE int mvEthTxQueueIsEmpty(void* pPortHndl, int txQueue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+ ETH_QUEUE_CTRL* pQueueCtrl = &pPortCtrl->txQueue[txQueue];
+
+ if( (pQueueCtrl->pUsedDescr == pQueueCtrl->pCurrentDescr) &&
+ (pQueueCtrl->resource != 0) )
+ {
+ return MV_TRUE;
+ }
+ return MV_FALSE;
+}
+
+/* Get number of Free resources in specific RX queue */
+static INLINE int mvEthRxQueueIsEmpty(void* pPortHndl, int rxQueue)
+{
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+
+ if(pPortCtrl->rxQueue[rxQueue].resource == 0)
+ return MV_TRUE;
+
+ return MV_FALSE;
+}
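A caller-side admission check built from the inline helpers above, as a hedged sketch (the helper name and include path are made up for the example). mvEthPortSgTx() further below needs one free descriptor per fragment, which is exactly what this tests.

#include "mvEth.h"   /* path assumed */

static int example_can_xmit(void *portHandle, int txQueue, int numFrags)
{
    /* One free Tx descriptor is needed per fragment of the packet */
    return mvEthTxResourceGet(portHandle, txQueue) >= numFrags;
}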
+
+/*******************************************************************************
+* mvEthPortTx - Send an Ethernet packet
+*
+* DESCRIPTION:
+* This routine sends a given packet described by the pPktInfo parameter.
+* Single buffer only.
+*
+* INPUT:
+* void* pEthPortHndl - Ethernet Port handler.
+* int txQueue - Number of Tx queue.
+* MV_PKT_INFO *pPktInfo - User packet to send.
+*
+* RETURN:
+* MV_NO_RESOURCE - Not enough resources to send this packet.
+* MV_ERROR - Unexpected Fatal error.
+* MV_OK - Packet sent successfully.
+*
+*******************************************************************************/
+static INLINE MV_STATUS mvEthPortTx(void* pEthPortHndl, int txQueue, MV_PKT_INFO* pPktInfo)
+{
+ ETH_TX_DESC* pTxCurrDesc;
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+ ETH_QUEUE_CTRL* pQueueCtrl;
+ int portNo;
+ MV_BUF_INFO* pBufInfo = pPktInfo->pFrags;
+
+#ifdef ETH_DEBUG
+ if(pPortCtrl->portState != MV_ACTIVE)
+ return MV_BAD_STATE;
+#endif /* ETH_DEBUG */
+
+ portNo = pPortCtrl->portNo;
+ pQueueCtrl = &pPortCtrl->txQueue[txQueue];
+
+ /* Get the Tx Desc ring indexes */
+ pTxCurrDesc = pQueueCtrl->pCurrentDescr;
+
+ /* Check if there are enough resources to send the packet */
+ if(pQueueCtrl->resource == 0)
+ return MV_NO_RESOURCE;
+
+ pTxCurrDesc->byteCnt = pBufInfo->dataSize;
+
+ /* Flush buffer */
+ if(pPktInfo->pktSize != 0)
+ {
+#ifdef MV_NETBSD
+ pTxCurrDesc->bufPtr = pBufInfo->bufPhysAddr;
+ ETH_PACKET_CACHE_FLUSH(pBufInfo->bufVirtPtr, pPktInfo->pktSize);
+#else
+ pTxCurrDesc->bufPtr = ETH_PACKET_CACHE_FLUSH(pBufInfo->bufVirtPtr, pPktInfo->pktSize);
+#endif
+ pPktInfo->pktSize = 0;
+ }
+ else
+ pTxCurrDesc->bufPtr = pBufInfo->bufPhysAddr;
+
+ pTxCurrDesc->returnInfo = (MV_ULONG)pPktInfo;
+
+ /* There is only one buffer in the packet */
+ /* The OSG might set some bits for checksum offload, so add them to first descriptor */
+ pTxCurrDesc->cmdSts = pPktInfo->status |
+ ETH_BUFFER_OWNED_BY_DMA |
+ ETH_TX_GENERATE_CRC_MASK |
+ ETH_TX_ENABLE_INTERRUPT_MASK |
+ ETH_TX_ZERO_PADDING_MASK |
+ ETH_TX_FIRST_DESC_MASK |
+ ETH_TX_LAST_DESC_MASK;
+
+ ETH_DESCR_FLUSH_INV(pPortCtrl, pTxCurrDesc);
+
+ pQueueCtrl->resource--;
+ pQueueCtrl->pCurrentDescr = TX_NEXT_DESC_PTR(pTxCurrDesc, pQueueCtrl);
+
+ /* Apply send command */
+ MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(portNo)) = pPortCtrl->portTxQueueCmdReg;
+
+ return MV_OK;
+}
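A hedged usage sketch for mvEthPortTx(), not part of the HAL: queue a single pre-mapped buffer on Tx queue 0. Field names follow the code above; the helper name, queue number and the MV_U32 physical-address type are assumptions, and buffer allocation/mapping is left to the caller.

#include "mvEth.h"   /* path assumed */

static MV_STATUS example_xmit_one(void *portHandle, MV_U8 *data,
                                  MV_U32 physAddr, int len)
{
    /* Static so they outlive this call: the HAL stores the MV_PKT_INFO
     * pointer in the descriptor and hands it back via mvEthPortTxDone(). */
    static MV_BUF_INFO buf;
    static MV_PKT_INFO pkt;
    MV_STATUS status;

    buf.bufVirtPtr  = data;      /* used for the data cache flush      */
    buf.bufPhysAddr = physAddr;  /* what the DMA descriptor points at  */
    buf.dataSize    = len;

    pkt.pFrags   = &buf;
    pkt.numFrags = 1;
    pkt.pktSize  = len;  /* non-zero: mvEthPortTx() flushes the data cache */
    pkt.status   = 0;    /* no checksum-offload bits requested             */

    status = mvEthPortTx(portHandle, 0 /* txQueue */, &pkt);
    if (status == MV_NO_RESOURCE) {
        /* Ring full: retry after reclaiming with mvEthPortTxDone() */
    }
    return status;
}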
+
+
+/*******************************************************************************
+* mvEthPortSgTx - Send an Ethernet packet
+*
+* DESCRIPTION:
+* This routine sends a given packet described by the pPktInfo parameter. It
+* supports transmitting a packet spanned over multiple buffers.
+*
+* INPUT:
+* void* pEthPortHndl - Ethernet Port handler.
+* int txQueue - Number of Tx queue.
+* MV_PKT_INFO *pPktInfo - User packet to send.
+*
+* RETURN:
+* MV_NO_RESOURCE - Not enough resources to send this packet.
+* MV_ERROR - Unexpected Fatal error.
+* MV_OK - Packet sent successfully.
+*
+*******************************************************************************/
+static INLINE MV_STATUS mvEthPortSgTx(void* pEthPortHndl, int txQueue, MV_PKT_INFO* pPktInfo)
+{
+ ETH_TX_DESC* pTxFirstDesc;
+ ETH_TX_DESC* pTxCurrDesc;
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+ ETH_QUEUE_CTRL* pQueueCtrl;
+ int portNo, bufCount;
+ MV_BUF_INFO* pBufInfo = pPktInfo->pFrags;
+ MV_U8* pTxBuf;
+
+#ifdef ETH_DEBUG
+ if(pPortCtrl->portState != MV_ACTIVE)
+ return MV_BAD_STATE;
+#endif /* ETH_DEBUG */
+
+ portNo = pPortCtrl->portNo;
+ pQueueCtrl = &pPortCtrl->txQueue[txQueue];
+
+ /* Get the Tx Desc ring indexes */
+ pTxCurrDesc = pQueueCtrl->pCurrentDescr;
+
+ /* Check if there are enough resources to send the packet */
+ if(pQueueCtrl->resource < pPktInfo->numFrags)
+ return MV_NO_RESOURCE;
+
+ /* Remember first desc */
+ pTxFirstDesc = pTxCurrDesc;
+
+ bufCount = 0;
+ while(MV_TRUE)
+ {
+ if(pBufInfo[bufCount].dataSize <= MIN_TX_BUFF_LOAD)
+ {
+ /* Buffers with a payload smaller than MIN_TX_BUFF_LOAD (8 bytes) must be aligned */
+ /* to a 64-bit boundary. Two options here: */
+ /* 1) Usually, copy the payload into the 8 bytes reserved inside the descriptor. */
+ /* 2) In the half-duplex workaround, those reserved 8 bytes hold a pointer to an */
+ /* aligned buffer, and the small payload is copied into that buffer instead. */
+ pTxBuf = ((MV_U8*)pTxCurrDesc)+TX_BUF_OFFSET_IN_DESC;
+ mvOsBCopy(pBufInfo[bufCount].bufVirtPtr, pTxBuf, pBufInfo[bufCount].dataSize);
+ pTxCurrDesc->bufPtr = ethDescVirtToPhy(pQueueCtrl, pTxBuf);
+ }
+ else
+ {
+ /* Flush buffer */
+#ifdef MV_NETBSD
+ pTxCurrDesc->bufPtr = pBufInfo[bufCount].bufPhysAddr;
+ ETH_PACKET_CACHE_FLUSH(pBufInfo[bufCount].bufVirtPtr, pBufInfo[bufCount].dataSize);
+#else
+ pTxCurrDesc->bufPtr = ETH_PACKET_CACHE_FLUSH(pBufInfo[bufCount].bufVirtPtr, pBufInfo[bufCount].dataSize);
+#endif
+ }
+
+ pTxCurrDesc->byteCnt = pBufInfo[bufCount].dataSize;
+ bufCount++;
+
+ if(bufCount >= pPktInfo->numFrags)
+ break;
+
+ if(bufCount > 1)
+ {
+ /* This is a middle buffer of the packet: not first and not last */
+ pTxCurrDesc->cmdSts = ETH_BUFFER_OWNED_BY_DMA;
+ ETH_DESCR_FLUSH_INV(pPortCtrl, pTxCurrDesc);
+ }
+ /* Go to next descriptor and next buffer */
+ pTxCurrDesc = TX_NEXT_DESC_PTR(pTxCurrDesc, pQueueCtrl);
+ }
+ /* Set last desc with DMA ownership and interrupt enable. */
+ pTxCurrDesc->returnInfo = (MV_ULONG)pPktInfo;
+ if(bufCount == 1)
+ {
+ /* There is only one buffer in the packet */
+ /* The OSG might set some bits for checksum offload, so add them to first descriptor */
+ pTxCurrDesc->cmdSts = pPktInfo->status |
+ ETH_BUFFER_OWNED_BY_DMA |
+ ETH_TX_GENERATE_CRC_MASK |
+ ETH_TX_ENABLE_INTERRUPT_MASK |
+ ETH_TX_ZERO_PADDING_MASK |
+ ETH_TX_FIRST_DESC_MASK |
+ ETH_TX_LAST_DESC_MASK;
+
+ ETH_DESCR_FLUSH_INV(pPortCtrl, pTxCurrDesc);
+ }
+ else
+ {
+ /* Last but not First */
+ pTxCurrDesc->cmdSts = ETH_BUFFER_OWNED_BY_DMA |
+ ETH_TX_ENABLE_INTERRUPT_MASK |
+ ETH_TX_ZERO_PADDING_MASK |
+ ETH_TX_LAST_DESC_MASK;
+
+ ETH_DESCR_FLUSH_INV(pPortCtrl, pTxCurrDesc);
+
+ /* Update First when more than one buffer in the packet */
+ /* The OSG might set some bits for checksum offload, so add them to first descriptor */
+ pTxFirstDesc->cmdSts = pPktInfo->status |
+ ETH_BUFFER_OWNED_BY_DMA |
+ ETH_TX_GENERATE_CRC_MASK |
+ ETH_TX_FIRST_DESC_MASK;
+
+ ETH_DESCR_FLUSH_INV(pPortCtrl, pTxFirstDesc);
+ }
+ /* Update txQueue state */
+ pQueueCtrl->resource -= bufCount;
+ pQueueCtrl->pCurrentDescr = TX_NEXT_DESC_PTR(pTxCurrDesc, pQueueCtrl);
+
+ /* Apply send command */
+ MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(portNo)) = pPortCtrl->portTxQueueCmdReg;
+
+ return MV_OK;
+}
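A hedged sketch of the scatter/gather path, not part of the HAL: a two-fragment send whose first fragment is the 2-byte Marvell header. Because ETH_MV_HEADER_SIZE is below MIN_TX_BUFF_LOAD, mvEthPortSgTx() copies that fragment into the reserved area of its descriptor; the helper name, queue number and buffer preparation are assumptions.

#include "mvEth.h"   /* path assumed */

static MV_STATUS example_xmit_sg(void *portHandle,
                                 MV_U8 *hdr, MV_U32 hdrPhys,
                                 MV_U8 *payload, MV_U32 payloadPhys, int payloadLen)
{
    /* Static for the same lifetime reason as in the single-buffer sketch */
    static MV_BUF_INFO frags[2];
    static MV_PKT_INFO pkt;

    frags[0].bufVirtPtr  = hdr;
    frags[0].bufPhysAddr = hdrPhys;
    frags[0].dataSize    = ETH_MV_HEADER_SIZE;  /* 2 bytes: copied into the descriptor */

    frags[1].bufVirtPtr  = payload;
    frags[1].bufPhysAddr = payloadPhys;
    frags[1].dataSize    = payloadLen;          /* large fragment: flushed and DMA-ed  */

    pkt.pFrags   = frags;
    pkt.numFrags = 2;
    pkt.pktSize  = 0;   /* not consulted by the S/G path */
    pkt.status   = 0;   /* no checksum-offload request   */

    return mvEthPortSgTx(portHandle, 0 /* txQueue */, &pkt);
}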
+
+/*******************************************************************************
+* mvEthPortTxDone - Free all used Tx descriptors and mBlks.
+*
+* DESCRIPTION:
+* This routine returns the transmitted packet information to the caller.
+*
+* INPUT:
+* void* pEthPortHndl - Ethernet Port handler.
+* int txQueue - Number of Tx queue.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* NULL - No transmitted packet to return: either transmission is
+* still in progress or the TX queue is empty.
+* MV_PKT_INFO* - Pointer to the transmitted packet; its 'status' field is
+* filled with the descriptor command/status word.
+*
+*******************************************************************************/
+static INLINE MV_PKT_INFO* mvEthPortTxDone(void* pEthPortHndl, int txQueue)
+{
+ ETH_TX_DESC* pTxCurrDesc;
+ ETH_TX_DESC* pTxUsedDesc;
+ ETH_QUEUE_CTRL* pQueueCtrl;
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+ MV_PKT_INFO* pPktInfo;
+ MV_U32 commandStatus;
+
+ pQueueCtrl = &pPortCtrl->txQueue[txQueue];
+
+ pTxUsedDesc = pQueueCtrl->pUsedDescr;
+ pTxCurrDesc = pQueueCtrl->pCurrentDescr;
+
+ while(MV_TRUE)
+ {
+ /* No more used descriptors */
+ commandStatus = pTxUsedDesc->cmdSts;
+ if (commandStatus & (ETH_BUFFER_OWNED_BY_DMA))
+ {
+ ETH_DESCR_INV(pPortCtrl, pTxUsedDesc);
+ return NULL;
+ }
+ if( (pTxUsedDesc == pTxCurrDesc) &&
+ (pQueueCtrl->resource != 0) )
+ {
+ return NULL;
+ }
+ pQueueCtrl->resource++;
+ pQueueCtrl->pUsedDescr = TX_NEXT_DESC_PTR(pTxUsedDesc, pQueueCtrl);
+ if(commandStatus & (ETH_TX_LAST_DESC_MASK))
+ {
+ pPktInfo = (MV_PKT_INFO*)pTxUsedDesc->returnInfo;
+ pPktInfo->status = commandStatus;
+ return pPktInfo;
+ }
+ pTxUsedDesc = pQueueCtrl->pUsedDescr;
+ }
+}
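A hedged reclaim-loop sketch, not part of the HAL: after a Tx-done interrupt, call mvEthPortTxDone() until it returns NULL and release each returned packet. example_free_pkt() is a hypothetical caller-side helper.

#include "mvEth.h"   /* path assumed */

extern void example_free_pkt(MV_PKT_INFO *pkt);  /* hypothetical caller helper */

static void example_tx_done(void *portHandle, int txQueue)
{
    MV_PKT_INFO *pkt;

    while ((pkt = mvEthPortTxDone(portHandle, txQueue)) != NULL) {
        if (pkt->status & ETH_ERROR_SUMMARY_MASK) {
            /* Transmit error: the error code sits in status bits 1..2 */
        }
        example_free_pkt(pkt);  /* give the buffer back to the OS layer */
    }
}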
+
+/*******************************************************************************
+* mvEthPortRx - Get new received packets from Rx queue.
+*
+* DESCRIPTION:
+* This routine returns the received data to the caller. There is no
+* data copying during routine operation. All information is returned
+* using pointer to packet information struct passed from the caller.
+*
+* INPUT:
+* void* pEthPortHndl - Ethernet Port handler.
+* int rxQueue - Number of Rx queue.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* NULL - No new packet received, or no free resources in the RX queue.
+* MV_PKT_INFO* - Pointer to the received packet, with its status, data
+* size and IP-fragmentation flag filled in.
+*
+*******************************************************************************/
+static INLINE MV_PKT_INFO* mvEthPortRx(void* pEthPortHndl, int rxQueue)
+{
+ ETH_RX_DESC *pRxCurrDesc;
+ MV_U32 commandStatus;
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+ ETH_QUEUE_CTRL* pQueueCtrl;
+ MV_PKT_INFO* pPktInfo;
+
+ pQueueCtrl = &(pPortCtrl->rxQueue[rxQueue]);
+
+ /* Check resources */
+ if(pQueueCtrl->resource == 0)
+ {
+ mvOsPrintf("ethPortRx: no more resources\n");
+ return NULL;
+ }
+ while(MV_TRUE)
+ {
+ /* Get the Rx Desc ring 'curr' and 'used' indexes */
+ pRxCurrDesc = pQueueCtrl->pCurrentDescr;
+
+ commandStatus = pRxCurrDesc->cmdSts;
+ if (commandStatus & (ETH_BUFFER_OWNED_BY_DMA))
+ {
+ /* Nothing to receive... */
+ ETH_DESCR_INV(pPortCtrl, pRxCurrDesc);
+ return NULL;
+ }
+
+ /* Valid RX only if FIRST and LAST bits are set */
+ if( (commandStatus & (ETH_RX_LAST_DESC_MASK | ETH_RX_FIRST_DESC_MASK)) ==
+ (ETH_RX_LAST_DESC_MASK | ETH_RX_FIRST_DESC_MASK) )
+ {
+ pPktInfo = (MV_PKT_INFO*)pRxCurrDesc->returnInfo;
+ pPktInfo->pFrags->dataSize = pRxCurrDesc->byteCnt - 4;
+ pPktInfo->status = commandStatus;
+ pPktInfo->fragIP = pRxCurrDesc->bufSize & ETH_RX_IP_FRAGMENTED_FRAME_MASK;
+
+ pQueueCtrl->resource--;
+ /* Update 'curr' in data structure */
+ pQueueCtrl->pCurrentDescr = RX_NEXT_DESC_PTR(pRxCurrDesc, pQueueCtrl);
+
+#ifdef INCLUDE_SYNC_BARR
+ mvCpuIfSyncBarr(DRAM_TARGET);
+#endif
+ return pPktInfo;
+ }
+ else
+ {
+ ETH_RX_DESC* pRxUsedDesc = pQueueCtrl->pUsedDescr;
+
+#ifdef ETH_DEBUG
+ mvOsPrintf("ethDrv: Unexpected Jumbo frame: "
+ "status=0x%08x, byteCnt=%d, pData=0x%x\n",
+ commandStatus, pRxCurrDesc->byteCnt, pRxCurrDesc->bufPtr);
+#endif /* ETH_DEBUG */
+
+ /* move buffer from pCurrentDescr position to pUsedDescr position */
+ pRxUsedDesc->bufPtr = pRxCurrDesc->bufPtr;
+ pRxUsedDesc->returnInfo = pRxCurrDesc->returnInfo;
+ pRxUsedDesc->bufSize = pRxCurrDesc->bufSize & ETH_RX_BUFFER_MASK;
+
+ /* Return the descriptor to DMA ownership */
+ pRxUsedDesc->cmdSts = ETH_BUFFER_OWNED_BY_DMA |
+ ETH_RX_ENABLE_INTERRUPT_MASK;
+
+ /* Flush descriptor and CPU pipe */
+ ETH_DESCR_FLUSH_INV(pPortCtrl, pRxUsedDesc);
+
+ /* Move the used descriptor pointer to the next descriptor */
+ pQueueCtrl->pUsedDescr = RX_NEXT_DESC_PTR(pRxUsedDesc, pQueueCtrl);
+ pQueueCtrl->pCurrentDescr = RX_NEXT_DESC_PTR(pRxCurrDesc, pQueueCtrl);
+ }
+ }
+}
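A hedged sketch, not part of the HAL, of interpreting the hardware checksum bits that mvEthPortRx() copies into pkt->status. The mask names come from mvEthRegs.h later in this patch; the helper name and its 0/1 return convention are assumptions.

#include "mvEth.h"   /* path assumed */

static int example_rx_csum_ok(MV_PKT_INFO *pkt)
{
    MV_U32 status = pkt->status;

    /* Only frames the MAC recognised as IPv4 carry valid checksum results */
    if (!(status & ETH_RX_IP_FRAME_TYPE_MASK))
        return 0;

    if (!(status & ETH_RX_IP_HEADER_OK_MASK))
        return 0;                               /* bad IP header checksum */

    /* For TCP/UDP frames the L4 checksum was verified in hardware */
    if ((status & ETH_RX_L4_TYPE_MASK) != ETH_RX_L4_OTHER_TYPE)
        return (status & ETH_RX_L4_CHECKSUM_OK_MASK) != 0;

    return 0;
}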
+
+/*******************************************************************************
+* mvEthPortRxDone - Returns a Rx buffer back to the Rx ring.
+*
+* DESCRIPTION:
+* This routine returns a Rx buffer back to the Rx ring.
+*
+* INPUT:
+* void* pEthPortHndl - Ethernet Port handler.
+* int rxQueue - Number of Rx queue.
+* MV_PKT_INFO *pPktInfo - Pointer to received packet.
+*
+* RETURN:
+* MV_ERROR - Unexpected Fatal error.
+* MV_OUT_OF_RANGE - RX queue is already FULL, so this buffer can't be
+* returned to this queue.
+* MV_FULL - Buffer returned successfully and the RX queue became full.
+* More buffers should not be returned at this time.
+* MV_OK - Buffer returned successfully and there are more free
+* places in the queue.
+*
+*******************************************************************************/
+static INLINE MV_STATUS mvEthPortRxDone(void* pEthPortHndl, int rxQueue, MV_PKT_INFO *pPktInfo)
+{
+ ETH_RX_DESC* pRxUsedDesc;
+ ETH_QUEUE_CTRL* pQueueCtrl;
+ ETH_PORT_CTRL* pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+
+ pQueueCtrl = &pPortCtrl->rxQueue[rxQueue];
+
+ /* Get 'used' Rx descriptor */
+ pRxUsedDesc = pQueueCtrl->pUsedDescr;
+
+ /* Check that ring is not FULL */
+ if( (pQueueCtrl->pUsedDescr == pQueueCtrl->pCurrentDescr) &&
+ (pQueueCtrl->resource != 0) )
+ {
+ mvOsPrintf("%s %d: out of range Error resource=%d, curr=%p, used=%p\n",
+ __FUNCTION__, pPortCtrl->portNo, pQueueCtrl->resource,
+ pQueueCtrl->pCurrentDescr, pQueueCtrl->pUsedDescr);
+ return MV_OUT_OF_RANGE;
+ }
+
+ pRxUsedDesc->bufPtr = pPktInfo->pFrags->bufPhysAddr;
+ pRxUsedDesc->returnInfo = (MV_ULONG)pPktInfo;
+ pRxUsedDesc->bufSize = pPktInfo->pFrags->bufSize & ETH_RX_BUFFER_MASK;
+
+ /* Invalidate the data buffer according to pktSize */
+ if(pPktInfo->pktSize != 0)
+ {
+ ETH_PACKET_CACHE_INVALIDATE(pPktInfo->pFrags->bufVirtPtr, pPktInfo->pktSize);
+ pPktInfo->pktSize = 0;
+ }
+
+ /* Return the descriptor to DMA ownership */
+ pRxUsedDesc->cmdSts = ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT_MASK;
+
+ /* Flush descriptor and CPU pipe */
+ ETH_DESCR_FLUSH_INV(pPortCtrl, pRxUsedDesc);
+
+ pQueueCtrl->resource++;
+
+ /* Move the used descriptor pointer to the next descriptor */
+ pQueueCtrl->pUsedDescr = RX_NEXT_DESC_PTR(pRxUsedDesc, pQueueCtrl);
+
+ /* If ring became Full return MV_FULL */
+ if(pQueueCtrl->pUsedDescr == pQueueCtrl->pCurrentDescr)
+ return MV_FULL;
+
+ return MV_OK;
+}
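A hedged poll-and-refill sketch, not part of the HAL, pairing mvEthPortRx() with mvEthPortRxDone(). A real driver would pass the data up the stack and attach a fresh buffer; here each buffer is copied out by a hypothetical example_deliver() and handed straight back to the ring.

#include "mvEth.h"   /* path assumed */

extern void example_deliver(MV_PKT_INFO *pkt);  /* hypothetical: copies data out */

static void example_rx_poll(void *portHandle, int rxQueue)
{
    MV_PKT_INFO *pkt;
    MV_STATUS status;

    while ((pkt = mvEthPortRx(portHandle, rxQueue)) != NULL) {
        if (!(pkt->status & ETH_ERROR_SUMMARY_MASK))
            example_deliver(pkt);

        /* Re-arm the same buffer; a non-zero pktSize makes RxDone invalidate
         * the data cache for the buffer before DMA writes to it again. */
        pkt->pktSize = pkt->pFrags->bufSize;
        status = mvEthPortRxDone(portHandle, rxQueue, pkt);
        if (status == MV_FULL) {
            /* Every buffer is back in the ring; nothing left to refill */
        }
    }
}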
+
+
+#endif /* __mvEthGbe_h__ */
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthRegs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthRegs.h
new file mode 100644
index 000000000..7b9f0520f
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthRegs.h
@@ -0,0 +1,700 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvEthRegsh
+#define __INCmvEthRegsh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+
+/****************************************/
+/* Ethernet Unit Registers */
+/****************************************/
+#define ETH_REG_BASE MV_ETH_REG_BASE
+
+#define ETH_PHY_ADDR_REG(port) (ETH_REG_BASE(port) + 0x000)
+#define ETH_SMI_REG(port) (ETH_REG_BASE(port) + 0x004)
+#define ETH_UNIT_DEF_ADDR_REG(port) (ETH_REG_BASE(port) + 0x008)
+#define ETH_UNIT_DEF_ID_REG(port) (ETH_REG_BASE(port) + 0x00c)
+#define ETH_UNIT_RESERVED(port) (ETH_REG_BASE(port) + 0x014)
+#define ETH_UNIT_INTR_CAUSE_REG(port) (ETH_REG_BASE(port) + 0x080)
+#define ETH_UNIT_INTR_MASK_REG(port) (ETH_REG_BASE(port) + 0x084)
+
+
+#define ETH_UNIT_ERROR_ADDR_REG(port) (ETH_REG_BASE(port) + 0x094)
+#define ETH_UNIT_INT_ADDR_ERROR_REG(port) (ETH_REG_BASE(port) + 0x098)
+#define ETH_UNIT_CONTROL_REG(port) (ETH_REG_BASE(port) + 0x0B0)
+
+#define ETH_PORT_CONFIG_REG(port) (ETH_REG_BASE(port) + 0x400)
+#define ETH_PORT_CONFIG_EXTEND_REG(port) (ETH_REG_BASE(port) + 0x404)
+#define ETH_MII_SERIAL_PARAM_REG(port) (ETH_REG_BASE(port) + 0x408)
+#define ETH_GMII_SERIAL_PARAM_REG(port) (ETH_REG_BASE(port) + 0x40c)
+#define ETH_VLAN_ETHER_TYPE_REG(port) (ETH_REG_BASE(port) + 0x410)
+#define ETH_MAC_ADDR_LOW_REG(port) (ETH_REG_BASE(port) + 0x414)
+#define ETH_MAC_ADDR_HIGH_REG(port) (ETH_REG_BASE(port) + 0x418)
+#define ETH_SDMA_CONFIG_REG(port) (ETH_REG_BASE(port) + 0x41c)
+#define ETH_DIFF_SERV_PRIO_REG(port, code) (ETH_REG_BASE(port) + 0x420 + ((code)<<2))
+#define ETH_PORT_SERIAL_CTRL_REG(port) (ETH_REG_BASE(port) + 0x43c)
+#define ETH_VLAN_TAG_TO_PRIO_REG(port) (ETH_REG_BASE(port) + 0x440)
+#define ETH_PORT_STATUS_REG(port) (ETH_REG_BASE(port) + 0x444)
+
+#define ETH_RX_QUEUE_COMMAND_REG(port) (ETH_REG_BASE(port) + 0x680)
+#define ETH_TX_QUEUE_COMMAND_REG(port) (ETH_REG_BASE(port) + 0x448)
+
+#define ETH_PORT_SERIAL_CTRL_1_REG(port) (ETH_REG_BASE(port) + 0x44c)
+#define ETH_PORT_STATUS_1_REG(port) (ETH_REG_BASE(port) + 0x450)
+#define ETH_PORT_MARVELL_HEADER_REG(port) (ETH_REG_BASE(port) + 0x454)
+#define ETH_PORT_FIFO_PARAMS_REG(port) (ETH_REG_BASE(port) + 0x458)
+#define ETH_MAX_TOKEN_BUCKET_SIZE_REG(port) (ETH_REG_BASE(port) + 0x45c)
+#define ETH_INTR_CAUSE_REG(port) (ETH_REG_BASE(port) + 0x460)
+#define ETH_INTR_CAUSE_EXT_REG(port) (ETH_REG_BASE(port) + 0x464)
+#define ETH_INTR_MASK_REG(port) (ETH_REG_BASE(port) + 0x468)
+#define ETH_INTR_MASK_EXT_REG(port) (ETH_REG_BASE(port) + 0x46c)
+#define ETH_TX_FIFO_URGENT_THRESH_REG(port) (ETH_REG_BASE(port) + 0x474)
+#define ETH_RX_MINIMAL_FRAME_SIZE_REG(port) (ETH_REG_BASE(port) + 0x47c)
+#define ETH_RX_DISCARD_PKTS_CNTR_REG(port) (ETH_REG_BASE(port) + 0x484)
+#define ETH_RX_OVERRUN_PKTS_CNTR_REG(port) (ETH_REG_BASE(port) + 0x488)
+#define ETH_INTERNAL_ADDR_ERROR_REG(port) (ETH_REG_BASE(port) + 0x494)
+#define ETH_TX_FIXED_PRIO_CFG_REG(port) (ETH_REG_BASE(port) + 0x4dc)
+#define ETH_TX_TOKEN_RATE_CFG_REG(port) (ETH_REG_BASE(port) + 0x4e0)
+#define ETH_TX_QUEUE_COMMAND1_REG(port) (ETH_REG_BASE(port) + 0x4e4)
+#define ETH_MAX_TRANSMIT_UNIT_REG(port) (ETH_REG_BASE(port) + 0x4e8)
+#define ETH_TX_TOKEN_BUCKET_SIZE_REG(port) (ETH_REG_BASE(port) + 0x4ec)
+#define ETH_TX_TOKEN_BUCKET_COUNT_REG(port) (ETH_REG_BASE(port) + 0x780)
+#define ETH_RX_DESCR_STAT_CMD_REG(port, q) (ETH_REG_BASE(port) + 0x600 + ((q)<<4))
+#define ETH_RX_BYTE_COUNT_REG(port, q) (ETH_REG_BASE(port) + 0x604 + ((q)<<4))
+#define ETH_RX_BUF_PTR_REG(port, q) (ETH_REG_BASE(port) + 0x608 + ((q)<<4))
+#define ETH_RX_CUR_DESC_PTR_REG(port, q) (ETH_REG_BASE(port) + 0x60c + ((q)<<4))
+#define ETH_TX_CUR_DESC_PTR_REG(port, q) (ETH_REG_BASE(port) + 0x6c0 + ((q)<<2))
+
+#define ETH_TXQ_TOKEN_COUNT_REG(port, q) (ETH_REG_BASE(port) + 0x700 + ((q)<<4))
+#define ETH_TXQ_TOKEN_CFG_REG(port, q) (ETH_REG_BASE(port) + 0x704 + ((q)<<4))
+#define ETH_TXQ_ARBITER_CFG_REG(port, q) (ETH_REG_BASE(port) + 0x708 + ((q)<<4))
+
+#if (MV_ETH_VERSION >= 4)
+#define ETH_TXQ_CMD_1_REG(port) (ETH_REG_BASE(port) + 0x4E4)
+#define ETH_EJP_TX_HI_IPG_REG(port) (ETH_REG_BASE(port) + 0x7A8)
+#define ETH_EJP_TX_LO_IPG_REG(port) (ETH_REG_BASE(port) + 0x7B8)
+#define ETH_EJP_HI_TKN_LO_PKT_REG(port) (ETH_REG_BASE(port) + 0x7C0)
+#define ETH_EJP_HI_TKN_ASYNC_PKT_REG(port) (ETH_REG_BASE(port) + 0x7C4)
+#define ETH_EJP_LO_TKN_ASYNC_PKT_REG(port) (ETH_REG_BASE(port) + 0x7C8)
+#define ETH_EJP_TX_SPEED_REG(port) (ETH_REG_BASE(port) + 0x7D0)
+#endif /* MV_ETH_VERSION >= 4 */
+
+#define ETH_MIB_COUNTERS_BASE(port) (ETH_REG_BASE(port) + 0x1000)
+#define ETH_DA_FILTER_SPEC_MCAST_BASE(port) (ETH_REG_BASE(port) + 0x1400)
+#define ETH_DA_FILTER_OTH_MCAST_BASE(port) (ETH_REG_BASE(port) + 0x1500)
+#define ETH_DA_FILTER_UCAST_BASE(port) (ETH_REG_BASE(port) + 0x1600)
+
+/* Phy address register definitions */
+#define ETH_PHY_ADDR_OFFS 0
+#define ETH_PHY_ADDR_MASK (0x1f <<ETH_PHY_ADDR_OFFS)
+
+/* MIB Counters register definitions */
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW 0x0
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH 0x4
+#define ETH_MIB_BAD_OCTETS_RECEIVED 0x8
+#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR 0xc
+#define ETH_MIB_GOOD_FRAMES_RECEIVED 0x10
+#define ETH_MIB_BAD_FRAMES_RECEIVED 0x14
+#define ETH_MIB_BROADCAST_FRAMES_RECEIVED 0x18
+#define ETH_MIB_MULTICAST_FRAMES_RECEIVED 0x1c
+#define ETH_MIB_FRAMES_64_OCTETS 0x20
+#define ETH_MIB_FRAMES_65_TO_127_OCTETS 0x24
+#define ETH_MIB_FRAMES_128_TO_255_OCTETS 0x28
+#define ETH_MIB_FRAMES_256_TO_511_OCTETS 0x2c
+#define ETH_MIB_FRAMES_512_TO_1023_OCTETS 0x30
+#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
+#define ETH_MIB_GOOD_OCTETS_SENT_LOW 0x38
+#define ETH_MIB_GOOD_OCTETS_SENT_HIGH 0x3c
+#define ETH_MIB_GOOD_FRAMES_SENT 0x40
+#define ETH_MIB_EXCESSIVE_COLLISION 0x44
+#define ETH_MIB_MULTICAST_FRAMES_SENT 0x48
+#define ETH_MIB_BROADCAST_FRAMES_SENT 0x4c
+#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED 0x50
+#define ETH_MIB_FC_SENT 0x54
+#define ETH_MIB_GOOD_FC_RECEIVED 0x58
+#define ETH_MIB_BAD_FC_RECEIVED 0x5c
+#define ETH_MIB_UNDERSIZE_RECEIVED 0x60
+#define ETH_MIB_FRAGMENTS_RECEIVED 0x64
+#define ETH_MIB_OVERSIZE_RECEIVED 0x68
+#define ETH_MIB_JABBER_RECEIVED 0x6c
+#define ETH_MIB_MAC_RECEIVE_ERROR 0x70
+#define ETH_MIB_BAD_CRC_EVENT 0x74
+#define ETH_MIB_COLLISION 0x78
+#define ETH_MIB_LATE_COLLISION 0x7c
+
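Reviewer note: a hedged sketch, not part of the header, of reading the 64-bit good-octets counter through the offsets above. It assumes mvEthMibCounterRead() (declared in mvEth.h later in this patch) returns the low 32 bits and writes the high 32 bits through its last argument when given the _LOW offset; the helper name and include path are made up.

#include "mvEth.h"   /* path assumed */

static void example_rx_good_octets(void *portHandle, MV_U32 *pLow, MV_U32 *pHigh)
{
    /* Assumption: the _LOW offset selects the 64-bit counter pair */
    *pLow = mvEthMibCounterRead(portHandle, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW, pHigh);
}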
+
+/****************************************/
+/* Ethernet Unit Register BITs */
+/****************************************/
+
+#define ETH_RXQ_ENABLE_OFFSET 0
+#define ETH_RXQ_ENABLE_MASK (0x000000FF << ETH_RXQ_ENABLE_OFFSET)
+
+#define ETH_RXQ_DISABLE_OFFSET 8
+#define ETH_RXQ_DISABLE_MASK (0x000000FF << ETH_RXQ_DISABLE_OFFSET)
+
+/***** BITs of Transmit Queue Command (TQC) register *****/
+#define ETH_TXQ_ENABLE_OFFSET 0
+#define ETH_TXQ_ENABLE_MASK (0x000000FF << ETH_TXQ_ENABLE_OFFSET)
+
+#define ETH_TXQ_DISABLE_OFFSET 8
+#define ETH_TXQ_DISABLE_MASK (0x000000FF << ETH_TXQ_DISABLE_OFFSET)
+
+#if (MV_ETH_VERSION >= 4)
+#define ETH_TX_EJP_RESET_BIT 0
+#define ETH_TX_EJP_RESET_MASK (1 << ETH_TX_EJP_RESET_BIT)
+
+#define ETH_TX_EJP_ENABLE_BIT 2
+#define ETH_TX_EJP_ENABLE_MASK (1 << ETH_TX_EJP_ENABLE_BIT)
+
+#define ETH_TX_LEGACY_WRR_BIT 3
+#define ETH_TX_LEGACY_WRR_MASK (1 << ETH_TX_LEGACY_WRR_BIT)
+#endif /* (MV_ETH_VERSION >= 4) */
+
+/***** BITs of Ethernet Port Status reg (PSR) *****/
+#define ETH_LINK_UP_BIT 1
+#define ETH_LINK_UP_MASK (1<<ETH_LINK_UP_BIT)
+
+#define ETH_FULL_DUPLEX_BIT 2
+#define ETH_FULL_DUPLEX_MASK (1<<ETH_FULL_DUPLEX_BIT)
+
+#define ETH_ENABLE_RCV_FLOW_CTRL_BIT 3
+#define ETH_ENABLE_RCV_FLOW_CTRL_MASK (1<<ETH_ENABLE_RCV_FLOW_CTRL_BIT)
+
+#define ETH_GMII_SPEED_1000_BIT 4
+#define ETH_GMII_SPEED_1000_MASK (1<<ETH_GMII_SPEED_1000_BIT)
+
+#define ETH_MII_SPEED_100_BIT 5
+#define ETH_MII_SPEED_100_MASK (1<<ETH_MII_SPEED_100_BIT)
+
+#define ETH_TX_IN_PROGRESS_BIT 7
+#define ETH_TX_IN_PROGRESS_MASK (1<<ETH_TX_IN_PROGRESS_BIT)
+
+#define ETH_TX_FIFO_EMPTY_BIT 10
+#define ETH_TX_FIFO_EMPTY_MASK (1<<ETH_TX_FIFO_EMPTY_BIT)
+
+/***** BITs of Ethernet Port Status 1 reg (PS1R) *****/
+#define ETH_AUTO_NEG_DONE_BIT 4
+#define ETH_AUTO_NEG_DONE_MASK (1<<ETH_AUTO_NEG_DONE_BIT)
+
+#define ETH_SERDES_PLL_LOCKED_BIT 6
+#define ETH_SERDES_PLL_LOCKED_MASK (1<<ETH_SERDES_PLL_LOCKED_BIT)
+
+/***** BITs of Port Configuration reg (PxCR) *****/
+#define ETH_UNICAST_PROMISCUOUS_MODE_BIT 0
+#define ETH_UNICAST_PROMISCUOUS_MODE_MASK (1<<ETH_UNICAST_PROMISCUOUS_MODE_BIT)
+
+#define ETH_DEF_RX_QUEUE_OFFSET 1
+#define ETH_DEF_RX_QUEUE_ALL_MASK (0x7<<ETH_DEF_RX_QUEUE_OFFSET)
+#define ETH_DEF_RX_QUEUE_MASK(queue) ((queue)<<ETH_DEF_RX_QUEUE_OFFSET)
+
+#define ETH_DEF_RX_ARP_QUEUE_OFFSET 4
+#define ETH_DEF_RX_ARP_QUEUE_ALL_MASK (0x7<<ETH_DEF_RX_ARP_QUEUE_OFFSET)
+#define ETH_DEF_RX_ARP_QUEUE_MASK(queue) ((queue)<<ETH_DEF_RX_ARP_QUEUE_OFFSET)
+
+#define ETH_REJECT_NOT_IP_ARP_BCAST_BIT 7
+#define ETH_REJECT_NOT_IP_ARP_BCAST_MASK (1<<ETH_REJECT_NOT_IP_ARP_BCAST_BIT)
+
+#define ETH_REJECT_IP_BCAST_BIT 8
+#define ETH_REJECT_IP_BCAST_MASK (1<<ETH_REJECT_IP_BCAST_BIT)
+
+#define ETH_REJECT_ARP_BCAST_BIT 9
+#define ETH_REJECT_ARP_BCAST_MASK (1<<ETH_REJECT_ARP_BCAST_BIT)
+
+#define ETH_TX_NO_SET_ERROR_SUMMARY_BIT 12
+#define ETH_TX_NO_SET_ERROR_SUMMARY_MASK (1<<ETH_TX_NO_SET_ERROR_SUMMARY_BIT)
+
+#define ETH_CAPTURE_TCP_FRAMES_ENABLE_BIT 14
+#define ETH_CAPTURE_TCP_FRAMES_ENABLE_MASK (1<<ETH_CAPTURE_TCP_FRAMES_ENABLE_BIT)
+
+#define ETH_CAPTURE_UDP_FRAMES_ENABLE_BIT 15
+#define ETH_CAPTURE_UDP_FRAMES_ENABLE_MASK (1<<ETH_CAPTURE_UDP_FRAMES_ENABLE_BIT)
+
+#define ETH_DEF_RX_TCP_QUEUE_OFFSET 16
+#define ETH_DEF_RX_TCP_QUEUE_ALL_MASK (0x7<<ETH_DEF_RX_TCP_QUEUE_OFFSET)
+#define ETH_DEF_RX_TCP_QUEUE_MASK(queue) ((queue)<<ETH_DEF_RX_TCP_QUEUE_OFFSET)
+
+#define ETH_DEF_RX_UDP_QUEUE_OFFSET 19
+#define ETH_DEF_RX_UDP_QUEUE_ALL_MASK (0x7<<ETH_DEF_RX_UDP_QUEUE_OFFSET)
+#define ETH_DEF_RX_UDP_QUEUE_MASK(queue) ((queue)<<ETH_DEF_RX_UDP_QUEUE_OFFSET)
+
+#define ETH_DEF_RX_BPDU_QUEUE_OFFSET 22
+#define ETH_DEF_RX_BPDU_QUEUE_ALL_MASK (0x7<<ETH_DEF_RX_BPDU_QUEUE_OFFSET)
+#define ETH_DEF_RX_BPDU_QUEUE_MASK(queue) ((queue)<<ETH_DEF_RX_BPDU_QUEUE_OFFSET)
+
+#define ETH_RX_CHECKSUM_MODE_OFFSET 25
+#define ETH_RX_CHECKSUM_NO_PSEUDO_HDR (0<<ETH_RX_CHECKSUM_MODE_OFFSET)
+#define ETH_RX_CHECKSUM_WITH_PSEUDO_HDR (1<<ETH_RX_CHECKSUM_MODE_OFFSET)
+
+/***** BITs of Port Configuration Extend reg (PxCXR) *****/
+#define ETH_CAPTURE_SPAN_BPDU_ENABLE_BIT 1
+#define ETH_CAPTURE_SPAN_BPDU_ENABLE_MASK (1<<ETH_CAPTURE_SPAN_BPDU_ENABLE_BIT)
+
+#define ETH_TX_DISABLE_GEN_CRC_BIT 3
+#define ETH_TX_DISABLE_GEN_CRC_MASK (1<<ETH_TX_DISABLE_GEN_CRC_BIT)
+
+/***** BITs of Tx/Rx queue command reg (RQCR/TQCR) *****/
+#define ETH_QUEUE_ENABLE_OFFSET 0
+#define ETH_QUEUE_ENABLE_ALL_MASK (0xFF<<ETH_QUEUE_ENABLE_OFFSET)
+#define ETH_QUEUE_ENABLE_MASK(queue) (1<<((queue)+ETH_QUEUE_ENABLE_OFFSET))
+
+#define ETH_QUEUE_DISABLE_OFFSET 8
+#define ETH_QUEUE_DISABLE_ALL_MASK (0xFF<<ETH_QUEUE_DISABLE_OFFSET)
+#define ETH_QUEUE_DISABLE_MASK(queue) (1<<((queue)+ETH_QUEUE_DISABLE_OFFSET))
+
+
+/***** BITs of Port Sdma Configuration reg (SDCR) *****/
+#define ETH_RX_FRAME_INTERRUPT_BIT 0
+#define ETH_RX_FRAME_INTERRUPT_MASK (1<<ETH_RX_FRAME_INTERRUPT_BIT)
+
+#define ETH_BURST_SIZE_1_64BIT_VALUE 0
+#define ETH_BURST_SIZE_2_64BIT_VALUE 1
+#define ETH_BURST_SIZE_4_64BIT_VALUE 2
+#define ETH_BURST_SIZE_8_64BIT_VALUE 3
+#define ETH_BURST_SIZE_16_64BIT_VALUE 4
+
+#define ETH_RX_BURST_SIZE_OFFSET 1
+#define ETH_RX_BURST_SIZE_ALL_MASK (0x7<<ETH_RX_BURST_SIZE_OFFSET)
+#define ETH_RX_BURST_SIZE_MASK(burst) ((burst)<<ETH_RX_BURST_SIZE_OFFSET)
+
+#define ETH_RX_NO_DATA_SWAP_BIT 4
+#define ETH_RX_NO_DATA_SWAP_MASK (1<<ETH_RX_NO_DATA_SWAP_BIT)
+#define ETH_RX_DATA_SWAP_MASK (0<<ETH_RX_NO_DATA_SWAP_BIT)
+
+#define ETH_TX_NO_DATA_SWAP_BIT 5
+#define ETH_TX_NO_DATA_SWAP_MASK (1<<ETH_TX_NO_DATA_SWAP_BIT)
+#define ETH_TX_DATA_SWAP_MASK (0<<ETH_TX_NO_DATA_SWAP_BIT)
+
+#define ETH_DESC_SWAP_BIT 6
+#define ETH_DESC_SWAP_MASK (1<<ETH_DESC_SWAP_BIT)
+#define ETH_NO_DESC_SWAP_MASK (0<<ETH_DESC_SWAP_BIT)
+
+#define ETH_RX_INTR_COAL_OFFSET 7
+#define ETH_RX_INTR_COAL_ALL_MASK (0x3fff<<ETH_RX_INTR_COAL_OFFSET)
+#define ETH_RX_INTR_COAL_MASK(value) (((value)<<ETH_RX_INTR_COAL_OFFSET) \
+ & ETH_RX_INTR_COAL_ALL_MASK)
+
+#define ETH_TX_BURST_SIZE_OFFSET 22
+#define ETH_TX_BURST_SIZE_ALL_MASK (0x7<<ETH_TX_BURST_SIZE_OFFSET)
+#define ETH_TX_BURST_SIZE_MASK(burst) ((burst)<<ETH_TX_BURST_SIZE_OFFSET)
+
+#define ETH_RX_INTR_COAL_MSB_BIT 25
+#define ETH_RX_INTR_COAL_MSB_MASK (1<<ETH_RX_INTR_COAL_MSB_BIT)
+
+/* BITs Port #x Tx FIFO Urgent Threshold (PxTFUT) */
+#define ETH_TX_INTR_COAL_OFFSET 4
+#define ETH_TX_INTR_COAL_ALL_MASK (0x3fff << ETH_TX_INTR_COAL_OFFSET)
+#define ETH_TX_INTR_COAL_MASK(value) (((value) << ETH_TX_INTR_COAL_OFFSET) \
+ & ETH_TX_INTR_COAL_ALL_MASK)
+
+/* BITs of Port Serial Control reg (PSCR) */
+#define ETH_PORT_ENABLE_BIT 0
+#define ETH_PORT_ENABLE_MASK (1<<ETH_PORT_ENABLE_BIT)
+
+#define ETH_FORCE_LINK_PASS_BIT 1
+#define ETH_FORCE_LINK_PASS_MASK (1<<ETH_FORCE_LINK_PASS_BIT)
+
+#define ETH_DISABLE_DUPLEX_AUTO_NEG_BIT 2
+#define ETH_DISABLE_DUPLEX_AUTO_NEG_MASK (1<<ETH_DISABLE_DUPLEX_AUTO_NEG_BIT)
+
+#define ETH_DISABLE_FC_AUTO_NEG_BIT 3
+#define ETH_DISABLE_FC_AUTO_NEG_MASK (1<<ETH_DISABLE_FC_AUTO_NEG_BIT)
+
+#define ETH_ADVERTISE_SYM_FC_BIT 4
+#define ETH_ADVERTISE_SYM_FC_MASK (1<<ETH_ADVERTISE_SYM_FC_BIT)
+
+#define ETH_TX_FC_MODE_OFFSET 5
+#define ETH_TX_FC_MODE_MASK (3<<ETH_TX_FC_MODE_OFFSET)
+#define ETH_TX_FC_NO_PAUSE (0<<ETH_TX_FC_MODE_OFFSET)
+#define ETH_TX_FC_SEND_PAUSE (1<<ETH_TX_FC_MODE_OFFSET)
+
+#define ETH_TX_BP_MODE_OFFSET 7
+#define ETH_TX_BP_MODE_MASK (3<<ETH_TX_BP_MODE_OFFSET)
+#define ETH_TX_BP_NO_JAM (0<<ETH_TX_BP_MODE_OFFSET)
+#define ETH_TX_BP_SEND_JAM (1<<ETH_TX_BP_MODE_OFFSET)
+
+#define ETH_DO_NOT_FORCE_LINK_FAIL_BIT 10
+#define ETH_DO_NOT_FORCE_LINK_FAIL_MASK (1<<ETH_DO_NOT_FORCE_LINK_FAIL_BIT)
+
+#define ETH_RETRANSMIT_FOREVER_BIT 11
+#define ETH_RETRANSMIT_FOREVER_MASK (1<<ETH_RETRANSMIT_FOREVER_BIT)
+
+#define ETH_DISABLE_SPEED_AUTO_NEG_BIT 13
+#define ETH_DISABLE_SPEED_AUTO_NEG_MASK (1<<ETH_DISABLE_SPEED_AUTO_NEG_BIT)
+
+#define ETH_DTE_ADVERT_BIT 14
+#define ETH_DTE_ADVERT_MASK (1<<ETH_DTE_ADVERT_BIT)
+
+#define ETH_MII_PHY_MODE_BIT 15
+#define ETH_MII_PHY_MODE_MAC (0<<ETH_MII_PHY_MODE_BIT)
+#define ETH_MII_PHY_MODE_PHY (1<<ETH_MII_PHY_MODE_BIT)
+
+#define ETH_MII_SOURCE_SYNCH_BIT 16
+#define ETH_MII_STANDARD_SYNCH (0<<ETH_MII_SOURCE_SYNCH_BIT)
+#define ETH_MII_400Mbps_SYNCH (1<<ETH_MII_SOURCE_SYNCH_BIT)
+
+#define ETH_MAX_RX_PACKET_SIZE_OFFSET 17
+#define ETH_MAX_RX_PACKET_SIZE_MASK (7<<ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_1518BYTE (0<<ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_1522BYTE (1<<ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_1552BYTE (2<<ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_9022BYTE (3<<ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_9192BYTE (4<<ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_9700BYTE (5<<ETH_MAX_RX_PACKET_SIZE_OFFSET)
+
+#define ETH_SET_FULL_DUPLEX_BIT 21
+#define ETH_SET_FULL_DUPLEX_MASK (1<<ETH_SET_FULL_DUPLEX_BIT)
+
+#define ETH_SET_FLOW_CTRL_BIT 22
+#define ETH_SET_FLOW_CTRL_MASK (1<<ETH_SET_FLOW_CTRL_BIT)
+
+#define ETH_SET_GMII_SPEED_1000_BIT 23
+#define ETH_SET_GMII_SPEED_1000_MASK (1<<ETH_SET_GMII_SPEED_1000_BIT)
+
+#define ETH_SET_MII_SPEED_100_BIT 24
+#define ETH_SET_MII_SPEED_100_MASK (1<<ETH_SET_MII_SPEED_100_BIT)
+
+/* BITs of Port Serial Control 1 reg (PSC1R) */
+#define ETH_PSC_ENABLE_BIT 2
+#define ETH_PSC_ENABLE_MASK (1<<ETH_PSC_ENABLE_BIT)
+
+#define ETH_RGMII_ENABLE_BIT 3
+#define ETH_RGMII_ENABLE_MASK (1<<ETH_RGMII_ENABLE_BIT)
+
+#define ETH_PORT_RESET_BIT 4
+#define ETH_PORT_RESET_MASK (1<<ETH_PORT_RESET_BIT)
+
+#define ETH_INBAND_AUTO_NEG_ENABLE_BIT 6
+#define ETH_INBAND_AUTO_NEG_ENABLE_MASK (1<<ETH_INBAND_AUTO_NEG_ENABLE_BIT)
+
+#define ETH_INBAND_AUTO_NEG_BYPASS_BIT 7
+#define ETH_INBAND_AUTO_NEG_BYPASS_MASK (1<<ETH_INBAND_AUTO_NEG_BYPASS_BIT)
+
+#define ETH_INBAND_AUTO_NEG_START_BIT 8
+#define ETH_INBAND_AUTO_NEG_START_MASK (1<<ETH_INBAND_AUTO_NEG_START_BIT)
+
+#define ETH_PORT_TYPE_BIT 11
+#define ETH_PORT_TYPE_1000BasedX_MASK (1<<ETH_PORT_TYPE_BIT)
+
+#define ETH_SGMII_MODE_BIT 12
+#define ETH_1000BaseX_MODE_MASK (0<<ETH_SGMII_MODE_BIT)
+#define ETH_SGMII_MODE_MASK (1<<ETH_SGMII_MODE_BIT)
+
+#define ETH_MGMII_MODE_BIT 13
+
+#define ETH_EN_MII_ODD_PRE_BIT 22
+#define ETH_EN_MII_ODD_PRE_MASK (1<<ETH_EN_MII_ODD_PRE_BIT)
+
+/* BITs of SDMA Descriptor Command/Status field */
+#if defined(MV_CPU_BE)
+typedef struct _ethRxDesc
+{
+ MV_U16 byteCnt ; /* Descriptor buffer byte count */
+ MV_U16 bufSize ; /* Buffer size */
+ MV_U32 cmdSts ; /* Descriptor command status */
+ MV_U32 nextDescPtr; /* Next descriptor pointer */
+ MV_U32 bufPtr ; /* Descriptor buffer pointer */
+ MV_ULONG returnInfo ; /* User resource return information */
+} ETH_RX_DESC;
+
+typedef struct _ethTxDesc
+{
+ MV_U16 byteCnt ; /* Descriptor buffer byte count */
+ MV_U16 L4iChk ; /* CPU provided TCP Checksum */
+ MV_U32 cmdSts ; /* Descriptor command status */
+ MV_U32 nextDescPtr; /* Next descriptor pointer */
+ MV_U32 bufPtr ; /* Descriptor buffer pointer */
+ MV_ULONG returnInfo ; /* User resource return information */
+ MV_U8* alignBufPtr; /* Pointer to 8 byte aligned buffer */
+} ETH_TX_DESC;
+
+#elif defined(MV_CPU_LE)
+
+typedef struct _ethRxDesc
+{
+ MV_U32 cmdSts ; /* Descriptor command status */
+ MV_U16 bufSize ; /* Buffer size */
+ MV_U16 byteCnt ; /* Descriptor buffer byte count */
+ MV_U32 bufPtr ; /* Descriptor buffer pointer */
+ MV_U32 nextDescPtr; /* Next descriptor pointer */
+ MV_ULONG returnInfo ; /* User resource return information */
+} ETH_RX_DESC;
+
+typedef struct _ethTxDesc
+{
+ MV_U32 cmdSts ; /* Descriptor command status */
+ MV_U16 L4iChk ; /* CPU provided TCP Checksum */
+ MV_U16 byteCnt ; /* Descriptor buffer byte count */
+ MV_U32 bufPtr ; /* Descriptor buffer pointer */
+ MV_U32 nextDescPtr; /* Next descriptor pointer */
+ MV_ULONG returnInfo ; /* User resource return information */
+ MV_U8* alignBufPtr; /* Pointer to 32 byte aligned buffer */
+} ETH_TX_DESC;
+
+#else
+#error "MV_CPU_BE or MV_CPU_LE must be defined"
+#endif /* MV_CPU_BE || MV_CPU_LE */
+
+/* Buffer offset from buffer pointer */
+#define ETH_RX_BUF_OFFSET 0x2
+
+
+/* Tx & Rx descriptor bits */
+#define ETH_ERROR_SUMMARY_BIT 0
+#define ETH_ERROR_SUMMARY_MASK (1<<ETH_ERROR_SUMMARY_BIT)
+
+#define ETH_BUFFER_OWNER_BIT 31
+#define ETH_BUFFER_OWNED_BY_DMA (1<<ETH_BUFFER_OWNER_BIT)
+#define ETH_BUFFER_OWNED_BY_HOST (0<<ETH_BUFFER_OWNER_BIT)
+
+/* Tx descriptor bits */
+#define ETH_TX_ERROR_CODE_OFFSET 1
+#define ETH_TX_ERROR_CODE_MASK (3<<ETH_TX_ERROR_CODE_OFFSET)
+#define ETH_TX_LATE_COLLISION_ERROR (0<<ETH_TX_ERROR_CODE_OFFSET)
+#define ETH_TX_UNDERRUN_ERROR (1<<ETH_TX_ERROR_CODE_OFFSET)
+#define ETH_TX_EXCESSIVE_COLLISION_ERROR (2<<ETH_TX_ERROR_CODE_OFFSET)
+
+#define ETH_TX_LLC_SNAP_FORMAT_BIT 9
+#define ETH_TX_LLC_SNAP_FORMAT_MASK (1<<ETH_TX_LLC_SNAP_FORMAT_BIT)
+
+#define ETH_TX_IP_FRAG_BIT 10
+#define ETH_TX_IP_FRAG_MASK (1<<ETH_TX_IP_FRAG_BIT)
+#define ETH_TX_IP_FRAG (0<<ETH_TX_IP_FRAG_BIT)
+#define ETH_TX_IP_NO_FRAG (1<<ETH_TX_IP_FRAG_BIT)
+
+#define ETH_TX_IP_HEADER_LEN_OFFSET 11
+#define ETH_TX_IP_HEADER_LEN_ALL_MASK (0xF<<ETH_TX_IP_HEADER_LEN_OFFSET)
+#define ETH_TX_IP_HEADER_LEN_MASK(len) ((len)<<ETH_TX_IP_HEADER_LEN_OFFSET)
+
+#define ETH_TX_VLAN_TAGGED_FRAME_BIT 15
+#define ETH_TX_VLAN_TAGGED_FRAME_MASK (1<<ETH_TX_VLAN_TAGGED_FRAME_BIT)
+
+#define ETH_TX_L4_TYPE_BIT 16
+#define ETH_TX_L4_TCP_TYPE (0<<ETH_TX_L4_TYPE_BIT)
+#define ETH_TX_L4_UDP_TYPE (1<<ETH_TX_L4_TYPE_BIT)
+
+#define ETH_TX_GENERATE_L4_CHKSUM_BIT 17
+#define ETH_TX_GENERATE_L4_CHKSUM_MASK (1<<ETH_TX_GENERATE_L4_CHKSUM_BIT)
+
+#define ETH_TX_GENERATE_IP_CHKSUM_BIT 18
+#define ETH_TX_GENERATE_IP_CHKSUM_MASK (1<<ETH_TX_GENERATE_IP_CHKSUM_BIT)
+
+#define ETH_TX_ZERO_PADDING_BIT 19
+#define ETH_TX_ZERO_PADDING_MASK (1<<ETH_TX_ZERO_PADDING_BIT)
+
+#define ETH_TX_LAST_DESC_BIT 20
+#define ETH_TX_LAST_DESC_MASK (1<<ETH_TX_LAST_DESC_BIT)
+
+#define ETH_TX_FIRST_DESC_BIT 21
+#define ETH_TX_FIRST_DESC_MASK (1<<ETH_TX_FIRST_DESC_BIT)
+
+#define ETH_TX_GENERATE_CRC_BIT 22
+#define ETH_TX_GENERATE_CRC_MASK (1<<ETH_TX_GENERATE_CRC_BIT)
+
+#define ETH_TX_ENABLE_INTERRUPT_BIT 23
+#define ETH_TX_ENABLE_INTERRUPT_MASK (1<<ETH_TX_ENABLE_INTERRUPT_BIT)
+
+#define ETH_TX_AUTO_MODE_BIT 30
+#define ETH_TX_AUTO_MODE_MASK (1<<ETH_TX_AUTO_MODE_BIT)
+
+
+/* Rx descriptor bits */
+#define ETH_RX_ERROR_CODE_OFFSET 1
+#define ETH_RX_ERROR_CODE_MASK (3<<ETH_RX_ERROR_CODE_OFFSET)
+#define ETH_RX_CRC_ERROR (0<<ETH_RX_ERROR_CODE_OFFSET)
+#define ETH_RX_OVERRUN_ERROR (1<<ETH_RX_ERROR_CODE_OFFSET)
+#define ETH_RX_MAX_FRAME_LEN_ERROR (2<<ETH_RX_ERROR_CODE_OFFSET)
+#define ETH_RX_RESOURCE_ERROR (3<<ETH_RX_ERROR_CODE_OFFSET)
+
+#define ETH_RX_L4_CHECKSUM_OFFSET 3
+#define ETH_RX_L4_CHECKSUM_MASK (0xffff<<ETH_RX_L4_CHECKSUM_OFFSET)
+
+#define ETH_RX_VLAN_TAGGED_FRAME_BIT 19
+#define ETH_RX_VLAN_TAGGED_FRAME_MASK (1<<ETH_RX_VLAN_TAGGED_FRAME_BIT)
+
+#define ETH_RX_BPDU_FRAME_BIT 20
+#define ETH_RX_BPDU_FRAME_MASK (1<<ETH_RX_BPDU_FRAME_BIT)
+
+#define ETH_RX_L4_TYPE_OFFSET 21
+#define ETH_RX_L4_TYPE_MASK (3<<ETH_RX_L4_TYPE_OFFSET)
+#define ETH_RX_L4_TCP_TYPE (0<<ETH_RX_L4_TYPE_OFFSET)
+#define ETH_RX_L4_UDP_TYPE (1<<ETH_RX_L4_TYPE_OFFSET)
+#define ETH_RX_L4_OTHER_TYPE (2<<ETH_RX_L4_TYPE_OFFSET)
+
+#define ETH_RX_NOT_LLC_SNAP_FORMAT_BIT 23
+#define ETH_RX_NOT_LLC_SNAP_FORMAT_MASK (1<<ETH_RX_NOT_LLC_SNAP_FORMAT_BIT)
+
+#define ETH_RX_IP_FRAME_TYPE_BIT 24
+#define ETH_RX_IP_FRAME_TYPE_MASK (1<<ETH_RX_IP_FRAME_TYPE_BIT)
+
+#define ETH_RX_IP_HEADER_OK_BIT 25
+#define ETH_RX_IP_HEADER_OK_MASK (1<<ETH_RX_IP_HEADER_OK_BIT)
+
+#define ETH_RX_LAST_DESC_BIT 26
+#define ETH_RX_LAST_DESC_MASK (1<<ETH_RX_LAST_DESC_BIT)
+
+#define ETH_RX_FIRST_DESC_BIT 27
+#define ETH_RX_FIRST_DESC_MASK (1<<ETH_RX_FIRST_DESC_BIT)
+
+#define ETH_RX_UNKNOWN_DA_BIT 28
+#define ETH_RX_UNKNOWN_DA_MASK (1<<ETH_RX_UNKNOWN_DA_BIT)
+
+#define ETH_RX_ENABLE_INTERRUPT_BIT 29
+#define ETH_RX_ENABLE_INTERRUPT_MASK (1<<ETH_RX_ENABLE_INTERRUPT_BIT)
+
+#define ETH_RX_L4_CHECKSUM_OK_BIT 30
+#define ETH_RX_L4_CHECKSUM_OK_MASK (1<<ETH_RX_L4_CHECKSUM_OK_BIT)
+
+/* Rx descriptor bufSize field */
+#define ETH_RX_IP_FRAGMENTED_FRAME_BIT 2
+#define ETH_RX_IP_FRAGMENTED_FRAME_MASK (1<<ETH_RX_IP_FRAGMENTED_FRAME_BIT)
+
+#define ETH_RX_BUFFER_MASK 0xFFF8
+
+
+/* Ethernet Cause Register BITs */
+#define ETH_CAUSE_RX_READY_SUM_BIT 0
+#define ETH_CAUSE_EXTEND_BIT 1
+
+#define ETH_CAUSE_RX_READY_OFFSET 2
+#define ETH_CAUSE_RX_READY_BIT(queue) (ETH_CAUSE_RX_READY_OFFSET + (queue))
+#define ETH_CAUSE_RX_READY_MASK(queue) (1 << (ETH_CAUSE_RX_READY_BIT(queue)))
+
+#define ETH_CAUSE_RX_ERROR_SUM_BIT 10
+#define ETH_CAUSE_RX_ERROR_OFFSET 11
+#define ETH_CAUSE_RX_ERROR_BIT(queue) (ETH_CAUSE_RX_ERROR_OFFSET + (queue))
+#define ETH_CAUSE_RX_ERROR_MASK(queue) (1 << (ETH_CAUSE_RX_ERROR_BIT(queue)))
+
+#define ETH_CAUSE_TX_END_BIT 19
+#define ETH_CAUSE_SUM_BIT 31
+
+/* Ethernet Cause Extended Register BITs */
+#define ETH_CAUSE_TX_BUF_OFFSET 0
+#define ETH_CAUSE_TX_BUF_BIT(queue) (ETH_CAUSE_TX_BUF_OFFSET + (queue))
+#define ETH_CAUSE_TX_BUF_MASK(queue) (1 << (ETH_CAUSE_TX_BUF_BIT(queue)))
+
+#define ETH_CAUSE_TX_ERROR_OFFSET 8
+#define ETH_CAUSE_TX_ERROR_BIT(queue) (ETH_CAUSE_TX_ERROR_OFFSET + (queue))
+#define ETH_CAUSE_TX_ERROR_MASK(queue) (1 << (ETH_CAUSE_TX_ERROR_BIT(queue)))
+
+#define ETH_CAUSE_PHY_STATUS_CHANGE_BIT 16
+#define ETH_CAUSE_RX_OVERRUN_BIT 18
+#define ETH_CAUSE_TX_UNDERRUN_BIT 19
+#define ETH_CAUSE_LINK_STATE_CHANGE_BIT 20
+#define ETH_CAUSE_INTERNAL_ADDR_ERR_BIT 23
+#define ETH_CAUSE_EXTEND_SUM_BIT 31
+
+/* Marvell Header Register */
+/* Marvell Header register bits */
+#define ETH_MVHDR_EN_BIT 0
+#define ETH_MVHDR_EN_MASK (1 << ETH_MVHDR_EN_BIT)
+
+#define ETH_MVHDR_DAPREFIX_BIT 1
+#define ETH_MVHDR_DAPREFIX_MASK (0x3 << ETH_MVHDR_DAPREFIX_BIT)
+#define ETH_MVHDR_DAPREFIX_PRI_1_2 (0x1 << ETH_MVHDR_DAPREFIX_BIT)
+#define ETH_MVHDR_DAPREFIX_DBNUM_PRI (0x2 << ETH_MVHDR_DAPREFIX_BIT)
+#define ETH_MVHDR_DAPREFIX_SPID_PRI (0x3 << ETH_MVHDR_DAPREFIX_BIT)
+
+#define ETH_MVHDR_MHMASK_BIT 8
+#define ETH_MVHDR_MHMASK_MASK (0x3 << ETH_MVHDR_MHMASK_BIT)
+#define ETH_MVHDR_MHMASK_8_QUEUE (0x0 << ETH_MVHDR_MHMASK_BIT)
+#define ETH_MVHDR_MHMASK_4_QUEUE (0x1 << ETH_MVHDR_MHMASK_BIT)
+#define ETH_MVHDR_MHMASK_2_QUEUE (0x3 << ETH_MVHDR_MHMASK_BIT)
+
+
+/* Relevant for 6183 ONLY */
+#define ETH_UNIT_PORTS_PADS_CALIB_0_REG (MV_ETH_REG_BASE(0) + 0x0A0)
+#define ETH_UNIT_PORTS_PADS_CALIB_1_REG (MV_ETH_REG_BASE(0) + 0x0A4)
+#define ETH_UNIT_PORTS_PADS_CALIB_2_REG (MV_ETH_REG_BASE(0) + 0x0A8)
+/* Ethernet Unit Ports Pads Calibration_REG (ETH_UNIT_PORTS_PADS_CALIB_x_REG) */
+#define ETH_ETHERNET_PAD_CLIB_DRVN_OFFS 0
+#define ETH_ETHERNET_PAD_CLIB_DRVN_MASK (0x1F << ETH_ETHERNET_PAD_CLIB_DRVN_OFFS)
+
+#define ETH_ETHERNET_PAD_CLIB_DRVP_OFFS 5
+#define ETH_ETHERNET_PAD_CLIB_DRVP_MASK (0x1F << ETH_ETHERNET_PAD_CLIB_DRVP_OFFS)
+
+#define ETH_ETHERNET_PAD_CLIB_TUNEEN_OFFS 16
+#define ETH_ETHERNET_PAD_CLIB_TUNEEN_MASK (0x1 << ETH_ETHERNET_PAD_CLIB_TUNEEN_OFFS)
+
+#define ETH_ETHERNET_PAD_CLIB_LOCKN_OFFS 17
+#define ETH_ETHERNET_PAD_CLIB_LOCKN_MASK (0x1F << ETH_ETHERNET_PAD_CLIB_LOCKN_OFFS)
+
+#define ETH_ETHERNET_PAD_CLIB_OFFST_OFFS 24
+#define ETH_ETHERNET_PAD_CLIB_OFFST_MASK (0x1F << ETH_ETHERNET_PAD_CLIB_OFFST_OFFS)
+
+#define ETH_ETHERNET_PAD_CLIB_WR_EN_OFFS 31
+#define ETH_ETHERNET_PAD_CLIB_WR_EN_MASK (0x1 << ETH_ETHERNET_PAD_CLIB_WR_EN_OFFS)
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvEthRegsh */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/mvCompVer.txt b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/mvCompVer.txt
new file mode 100644
index 000000000..38a926440
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/mvCompVer.txt
@@ -0,0 +1,4 @@
+Global HAL Version: FEROCEON_HAL_3_1_7
+Unit HAL Version: 3.1.4
+Description: This component includes an implementation of the unit HAL drivers
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/mvEth.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/mvEth.h
new file mode 100644
index 000000000..c42ed1a65
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/eth/mvEth.h
@@ -0,0 +1,356 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvEth.h - Header File for : Ethernet Controller
+*
+* DESCRIPTION:
+* This header file contains macros, typedefs and function declarations for
+* Marvell Gigabit Ethernet Controllers.
+*
+* DEPENDENCIES:
+* None.
+*
+*******************************************************************************/
+
+#ifndef __mvEth_h__
+#define __mvEth_h__
+
+/* includes */
+#include "mvTypes.h"
+#include "mv802_3.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+#include "eth/gbe/mvEthRegs.h"
+#include "mvSysHwConfig.h"
+
+/* defines */
+
+#define MV_ETH_EXTRA_FRAGS_NUM 2
+
+
+typedef enum
+{
+ MV_ETH_SPEED_AN,
+ MV_ETH_SPEED_10,
+ MV_ETH_SPEED_100,
+ MV_ETH_SPEED_1000
+
+} MV_ETH_PORT_SPEED;
+
+typedef enum
+{
+ MV_ETH_DUPLEX_AN,
+ MV_ETH_DUPLEX_HALF,
+ MV_ETH_DUPLEX_FULL
+
+} MV_ETH_PORT_DUPLEX;
+
+typedef enum
+{
+ MV_ETH_FC_AN_ADV_DIS,
+ MV_ETH_FC_AN_ADV_SYM,
+ MV_ETH_FC_DISABLE,
+ MV_ETH_FC_ENABLE
+
+} MV_ETH_PORT_FC;
+
+typedef enum
+{
+ MV_ETH_PRIO_FIXED = 0, /* Fixed priority mode */
+ MV_ETH_PRIO_WRR = 1 /* Weighted round robin priority mode */
+} MV_ETH_PRIO_MODE;
+
+/* Ethernet port specific information */
+typedef struct
+{
+ int maxRxPktSize;
+ int rxDefQ;
+ int rxBpduQ;
+ int rxArpQ;
+ int rxTcpQ;
+ int rxUdpQ;
+ int ejpMode;
+} MV_ETH_PORT_CFG;
+
+typedef struct
+{
+ int descrNum;
+} MV_ETH_RX_Q_CFG;
+
+typedef struct
+{
+ int descrNum;
+ MV_ETH_PRIO_MODE prioMode;
+ int quota;
+} MV_ETH_TX_Q_CFG;
+
+typedef struct
+{
+ int maxRxPktSize;
+ int rxDefQ;
+ int txDescrNum[MV_ETH_TX_Q_NUM];
+ int rxDescrNum[MV_ETH_RX_Q_NUM];
+ void *osHandle;
+} MV_ETH_PORT_INIT;
+
+typedef struct
+{
+ MV_BOOL isLinkUp;
+ MV_ETH_PORT_SPEED speed;
+ MV_ETH_PORT_DUPLEX duplex;
+ MV_ETH_PORT_FC flowControl;
+
+} MV_ETH_PORT_STATUS;
+
+typedef enum
+{
+ MV_ETH_DISABLE_HEADER_MODE = 0,
+ MV_ETH_ENABLE_HEADER_MODE_PRI_2_1 = 1,
+ MV_ETH_ENABLE_HEADER_MODE_PRI_DBNUM = 2,
+ MV_ETH_ENABLE_HEADER_MODE_PRI_SPID = 3
+} MV_ETH_HEADER_MODE;
+
+
+/* ethernet.h API list */
+void mvEthHalInit(void);
+void mvEthMemAttrGet(MV_BOOL* pIsSram, MV_BOOL* pIsSwCoher);
+
+/* Port Initialization routines */
+void* mvEthPortInit (int port, MV_ETH_PORT_INIT *pPortInit);
+void ethResetTxDescRing(void* pPortHndl, int queue);
+void ethResetRxDescRing(void* pPortHndl, int queue);
+
+void* mvEthPortHndlGet(int port);
+
+void mvEthPortFinish(void* pEthPortHndl);
+MV_STATUS mvEthPortDown(void* pEthPortHndl);
+MV_STATUS mvEthPortDisable(void* pEthPortHndl);
+MV_STATUS mvEthPortUp(void* pEthPortHndl);
+MV_STATUS mvEthPortEnable(void* pEthPortHndl);
+
+/* Port data flow routines */
+MV_PKT_INFO *mvEthPortForceTxDone(void* pEthPortHndl, int txQueue);
+MV_PKT_INFO *mvEthPortForceRx(void* pEthPortHndl, int rxQueue);
+
+/* Port Configuration routines */
+MV_STATUS mvEthDefaultsSet(void* pEthPortHndl);
+MV_STATUS mvEthMaxRxSizeSet(void* pPortHndl, int maxRxSize);
+
+/* Port RX MAC Filtering control routines */
+MV_U8 mvEthMcastCrc8Get(MV_U8* pAddr);
+MV_STATUS mvEthRxFilterModeSet(void* pPortHndl, MV_BOOL isPromisc);
+MV_STATUS mvEthMacAddrSet(void* pPortHandle, MV_U8* pMacAddr, int queue);
+MV_STATUS mvEthMcastAddrSet(void* pPortHandle, MV_U8 *pAddr, int queue);
+
+/* MIB Counters APIs */
+MV_U32 mvEthMibCounterRead(void* pPortHndl, unsigned int mibOffset,
+ MV_U32* pHigh32);
+void mvEthMibCountersClear(void* pPortHandle);
+
+/* TX Scheduling configuration routines */
+MV_STATUS mvEthTxQueueConfig(void* pPortHandle, int txQueue,
+ MV_ETH_PRIO_MODE txPrioMode, int txQuota);
+
+/* RX Dispatching configuration routines */
+MV_STATUS mvEthBpduRxQueue(void* pPortHandle, int bpduQueue);
+MV_STATUS mvEthVlanPrioRxQueue(void* pPortHandle, int vlanPrio, int vlanPrioQueue);
+MV_STATUS mvEthTosToRxqSet(void* pPortHandle, int tos, int rxq);
+int mvEthTosToRxqGet(void* pPortHandle, int tos);
+
+/* Speed, Duplex, FlowControl routines */
+MV_STATUS mvEthSpeedDuplexSet(void* pPortHandle, MV_ETH_PORT_SPEED speed,
+ MV_ETH_PORT_DUPLEX duplex);
+
+MV_STATUS mvEthFlowCtrlSet(void* pPortHandle, MV_ETH_PORT_FC flowControl);
+
+#if (MV_ETH_VERSION >= 4)
+MV_STATUS mvEthEjpModeSet(void* pPortHandle, int mode);
+#endif /* (MV_ETH_VERSION >= 4) */
+
+void mvEthStatusGet(void* pPortHandle, MV_ETH_PORT_STATUS* pStatus);
+
+/* Marvell Header control */
+MV_STATUS mvEthHeaderModeSet(void* pPortHandle, MV_ETH_HEADER_MODE headerMode);
+
+/* PHY routines */
+void mvEthPhyAddrSet(void* pPortHandle, int phyAddr);
+int mvEthPhyAddrGet(void* pPortHandle);
+
+/* Power management routines */
+void mvEthPortPowerDown(int port);
+void mvEthPortPowerUp(int port);
+
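A hedged bring-up sketch, not part of the HAL, showing one plausible ordering of the routines declared above for a single port. Descriptor counts, the RX packet size, queue numbers and the helper name are placeholders; mvEthHalInit() is assumed to have been called once at system start.

#include "mvEth.h"   /* path assumed */

static void *example_port_up(int port, MV_U8 *macAddr, void *osHandle)
{
    MV_ETH_PORT_INIT init;
    void *hndl;
    int q;

    init.maxRxPktSize = 1536;       /* placeholder RX buffer size */
    init.rxDefQ       = 0;          /* default RX queue           */
    init.osHandle     = osHandle;
    for (q = 0; q < MV_ETH_TX_Q_NUM; q++)
        init.txDescrNum[q] = 64;    /* placeholder ring sizes     */
    for (q = 0; q < MV_ETH_RX_Q_NUM; q++)
        init.rxDescrNum[q] = 128;

    hndl = mvEthPortInit(port, &init);
    if (hndl == NULL)
        return NULL;

    mvEthDefaultsSet(hndl);             /* program port register defaults */
    mvEthMacAddrSet(hndl, macAddr, 0);  /* unicast filter to queue 0      */
    mvEthPortEnable(hndl);              /* start RX/TX                    */
    return hndl;
}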
+/******************** ETH PRIVATE ************************/
+
+/*#define UNCACHED_TX_BUFFERS*/
+/*#define UNCACHED_RX_BUFFERS*/
+
+
+/* Port attributes */
+/* Size of a Tx/Rx descriptor used in chain list data structure */
+#define ETH_RX_DESC_ALIGNED_SIZE 32
+#define ETH_TX_DESC_ALIGNED_SIZE 32
+
+#define TX_DISABLE_TIMEOUT_MSEC 1000
+#define RX_DISABLE_TIMEOUT_MSEC 1000
+#define TX_FIFO_EMPTY_TIMEOUT_MSEC 10000
+#define PORT_DISABLE_WAIT_TCLOCKS 5000
+
+/* Macros that compute the next/previous descriptor pointer without reading the descriptor itself */
+#define RX_NEXT_DESC_PTR(pRxDescr, pQueueCtrl) \
+ ((pRxDescr) == (pQueueCtrl)->pLastDescr) ? \
+ (ETH_RX_DESC*)((pQueueCtrl)->pFirstDescr) : \
+ (ETH_RX_DESC*)(((MV_ULONG)(pRxDescr)) + ETH_RX_DESC_ALIGNED_SIZE)
+
+#define TX_NEXT_DESC_PTR(pTxDescr, pQueueCtrl) \
+ ((pTxDescr) == (pQueueCtrl)->pLastDescr) ? \
+ (ETH_TX_DESC*)((pQueueCtrl)->pFirstDescr) : \
+ (ETH_TX_DESC*)(((MV_ULONG)(pTxDescr)) + ETH_TX_DESC_ALIGNED_SIZE)
+
+#define RX_PREV_DESC_PTR(pRxDescr, pQueueCtrl) \
+ ((pRxDescr) == (pQueueCtrl)->pFirstDescr) ? \
+ (ETH_RX_DESC*)((pQueueCtrl)->pLastDescr) : \
+ (ETH_RX_DESC*)(((MV_ULONG)(pRxDescr)) - ETH_RX_DESC_ALIGNED_SIZE)
+
+#define TX_PREV_DESC_PTR(pTxDescr, pQueueCtrl) \
+ ((pTxDescr) == (pQueueCtrl)->pFirstDescr) ? \
+ (ETH_TX_DESC*)((pQueueCtrl)->pLastDescr) : \
+ (ETH_TX_DESC*)(((MV_ULONG)(pTxDescr)) - ETH_TX_DESC_ALIGNED_SIZE)
+
+
+/* Queue specific information */
+typedef struct
+{
+ void* pFirstDescr;
+ void* pLastDescr;
+ void* pCurrentDescr;
+ void* pUsedDescr;
+ int resource;
+ MV_BUF_INFO descBuf;
+} ETH_QUEUE_CTRL;
+
+
+/* Ethernet port specific information */
+typedef struct _ethPortCtrl
+{
+ int portNo;
+ ETH_QUEUE_CTRL rxQueue[MV_ETH_RX_Q_NUM]; /* Rx ring resource */
+ ETH_QUEUE_CTRL txQueue[MV_ETH_TX_Q_NUM]; /* Tx ring resource */
+
+ MV_ETH_PORT_CFG portConfig;
+ MV_ETH_RX_Q_CFG rxQueueConfig[MV_ETH_RX_Q_NUM];
+ MV_ETH_TX_Q_CFG txQueueConfig[MV_ETH_TX_Q_NUM];
+
+ /* Register images - For DP */
+ MV_U32 portTxQueueCmdReg; /* Port active Tx queues summary */
+ MV_U32 portRxQueueCmdReg; /* Port active Rx queues summary */
+
+ MV_STATE portState;
+
+ MV_U8 mcastCount[256];
+ MV_U32* hashPtr;
+ void *osHandle;
+} ETH_PORT_CTRL;
+
+/************** MACROs ****************/
+
+/* MACROs to Flush / Invalidate TX / RX Buffers */
+#if (ETHER_DRAM_COHER == MV_CACHE_COHER_SW) && !defined(UNCACHED_TX_BUFFERS)
+# define ETH_PACKET_CACHE_FLUSH(pAddr, size) \
+ mvOsCacheClear(NULL, (pAddr), (size)); \
+ /*CPU_PIPE_FLUSH;*/
+#else
+# define ETH_PACKET_CACHE_FLUSH(pAddr, size) \
+ mvOsIoVirtToPhy(NULL, (pAddr));
+#endif /* ETHER_DRAM_COHER == MV_CACHE_COHER_SW */
+
+#if ( (ETHER_DRAM_COHER == MV_CACHE_COHER_SW) && !defined(UNCACHED_RX_BUFFERS) )
+# define ETH_PACKET_CACHE_INVALIDATE(pAddr, size) \
+ mvOsCacheInvalidate (NULL, (pAddr), (size)); \
+ /*CPU_PIPE_FLUSH;*/
+#else
+# define ETH_PACKET_CACHE_INVALIDATE(pAddr, size)
+#endif /* ETHER_DRAM_COHER == MV_CACHE_COHER_SW && !UNCACHED_RX_BUFFERS */
+
+#ifdef ETH_DESCR_UNCACHED
+
+#define ETH_DESCR_FLUSH_INV(pPortCtrl, pDescr)
+#define ETH_DESCR_INV(pPortCtrl, pDescr)
+
+#else
+
+#define ETH_DESCR_FLUSH_INV(pPortCtrl, pDescr) \
+ mvOsCacheLineFlushInv(pPortCtrl->osHandle, (MV_ULONG)(pDescr))
+
+#define ETH_DESCR_INV(pPortCtrl, pDescr) \
+ mvOsCacheLineInv(pPortCtrl->osHandle, (MV_ULONG)(pDescr))
+
+#endif /* ETH_DESCR_UNCACHED */
+
+#include "eth/gbe/mvEthGbe.h"
+
+#endif /* __mvEth_h__ */
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvCompVer.txt b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvCompVer.txt
new file mode 100644
index 000000000..38a926440
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvCompVer.txt
@@ -0,0 +1,4 @@
+Global HAL Version: FEROCEON_HAL_3_1_7
+Unit HAL Version: 3.1.4
+Description: This component includes an implementation of the unit HAL drivers
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.c
new file mode 100644
index 000000000..889d4d9e9
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.c
@@ -0,0 +1,362 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "gpp/mvGpp.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+/* defines */
+#ifdef MV_DEBUG
+ #define DB(x) x
+#else
+ #define DB(x)
+#endif
+
+static MV_VOID gppRegSet(MV_U32 group, MV_U32 regOffs,MV_U32 mask,MV_U32 value);
+
+/*******************************************************************************
+* mvGppTypeSet - Set a GPP pin mode (IN or OUT)
+*
+* DESCRIPTION:
+*
+* INPUT:
+* group - GPP group number
+* mask - 32bit mask value. Each set bit in the mask means that the type
+* of corresponding GPP will be set. Other GPPs are ignored.
+* value - 32bit value that describes GPP type per pin.
+*
+* OUTPUT:
+* None.
+*
+* EXAMPLE:
+* Set GPP8 to input and GPP15 to output.
+* mvGppTypeSet(0, (GPP8 | GPP15),
+* ((MV_GPP_IN & GPP8) | (MV_GPP_OUT & GPP15)) );
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_STATUS mvGppTypeSet(MV_U32 group, MV_U32 mask, MV_U32 value)
+{
+ if (group >= MV_GPP_MAX_GROUP)
+ {
+ DB(mvOsPrintf("mvGppTypeSet: ERR. invalid group number \n"));
+ return MV_BAD_PARAM;
+ }
+
+ gppRegSet(group, GPP_DATA_OUT_EN_REG(group), mask, value);
+
+ /* Workaround for Erratum FE-MISC-70*/
+ if(mvCtrlRevGet()==MV_88F6XXX_A0_REV && (group == 1))
+ {
+ mask &= 0x2;
+ gppRegSet(0, GPP_DATA_OUT_EN_REG(0), mask, value);
+ } /*End of WA*/
+
+ return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvGppBlinkEn - Set a GPP (OUT) Pin list to blink every ~100ms
+*
+* DESCRIPTION:
+*
+* INPUT:
+* group - GPP group number
+* mask - 32bit mask value. Each set bit in the mask means that the blink
+* mode of the corresponding GPP will be set. Other GPPs are ignored.
+* value - 32bit value that describes GPP blink per pin.
+*
+* OUTPUT:
+* None.
+*
+* EXAMPLE:
+* Set GPP8 to be static and GPP15 to be blinking.
+* mvGppBlinkEn(0, (GPP8 | GPP15),
+* ((MV_GPP_OUT_STATIC & GPP8) | (MV_GPP_OUT_BLINK & GPP15)) );
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_STATUS mvGppBlinkEn(MV_U32 group, MV_U32 mask, MV_U32 value)
+{
+ if (group >= MV_GPP_MAX_GROUP)
+ {
+ DB(mvOsPrintf("mvGppBlinkEn: ERR. invalid group number \n"));
+ return MV_BAD_PARAM;
+ }
+
+ gppRegSet(group, GPP_BLINK_EN_REG(group), mask, value);
+
+ return MV_OK;
+
+}
+/*******************************************************************************
+* mvGppPolaritySet - Set a GPP (IN) Pin list Polarity mode
+*
+* DESCRIPTION:
+*
+* INPUT:
+* group - GPP group number
+* mask - 32bit mask value. Each set bit in the mask means that the polarity
+* of the corresponding GPP will be set. Other GPPs are ignored.
+* value - 32bit value that describes GPP polarity per pin.
+*
+* OUTPUT:
+* None.
+*
+* EXAMPLE:
+* Set GPP8 to the actual pin value and GPP15 to be inverted.
+* mvGppPolaritySet(0, (GPP8 | GPP15),
+* ((MV_GPP_IN_ORIGIN & GPP8) | (MV_GPP_IN_INVERT & GPP15)) );
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_STATUS mvGppPolaritySet(MV_U32 group, MV_U32 mask, MV_U32 value)
+{
+ if (group >= MV_GPP_MAX_GROUP)
+ {
+ DB(mvOsPrintf("mvGppPolaritySet: ERR. invalid group number \n"));
+ return MV_BAD_PARAM;
+ }
+
+ gppRegSet(group, GPP_DATA_IN_POL_REG(group), mask, value);
+
+ return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvGppPolarityGet - Get a value of relevant bits from GPP Polarity register.
+*
+* DESCRIPTION:
+*
+* INPUT:
+* group - GPP group number
+* mask - 32bit mask value. Each set bit in the mask means that the
+* returned value is valid for it.
+*
+* OUTPUT:
+* None.
+*
+* EXAMPLE:
+* Get GPP8 and GPP15 value.
+* mvGppPolarityGet(0, (GPP8 | GPP15));
+*
+* RETURN:
+* 32bit value that describes GPP polarity mode per pin.
+*
+*******************************************************************************/
+MV_U32 mvGppPolarityGet(MV_U32 group, MV_U32 mask)
+{
+ MV_U32 regVal;
+
+ if (group >= MV_GPP_MAX_GROUP)
+ {
+	DB(mvOsPrintf("mvGppPolarityGet: ERR. invalid group number \n"));
+ return MV_ERROR;
+ }
+ regVal = MV_REG_READ(GPP_DATA_IN_POL_REG(group));
+
+ return (regVal & mask);
+}
+
+/*******************************************************************************
+* mvGppValueGet - Get a GPP Pin list value.
+*
+* DESCRIPTION:
+* This function gets the value of the given GPP pins.
+*
+* INPUT:
+* group - GPP group number
+* mask - 32bit mask value. Each set bit in the mask means that the
+* returned value is valid for it.
+*
+* OUTPUT:
+* None.
+*
+* EXAMPLE:
+* Get GPP8 and GPP15 value.
+* mvGppValueGet(0, (GPP8 | GPP15));
+*
+* RETURN:
+* 32bit value that describes the GPP value per pin.
+*
+*******************************************************************************/
+MV_U32 mvGppValueGet(MV_U32 group, MV_U32 mask)
+{
+ MV_U32 gppData;
+
+ gppData = MV_REG_READ(GPP_DATA_IN_REG(group));
+
+ gppData &= mask;
+
+ return gppData;
+
+}
+
+/*******************************************************************************
+* mvGppValueSet - Set a GPP Pin list value.
+*
+* DESCRIPTION:
+* This function sets the value for the given GPP pin list.
+*
+* INPUT:
+* group - GPP group number
+* mask - 32bit mask value. Each set bit in the mask means that the
+* value of corresponding GPP will be set accordingly. Other GPP
+* are not affected.
+* value - 32bit value that describes GPP value per pin.
+*
+* OUTPUT:
+* None.
+*
+* EXAMPLE:
+* Set GPP8 value of '0' and GPP15 value of '1'.
+* mvGppValueSet(0, (GPP8 | GPP15), ((0 & GPP8) | (GPP15)) );
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_STATUS mvGppValueSet (MV_U32 group, MV_U32 mask, MV_U32 value)
+{
+ MV_U32 outEnable, tmp;
+ MV_U32 i;
+
+ if (group >= MV_GPP_MAX_GROUP)
+ {
+ DB(mvOsPrintf("mvGppValueSet: Error invalid group number \n"));
+ return MV_BAD_PARAM;
+ }
+
+ /* verify that the gpp pin is configured as output */
+ /* Note that in the register out enabled -> bit = '0'. */
+ outEnable = ~MV_REG_READ(GPP_DATA_OUT_EN_REG(group));
+
+ /* Workaround for Erratum FE-MISC-70*/
+ if(mvCtrlRevGet()==MV_88F6XXX_A0_REV && (group == 1))
+ {
+ tmp = ~MV_REG_READ(GPP_DATA_OUT_EN_REG(0));
+ outEnable &= 0xfffffffd;
+ outEnable |= (tmp & 0x2);
+ } /*End of WA*/
+
+ for (i = 0 ; i < 32 ;i++)
+ {
+ if (((mask & (1 << i)) & (outEnable & (1 << i))) != (mask & (1 << i)))
+ {
+ mvOsPrintf("mvGppValueSet: Err. An attempt to set output "\
+ "value to GPP %d in input mode.\n", i);
+ return MV_ERROR;
+ }
+ }
+
+ gppRegSet(group, GPP_DATA_OUT_REG(group), mask, value);
+
+ return MV_OK;
+
+}
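+
+/* Illustrative usage sketch (hypothetical pin choice): drive GPP12 of group 0
+ * as an output and toggle it. mvGppValueSet() rejects pins that are still in
+ * input mode, so the pin type must be switched to output first:
+ *
+ *     mvGppTypeSet(0, MV_GPP12, (MV_GPP_OUT & MV_GPP12));
+ *     mvGppValueSet(0, MV_GPP12, MV_GPP12);   set the pin high
+ *     mvGppValueSet(0, MV_GPP12, 0);          set the pin low
+ */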
+/*******************************************************************************
+* gppRegSet - Set a specific GPP pin on a specific GPP register
+*
+* DESCRIPTION:
+* This function set a specific GPP pin on a specific GPP register
+*
+* INPUT:
+* regOffs - GPP Register offset
+* group - GPP group number
+* mask - 32bit mask value. Each set bit in the mask means that the
+* value of corresponding GPP will be set accordingly. Other GPP
+* are not affected.
+* value - 32bit value that describes GPP value per pin.
+*
+* OUTPUT:
+* None.
+*
+* EXAMPLE:
+* Set GPP8 to '0' and GPP15 to '1' in the data out register of group 0.
+* gppRegSet(0, GPP_DATA_OUT_REG(0), (GPP8 | GPP15), ((0 & GPP8) | GPP15) );
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+static MV_VOID gppRegSet (MV_U32 group, MV_U32 regOffs,MV_U32 mask,MV_U32 value)
+{
+ MV_U32 gppData;
+
+ gppData = MV_REG_READ(regOffs);
+
+ gppData &= ~mask;
+
+ gppData |= (value & mask);
+
+ MV_REG_WRITE(regOffs, gppData);
+}
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.h
new file mode 100644
index 000000000..526d324fd
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.h
@@ -0,0 +1,118 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvGppH
+#define __INCmvGppH
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "gpp/mvGppRegs.h"
+
+/* These macros describe the GPP type. Each of the GPP pins can */
+/* be assigned to act as a general purpose input or output pin. */
+#define MV_GPP_IN 0xFFFFFFFF /* GPP input */
+#define MV_GPP_OUT 0 /* GPP output */
+
+
+/* These macros describe the GPP Out Enable. */
+#define MV_GPP_OUT_DIS 0xFFFFFFFF /* Out pin disabled*/
+#define MV_GPP_OUT_EN 0 /* Out pin enabled*/
+
+/* These macros describe the GPP Out Blinking. */
+/* When set and the corresponding bit in GPIO Data Out Enable Control */
+/* Register is enabled, the GPIO pin blinks every ~100 ms (a period of */
+/* 2^24 TCLK clocks). */
+#define MV_GPP_OUT_BLINK 0xFFFFFFFF /* Out pin blinking*/
+#define MV_GPP_OUT_STATIC 0 /* Out pin static*/
+
+
+/* These macros describe the GPP Polarity. */
+/* When set to 1 GPIO Data In Register reflects the inverted value of the */
+/* corresponding pin. */
+
+#define MV_GPP_IN_INVERT 0xFFFFFFFF /* Inverted pin value is read */
+#define MV_GPP_IN_ORIGIN 0 /* Original pin value is read */
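+
+/* The value argument of the setters below is interpreted bit-per-pin against
+ * the mask, so the all-ones/all-zeros constants above can be combined per
+ * pin. Hypothetical sketch: GPP2 as an inverted input, GPP3 as an output:
+ *
+ *     mvGppTypeSet(0, MV_GPP2 | MV_GPP3,
+ *                  (MV_GPP_IN & MV_GPP2) | (MV_GPP_OUT & MV_GPP3));
+ *     mvGppPolaritySet(0, MV_GPP2, (MV_GPP_IN_INVERT & MV_GPP2));
+ */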
+
+/* mvGppTypeSet - Set GPP pin mode (IN or OUT) */
+MV_STATUS mvGppTypeSet(MV_U32 group, MV_U32 mask, MV_U32 value);
+
+/* mvGppBlinkEn - Set a GPP (OUT) Pin list to blink every ~100ms */
+MV_STATUS mvGppBlinkEn(MV_U32 group, MV_U32 mask, MV_U32 value);
+
+/* mvGppPolaritySet - Set a GPP (IN) Pin list Polarity mode. */
+MV_STATUS mvGppPolaritySet(MV_U32 group, MV_U32 mask, MV_U32 value);
+
+/* mvGppPolarityGet - Get the Polarity of a GPP Pin */
+MV_U32 mvGppPolarityGet(MV_U32 group, MV_U32 mask);
+
+/* mvGppValueGet - Get a GPP Pin list value.*/
+MV_U32 mvGppValueGet(MV_U32 group, MV_U32 mask);
+
+
+/* mvGppValueSet - Set a GPP Pin list value. */
+MV_STATUS mvGppValueSet (MV_U32 group, MV_U32 mask, MV_U32 value);
+
+#endif /* #ifndef __INCmvGppH */
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGppRegs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGppRegs.h
new file mode 100644
index 000000000..b6fec34fa
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGppRegs.h
@@ -0,0 +1,116 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvGppRegsH
+#define __INCmvGppRegsH
+
+#define MV_GPP0 BIT0
+#define MV_GPP1 BIT1
+#define MV_GPP2 BIT2
+#define MV_GPP3 BIT3
+#define MV_GPP4 BIT4
+#define MV_GPP5 BIT5
+#define MV_GPP6 BIT6
+#define MV_GPP7 BIT7
+#define MV_GPP8 BIT8
+#define MV_GPP9 BIT9
+#define MV_GPP10 BIT10
+#define MV_GPP11 BIT11
+#define MV_GPP12 BIT12
+#define MV_GPP13 BIT13
+#define MV_GPP14 BIT14
+#define MV_GPP15 BIT15
+#define MV_GPP16 BIT16
+#define MV_GPP17 BIT17
+#define MV_GPP18 BIT18
+#define MV_GPP19 BIT19
+#define MV_GPP20 BIT20
+#define MV_GPP21 BIT21
+#define MV_GPP22 BIT22
+#define MV_GPP23 BIT23
+#define MV_GPP24 BIT24
+#define MV_GPP25 BIT25
+#define MV_GPP26 BIT26
+#define MV_GPP27 BIT27
+#define MV_GPP28 BIT28
+#define MV_GPP29 BIT29
+#define MV_GPP30 BIT30
+#define MV_GPP31 BIT31
+
+
+/* registers offsets */
+
+#define GPP_DATA_OUT_REG(grp) ((grp == 0) ? 0x10100 : 0x10140)
+#define GPP_DATA_OUT_EN_REG(grp) ((grp == 0) ? 0x10104 : 0x10144)
+#define GPP_BLINK_EN_REG(grp) ((grp == 0) ? 0x10108 : 0x10148)
+#define GPP_DATA_IN_POL_REG(grp) ((grp == 0) ? 0x1010C : 0x1014c)
+#define GPP_DATA_IN_REG(grp) ((grp == 0) ? 0x10110 : 0x10150)
+#define GPP_INT_CAUSE_REG(grp) ((grp == 0) ? 0x10114 : 0x10154)
+#define GPP_INT_MASK_REG(grp) ((grp == 0) ? 0x10118 : 0x10158)
+#define GPP_INT_LVL_REG(grp) ((grp == 0) ? 0x1011c : 0x1015c)
+
+#define GPP_DATA_OUT_SET_REG 0x10120
+#define GPP_DATA_OUT_CLEAR_REG 0x10124
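+
+/* On controllers with two GPP groups, pins 0-31 map to group 0 and pins
+ * 32-63 to group 1, so a flat pin number can be translated into the (group,
+ * bit) pair that the macros above expect. Hypothetical sketch:
+ *
+ *     MV_U32 group = pin >> 5;
+ *     MV_U32 bit   = 1 << (pin & 0x1F);
+ *     MV_REG_WRITE(GPP_DATA_OUT_REG(group),
+ *                  MV_REG_READ(GPP_DATA_OUT_REG(group)) | bit);
+ */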
+
+#endif /* #ifndef __INCmvGppRegsH */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvCompVer.txt b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvCompVer.txt
new file mode 100644
index 000000000..85bfa612c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvCompVer.txt
@@ -0,0 +1,4 @@
+Global HAL Version: FEROCEON_HAL_3_1_7
+Unit HAL Version: 3.1.3
+Description: This component includes an implementation of the unit HAL drivers
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.c
new file mode 100644
index 000000000..672d3e31f
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.c
@@ -0,0 +1,669 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvPciIf.h"
+#include "ctrlEnv/sys/mvSysPex.h"
+
+#if defined(MV_INCLUDE_PCI)
+#include "ctrlEnv/sys/mvSysPci.h"
+#endif
+
+
+/* defines */
+#ifdef MV_DEBUG
+ #define DB(x) x
+#else
+ #define DB(x)
+#endif
+
+
+/*******************************************************************************
+* mvPciIfInit - Initialize PCI interfaces
+*
+* DESCRIPTION:
+* This function initializes the given PCI interface, dispatching to the
+* conventional PCI or PCI Express (PEX) driver according to the interface
+* type, in either host (root complex) or device (end point) mode.
+*
+* INPUT:
+* pciIf     - PCI interface number.
+* pciIfmode - PCI interface mode (host or device).
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_OK on success, otherwise MV_ERROR or MV_BAD_PARAM.
+*
+*******************************************************************************/
+
+
+MV_STATUS mvPciIfInit(MV_U32 pciIf, PCI_IF_MODE pciIfmode)
+{
+ PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+ if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PCI)
+
+ MV_PCI_MOD pciMod;
+
+ if (PCI_IF_MODE_HOST == pciIfmode)
+ {
+ pciMod = MV_PCI_MOD_HOST;
+ }
+ else if (PCI_IF_MODE_DEVICE == pciIfmode)
+ {
+ pciMod = MV_PCI_MOD_DEVICE;
+ }
+ else
+ {
+ mvOsPrintf("%s: ERROR!!! Bus %d mode %d neither host nor device!\n",
+ __FUNCTION__, pciIf, pciIfmode);
+ return MV_FAIL;
+ }
+
+ return mvPciInit(pciIf - MV_PCI_START_IF, pciMod);
+ #else
+ return MV_OK;
+ #endif
+ }
+ else if (PCI_IF_TYPE_PEX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PEX)
+
+ MV_PEX_TYPE pexType;
+
+ if (PCI_IF_MODE_HOST == pciIfmode)
+ {
+ pexType = MV_PEX_ROOT_COMPLEX;
+ }
+ else if (PCI_IF_MODE_DEVICE == pciIfmode)
+ {
+ pexType = MV_PEX_END_POINT;
+ }
+ else
+ {
+ mvOsPrintf("%s: ERROR!!! Bus %d type %d neither root complex nor" \
+ " end point\n", __FUNCTION__, pciIf, pciIfmode);
+ return MV_FAIL;
+ }
+ return mvPexInit(pciIf - MV_PEX_START_IF, pexType);
+
+ #else
+ return MV_OK;
+ #endif
+
+ }
+ else
+ {
+ mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+ }
+
+ return MV_FAIL;
+
+}
+
+/* PCI configuration space read write */
+
+/*******************************************************************************
+* mvPciIfConfigRead - Read from configuration space
+*
+* DESCRIPTION:
+* This function performs a 32 bit read from PCI configuration space.
+* It supports both type 0 and type 1 of Configuration Transactions
+* (local and over bridge). In order to read from the local bus segment, use
+* the bus number retrieved from mvPciIfLocalBusNumGet(). Other bus numbers
+* will result in a configuration transaction of type 1 (over bridge).
+*
+* INPUT:
+* pciIf - PCI interface number.
+* bus - PCI segment bus number.
+* dev - PCI device number.
+* func - Function number.
+* regOffs - Register offset.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32bit register data, 0xffffffff on error
+*
+*******************************************************************************/
+MV_U32 mvPciIfConfigRead (MV_U32 pciIf, MV_U32 bus, MV_U32 dev, MV_U32 func,
+ MV_U32 regOff)
+{
+ PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+ if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PCI)
+ return mvPciConfigRead(pciIf - MV_PCI_START_IF,
+ bus,
+ dev,
+ func,
+ regOff);
+ #else
+ return 0xffffffff;
+ #endif
+ }
+ else if (PCI_IF_TYPE_PEX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PEX)
+ return mvPexConfigRead(pciIf - MV_PEX_START_IF,
+ bus,
+ dev,
+ func,
+ regOff);
+ #else
+ return 0xffffffff;
+ #endif
+
+ }
+ else
+ {
+ mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+ }
+
+	return 0xFFFFFFFF;
+
+}
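+
+/* Illustrative sketch (interface, bus and device numbers are hypothetical):
+ * read the vendor/device ID dword of device 1 on the local bus of interface 0
+ * and split it using the PDVIR_* fields from mvPciIfRegs.h:
+ *
+ *     MV_U32 dword  = mvPciIfConfigRead(0, mvPciIfLocalBusNumGet(0), 1, 0,
+ *                                       PCI_DEVICE_AND_VENDOR_ID);
+ *     MV_U32 vendor = (dword & PDVIR_VEN_ID_MASK) >> PDVIR_VEN_ID_OFFS;
+ *     MV_U32 device = (dword & PDVIR_DEV_ID_MASK) >> PDVIR_DEV_ID_OFFS;
+ *
+ * A result of 0xffffffff means the access failed or no device responded.
+ */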
+
+/*******************************************************************************
+* mvPciIfConfigWrite - Write to configuration space
+*
+* DESCRIPTION:
+* This function performs a 32 bit write to PCI configuration space.
+* It supports both type 0 and type 1 of Configuration Transactions
+* (local and over bridge). In order to write to the local bus segment, use
+* the bus number retrieved from mvPciIfLocalBusNumGet(). Other bus numbers
+* will result in a configuration transaction of type 1 (over bridge).
+*
+* INPUT:
+* pciIf - PCI interface number.
+* bus - PCI segment bus number.
+* dev - PCI device number.
+* func - Function number.
+* regOffs - Register offset.
+* data - 32bit data.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPciIfConfigWrite(MV_U32 pciIf, MV_U32 bus, MV_U32 dev,
+ MV_U32 func, MV_U32 regOff, MV_U32 data)
+{
+ PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+ if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PCI)
+ return mvPciConfigWrite(pciIf - MV_PCI_START_IF,
+ bus,
+ dev,
+ func,
+ regOff,
+ data);
+ #else
+ return MV_OK;
+ #endif
+ }
+ else if (PCI_IF_TYPE_PEX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PEX)
+ return mvPexConfigWrite(pciIf - MV_PEX_START_IF,
+ bus,
+ dev,
+ func,
+ regOff,
+ data);
+ #else
+ return MV_OK;
+ #endif
+
+ }
+ else
+ {
+ mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+ }
+
+ return MV_FAIL;
+
+}
+
+/*******************************************************************************
+* mvPciIfMasterEnable - Enable/disable PCI interface master transactions.
+*
+* DESCRIPTION:
+* This function performs a read-modify-write of the PCI command/status
+* register (offset 0x4) to set/reset bit 2. After this bit is set, the PCI
+* master is allowed to gain ownership of the bus; otherwise it cannot
+* do so.
+*
+* INPUT:
+* pciIf - PCI interface number.
+* enable - Enable/disable parameter.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPciIfMasterEnable(MV_U32 pciIf, MV_BOOL enable)
+{
+
+ PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+ if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PCI)
+ return mvPciMasterEnable(pciIf - MV_PCI_START_IF,
+ enable);
+ #else
+ return MV_OK;
+ #endif
+ }
+ else if (PCI_IF_TYPE_PEX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PEX)
+ return mvPexMasterEnable(pciIf - MV_PEX_START_IF,
+ enable);
+ #else
+ return MV_OK;
+ #endif
+ }
+ else
+ {
+ mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+ }
+
+ return MV_FAIL;
+
+}
+
+
+/*******************************************************************************
+* mvPciIfSlaveEnable - Enable/disable PCI interface slave transactions.
+*
+* DESCRIPTION:
+* This function performs a read-modify-write of the PCI command/status
+* register (offset 0x4) to set/reset bits 0 and 1. After those bits are set,
+* the PCI slave is allowed to respond to PCI IO space access (bit 0)
+* and PCI memory space access (bit 1).
+*
+* INPUT:
+* pciIf - PCI interface number.
+* dev - PCI device number.
+* enable - Enable/disable parameter.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPciIfSlaveEnable(MV_U32 pciIf,MV_U32 bus, MV_U32 dev, MV_BOOL enable)
+{
+
+ PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+ if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PCI)
+ return mvPciSlaveEnable(pciIf - MV_PCI_START_IF,bus,dev,
+ enable);
+ #else
+ return MV_OK;
+ #endif
+ }
+ else if (PCI_IF_TYPE_PEX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PEX)
+ return mvPexSlaveEnable(pciIf - MV_PEX_START_IF,bus,dev,
+ enable);
+ #else
+ return MV_OK;
+ #endif
+ }
+ else
+ {
+ mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+ }
+
+ return MV_FAIL;
+
+}
+
+/*******************************************************************************
+* mvPciIfLocalBusNumSet - Set PCI interface local bus number.
+*
+* DESCRIPTION:
+* This function sets the local bus number of the given PCI interface.
+* Note: In case the PCI interface is PCI-X, the information is read-only.
+*
+* INPUT:
+* pciIf - PCI interface number.
+* busNum - Bus number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_NOT_ALLOWED in case the PCI interface is PCI-X,
+* MV_BAD_PARAM on bad parameters,
+* otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPciIfLocalBusNumSet(MV_U32 pciIf, MV_U32 busNum)
+{
+ PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+ if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PCI)
+ return mvPciLocalBusNumSet(pciIf - MV_PCI_START_IF,
+ busNum);
+ #else
+ return MV_OK;
+ #endif
+ }
+ else if (PCI_IF_TYPE_PEX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PEX)
+ return mvPexLocalBusNumSet(pciIf - MV_PEX_START_IF,
+ busNum);
+ #else
+ return MV_OK;
+ #endif
+ }
+ else
+ {
+ mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+ }
+
+ return MV_FAIL;
+
+}
+
+/*******************************************************************************
+* mvPciIfLocalBusNumGet - Get PCI interface local bus number.
+*
+* DESCRIPTION:
+* This function gets the local bus number of a given PCI interface.
+*
+* INPUT:
+* pciIf - PCI interface number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Local bus number, or 0xffffffff on error.
+*
+*******************************************************************************/
+MV_U32 mvPciIfLocalBusNumGet(MV_U32 pciIf)
+{
+ PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+ if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PCI)
+ return mvPciLocalBusNumGet(pciIf - MV_PCI_START_IF);
+ #else
+ return 0xFFFFFFFF;
+ #endif
+ }
+ else if (PCI_IF_TYPE_PEX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PEX)
+ return mvPexLocalBusNumGet(pciIf - MV_PEX_START_IF);
+ #else
+ return 0xFFFFFFFF;
+ #endif
+
+ }
+ else
+ {
+ mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n",__FUNCTION__, pciIf);
+ }
+
+	return 0xFFFFFFFF;
+
+}
+
+
+/*******************************************************************************
+* mvPciIfLocalDevNumSet - Set PCI interface local device number.
+*
+* DESCRIPTION:
+* This function sets the local device number of the given PCI interface.
+* Note: In case the PCI interface is PCI-X, the information is read-only.
+*
+* INPUT:
+* pciIf - PCI interface number.
+* devNum - Device number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_NOT_ALLOWED in case the PCI interface is PCI-X, MV_BAD_PARAM on bad
+* parameters, otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPciIfLocalDevNumSet(MV_U32 pciIf, MV_U32 devNum)
+{
+ PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+ if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PCI)
+ return mvPciLocalDevNumSet(pciIf - MV_PCI_START_IF,
+ devNum);
+ #else
+ return MV_OK;
+ #endif
+ }
+ else if (PCI_IF_TYPE_PEX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PEX)
+ return mvPexLocalDevNumSet(pciIf - MV_PEX_START_IF,
+ devNum);
+ #else
+ return MV_OK;
+ #endif
+ }
+ else
+ {
+ mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+ }
+
+ return MV_FAIL;
+
+}
+
+/*******************************************************************************
+* mvPciIfLocalDevNumGet - Get PCI interface local device number.
+*
+* DESCRIPTION:
+* This function gets the local device number of a given PCI interface.
+*
+* INPUT:
+* pciIf - PCI interface number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Local device number, or 0xffffffff on error.
+*
+*******************************************************************************/
+MV_U32 mvPciIfLocalDevNumGet(MV_U32 pciIf)
+{
+ PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+ if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PCI)
+ return mvPciLocalDevNumGet(pciIf - MV_PCI_START_IF);
+ #else
+ return 0xFFFFFFFF;
+ #endif
+ }
+ else if (PCI_IF_TYPE_PEX == pciIfType)
+ {
+ #if defined(MV_INCLUDE_PEX)
+ return mvPexLocalDevNumGet(pciIf - MV_PEX_START_IF);
+ #else
+ return 0xFFFFFFFF;
+ #endif
+
+ }
+ else
+ {
+ mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+ }
+
+	return 0xFFFFFFFF;
+
+}
+
+/*******************************************************************************
+* mvPciIfTypeGet - Get the type of a PCI interface.
+*
+* DESCRIPTION:
+* This function returns whether the given interface number refers to a
+* conventional PCI/PCI-X interface or to a PCI Express (PEX) interface.
+*
+* INPUT:
+* pciIf - PCI interface number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* PCI_IF_TYPE_CONVEN_PCIX or PCI_IF_TYPE_PEX, 0xffffffff on invalid pciIf.
+*
+*******************************************************************************/
+
+PCI_IF_TYPE mvPciIfTypeGet(MV_U32 pciIf)
+{
+
+ if ((pciIf >= MV_PCI_START_IF)&&(pciIf < MV_PCI_MAX_IF + MV_PCI_START_IF))
+ {
+ return PCI_IF_TYPE_CONVEN_PCIX;
+ }
+ else if ((pciIf >= MV_PEX_START_IF) &&
+ (pciIf < MV_PEX_MAX_IF + MV_PEX_START_IF))
+ {
+ return PCI_IF_TYPE_PEX;
+
+ }
+ else
+ {
+ mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+ }
+
+ return 0xffffffff;
+
+}
+
+/*******************************************************************************
+* mvPciRealIfNumGet - Get the interface number relative to its own driver.
+*
+* DESCRIPTION:
+* This function converts a unified PCI interface number into the index used
+* by the underlying conventional PCI or PEX driver (i.e. relative to
+* MV_PCI_START_IF or MV_PEX_START_IF).
+*
+* INPUT:
+* pciIf - PCI interface number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Interface number relative to its driver, 0xffffffff on invalid pciIf.
+*
+*******************************************************************************/
+
+MV_U32 mvPciRealIfNumGet(MV_U32 pciIf)
+{
+
+ PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+ if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+ {
+ return (pciIf - MV_PCI_START_IF);
+ }
+ else if (PCI_IF_TYPE_PEX == pciIfType)
+ {
+ return (pciIf - MV_PEX_START_IF);
+
+ }
+ else
+ {
+ mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+ }
+
+ return 0xffffffff;
+
+}
+
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.h
new file mode 100644
index 000000000..9c2d1604e
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.h
@@ -0,0 +1,134 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCPCIIFH
+#define __INCPCIIFH
+
+#include "mvSysHwConfig.h"
+#include "pci-if/mvPciIfRegs.h"
+#if defined(MV_INCLUDE_PEX)
+#include "pex/mvPex.h"
+#endif
+#if defined(MV_INCLUDE_PCI)
+#include "pci/mvPci.h"
+#endif
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+typedef enum _mvPCIIfType
+{
+ PCI_IF_TYPE_CONVEN_PCIX,
+ PCI_IF_TYPE_PEX
+
+}PCI_IF_TYPE;
+
+typedef enum _mvPCIIfMode
+{
+ PCI_IF_MODE_HOST,
+ PCI_IF_MODE_DEVICE
+}PCI_IF_MODE;
+
+
+/* Global Functions prototypes */
+
+/* mvPciIfInit - Initialize PCI interfaces*/
+MV_STATUS mvPciIfInit(MV_U32 pciIf, PCI_IF_MODE pciIfmode);
+
+/* mvPciIfConfigRead - Read from configuration space */
+MV_U32 mvPciIfConfigRead (MV_U32 pciIf, MV_U32 bus, MV_U32 dev,
+ MV_U32 func,MV_U32 regOff);
+
+/* mvPciIfConfigWrite - Write to configuration space */
+MV_STATUS mvPciIfConfigWrite(MV_U32 pciIf, MV_U32 bus, MV_U32 dev,
+ MV_U32 func, MV_U32 regOff, MV_U32 data);
+
+/* mvPciIfMasterEnable - Enable/disable PCI interface master transactions.*/
+MV_STATUS mvPciIfMasterEnable(MV_U32 pciIf, MV_BOOL enable);
+
+/* mvPciIfSlaveEnable - Enable/disable PCI interface slave transactions.*/
+MV_STATUS mvPciIfSlaveEnable(MV_U32 pciIf,MV_U32 bus, MV_U32 dev,
+ MV_BOOL enable);
+
+/* mvPciIfLocalBusNumSet - Set PCI interface local bus number.*/
+MV_STATUS mvPciIfLocalBusNumSet(MV_U32 pciIf, MV_U32 busNum);
+
+/* mvPciIfLocalBusNumGet - Get PCI interface local bus number.*/
+MV_U32 mvPciIfLocalBusNumGet(MV_U32 pciIf);
+
+/* mvPciIfLocalDevNumSet - Set PCI interface local device number.*/
+MV_STATUS mvPciIfLocalDevNumSet(MV_U32 pciIf, MV_U32 devNum);
+
+/* mvPciIfLocalDevNumGet - Get PCI interface local device number.*/
+MV_U32 mvPciIfLocalDevNumGet(MV_U32 pciIf);
+
+/* mvPciIfTypeGet - Get PCI If type*/
+PCI_IF_TYPE mvPciIfTypeGet(MV_U32 pciIf);
+
+MV_U32 mvPciRealIfNumGet(MV_U32 pciIf);
+
+/* mvPciIfAddrDecShow - Display address decode windows attributes */
+MV_VOID mvPciIfAddrDecShow(MV_VOID);
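+
+/* Illustrative bring-up sketch (hypothetical: assumes interface 0 should act
+ * as a host/root complex on local bus 0):
+ *
+ *     mvPciIfInit(0, PCI_IF_MODE_HOST);
+ *     mvPciIfLocalBusNumSet(0, 0);
+ *     mvPciIfMasterEnable(0, MV_TRUE);
+ *
+ * Each call dispatches to the conventional PCI or the PCI Express driver
+ * according to mvPciIfTypeGet(0).
+ */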
+
+#endif /* #ifndef __INCPCIIFH */
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIfRegs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIfRegs.h
new file mode 100644
index 000000000..08d4d2d90
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIfRegs.h
@@ -0,0 +1,245 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCPCIIFREGSH
+#define __INCPCIIFREGSH
+
+
+/* defines */
+#define MAX_PCI_DEVICES 32
+#define MAX_PCI_FUNCS 8
+#define MAX_PCI_BUSSES 128
+
+/***************************************/
+/* PCI Configuration registers */
+/***************************************/
+
+/*********************************************/
+/* PCI Configuration, Function 0, Registers */
+/*********************************************/
+
+
+/* Standard registers */
+#define PCI_DEVICE_AND_VENDOR_ID 0x000
+#define PCI_STATUS_AND_COMMAND 0x004
+#define PCI_CLASS_CODE_AND_REVISION_ID 0x008
+#define PCI_BIST_HDR_TYPE_LAT_TMR_CACHE_LINE 0x00C
+#define PCI_MEMORY_BAR_BASE_ADDR(barNum) (0x010 + ((barNum) << 2))
+#define PCI_SUBSYS_ID_AND_SUBSYS_VENDOR_ID 0x02C
+#define PCI_EXPANSION_ROM_BASE_ADDR_REG 0x030
+#define PCI_CAPABILTY_LIST_POINTER 0x034
+#define PCI_INTERRUPT_PIN_AND_LINE 0x03C
+
+
+/* PCI Device and Vendor ID Register (PDVIR) */
+#define PDVIR_VEN_ID_OFFS 0 /* Vendor ID */
+#define PDVIR_VEN_ID_MASK (0xffff << PDVIR_VEN_ID_OFFS)
+
+#define PDVIR_DEV_ID_OFFS 16 /* Device ID */
+#define PDVIR_DEV_ID_MASK (0xffff << PDVIR_DEV_ID_OFFS)
+
+/* PCI Status and Command Register (PSCR) */
+#define PSCR_IO_EN BIT0 /* IO Enable */
+#define PSCR_MEM_EN BIT1 /* Memory Enable */
+#define PSCR_MASTER_EN BIT2 /* Master Enable */
+#define PSCR_SPECIAL_EN BIT3 /* Special Cycle Enable */
+#define PSCR_MEM_WRI_INV BIT4 /* Memory Write and Invalidate Enable */
+#define PSCR_VGA BIT5 /* VGA Palette Snoops */
+#define PSCR_PERR_EN BIT6 /* Parity Errors Respond Enable */
+#define PSCR_ADDR_STEP BIT7 /* Address Stepping Enable (Wait Cycle En)*/
+#define PSCR_SERR_EN BIT8 /* Ability to assert SERR# line */
+#define PSCR_FAST_BTB_EN BIT9 /* generate fast back-to-back transactions*/
+#define PSCR_CAP_LIST BIT20 /* Capability List Support */
+#define PSCR_66MHZ_EN BIT21 /* 66 MHz Capable */
+#define PSCR_UDF_EN BIT22 /* User definable features */
+#define PSCR_TAR_FAST_BB BIT23 /* fast back-to-back transactions capable */
+#define PSCR_DATA_PERR BIT24 /* Data Parity reported */
+
+#define PSCR_DEVSEL_TIM_OFFS 25 /* DEVSEL timing */
+#define PSCR_DEVSEL_TIM_MASK (0x3 << PSCR_DEVSEL_TIM_OFFS)
+#define PSCR_DEVSEL_TIM_FAST (0x0 << PSCR_DEVSEL_TIM_OFFS)
+#define PSCR_DEVSEL_TIM_MED (0x1 << PSCR_DEVSEL_TIM_OFFS)
+#define PSCR_DEVSEL_TIM_SLOW (0x2 << PSCR_DEVSEL_TIM_OFFS)
+
+#define PSCR_SLAVE_TABORT BIT27 /* Signalled Target Abort */
+#define PSCR_MASTER_TABORT BIT28 /* Received Target Abort */
+#define PSCR_MABORT BIT29 /* Received Master Abort */
+#define PSCR_SYSERR BIT30 /* Signalled system error */
+#define PSCR_DET_PARERR BIT31 /* Detect Parity Error */
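+
+/* Illustrative sketch (interface, bus and device numbers are hypothetical):
+ * enable memory space decoding and bus mastering for a device with a
+ * read-modify-write of the command/status register, using the generic
+ * config space accessors declared in mvPciIf.h:
+ *
+ *     MV_U32 cmd = mvPciIfConfigRead(0, bus, dev, 0, PCI_STATUS_AND_COMMAND);
+ *     cmd |= (PSCR_MEM_EN | PSCR_MASTER_EN);
+ *     mvPciIfConfigWrite(0, bus, dev, 0, PCI_STATUS_AND_COMMAND, cmd);
+ */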
+
+/* PCI configuration register offset=0x08 fields
+ (PCI_CLASS_CODE_AND_REVISION_ID)(PCCRI) */
+
+#define PCCRIR_REVID_OFFS 0 /* Revision ID */
+#define PCCRIR_REVID_MASK (0xff << PCCRIR_REVID_OFFS)
+
+#define PCCRIR_FULL_CLASS_OFFS 8 /* Full Class Code */
+#define PCCRIR_FULL_CLASS_MASK (0xffffff << PCCRIR_FULL_CLASS_OFFS)
+
+#define PCCRIR_PROGIF_OFFS 8 /* Prog .I/F*/
+#define PCCRIR_PROGIF_MASK (0xff << PCCRIR_PROGIF_OFFS)
+
+#define PCCRIR_SUB_CLASS_OFFS 16 /* Sub Class*/
+#define PCCRIR_SUB_CLASS_MASK (0xff << PCCRIR_SUB_CLASS_OFFS)
+
+#define PCCRIR_BASE_CLASS_OFFS 24 /* Base Class*/
+#define PCCRIR_BASE_CLASS_MASK (0xff << PCCRIR_BASE_CLASS_OFFS)
+
+/* PCI configuration register offset=0x0C fields
+ (PCI_BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE)(PBHTLTCL) */
+
+#define PBHTLTCLR_CACHELINE_OFFS 0 /* Specifies the cache line size */
+#define PBHTLTCLR_CACHELINE_MASK (0xff << PBHTLTCLR_CACHELINE_OFFS)
+
+#define PBHTLTCLR_LATTIMER_OFFS 8 /* latency timer */
+#define PBHTLTCLR_LATTIMER_MASK (0xff << PBHTLTCLR_LATTIMER_OFFS)
+
+#define PBHTLTCLR_HEADTYPE_FULL_OFFS 16 /* Full Header Type */
+#define PBHTLTCLR_HEADTYPE_FULL_MASK (0xff << PBHTLTCLR_HEADTYPE_FULL_OFFS)
+
+#define PBHTLTCLR_MULTI_FUNC BIT23 /* Multi/Single function */
+
+#define PBHTLTCLR_HEADER_OFFS 16 /* Header type */
+#define PBHTLTCLR_HEADER_MASK (0x7f << PBHTLTCLR_HEADER_OFFS)
+#define PBHTLTCLR_HEADER_STANDARD (0x0 << PBHTLTCLR_HEADER_OFFS)
+#define PBHTLTCLR_HEADER_PCI2PCI_BRIDGE (0x1 << PBHTLTCLR_HEADER_OFFS)
+
+
+#define PBHTLTCLR_BISTCOMP_OFFS 24 /* BIST Completion Code */
+#define PBHTLTCLR_BISTCOMP_MASK (0xf << PBHTLTCLR_BISTCOMP_OFFS)
+
+#define PBHTLTCLR_BISTACT BIT30 /* BIST Activate bit */
+#define PBHTLTCLR_BISTCAP BIT31 /* BIST Capable Bit */
+
+
+/* PCI Bar Base Low Register (PBBLR) */
+#define PBBLR_IOSPACE BIT0 /* Memory Space Indicator */
+
+#define PBBLR_TYPE_OFFS 1 /* BAR Type/Init Val. */
+#define PBBLR_TYPE_MASK (0x3 << PBBLR_TYPE_OFFS)
+#define PBBLR_TYPE_32BIT_ADDR (0x0 << PBBLR_TYPE_OFFS)
+#define PBBLR_TYPE_64BIT_ADDR (0x2 << PBBLR_TYPE_OFFS)
+
+#define PBBLR_PREFETCH_EN BIT3 /* Prefetch Enable */
+
+
+#define PBBLR_MEM_BASE_OFFS 4 /* Memory Bar Base address. Corresponds to
+ address bits [31:4] */
+#define PBBLR_MEM_BASE_MASK (0xfffffff << PBBLR_MEM_BASE_OFFS)
+
+#define PBBLR_IO_BASE_OFFS 2 /* IO Bar Base address. Corresponds to
+ address bits [31:2] */
+#define PBBLR_IO_BASE_MASK (0x3fffffff << PBBLR_IO_BASE_OFFS)
+
+
+#define PBBLR_BASE_OFFS 12 /* Base address. Address bits [31:12] */
+#define PBBLR_BASE_MASK (0xfffff << PBBLR_BASE_OFFS)
+#define PBBLR_BASE_ALIGNMET (1 << PBBLR_BASE_OFFS)
+
+
+/* PCI Bar Base High Register (PBBHR) */
+#define PBBHR_BASE_OFFS 0 /* Base address. Address bits [63:32] */
+#define PBBHR_BASE_MASK (0xffffffff << PBBHR_BASE_OFFS)
+
+
+/* PCI configuration register offset=0x2C fields
+ (PCI_SUBSYSTEM_ID_AND_SUBSYSTEM_VENDOR_ID)(PSISVI) */
+
+#define PSISVIR_VENID_OFFS 0 /* Subsystem Manufacturer Vendor ID Number */
+#define PSISVIR_VENID_MASK (0xffff << PSISVIR_VENID_OFFS)
+
+#define PSISVIR_DEVID_OFFS 16 /* Subsystem Device ID Number */
+#define PSISVIR_DEVID_MASK (0xffff << PSISVIR_DEVID_OFFS)
+
+/* PCI configuration register offset=0x30 fields
+ (PCI_EXPANSION_ROM_BASE_ADDR_REG)(PERBA) */
+
+#define PERBAR_EXPROMEN BIT0 /* Expansion ROM Enable */
+
+#define PERBAR_BASE_OFFS 12 /* Expansion ROM Base Address */
+#define PERBAR_BASE_MASK (0xfffff << PERBAR_BASE_OFFS)
+
+/* PCI configuration register offset=0x34 fields
+ (PCI_CAPABILTY_LIST_POINTER)(PCLP) */
+
+#define PCLPR_CAPPTR_OFFS 0 /* Capability List Pointer */
+#define PCLPR_CAPPTR_MASK (0xff << PCLPR_CAPPTR_OFFS)
+
+/* PCI configuration register offset=0x3C fields
+ (PCI_INTERRUPT_PIN_AND_LINE)(PIPL) */
+
+#define PIPLR_INTLINE_OFFS 0 /* Interrupt line (IRQ) */
+#define PIPLR_INTLINE_MASK (0xff << PIPLR_INTLINE_OFFS)
+
+#define PIPLR_INTPIN_OFFS 8 /* interrupt pin (A,B,C,D) */
+#define PIPLR_INTPIN_MASK (0xff << PIPLR_INTPIN_OFFS)
+
+#define PIPLR_MINGRANT_OFFS 16 /* Minimum Grant, in 250 nanosecond units */
+#define PIPLR_MINGRANT_MASK (0xff << PIPLR_MINGRANT_OFFS)
+
+#define PIPLR_MAXLATEN_OFFS 24 /* Maximum latency, in 250 nanosecond units */
+#define PIPLR_MAXLATEN_MASK (0xff << PIPLR_MAXLATEN_OFFS)
+
+#endif /* #ifndef __INCPCIIFREGSH */
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.c
new file mode 100644
index 000000000..f2169793b
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.c
@@ -0,0 +1,1006 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/* includes */
+#include "mvPciUtils.h"
+
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+/* #define MV_DEBUG */
+/* defines */
+#ifdef MV_DEBUG
+ #define DB(x) x
+ #define mvOsPrintf printf
+#else
+ #define DB(x)
+#endif
+
+/*
+This module only supports scanning of Header type 00h PCI devices.
+There is no support for Header type 01h PCI devices (PCI bridges).
+*/
+
+
+static MV_STATUS pciDetectDevice(MV_U32 pciIf,
+ MV_U32 bus,
+ MV_U32 dev,
+ MV_U32 func,
+ MV_PCI_DEVICE *pPciAgent);
+
+static MV_U32 pciDetectDeviceBars(MV_U32 pciIf,
+ MV_U32 bus,
+ MV_U32 dev,
+ MV_U32 func,
+ MV_PCI_DEVICE *pPciAgent);
+
+
+
+
+
+
+/*******************************************************************************
+* mvPciScan - Scan a PCI interface bus
+*
+* DESCRIPTION:
+* Performs a full scan on a PCI interface and returns all possible details
+* on the agents found on the bus.
+*
+* INPUT:
+* pciIf - PCI Interface
+* pPciAgents - Pointer to an Array of the pci agents to be detected
+* pPciAgentsNum - pPciAgents array maximum number of elements
+*
+* OUTPUT:
+* pPciAgents - Array of the pci agents detected on the bus
+* pPciAgentsNum - Number of pci agents detected on the bus
+*
+* RETURN:
+* MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+
+MV_STATUS mvPciScan(MV_U32 pciIf,
+ MV_PCI_DEVICE *pPciAgents,
+ MV_U32 *pPciAgentsNum)
+{
+
+ MV_U32 devIndex,funcIndex=0,busIndex=0,detectedDevNum=0;
+ MV_U32 localBus=mvPciIfLocalBusNumGet(pciIf);
+ MV_PCI_DEVICE *pPciDevice;
+ MV_PCI_DEVICE *pMainDevice;
+
+ DB(mvOsPrintf("mvPciScan: PCI interface num %d\n", pciIf));
+ /* Parameter checking */
+ if (pciIf >= mvCtrlPexMaxIfGet())
+ {
+ DB(mvOsPrintf("mvPciScan: ERR. Invalid PCI interface num %d\n", pciIf));
+ return MV_BAD_PARAM;
+ }
+ if (NULL == pPciAgents)
+ {
+ DB(mvOsPrintf("mvPciScan: ERR. pPciAgents=NULL \n"));
+ return MV_BAD_PARAM;
+ }
+ if (NULL == pPciAgentsNum)
+ {
+ DB(mvOsPrintf("mvPciScan: ERR. pPciAgentsNum=NULL \n"));
+ return MV_BAD_PARAM;
+ }
+
+
+ DB(mvOsPrintf("mvPciScan: PCI interface num %d mvPciMasterEnable\n", pciIf));
+ /* Master enable the MV PCI master */
+ if (MV_OK != mvPciIfMasterEnable(pciIf,MV_TRUE))
+ {
+ DB(mvOsPrintf("mvPciScan: ERR. mvPciMasterEnable failed \n"));
+ return MV_ERROR;
+
+ }
+
+ DB(mvOsPrintf("mvPciScan: PCI interface num scan%d\n", pciIf));
+
+ /* go through all busses */
+ for (busIndex=localBus ; busIndex < MAX_PCI_BUSSES ; busIndex++)
+ {
+ /* go through all possible devices on the local bus */
+ for (devIndex=0 ; devIndex < MAX_PCI_DEVICES ; devIndex++)
+ {
+ /* always start with function equal to zero */
+ funcIndex=0;
+
+ pPciDevice=&pPciAgents[detectedDevNum];
+ DB(mvOsPrintf("mvPciScan: PCI interface num scan%d:%d\n", busIndex, devIndex));
+
+ if (MV_ERROR == pciDetectDevice(pciIf,
+ busIndex,
+ devIndex,
+ funcIndex,
+ pPciDevice))
+ {
+ /* no device detected , try the next address */
+ continue;
+ }
+
+ /* We are here ! means we have detected a device*/
+ /* always we start with only one function per device */
+ pMainDevice = pPciDevice;
+ pPciDevice->funtionsNum = 1;
+
+
+ /* move on */
+ detectedDevNum++;
+
+
+ /* check if we have no more room for a new device */
+ if (detectedDevNum == *pPciAgentsNum)
+ {
+ DB(mvOsPrintf("mvPciScan: ERR. array passed too small \n"));
+ return MV_ERROR;
+ }
+
+ /* if the detected device is a multi-function device, then
+ scan all of its functions */
+ if (pPciDevice->isMultiFunction == MV_TRUE)
+ {
+ /* start with function number 1 because we have already detected
+ function 0 */
+ for (funcIndex=1; funcIndex<MAX_PCI_FUNCS ; funcIndex++)
+ {
+ pPciDevice=&pPciAgents[detectedDevNum];
+
+ if (MV_ERROR == pciDetectDevice(pciIf,
+ busIndex,
+ devIndex,
+ funcIndex,
+ pPciDevice))
+ {
+ /* no device detected means no more functions !*/
+ continue;
+ }
+ /* We are here ! means we have detected a device */
+
+ /* move on */
+ pMainDevice->funtionsNum++;
+ detectedDevNum++;
+
+ /* check if we have no more room for a new device */
+ if (detectedDevNum == *pPciAgentsNum)
+ {
+ DB(mvOsPrintf("mvPciScan: ERR. Array too small\n"));
+ return MV_ERROR;
+ }
+
+
+ }
+ }
+
+ }
+
+ }
+
+ /* return the number of devices actually detected on the bus ! */
+ *pPciAgentsNum = detectedDevNum;
+
+ return MV_OK;
+
+}
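+
+/* Editor's illustrative sketch (not part of the original Marvell HAL):
+   a caller typically provides a fixed-size agent array and passes its
+   capacity in/out through pPciAgentsNum, for example:
+
+       MV_PCI_DEVICE pciAgents[16];
+       MV_U32 agentsNum = 16;                  (capacity on entry)
+
+       if (MV_OK == mvPciScan(0, pciAgents, &agentsNum))
+       {
+           agentsNum now holds the number of agents actually detected
+       }
+
+   The interface number and array size above are example values only. */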
+
+
+/*******************************************************************************
+* pciDetectDevice - Detect a pci device parameters
+*
+* DESCRIPTION:
+* This function detects whether a PCI agent exists at a certain address
+* and, if so, fills in all available information about the
+* agent.
+*
+* INPUT:
+* pciIf - PCI Interface
+* bus - Bus number
+* dev - Device number
+* func - Function number
+*
+*
+*
+* OUTPUT:
+* pPciAgent - pointer to the pci agent filled with its information
+*
+* RETURN:
+* MV_ERROR if no device , MV_OK otherwise
+*
+*******************************************************************************/
+
+static MV_STATUS pciDetectDevice(MV_U32 pciIf,
+ MV_U32 bus,
+ MV_U32 dev,
+ MV_U32 func,
+ MV_PCI_DEVICE *pPciAgent)
+{
+ MV_U32 pciData;
+
+ /* no parameter checking here: this is a static function and all
+ parameters are assumed to have been checked by the calling function */
+
+
+ /* Try read the PCI Vendor ID and Device ID */
+
+ /* We scan only ourselves and the PCI slots that exist on the
+ board, because one of the slots may have a CardBus connector,
+ and since CardBus answers all IDsels we want to scan only
+ that slot and ourselves.
+ */
+ #if defined(MV_INCLUDE_PCI)
+ if ((PCI_IF_TYPE_CONVEN_PCIX == mvPciIfTypeGet(pciIf)) &&
+ (DB_88F5181_DDR1_PRPMC != mvBoardIdGet()) &&
+ (DB_88F5181_DDR1_PEXPCI != mvBoardIdGet()) &&
+ (DB_88F5181_DDR1_MNG != mvBoardIdGet()))
+ {
+
+ if (mvBoardIsOurPciSlot(bus, dev) == MV_FALSE)
+ {
+ return MV_ERROR;
+ }
+ }
+ #endif /* defined(MV_INCLUDE_PCI) */
+
+ pciData = mvPciIfConfigRead(pciIf, bus,dev,func, PCI_DEVICE_AND_VENDOR_ID);
+
+ if (PCI_ERROR_CODE == pciData)
+ {
+ /* no device exist */
+ return MV_ERROR;
+ }
+
+ /* we are here ! means a device is detected */
+
+ /* fill basic information */
+ pPciAgent->busNumber=bus;
+ pPciAgent->deviceNum=dev;
+ pPciAgent->function=func;
+
+ /* Fill the PCI Vendor ID and Device ID */
+
+ pPciAgent->venID = (pciData & PDVIR_VEN_ID_MASK) >> PDVIR_VEN_ID_OFFS;
+ pPciAgent->deviceID = (pciData & PDVIR_DEV_ID_MASK) >> PDVIR_DEV_ID_OFFS;
+
+ /* Read Status and command */
+ pciData = mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ PCI_STATUS_AND_COMMAND);
+
+
+ /* Fill related Status and Command information*/
+
+ if (pciData & PSCR_TAR_FAST_BB)
+ {
+ pPciAgent->isFastB2BCapable = MV_TRUE;
+ }
+ else
+ {
+ pPciAgent->isFastB2BCapable = MV_FALSE;
+ }
+
+ if (pciData & PSCR_CAP_LIST)
+ {
+ pPciAgent->isCapListSupport=MV_TRUE;
+ }
+ else
+ {
+ pPciAgent->isCapListSupport=MV_FALSE;
+ }
+
+ if (pciData & PSCR_66MHZ_EN)
+ {
+ pPciAgent->is66MHZCapable=MV_TRUE;
+ }
+ else
+ {
+ pPciAgent->is66MHZCapable=MV_FALSE;
+ }
+
+ /* Read Class Code and Revision */
+ pciData = mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ PCI_CLASS_CODE_AND_REVISION_ID);
+
+
+ pPciAgent->baseClassCode =
+ (pciData & PCCRIR_BASE_CLASS_MASK) >> PCCRIR_BASE_CLASS_OFFS;
+
+ pPciAgent->subClassCode =
+ (pciData & PCCRIR_SUB_CLASS_MASK) >> PCCRIR_SUB_CLASS_OFFS;
+
+ pPciAgent->progIf =
+ (pciData & PCCRIR_PROGIF_MASK) >> PCCRIR_PROGIF_OFFS;
+
+ pPciAgent->revisionID =
+ (pciData & PCCRIR_REVID_MASK) >> PCCRIR_REVID_OFFS;
+
+ /* Read PCI_BIST_HDR_TYPE_LAT_TMR_CACHE_LINE */
+ pciData = mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ PCI_BIST_HDR_TYPE_LAT_TMR_CACHE_LINE);
+
+
+
+ pPciAgent->pciCacheLine=
+ (pciData & PBHTLTCLR_CACHELINE_MASK ) >> PBHTLTCLR_CACHELINE_OFFS;
+ pPciAgent->pciLatencyTimer=
+ (pciData & PBHTLTCLR_LATTIMER_MASK) >> PBHTLTCLR_LATTIMER_OFFS;
+
+ switch (pciData & PBHTLTCLR_HEADER_MASK)
+ {
+ case PBHTLTCLR_HEADER_STANDARD:
+
+ pPciAgent->pciHeader=MV_PCI_STANDARD;
+ break;
+ case PBHTLTCLR_HEADER_PCI2PCI_BRIDGE:
+
+ pPciAgent->pciHeader=MV_PCI_PCI2PCI_BRIDGE;
+ break;
+
+ }
+
+ if (pciData & PBHTLTCLR_MULTI_FUNC)
+ {
+ pPciAgent->isMultiFunction=MV_TRUE;
+ }
+ else
+ {
+ pPciAgent->isMultiFunction=MV_FALSE;
+ }
+
+ if (pciData & PBHTLTCLR_BISTCAP)
+ {
+ pPciAgent->isBISTCapable=MV_TRUE;
+ }
+ else
+ {
+ pPciAgent->isBISTCapable=MV_FALSE;
+ }
+
+
+ /* read this device pci bars */
+
+ pciDetectDeviceBars(pciIf,
+ bus,dev,func,
+ pPciAgent);
+
+
+ /* check if this is a PCI-to-PCI bridge */
+ if ((pPciAgent->baseClassCode == PCI_BRIDGE_CLASS)&&
+ (pPciAgent->subClassCode == P2P_BRIDGE_SUB_CLASS_CODE))
+ {
+
+ /* Read P2P_BUSSES_NUM */
+ pciData = mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ P2P_BUSSES_NUM);
+
+ pPciAgent->p2pPrimBusNum =
+ (pciData & PBM_PRIME_BUS_NUM_MASK) >> PBM_PRIME_BUS_NUM_OFFS;
+
+ pPciAgent->p2pSecBusNum =
+ (pciData & PBM_SEC_BUS_NUM_MASK) >> PBM_SEC_BUS_NUM_OFFS;
+
+ pPciAgent->p2pSubBusNum =
+ (pciData & PBM_SUB_BUS_NUM_MASK) >> PBM_SUB_BUS_NUM_OFFS;
+
+ pPciAgent->p2pSecLatencyTimer =
+ (pciData & PBM_SEC_LAT_TMR_MASK) >> PBM_SEC_LAT_TMR_OFFS;
+
+ /* Read P2P_IO_BASE_LIMIT_SEC_STATUS */
+ pciData = mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ P2P_IO_BASE_LIMIT_SEC_STATUS);
+
+ pPciAgent->p2pSecStatus =
+ (pciData & PIBLSS_SEC_STATUS_MASK) >> PIBLSS_SEC_STATUS_OFFS;
+
+
+ pPciAgent->p2pIObase =
+ (pciData & PIBLSS_IO_BASE_MASK) << PIBLSS_IO_LIMIT_OFFS;
+
+ /* clear low address (should be zero)*/
+ pPciAgent->p2pIObase &= PIBLSS_HIGH_ADDR_MASK;
+
+ pPciAgent->p2pIOLimit =
+ (pciData & PIBLSS_IO_LIMIT_MASK);
+
+ /* fill low address with 0xfff */
+ pPciAgent->p2pIOLimit |= PIBLSS_LOW_ADDR_MASK;
+
+
+ switch ((pciData & PIBLSS_ADD_CAP_MASK) >> PIBLSS_ADD_CAP_OFFS)
+ {
+ case PIBLSS_ADD_CAP_16BIT:
+
+ pPciAgent->bIO32 = MV_FALSE;
+
+ break;
+ case PIBLSS_ADD_CAP_32BIT:
+
+ pPciAgent->bIO32 = MV_TRUE;
+
+ /* Read P2P_IO_BASE_LIMIT_UPPER_16 */
+ pciData = mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ P2P_IO_BASE_LIMIT_UPPER_16);
+
+ pPciAgent->p2pIObase |=
+ (pciData & PRBU_IO_UPP_BASE_MASK) << PRBU_IO_UPP_LIMIT_OFFS;
+
+
+ pPciAgent->p2pIOLimit |=
+ (pciData & PRBU_IO_UPP_LIMIT_MASK);
+
+ break;
+
+ }
+
+
+ /* Read P2P_MEM_BASE_LIMIT */
+ pciData = mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ P2P_MEM_BASE_LIMIT);
+
+ pPciAgent->p2pMemBase =
+ (pciData & PMBL_MEM_BASE_MASK) << PMBL_MEM_LIMIT_OFFS;
+
+ /* clear low address */
+ pPciAgent->p2pMemBase &= PMBL_HIGH_ADDR_MASK;
+
+ pPciAgent->p2pMemLimit =
+ (pciData & PMBL_MEM_LIMIT_MASK);
+
+ /* add 0xfffff */
+ pPciAgent->p2pMemLimit |= PMBL_LOW_ADDR_MASK;
+
+
+ /* Read P2P_PREF_MEM_BASE_LIMIT */
+ pciData = mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ P2P_PREF_MEM_BASE_LIMIT);
+
+
+ pPciAgent->p2pPrefMemBase =
+ (pciData & PRMBL_PREF_MEM_BASE_MASK) << PRMBL_PREF_MEM_LIMIT_OFFS;
+
+ /* get high address only */
+ pPciAgent->p2pPrefMemBase &= PRMBL_HIGH_ADDR_MASK;
+
+
+
+ pPciAgent->p2pPrefMemLimit =
+ (pciData & PRMBL_PREF_MEM_LIMIT_MASK);
+
+ /* add 0xfffff */
+ pPciAgent->p2pPrefMemLimit |= PRMBL_LOW_ADDR_MASK;
+
+ switch (pciData & PRMBL_ADD_CAP_MASK)
+ {
+ case PRMBL_ADD_CAP_32BIT:
+
+ pPciAgent->bPrefMem64 = MV_FALSE;
+
+ /* Read P2P_PREF_BASE_UPPER_32 */
+ pPciAgent->p2pPrefBaseUpper32Bits = 0;
+
+ /* Read P2P_PREF_LIMIT_UPPER_32 */
+ pPciAgent->p2pPrefLimitUpper32Bits = 0;
+
+ break;
+ case PRMBL_ADD_CAP_64BIT:
+
+ pPciAgent->bPrefMem64 = MV_TRUE;
+
+ /* Read P2P_PREF_BASE_UPPER_32 */
+ pPciAgent->p2pPrefBaseUpper32Bits = mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ P2P_PREF_BASE_UPPER_32);
+
+ /* Read P2P_PREF_LIMIT_UPPER_32 */
+ pPciAgent->p2pPrefLimitUpper32Bits = mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ P2P_PREF_LIMIT_UPPER_32);
+
+ break;
+
+ }
+
+ }
+ else /* no bridge */
+ {
+ /* Read PCI_SUBSYS_ID_AND_SUBSYS_VENDOR_ID */
+ pciData = mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ PCI_SUBSYS_ID_AND_SUBSYS_VENDOR_ID);
+
+
+ pPciAgent->subSysVenID =
+ (pciData & PSISVIR_VENID_MASK) >> PSISVIR_VENID_OFFS;
+ pPciAgent->subSysID =
+ (pciData & PSISVIR_DEVID_MASK) >> PSISVIR_DEVID_OFFS;
+
+
+ /* Read PCI_EXPANSION_ROM_BASE_ADDR_REG */
+ pciData = mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ PCI_EXPANSION_ROM_BASE_ADDR_REG);
+
+
+ if (pciData & PERBAR_EXPROMEN)
+ {
+ pPciAgent->isExpRom = MV_TRUE;
+ }
+ else
+ {
+ pPciAgent->isExpRom = MV_FALSE;
+ }
+
+ pPciAgent->expRomAddr =
+ (pciData & PERBAR_BASE_MASK) >> PERBAR_BASE_OFFS;
+
+ }
+
+
+ if (MV_TRUE == pPciAgent->isCapListSupport)
+ {
+ /* Read PCI_CAPABILTY_LIST_POINTER */
+ pciData = mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ PCI_CAPABILTY_LIST_POINTER);
+
+ pPciAgent->capListPointer =
+ (pciData & PCLPR_CAPPTR_MASK) >> PCLPR_CAPPTR_OFFS;
+
+ }
+
+ /* Read PCI_INTERRUPT_PIN_AND_LINE */
+ pciData = mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ PCI_INTERRUPT_PIN_AND_LINE);
+
+
+ pPciAgent->irqLine=
+ (pciData & PIPLR_INTLINE_MASK) >> PIPLR_INTLINE_OFFS;
+
+ pPciAgent->intPin=
+ (MV_PCI_INT_PIN)(pciData & PIPLR_INTPIN_MASK) >> PIPLR_INTPIN_OFFS;
+
+ pPciAgent->minGrant=
+ (pciData & PIPLR_MINGRANT_MASK) >> PIPLR_MINGRANT_OFFS;
+ pPciAgent->maxLatency=
+ (pciData & PIPLR_MAXLATEN_MASK) >> PIPLR_MAXLATEN_OFFS;
+
+ mvPciClassNameGet(pPciAgent->baseClassCode,
+ (MV_8 *)pPciAgent->type);
+
+ return MV_OK;
+
+
+}
+
+/*******************************************************************************
+* pciDetectDeviceBars - Detect a pci device bars
+*
+* DESCRIPTION:
+* This function detects all pci agent bars
+*
+* INPUT:
+* pciIf - PCI Interface
+* bus - Bus number
+* dev - Device number
+* func - Function number
+*
+*
+*
+* OUTPUT:
+* pPciAgent - pointer to the pci agent filled with its information
+*
+* RETURN:
+* detected bars number
+*
+*******************************************************************************/
+static MV_U32 pciDetectDeviceBars(MV_U32 pciIf,
+ MV_U32 bus,
+ MV_U32 dev,
+ MV_U32 func,
+ MV_PCI_DEVICE *pPciAgent)
+{
+ MV_U32 pciData,barIndex,detectedBar=0;
+ MV_U32 tmpBaseHigh=0,tmpBaseLow=0;
+ MV_U32 pciMaxBars=0;
+
+ pPciAgent->barsNum=0;
+
+ /* check if this is a PCI-to-PCI bridge */
+ if ((pPciAgent->baseClassCode == PCI_BRIDGE_CLASS)&&
+ (pPciAgent->subClassCode == P2P_BRIDGE_SUB_CLASS_CODE))
+ {
+ pciMaxBars = 2;
+ }
+ else /* no bridge */
+ {
+ pciMaxBars = 6;
+ }
+
+ /* read this device pci bars */
+ for (barIndex = 0 ; barIndex < pciMaxBars ; barIndex++ )
+ {
+ /* Read PCI_MEMORY_BAR_BASE_ADDR */
+ tmpBaseLow = pciData = mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ PCI_MEMORY_BAR_BASE_ADDR(barIndex));
+
+ pPciAgent->pciBar[detectedBar].barOffset =
+ PCI_MEMORY_BAR_BASE_ADDR(barIndex);
+
+ /* check if the bar is 32bit or 64bit bar */
+ switch (pciData & PBBLR_TYPE_MASK)
+ {
+ case PBBLR_TYPE_32BIT_ADDR:
+ pPciAgent->pciBar[detectedBar].barType = PCI_32BIT_BAR;
+ break;
+ case PBBLR_TYPE_64BIT_ADDR:
+ pPciAgent->pciBar[detectedBar].barType = PCI_64BIT_BAR;
+ break;
+
+ }
+
+ /* check if it is memory or IO bar */
+ if (pciData & PBBLR_IOSPACE)
+ {
+ pPciAgent->pciBar[detectedBar].barMapping=PCI_IO_BAR;
+ }
+ else
+ {
+ pPciAgent->pciBar[detectedBar].barMapping=PCI_MEMORY_BAR;
+ }
+
+ /* if it is memory bar then check if it is prefetchable */
+ if (PCI_MEMORY_BAR == pPciAgent->pciBar[detectedBar].barMapping)
+ {
+ if (pciData & PBBLR_PREFETCH_EN)
+ {
+ pPciAgent->pciBar[detectedBar].isPrefetchable = MV_TRUE;
+ }
+ else
+ {
+ pPciAgent->pciBar[detectedBar].isPrefetchable = MV_FALSE;
+ }
+
+ pPciAgent->pciBar[detectedBar].barBaseLow =
+ pciData & PBBLR_MEM_BASE_MASK;
+
+
+ }
+ else /* IO Bar */
+ {
+ pPciAgent->pciBar[detectedBar].barBaseLow =
+ pciData & PBBLR_IO_BASE_MASK;
+
+ }
+
+ pPciAgent->pciBar[detectedBar].barBaseHigh=0;
+
+ if (PCI_64BIT_BAR == pPciAgent->pciBar[detectedBar].barType)
+ {
+ barIndex++;
+
+ tmpBaseHigh = pPciAgent->pciBar[detectedBar].barBaseHigh =
+ mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ PCI_MEMORY_BAR_BASE_ADDR(barIndex));
+
+
+ }
+
+ /* calculating full base address (64bit) */
+ pPciAgent->pciBar[detectedBar].barBaseAddr =
+ (MV_U64)pPciAgent->pciBar[detectedBar].barBaseHigh;
+
+ pPciAgent->pciBar[detectedBar].barBaseAddr <<= 32;
+
+ pPciAgent->pciBar[detectedBar].barBaseAddr |=
+ (MV_U64)pPciAgent->pciBar[detectedBar].barBaseLow;
+
+
+
+ /* get the size of the bar */
+
+ pPciAgent->pciBar[detectedBar].barSizeHigh=0;
+
+ if ((PCI_64BIT_BAR == pPciAgent->pciBar[detectedBar].barType) &&
+ (PCI_MEMORY_BAR == pPciAgent->pciBar[detectedBar].barMapping))
+
+ {
+ /* write 0xffffffff to the bar to get the size */
+ /* start with size low (the original value was saved in tmpBaseLow) */
+ mvPciIfConfigWrite(pciIf,
+ bus,dev,func,
+ PCI_MEMORY_BAR_BASE_ADDR(barIndex-1),
+ 0xffffffff);
+
+ /* read size */
+ pPciAgent->pciBar[detectedBar].barSizeLow =
+ mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ PCI_MEMORY_BAR_BASE_ADDR(barIndex-1));
+
+
+
+ /* restore original value */
+ mvPciIfConfigWrite(pciIf,
+ bus,dev,func,
+ PCI_MEMORY_BAR_BASE_ADDR(barIndex-1),
+ tmpBaseLow);
+
+
+ /* now do the same for BaseHigh */
+
+ /* write 0xffffffff to the bar to get the size */
+ mvPciIfConfigWrite(pciIf,
+ bus,dev,func,
+ PCI_MEMORY_BAR_BASE_ADDR(barIndex),
+ 0xffffffff);
+
+ /* read size */
+ pPciAgent->pciBar[detectedBar].barSizeHigh =
+ mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ PCI_MEMORY_BAR_BASE_ADDR(barIndex));
+
+ /* restore original value */
+ mvPciIfConfigWrite(pciIf,
+ bus,dev,func,
+ PCI_MEMORY_BAR_BASE_ADDR(barIndex),
+ tmpBaseHigh);
+
+ if ((0 == pPciAgent->pciBar[detectedBar].barSizeLow)&&
+ (0 == pPciAgent->pciBar[detectedBar].barSizeHigh))
+ {
+ /* this bar is not applicable for this device,
+ ignore all previous settings and check the next bar*/
+
+ /* we thought this was a 64-bit bar, but that turned out
+ to be wrong, so decrement barIndex */
+ barIndex--;
+ continue;
+ }
+
+ /* calculate the full 64 bit size */
+
+ if (0 != pPciAgent->pciBar[detectedBar].barSizeHigh)
+ {
+ pPciAgent->pciBar[detectedBar].barSizeLow &= PBBLR_MEM_BASE_MASK;
+
+ pPciAgent->pciBar[detectedBar].barSizeLow =
+ ~pPciAgent->pciBar[detectedBar].barSizeLow + 1;
+
+ pPciAgent->pciBar[detectedBar].barSizeHigh = 0;
+
+ }
+ else
+ {
+
+ pPciAgent->pciBar[detectedBar].barSizeLow &= PBBLR_MEM_BASE_MASK;
+
+ pPciAgent->pciBar[detectedBar].barSizeLow =
+ ~pPciAgent->pciBar[detectedBar].barSizeLow + 1;
+
+ pPciAgent->pciBar[detectedBar].barSizeHigh = 0;
+
+ }
+
+
+
+ }
+ else /* 32bit bar */
+ {
+ /* write 0xffffffff to the bar to get the size */
+ mvPciIfConfigWrite(pciIf,
+ bus,dev,func,
+ PCI_MEMORY_BAR_BASE_ADDR(barIndex),
+ 0xffffffff);
+
+ /* read size */
+ pPciAgent->pciBar[detectedBar].barSizeLow =
+ mvPciIfConfigRead(pciIf,
+ bus,dev,func,
+ PCI_MEMORY_BAR_BASE_ADDR(barIndex));
+
+ if (0 == pPciAgent->pciBar[detectedBar].barSizeLow)
+ {
+ /* this bar is not applicable for this device,
+ ignore all previous settings and check the next bar*/
+ continue;
+ }
+
+
+ /* restore original value */
+ mvPciIfConfigWrite(pciIf,
+ bus,dev,func,
+ PCI_MEMORY_BAR_BASE_ADDR(barIndex),
+ tmpBaseLow);
+
+ /* calculate size low */
+
+ if (PCI_MEMORY_BAR == pPciAgent->pciBar[detectedBar].barMapping)
+ {
+ pPciAgent->pciBar[detectedBar].barSizeLow &= PBBLR_MEM_BASE_MASK;
+ }
+ else
+ {
+ pPciAgent->pciBar[detectedBar].barSizeLow &= PBBLR_IO_BASE_MASK;
+ }
+
+ pPciAgent->pciBar[detectedBar].barSizeLow =
+ ~pPciAgent->pciBar[detectedBar].barSizeLow + 1;
+
+ pPciAgent->pciBar[detectedBar].barSizeHigh = 0;
+ pPciAgent->pciBar[detectedBar].barSize =
+ (MV_U64)pPciAgent->pciBar[detectedBar].barSizeLow;
+
+
+ }
+
+ /* we are here ! this means we have already detected a bar for
+ this device , now move on */
+
+ detectedBar++;
+ pPciAgent->barsNum++;
+ }
+
+ return detectedBar;
+}
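+
+/* Editor's illustrative note (not part of the original Marvell HAL):
+   the probe above follows the standard PCI BAR sizing rule -- write all
+   ones to the BAR, read it back, mask off the type bits, then invert and
+   add one.  For example, for a 32-bit memory BAR whose read-back is
+   0xFFF00000:
+
+       size = ~(0xFFF00000 & PBBLR_MEM_BASE_MASK) + 1 = 0x00100000 (1 MB)
+
+   The read-back value 0xFFF00000 is an example only. */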
+
+
+/*******************************************************************************
+* mvPciClassNameGet - get PCI class name
+*
+* DESCRIPTION:
+* This function returns the PCI class name
+*
+* INPUT:
+* baseClassCode - Base Class Code.
+*
+* OUTPUT:
+* pType - the class name
+*
+* RETURN:
+* MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciClassNameGet(MV_U32 baseClassCode, MV_8 *pType)
+{
+
+ switch(baseClassCode)
+ {
+ case 0x0:
+ strcpy(pType,"Old generation device");
+ break;
+ case 0x1:
+ strcpy(pType,"Mass storage controller");
+ break;
+ case 0x2:
+ strcpy(pType,"Network controller");
+ break;
+ case 0x3:
+ strcpy(pType,"Display controller");
+ break;
+ case 0x4:
+ strcpy(pType,"Multimedia device");
+ break;
+ case 0x5:
+ strcpy(pType,"Memory controller");
+ break;
+ case 0x6:
+ strcpy(pType,"Bridge Device");
+ break;
+ case 0x7:
+ strcpy(pType,"Simple Communication controllers");
+ break;
+ case 0x8:
+ strcpy(pType,"Base system peripherals");
+ break;
+ case 0x9:
+ strcpy(pType,"Input Devices");
+ break;
+ case 0xa:
+ strcpy(pType,"Docking stations");
+ break;
+ case 0xb:
+ strcpy(pType,"Processors");
+ break;
+ case 0xc:
+ strcpy(pType,"Serial bus controllers");
+ break;
+ case 0xd:
+ strcpy(pType,"Wireless controllers");
+ break;
+ case 0xe:
+ strcpy(pType,"Intelligent I/O controllers");
+ break;
+ case 0xf:
+ strcpy(pType,"Satellite communication controllers");
+ break;
+ case 0x10:
+ strcpy(pType,"Encryption/Decryption controllers");
+ break;
+ case 0x11:
+ strcpy(pType,"Data acquisition and signal processing controllers");
+ break;
+ default:
+ strcpy(pType,"Unknown device");
+ break;
+ }
+
+ return MV_OK;
+
+}
+
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.h
new file mode 100644
index 000000000..2ee0b1738
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.h
@@ -0,0 +1,323 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvPciUtilsh
+#define __INCmvPciUtilsh
+
+/*
+This module only supports scanning of Header type 00h PCI devices.
+There is no support for Header type 01h PCI devices (PCI bridges).
+*/
+
+/* includes */
+#include "mvSysHwConfig.h"
+#include "pci-if/mvPciIf.h"
+#include "pci/mvPciRegs.h"
+
+
+
+/* PCI base address low bar mask */
+#define PCI_ERROR_CODE 0xffffffff
+
+#define PCI_BRIDGE_CLASS 0x6
+#define P2P_BRIDGE_SUB_CLASS_CODE 0x4
+
+
+#define P2P_BUSSES_NUM 0x18
+#define P2P_IO_BASE_LIMIT_SEC_STATUS 0x1C
+#define P2P_MEM_BASE_LIMIT 0x20
+#define P2P_PREF_MEM_BASE_LIMIT 0x24
+#define P2P_PREF_BASE_UPPER_32 0x28
+#define P2P_PREF_LIMIT_UPPER_32 0x2C
+#define P2P_IO_BASE_LIMIT_UPPER_16 0x30
+#define P2P_EXP_ROM 0x38
+
+/* P2P_BUSSES_NUM (PBM) */
+
+#define PBM_PRIME_BUS_NUM_OFFS 0
+#define PBM_PRIME_BUS_NUM_MASK (0xff << PBM_PRIME_BUS_NUM_OFFS)
+
+#define PBM_SEC_BUS_NUM_OFFS 8
+#define PBM_SEC_BUS_NUM_MASK (0xff << PBM_SEC_BUS_NUM_OFFS)
+
+#define PBM_SUB_BUS_NUM_OFFS 16
+#define PBM_SUB_BUS_NUM_MASK (0xff << PBM_SUB_BUS_NUM_OFFS)
+
+#define PBM_SEC_LAT_TMR_OFFS 24
+#define PBM_SEC_LAT_TMR_MASK (0xff << PBM_SEC_LAT_TMR_OFFS)
+
+/* P2P_IO_BASE_LIMIT_SEC_STATUS (PIBLSS) */
+
+#define PIBLSS_IO_BASE_OFFS 0
+#define PIBLSS_IO_BASE_MASK (0xff << PIBLSS_IO_BASE_OFFS)
+
+#define PIBLSS_ADD_CAP_OFFS 0
+#define PIBLSS_ADD_CAP_MASK (0x3 << PIBLSS_ADD_CAP_OFFS)
+#define PIBLSS_ADD_CAP_16BIT (0x0 << PIBLSS_ADD_CAP_OFFS)
+#define PIBLSS_ADD_CAP_32BIT (0x1 << PIBLSS_ADD_CAP_OFFS)
+
+#define PIBLSS_LOW_ADDR_OFFS 0
+#define PIBLSS_LOW_ADDR_MASK (0xFFF << PIBLSS_LOW_ADDR_OFFS)
+
+#define PIBLSS_HIGH_ADDR_OFFS 12
+#define PIBLSS_HIGH_ADDR_MASK (0xF << PIBLSS_HIGH_ADDR_OFFS)
+
+#define PIBLSS_IO_LIMIT_OFFS 8
+#define PIBLSS_IO_LIMIT_MASK (0xff << PIBLSS_IO_LIMIT_OFFS)
+
+#define PIBLSS_SEC_STATUS_OFFS 16
+#define PIBLSS_SEC_STATUS_MASK (0xffff << PIBLSS_SEC_STATUS_OFFS)
+
+
+/* P2P_MEM_BASE_LIMIT (PMBL)*/
+
+#define PMBL_MEM_BASE_OFFS 0
+#define PMBL_MEM_BASE_MASK (0xffff << PMBL_MEM_BASE_OFFS)
+
+#define PMBL_MEM_LIMIT_OFFS 16
+#define PMBL_MEM_LIMIT_MASK (0xffff << PMBL_MEM_LIMIT_OFFS)
+
+
+#define PMBL_LOW_ADDR_OFFS 0
+#define PMBL_LOW_ADDR_MASK (0xFFFFF << PMBL_LOW_ADDR_OFFS)
+
+#define PMBL_HIGH_ADDR_OFFS 20
+#define PMBL_HIGH_ADDR_MASK (0xFFF << PMBL_HIGH_ADDR_OFFS)
+
+
+/* P2P_PREF_MEM_BASE_LIMIT (PRMBL) */
+
+#define PRMBL_PREF_MEM_BASE_OFFS 0
+#define PRMBL_PREF_MEM_BASE_MASK (0xffff << PRMBL_PREF_MEM_BASE_OFFS)
+
+#define PRMBL_PREF_MEM_LIMIT_OFFS 16
+#define PRMBL_PREF_MEM_LIMIT_MASK (0xffff<<PRMBL_PREF_MEM_LIMIT_OFFS)
+
+#define PRMBL_LOW_ADDR_OFFS 0
+#define PRMBL_LOW_ADDR_MASK (0xFFFFF << PRMBL_LOW_ADDR_OFFS)
+
+#define PRMBL_HIGH_ADDR_OFFS 20
+#define PRMBL_HIGH_ADDR_MASK (0xFFF << PRMBL_HIGH_ADDR_OFFS)
+
+#define PRMBL_ADD_CAP_OFFS 0
+#define PRMBL_ADD_CAP_MASK (0xf << PRMBL_ADD_CAP_OFFS)
+#define PRMBL_ADD_CAP_32BIT (0x0 << PRMBL_ADD_CAP_OFFS)
+#define PRMBL_ADD_CAP_64BIT (0x1 << PRMBL_ADD_CAP_OFFS)
+
+/* P2P_IO_BASE_LIMIT_UPPER_16 (PIBLU) */
+
+#define PRBU_IO_UPP_BASE_OFFS 0
+#define PRBU_IO_UPP_BASE_MASK (0xffff << PRBU_IO_UPP_BASE_OFFS)
+
+#define PRBU_IO_UPP_LIMIT_OFFS 16
+#define PRBU_IO_UPP_LIMIT_MASK (0xffff << PRBU_IO_UPP_LIMIT_OFFS)
+
+
+/* typedefs */
+
+typedef enum _mvPciBarMapping
+{
+ PCI_MEMORY_BAR,
+ PCI_IO_BAR,
+ PCI_NO_MAPPING
+}MV_PCI_BAR_MAPPING;
+
+typedef enum _mvPciBarType
+{
+ PCI_32BIT_BAR,
+ PCI_64BIT_BAR
+}MV_PCI_BAR_TYPE;
+
+typedef enum _mvPciIntPin
+{
+ MV_PCI_INTA = 1,
+ MV_PCI_INTB = 2,
+ MV_PCI_INTC = 3,
+ MV_PCI_INTD = 4
+}MV_PCI_INT_PIN;
+
+typedef enum _mvPciHeader
+{
+ MV_PCI_STANDARD,
+ MV_PCI_PCI2PCI_BRIDGE
+
+}MV_PCI_HEADER;
+
+
+/* BAR structure */
+typedef struct _pciBar
+{
+ MV_U32 barOffset;
+ MV_U32 barBaseLow;
+ MV_U32 barBaseHigh;
+ MV_U32 barSizeLow;
+ MV_U32 barSizeHigh;
+ /* The 'barBaseAddr' is a 64-bit variable
+ that will contain the TOTAL base address
+ value achieved by combining both the 'barBaseLow'
+ and the 'barBaseHigh' parameters as follows:
+
+ BIT: 63 31 0
+ | | |
+ barBaseHigh barBaseLow */
+ MV_U64 barBaseAddr;
+ /* The 'barSize' is a 64-bit variable
+ that will contain the TOTAL size achieved
+ by combining both the 'barSizeLow' and
+ the 'barSizeHigh' parameters as follows:
+
+ BIT: 63 31 0
+ | | |
+ barSizeHigh barSizeLow
+
+ NOTE: The total size described above
+ is AFTER the size calculation as
+ described in PCI spec rev2.2 */
+ MV_U64 barSize;
+ MV_BOOL isPrefetchable;
+ MV_PCI_BAR_TYPE barType;
+ MV_PCI_BAR_MAPPING barMapping;
+
+
+} PCI_BAR;
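+
+/* Editor's illustrative note (not part of the original Marvell HAL):
+   the 64-bit fields above are simply the concatenation of the two 32-bit
+   halves, i.e.
+
+       barBaseAddr = ((MV_U64)barBaseHigh << 32) | barBaseLow;
+       barSize     = ((MV_U64)barSizeHigh << 32) | barSizeLow;
+*/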
+
+/* Device information structure */
+typedef struct _mvPciDevice
+{
+ /* Device specific information */
+ MV_U32 busNumber; /* Pci agent bus number */
+ MV_U32 deviceNum; /* Pci agent device number */
+ MV_U32 function; /* Pci agent function number */
+
+ MV_U32 venID; /* Pci agent Vendor ID */
+ MV_U32 deviceID; /* Pci agent Device ID */
+
+ MV_BOOL isFastB2BCapable; /* Capability of Fast Back to Back
+ transactions */
+ MV_BOOL isCapListSupport; /* Support of Capability list */
+ MV_BOOL is66MHZCapable; /* 66MHZ support */
+
+ MV_U32 baseClassCode; /* Pci agent base Class Code */
+ MV_U32 subClassCode; /* Pci agent sub Class Code */
+ MV_U32 progIf; /* Pci agent Programming interface */
+ MV_U32 revisionID;
+
+ PCI_BAR pciBar[6]; /* Pci agent bar list */
+
+ MV_U32 p2pPrimBusNum; /* P2P Primary Bus number*/
+ MV_U32 p2pSecBusNum; /* P2P Secondary Bus Number*/
+ MV_U32 p2pSubBusNum; /* P2P Subordinate bus Number */
+ MV_U32 p2pSecLatencyTimer; /* P2P Secondary Latency Timer */
+ MV_U32 p2pIObase; /* P2P IO Base */
+ MV_U32 p2pIOLimit; /* P2P IO Limit */
+ MV_BOOL bIO32;
+ MV_U32 p2pSecStatus; /* P2P Secondary Status */
+ MV_U32 p2pMemBase; /* P2P Memory Space */
+ MV_U32 p2pMemLimit; /* P2P Memory Limit*/
+ MV_U32 p2pPrefMemBase; /* P2P Prefetchable Mem Base*/
+ MV_U32 p2pPrefMemLimit; /* P2P Prefetchable Memory Limit*/
+ MV_BOOL bPrefMem64;
+ MV_U32 p2pPrefBaseUpper32Bits;/* P2P Prefetchable upper 32 bits*/
+ MV_U32 p2pPrefLimitUpper32Bits;/* P2P prefetchable limit upper 32*/
+
+
+ MV_U32 pciCacheLine; /* Pci agent cache line */
+ MV_U32 pciLatencyTimer; /* Pci agent Latency timer */
+ MV_PCI_HEADER pciHeader; /* Pci agent header type*/
+ MV_BOOL isMultiFunction; /* Multi function support */
+ MV_BOOL isBISTCapable; /* Self test capable */
+
+ MV_U32 subSysID; /* Sub System ID */
+ MV_U32 subSysVenID; /* Sub System Vendor ID */
+
+ MV_BOOL isExpRom; /* Expansion ROM support */
+ MV_U32 expRomAddr; /* Expansion ROM pointer */
+
+ MV_U32 capListPointer; /* Capability list pointer */
+
+ MV_U32 irqLine; /* IRQ line */
+ MV_PCI_INT_PIN intPin; /* Interrupt pin */
+ MV_U32 minGrant; /* Minimum grant*/
+ MV_U32 maxLatency; /* Maximum latency*/
+
+ MV_U32 funtionsNum; /* pci agent total functions number */
+
+ MV_U32 barsNum;
+ MV_U8 type[60]; /* class name of the pci agent */
+
+
+} MV_PCI_DEVICE;
+
+/* PCI global functions */
+MV_STATUS mvPciClassNameGet(MV_U32 classCode, MV_8 *pType);
+
+
+/* Performs a full scan on both PCIs and returns all possible details on the
+ agents found on the bus. */
+MV_STATUS mvPciScan(MV_U32 pciIf,
+ MV_PCI_DEVICE *pPciAgents,
+ MV_U32 *pPciAgentsNum);
+
+
+#endif /* #ifndef __INCmvPciUtilsh */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvCompVer.txt b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvCompVer.txt
new file mode 100644
index 000000000..7b6fe369c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvCompVer.txt
@@ -0,0 +1,4 @@
+Global HAL Version: FEROCEON_HAL_3_1_7
+Unit HAL Version: 3.1.2
+Description: This component includes an implementation of the unit HAL drivers
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.c
new file mode 100644
index 000000000..4a087343d
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.c
@@ -0,0 +1,1047 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "pci/mvPci.h"
+
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+/* defines */
+#ifdef MV_DEBUG
+ #define DB(x) x
+#else
+ #define DB(x)
+#endif
+
+
+
+MV_VOID mvPciHalInit(MV_U32 pciIf, MV_PCI_MOD pciIfmod)
+{
+ if (MV_PCI_MOD_HOST == pciIfmod)
+ {
+
+ mvPciLocalBusNumSet(pciIf, PCI_HOST_BUS_NUM(pciIf));
+ mvPciLocalDevNumSet(pciIf, PCI_HOST_DEV_NUM(pciIf));
+
+ /* Local device master Enable */
+ mvPciMasterEnable(pciIf, MV_TRUE);
+
+ /* Local device slave Enable */
+ mvPciSlaveEnable(pciIf, mvPciLocalBusNumGet(pciIf),
+ mvPciLocalDevNumGet(pciIf), MV_TRUE);
+ }
+ /* enable CPU-2-PCI ordering */
+ MV_REG_BIT_SET(PCI_CMD_REG(0), PCR_CPU_TO_PCI_ORDER_EN);
+}
+
+/*******************************************************************************
+* mvPciCommandSet - Set PCI command register value.
+*
+* DESCRIPTION:
+* This function sets a given PCI interface with its command register
+* value.
+*
+* INPUT:
+* pciIf - PCI interface number.
+* command - 32bit value to be written to the command register.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM if pciIf is not in range otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciCommandSet(MV_U32 pciIf, MV_U32 command)
+{
+ MV_U32 locBusNum, locDevNum, regVal;
+
+ locBusNum = mvPciLocalBusNumGet(pciIf);
+ locDevNum = mvPciLocalDevNumGet(pciIf);
+
+ /* Parameter checking */
+ if (pciIf >= mvCtrlPciMaxIfGet())
+ {
+ mvOsPrintf("mvPciCommandSet: ERR. Invalid PCI IF num %d\n", pciIf);
+ return MV_BAD_PARAM;
+ }
+
+ /* Set command register */
+ MV_REG_WRITE(PCI_CMD_REG(pciIf), command);
+
+ /* Update device max outstanding split transaction */
+ if ((command & PCR_CPU_TO_PCI_ORDER_EN) &&
+ (command & PCR_PCI_TO_CPU_ORDER_EN))
+ {
+ /* Read PCI-X command register */
+ regVal = mvPciConfigRead (pciIf, locBusNum, locDevNum, 0, PCIX_COMMAND);
+
+ /* clear bits 22:20 */
+ regVal &= 0xff8fffff;
+
+ /* set reset value */
+ regVal |= (0x3 << 20);
+
+ /* Write back the value */
+ mvPciConfigWrite (pciIf, locBusNum, locDevNum, 0, PCIX_COMMAND, regVal);
+ }
+
+ return MV_OK;
+
+
+}
+
+
+/*******************************************************************************
+* mvPciModeGet - Get PCI interface mode.
+*
+* DESCRIPTION:
+* This function returns the given PCI interface mode.
+*
+* INPUT:
+* pciIf - PCI interface number.
+*
+* OUTPUT:
+* pPciMode - Pointer to PCI mode structure.
+*
+* RETURN:
+* MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciModeGet(MV_U32 pciIf, MV_PCI_MODE *pPciMode)
+{
+ MV_U32 pciMode;
+
+ /* Parameter checking */
+ if (pciIf >= mvCtrlPciMaxIfGet())
+ {
+ mvOsPrintf("mvPciModeGet: ERR. Invalid PCI interface %d\n", pciIf);
+ return MV_BAD_PARAM;
+ }
+ if (NULL == pPciMode)
+ {
+ mvOsPrintf("mvPciModeGet: ERR. pPciMode = NULL \n");
+ return MV_BAD_PARAM;
+ }
+
+ /* Read pci mode register */
+ pciMode = MV_REG_READ(PCI_MODE_REG(pciIf));
+
+ switch (pciMode & PMR_PCI_MODE_MASK)
+ {
+ case PMR_PCI_MODE_CONV:
+ pPciMode->pciType = MV_PCI_CONV;
+
+ if (MV_REG_READ(PCI_DLL_CTRL_REG(pciIf)) & PDC_DLL_EN)
+ {
+ pPciMode->pciSpeed = 66000000; /* 66MHZ */
+ }
+ else
+ {
+ pPciMode->pciSpeed = 33000000; /* 33MHZ */
+ }
+
+ break;
+
+ case PMR_PCI_MODE_PCIX_66MHZ:
+ pPciMode->pciType = MV_PCIX;
+ pPciMode->pciSpeed = 66000000; /* 66MHZ */
+ break;
+
+ case PMR_PCI_MODE_PCIX_100MHZ:
+ pPciMode->pciType = MV_PCIX;
+ pPciMode->pciSpeed = 100000000; /* 100MHZ */
+ break;
+
+ case PMR_PCI_MODE_PCIX_133MHZ:
+ pPciMode->pciType = MV_PCIX;
+ pPciMode->pciSpeed = 133000000; /* 133MHZ */
+ break;
+
+ default:
+ {
+ mvOsPrintf("mvPciModeGet: ERR. Non existing mode !!\n");
+ return MV_ERROR;
+ }
+ }
+
+ switch (pciMode & PMR_PCI_64_MASK)
+ {
+ case PMR_PCI_64_64BIT:
+ pPciMode->pciWidth = MV_PCI_64;
+ break;
+
+ case PMR_PCI_64_32BIT:
+ pPciMode->pciWidth = MV_PCI_32;
+ break;
+
+ default:
+ {
+ mvOsPrintf("mvPciModeGet: ERR. Non existing mode !!\n");
+ return MV_ERROR;
+ }
+ }
+
+ return MV_OK;
+}
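+
+/* Editor's illustrative usage sketch (not part of the original Marvell
+   HAL):
+
+       MV_PCI_MODE mode;
+
+       if (MV_OK == mvPciModeGet(0, &mode))
+       {
+           mode.pciType  is MV_PCI_CONV or MV_PCIX
+           mode.pciSpeed is the bus clock in Hz (e.g. 66000000)
+           mode.pciWidth is MV_PCI_32 or MV_PCI_64
+       }
+
+   Interface number 0 is an example value only. */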
+
+/*******************************************************************************
+* mvPciRetrySet - Set PCI retry counters
+*
+* DESCRIPTION:
+* This function specifies the number of times the PCI controller
+* retries a transaction before it quits.
+* Applies to the PCI Master when acting as a requester.
+* Applies to the PCI slave when acting as a completer (PCI-X mode).
+* A 0x00 value means a "retry forever".
+*
+* INPUT:
+* pciIf - PCI interface number.
+* counter - Number of times PCI controller retry. Use counter value
+* up to PRR_RETRY_CNTR_MAX.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciRetrySet(MV_U32 pciIf, MV_U32 counter)
+{
+ MV_U32 pciRetry;
+
+ /* Parameter checking */
+ if (pciIf >= mvCtrlPciMaxIfGet())
+ {
+ mvOsPrintf("mvPciRetrySet: ERR. Invalid PCI interface %d\n", pciIf);
+ return MV_BAD_PARAM;
+ }
+
+ if (counter >= PRR_RETRY_CNTR_MAX)
+ {
+ mvOsPrintf("mvPciRetrySet: ERR. Invalid counter: %d\n", counter);
+ return MV_BAD_PARAM;
+
+ }
+
+ /* Reading PCI retry register */
+ pciRetry = MV_REG_READ(PCI_RETRY_REG(pciIf));
+
+ pciRetry &= ~PRR_RETRY_CNTR_MASK;
+
+ pciRetry |= (counter << PRR_RETRY_CNTR_OFFS);
+
+ /* write new value */
+ MV_REG_WRITE(PCI_RETRY_REG(pciIf), pciRetry);
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPciDiscardTimerSet - Set PCI discard timer
+*
+* DESCRIPTION:
+* This function set PCI discard timer.
+* In conventional PCI mode:
+* Specifies the number of PCLK cycles the PCI slave keeps a non-accessed
+* read buffer (non-completed delayed read) before invalidating the buffer.
+* Set to '0' to disable the timer. The PCI slave waits for delayed
+* read completion forever.
+* In PCI-X mode:
+* Specifies the number of PCLK cycles the PCI master waits for split
+* completion transaction, before it invalidates the pre-allocated read
+* buffer.
+* Set to '0' to disable the timer. The PCI master waits for split
+* completion forever.
+* NOTE: Must be set to a number greater than MV_PCI_MAX_DISCARD_CLK,
+* unless using the "wait for ever" setting 0x0.
+* NOTE: Must not be updated while there are pending read requests.
+*
+* INPUT:
+* pciIf - PCI interface number.
+* pClkCycles - Number of PCI clock cycles.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciDiscardTimerSet(MV_U32 pciIf, MV_U32 pClkCycles)
+{
+ MV_U32 pciDiscardTimer;
+
+ /* Parameter checking */
+ if (pciIf >= mvCtrlPciMaxIfGet())
+ {
+ mvOsPrintf("mvPciDiscardTimerSet: ERR. Invalid PCI interface %d\n",
+ pciIf);
+ return MV_BAD_PARAM;
+ }
+
+ if (pClkCycles >= PDTR_TIMER_MIN)
+ {
+ mvOsPrintf("mvPciDiscardTimerSet: ERR. Invalid Clk value: %d\n",
+ pClkCycles);
+ return MV_BAD_PARAM;
+
+ }
+
+ /* Read PCI Discard Timer */
+ pciDiscardTimer = MV_REG_READ(PCI_DISCARD_TIMER_REG(pciIf));
+
+ pciDiscardTimer &= ~PDTR_TIMER_MASK;
+
+ pciDiscardTimer |= (pClkCycles << PDTR_TIMER_OFFS);
+
+ /* Write new value */
+ MV_REG_WRITE(PCI_DISCARD_TIMER_REG(pciIf), pciDiscardTimer);
+
+ return MV_OK;
+
+}
+
+/* PCI Arbiter routines */
+
+/*******************************************************************************
+* mvPciArbEnable - PCI arbiter enable/disable
+*
+* DESCRIPTION:
+* This function enables/disables a given PCI interface arbiter.
+* NOTE: The arbiter setting cannot be changed while it is in use. It should
+* only be set once.
+* INPUT:
+* pciIf - PCI interface number.
+* enable - Enable/disable parameter. If enable = MV_TRUE then enable.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_STATUS mvPciArbEnable(MV_U32 pciIf, MV_BOOL enable)
+{
+ MV_U32 regVal;
+
+ /* Parameter checking */
+ if (pciIf >= mvCtrlPciMaxIfGet())
+ {
+ mvOsPrintf("mvPciArbEnable: ERR. Invalid PCI interface %d\n", pciIf);
+ return MV_ERROR;
+ }
+
+ /* Set PCI Arbiter Control register according to default configuration */
+ regVal = MV_REG_READ(PCI_ARBITER_CTRL_REG(pciIf));
+
+ /* Make sure arbiter disabled before changing its values */
+ MV_REG_BIT_RESET(PCI_ARBITER_CTRL_REG(pciIf), PACR_ARB_ENABLE);
+
+ regVal &= ~PCI_ARBITER_CTRL_DEFAULT_MASK;
+
+ regVal |= PCI_ARBITER_CTRL_DEFAULT; /* Set default configuration */
+
+ if (MV_TRUE == enable)
+ {
+ regVal |= PACR_ARB_ENABLE;
+ }
+ else
+ {
+ regVal &= ~PACR_ARB_ENABLE;
+ }
+
+ /* Write to register */
+ MV_REG_WRITE(PCI_ARBITER_CTRL_REG(pciIf), regVal);
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPciArbParkDis - Disable arbiter parking on agent
+*
+* DESCRIPTION:
+* This function disables the PCI arbiter from parking on the given agent
+* list.
+*
+* INPUT:
+* pciIf - PCI interface number.
+* pciAgentMask - When a bit in the mask is set to '1', parking on
+* the associated PCI master is disabled. Mask bits
+* 0 - 6 are used. For example, to disable parking on PCI
+* agent 3, set pciAgentMask to 0x8 (bit 3 set).
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+MV_STATUS mvPciArbParkDis(MV_U32 pciIf, MV_U32 pciAgentMask)
+{
+ MV_U32 pciArbiterCtrl;
+
+ /* Parameter checking */
+ if (pciIf >= mvCtrlPciMaxIfGet())
+ {
+ mvOsPrintf("mvPciArbParkDis: ERR. Invalid PCI interface %d\n", pciIf);
+ return MV_ERROR;
+ }
+
+ /* Reading Arbiter Control register */
+ pciArbiterCtrl = MV_REG_READ(PCI_ARBITER_CTRL_REG(pciIf));
+
+ /* Arbiter must be disabled before changing parking */
+ MV_REG_BIT_RESET(PCI_ARBITER_CTRL_REG(pciIf), PACR_ARB_ENABLE);
+
+ /* do the change */
+ pciArbiterCtrl &= ~PACR_PARK_DIS_MASK;
+ pciArbiterCtrl |= (pciAgentMask << PACR_PARK_DIS_OFFS);
+
+ /* write the new value (if the arbiter was enabled before the change, */
+ /* it is re-enabled here) */
+ MV_REG_WRITE(PCI_ARBITER_CTRL_REG(pciIf), pciArbiterCtrl);
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPciArbBrokDetectSet - Set PCI arbiter broken detection
+*
+* DESCRIPTION:
+* This function sets the maximum number of cycles that the arbiter
+* waits for a PCI master to respond to its grant assertion. If a
+* PCI agent fails to respond within this time, the PCI arbiter aborts
+* the transaction and performs a new arbitration cycle.
+* NOTE: Value must be greater than '1' for conventional PCI and
+* greater than '5' for PCI-X.
+*
+* INPUT:
+* pciIf - PCI interface number.
+* pClkCycles - Number of PCI clock cycles. If equal to '0' the broken
+* master detection is disabled.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciArbBrokDetectSet(MV_U32 pciIf, MV_U32 pClkCycles)
+{
+ MV_U32 pciArbiterCtrl;
+ MV_U32 pciMode;
+
+ /* Parameter checking */
+ if (pciIf >= mvCtrlPciMaxIfGet())
+ {
+ mvOsPrintf("mvPciArbBrokDetectSet: ERR. Invalid PCI interface %d\n",
+ pciIf);
+ return MV_BAD_PARAM;
+ }
+
+ /* Checking PCI mode and if pClkCycles is legal value */
+ pciMode = MV_REG_READ(PCI_MODE_REG(pciIf));
+ pciMode &= PMR_PCI_MODE_MASK;
+
+ if (PMR_PCI_MODE_CONV == pciMode)
+ {
+ if (pClkCycles < PACR_BROKEN_VAL_CONV_MIN)
+ return MV_ERROR;
+ }
+ else
+ {
+ if (pClkCycles < PACR_BROKEN_VAL_PCIX_MIN)
+ return MV_ERROR;
+ }
+
+ pClkCycles <<= PACR_BROKEN_VAL_OFFS;
+
+ /* Reading Arbiter Control register */
+ pciArbiterCtrl = MV_REG_READ(PCI_ARBITER_CTRL_REG(pciIf));
+ pciArbiterCtrl &= ~PACR_BROKEN_VAL_MASK;
+ pciArbiterCtrl |= pClkCycles;
+
+ /* Arbiter must be disabled before changing broken detection */
+ MV_REG_BIT_RESET(PCI_ARBITER_CTRL_REG(pciIf), PACR_ARB_ENABLE);
+
+ /* write the new value (if the arbiter was enabled before the change, */
+ /* it is re-enabled here) */
+
+ MV_REG_WRITE(PCI_ARBITER_CTRL_REG(pciIf), pciArbiterCtrl);
+
+ return MV_OK;
+}
+
+/* PCI configuration space read write */
+
+/*******************************************************************************
+* mvPciConfigRead - Read from configuration space
+*
+* DESCRIPTION:
+* This function performs a 32 bit read from PCI configuration space.
+* It supports both type 0 and type 1 of Configuration Transactions
+* (local and over bridge). In order to read from the local bus segment, use
+* the bus number retrieved from mvPciLocalBusNumGet(). Other bus numbers
+* result in a configuration transaction of type 1 (over bridge).
+*
+* INPUT:
+* pciIf - PCI interface number.
+* bus - PCI segment bus number.
+* dev - PCI device number.
+* func - Function number.
+* regOffs - Register offset.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32bit register data, 0xffffffff on error
+*
+*******************************************************************************/
+MV_U32 mvPciConfigRead (MV_U32 pciIf, MV_U32 bus, MV_U32 dev, MV_U32 func,
+ MV_U32 regOff)
+{
+ MV_U32 pciData = 0;
+
+ /* Parameter checking */
+ if (PCI_DEFAULT_IF != pciIf)
+ {
+ if (pciIf >= mvCtrlPciMaxIfGet())
+ {
+ mvOsPrintf("mvPciConfigRead: ERR. Invalid PCI interface %d\n",pciIf);
+ return 0xFFFFFFFF;
+ }
+ }
+
+ if (dev >= MAX_PCI_DEVICES)
+ {
+ DB(mvOsPrintf("mvPciConfigRead: ERR. device number illigal %d\n", dev));
+ return 0xFFFFFFFF;
+ }
+
+ if (func >= MAX_PCI_FUNCS)
+ {
+ DB(mvOsPrintf("mvPciConfigRead: ERR. function number illigal %d\n", func));
+ return 0xFFFFFFFF;
+ }
+
+ if (bus >= MAX_PCI_BUSSES)
+ {
+ DB(mvOsPrintf("mvPciConfigRead: ERR. bus number illigal %d\n", bus));
+ return MV_ERROR;
+ }
+
+
+ /* Creating PCI address to be passed */
+ pciData |= (bus << PCAR_BUS_NUM_OFFS);
+ pciData |= (dev << PCAR_DEVICE_NUM_OFFS);
+ pciData |= (func << PCAR_FUNC_NUM_OFFS);
+ pciData |= (regOff & PCAR_REG_NUM_MASK);
+
+ pciData |= PCAR_CONFIG_EN;
+
+ /* Write the address to the PCI configuration address register */
+ MV_REG_WRITE(PCI_CONFIG_ADDR_REG(pciIf), pciData);
+
+ /* In order to let the PCI controller absorb the address of the read */
+ /* transaction, we perform a validity check that the address was written */
+ if(pciData != MV_REG_READ(PCI_CONFIG_ADDR_REG(pciIf)))
+ {
+ return MV_ERROR;
+ }
+ /* Read the Data returned in the PCI Data register */
+ pciData = MV_REG_READ(PCI_CONFIG_DATA_REG(pciIf));
+
+ return pciData;
+}
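+
+/* Editor's illustrative usage sketch (not part of the original Marvell
+   HAL): reading the Device/Vendor ID dword of the local device and
+   splitting it into its two 16-bit fields, for example:
+
+       MV_U32 bus  = mvPciLocalBusNumGet(0);
+       MV_U32 dev  = mvPciLocalDevNumGet(0);
+       MV_U32 id   = mvPciConfigRead(0, bus, dev, 0, PCI_DEVICE_AND_VENDOR_ID);
+       MV_U32 vendorId = id & 0xffff;      (vendor ID in the low 16 bits)
+       MV_U32 deviceId = id >> 16;         (device ID in the high 16 bits)
+
+   Interface number 0 is an example; the bit split assumes the same PDVIR_*
+   field layout used elsewhere in this HAL. */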
+
+/*******************************************************************************
+* mvPciConfigWrite - Write to configuration space
+*
+* DESCRIPTION:
+* This function performs a 32 bit write to PCI configuration space.
+* It supports both type 0 and type 1 of Configuration Transactions
+* (local and over bridge). In order to write to the local bus segment, use
+* the bus number retrieved from mvPciLocalBusNumGet(). Other bus numbers
+* result in a configuration transaction of type 1 (over bridge).
+*
+* INPUT:
+* pciIf - PCI interface number.
+* bus - PCI segment bus number.
+* dev - PCI device number.
+* func - Function number.
+* regOff - Register offset.
+* data - 32bit data.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPciConfigWrite(MV_U32 pciIf, MV_U32 bus, MV_U32 dev,
+ MV_U32 func, MV_U32 regOff, MV_U32 data)
+{
+ MV_U32 pciData = 0;
+
+ /* Parameter checking */
+ if (PCI_DEFAULT_IF != pciIf)
+ {
+ if (pciIf >= mvCtrlPciMaxIfGet())
+ {
+ mvOsPrintf("mvPciConfigWrite: ERR. Invalid PCI interface %d\n",
+ pciIf);
+ return MV_BAD_PARAM;
+ }
+ }
+
+ if (dev >= MAX_PCI_DEVICES)
+ {
+ mvOsPrintf("mvPciConfigWrite: ERR. device number illigal %d\n",dev);
+ return MV_BAD_PARAM;
+ }
+
+ if (func >= MAX_PCI_FUNCS)
+ {
+ mvOsPrintf("mvPciConfigWrite: ERR. function number illigal %d\n", func);
+ return MV_ERROR;
+ }
+
+ if (bus >= MAX_PCI_BUSSES)
+ {
+ mvOsPrintf("mvPciConfigWrite: ERR. bus number illigal %d\n", bus);
+ return MV_ERROR;
+ }
+
+ /* Creating PCI address to be passed */
+ pciData |= (bus << PCAR_BUS_NUM_OFFS);
+ pciData |= (dev << PCAR_DEVICE_NUM_OFFS);
+ pciData |= (func << PCAR_FUNC_NUM_OFFS);
+ pciData |= (regOff & PCAR_REG_NUM_MASK);
+
+ pciData |= PCAR_CONFIG_EN;
+
+ /* Write the address to the PCI configuration address register */
+ MV_REG_WRITE(PCI_CONFIG_ADDR_REG(pciIf), pciData);
+
+ /* In order to let the PCI controller absorb the address of the write */
+ /* transaction, we perform a validity check that the address was written */
+ if(pciData != MV_REG_READ(PCI_CONFIG_ADDR_REG(pciIf)))
+ {
+ return MV_ERROR;
+ }
+
+ /* Write the Data passed to the PCI Data register */
+ MV_REG_WRITE(PCI_CONFIG_DATA_REG(pciIf), data);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvPciMasterEnable - Enable/disable PCI interface master transactions.
+*
+* DESCRIPTION:
+* This function performs a read-modify-write to the PCI command/status
+* register (offset 0x4) to set/reset bit 2. After this bit is set, the PCI
+* master is allowed to gain ownership of the bus; otherwise it is
+* unable to do so.
+*
+* INPUT:
+* pciIf - PCI interface number.
+* enable - Enable/disable parameter.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPciMasterEnable(MV_U32 pciIf, MV_BOOL enable)
+{
+ MV_U32 pciCommandStatus;
+ MV_U32 RegOffs;
+ MV_U32 localBus;
+ MV_U32 localDev;
+
+ /* Parameter checking */
+ if (pciIf >= mvCtrlPciMaxIfGet())
+ {
+ mvOsPrintf("mvPciMasterEnable: ERR. Invalid PCI interface %d\n", pciIf);
+ return MV_ERROR;
+ }
+
+ localBus = mvPciLocalBusNumGet(pciIf);
+ localDev = mvPciLocalDevNumGet(pciIf);
+
+ RegOffs = PCI_STATUS_AND_COMMAND;
+
+ pciCommandStatus = mvPciConfigRead(pciIf, localBus, localDev, 0, RegOffs);
+
+ if (MV_TRUE == enable)
+ {
+ pciCommandStatus |= PSCR_MASTER_EN;
+ }
+ else
+ {
+ pciCommandStatus &= ~PSCR_MASTER_EN;
+ }
+
+ mvPciConfigWrite(pciIf, localBus, localDev, 0, RegOffs, pciCommandStatus);
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPciSlaveEnable - Enable/disable PCI interface slave transactions.
+*
+* DESCRIPTION:
+* This function performs a read-modify-write to the PCI command/status
+* register (offset 0x4) to set/reset bits 0 and 1. After those bits are set,
+* the PCI slave is allowed to respond to PCI IO space access (bit 0)
+* and PCI memory space access (bit 1).
+*
+* INPUT:
+* pciIf - PCI interface number.
+* dev - PCI device number.
+* enable - Enable/disable parameter.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPciSlaveEnable(MV_U32 pciIf, MV_U32 bus, MV_U32 dev, MV_BOOL enable)
+{
+ MV_U32 pciCommandStatus;
+ MV_U32 RegOffs;
+
+ /* Parameter checking */
+ if (pciIf >= mvCtrlPciMaxIfGet())
+ {
+ mvOsPrintf("mvPciSlaveEnable: ERR. Invalid PCI interface %d\n", pciIf);
+ return MV_BAD_PARAM;
+ }
+ if (dev >= MAX_PCI_DEVICES)
+ {
+ mvOsPrintf("mvPciLocalDevNumSet: ERR. device number illigal %d\n", dev);
+ return MV_BAD_PARAM;
+
+ }
+
+ RegOffs = PCI_STATUS_AND_COMMAND;
+
+ pciCommandStatus=mvPciConfigRead(pciIf, bus, dev, 0, RegOffs);
+
+ if (MV_TRUE == enable)
+ {
+ pciCommandStatus |= (PSCR_IO_EN | PSCR_MEM_EN);
+ }
+ else
+ {
+ pciCommandStatus &= ~(PSCR_IO_EN | PSCR_MEM_EN);
+ }
+
+ mvPciConfigWrite(pciIf, bus, dev, 0, RegOffs, pciCommandStatus);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvPciLocalBusNumSet - Set PCI interface local bus number.
+*
+* DESCRIPTION:
+* This function sets the local bus number of the given PCI interface.
+* Note: In case the PCI interface is PCI-X, the information is read-only.
+*
+* INPUT:
+* pciIf - PCI interface number.
+* busNum - Bus number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_NOT_ALLOWED in case the PCI interface is PCI-X,
+* MV_BAD_PARAM on bad parameters,
+* otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPciLocalBusNumSet(MV_U32 pciIf, MV_U32 busNum)
+{
+ MV_U32 pciP2PConfig;
+ MV_PCI_MODE pciMode;
+ MV_U32 localBus;
+ MV_U32 localDev;
+
+
+ /* Parameter checking */
+ if (pciIf >= mvCtrlPciMaxIfGet())
+ {
+ mvOsPrintf("mvPciLocalBusNumSet: ERR. Invalid PCI interface %d\n",pciIf);
+ return MV_BAD_PARAM;
+ }
+ if (busNum >= MAX_PCI_BUSSES)
+ {
+ mvOsPrintf("mvPciLocalBusNumSet: ERR. bus number illigal %d\n", busNum);
+ return MV_ERROR;
+
+ }
+
+ localBus = mvPciLocalBusNumGet(pciIf);
+ localDev = mvPciLocalDevNumGet(pciIf);
+
+
+ /* PCI interface mode */
+ mvPciModeGet(pciIf, &pciMode);
+
+ /* In PCI-X mode the bus number is updated through the PCI-X status register */
+ if (MV_PCIX == pciMode.pciType)
+ {
+ pciP2PConfig = mvPciConfigRead(pciIf, localBus, localDev, 0, PCIX_STATUS );
+
+ pciP2PConfig &= ~PXS_BN_MASK;
+
+ pciP2PConfig |= (busNum << PXS_BN_OFFS) & PXS_BN_MASK;
+
+ mvPciConfigWrite(pciIf, localBus, localDev, 0, PCIX_STATUS,pciP2PConfig );
+
+ }
+ else
+ {
+ pciP2PConfig = MV_REG_READ(PCI_P2P_CONFIG_REG(pciIf));
+
+ pciP2PConfig &= ~PPCR_BUS_NUM_MASK;
+
+ pciP2PConfig |= (busNum << PPCR_BUS_NUM_OFFS) & PPCR_BUS_NUM_MASK;
+
+ MV_REG_WRITE(PCI_P2P_CONFIG_REG(pciIf), pciP2PConfig);
+
+ }
+
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPciLocalBusNumGet - Get PCI interface local bus number.
+*
+* DESCRIPTION:
+* This function gets the local bus number of a given PCI interface.
+*
+* INPUT:
+* pciIf - PCI interface number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Local bus number, 0xffffffff on error.
+*
+*******************************************************************************/
+MV_U32 mvPciLocalBusNumGet(MV_U32 pciIf)
+{
+ MV_U32 pciP2PConfig;
+
+ /* Parameter checking */
+ if (PCI_DEFAULT_IF != pciIf)
+ {
+ if (pciIf >= mvCtrlPciMaxIfGet())
+ {
+ mvOsPrintf("mvPciLocalBusNumGet: ERR. Invalid PCI interface %d\n",
+ pciIf);
+ return 0xFFFFFFFF;
+ }
+ }
+
+ pciP2PConfig = MV_REG_READ(PCI_P2P_CONFIG_REG(pciIf));
+ pciP2PConfig &= PPCR_BUS_NUM_MASK;
+ return (pciP2PConfig >> PPCR_BUS_NUM_OFFS);
+}
+
+
+/*******************************************************************************
+* mvPciLocalDevNumSet - Set PCI interface local device number.
+*
+* DESCRIPTION:
+* This function sets the local device number of the given PCI interface.
+* Note: In case the PCI interface is PCI-X, the information is read-only.
+*
+* INPUT:
+* pciIf - PCI interface number.
+* devNum - Device number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_NOT_ALLOWED in case the PCI interface is PCI-X, MV_BAD_PARAM on bad parameters,
+* otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPciLocalDevNumSet(MV_U32 pciIf, MV_U32 devNum)
+{
+ MV_U32 pciP2PConfig;
+ MV_PCI_MODE pciMode;
+ MV_U32 localBus;
+ MV_U32 localDev;
+
+ /* Parameter checking */
+ if (pciIf >= mvCtrlPciMaxIfGet())
+ {
+ mvOsPrintf("mvPciLocalDevNumSet: ERR. Invalid PCI interface %d\n",pciIf);
+ return MV_BAD_PARAM;
+ }
+ if (devNum >= MAX_PCI_DEVICES)
+ {
+ mvOsPrintf("mvPciLocalDevNumSet: ERR. device number illigal %d\n",
+ devNum);
+ return MV_BAD_PARAM;
+
+ }
+
+ localBus = mvPciLocalBusNumGet(pciIf);
+ localDev = mvPciLocalDevNumGet(pciIf);
+
+ /* PCI interface mode */
+ mvPciModeGet(pciIf, &pciMode);
+
+ /* In PCI-X mode the device number is updated through the PCI-X status register */
+ if (MV_PCIX == pciMode.pciType)
+ {
+ pciP2PConfig = mvPciConfigRead(pciIf, localBus, localDev, 0, PCIX_STATUS );
+
+ pciP2PConfig &= ~PXS_DN_MASK;
+
+ pciP2PConfig |= (devNum << PXS_DN_OFFS) & PXS_DN_MASK;
+
+ mvPciConfigWrite(pciIf,localBus, localDev, 0, PCIX_STATUS,pciP2PConfig );
+ }
+ else
+ {
+ pciP2PConfig = MV_REG_READ(PCI_P2P_CONFIG_REG(pciIf));
+
+ pciP2PConfig &= ~PPCR_DEV_NUM_MASK;
+
+ pciP2PConfig |= (devNum << PPCR_DEV_NUM_OFFS) & PPCR_DEV_NUM_MASK;
+
+ MV_REG_WRITE(PCI_P2P_CONFIG_REG(pciIf), pciP2PConfig);
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvPciLocalDevNumGet - Get PCI interface local device number.
+*
+* DESCRIPTION:
+* This function gets the local device number of a given PCI interface.
+*
+* INPUT:
+* pciIf - PCI interface number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Local device number, 0xffffffff on error.
+*
+*******************************************************************************/
+MV_U32 mvPciLocalDevNumGet(MV_U32 pciIf)
+{
+ MV_U32 pciP2PConfig;
+
+ /* Parameter checking */
+
+ if (PCI_DEFAULT_IF != pciIf)
+ {
+ if (pciIf >= mvCtrlPciMaxIfGet())
+ {
+ mvOsPrintf("mvPciLocalDevNumGet: ERR. Invalid PCI interface %d\n",
+ pciIf);
+ return 0xFFFFFFFF;
+ }
+ }
+
+ pciP2PConfig = MV_REG_READ(PCI_P2P_CONFIG_REG(pciIf));
+
+ pciP2PConfig &= PPCR_DEV_NUM_MASK;
+
+ return (pciP2PConfig >> PPCR_DEV_NUM_OFFS);
+}
+
+
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.h
new file mode 100644
index 000000000..474633627
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.h
@@ -0,0 +1,185 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCPCIH
+#define __INCPCIH
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "pci/mvPciRegs.h"
+
+
+/* NOTE not supported in this driver:
+
+ Built In Self Test (BIST)
+ Vital Product Data (VPD)
+ Message Signaled Interrupt (MSI)
+ Power Management
+ Compact PCI Hot Swap
+ Header retarget
+
+Registers not supported:
+1) PCI DLL Status and Control (PCI0 0x1D20, PCI1 0x1DA0)
+2) PCI/MPP Pads Calibration (PCI0/MPP[31:16] 0x1D1C, PCI1/MPP[15:0] 0x1D9C)
+*/
+
+/* defines */
+/* The number of supported PCI interfaces depends on the Marvell controller */
+/* device number. This device ID is located in the PCI unit configuration */
+/* header, which creates a loop: calling the PCI configuration read/write */
+/* routine results in a call to get PCI configuration information, etc. */
+/* This macro therefore defines a default PCI interface that is guaranteed */
+/* to exist. */
+#define PCI_DEFAULT_IF 0
+
+
+/* typedefs */
+/* The Marvell controller supports both conventional PCI and PCI-X. */
+/* This enumeration describes the PCI type. */
+typedef enum _mvPciType
+{
+ MV_PCI_CONV, /* Conventional PCI */
+ MV_PCIX /* PCI-X */
+}MV_PCI_TYPE;
+
+typedef enum _mvPciMod
+{
+ MV_PCI_MOD_HOST,
+ MV_PCI_MOD_DEVICE
+}MV_PCI_MOD;
+
+
+/* The Marvell controller supports PCI bus widths of both 32 and 64 bit. */
+/* This enumerator describes the PCI width. */
+typedef enum _mvPciWidth
+{
+ MV_PCI_32, /* PCI width 32bit */
+ MV_PCI_64 /* PCI width 64bit */
+}MV_PCI_WIDTH;
+
+/* This structure describes the PCI unit configured type, speed and width. */
+typedef struct _mvPciMode
+{
+ MV_PCI_TYPE pciType; /* PCI type */
+ MV_U32 pciSpeed; /* Assuming PCI base clock on board is 33MHz */
+ MV_PCI_WIDTH pciWidth; /* PCI bus width */
+}MV_PCI_MODE;
+
+/* mvPciHalInit - Initialize PCI interfaces */
+MV_VOID mvPciHalInit(MV_U32 pciIf, MV_PCI_MOD pciIfmod);
+
+/* mvPciCommandSet - Set PCI command register value.*/
+MV_STATUS mvPciCommandSet(MV_U32 pciIf, MV_U32 command);
+
+/* mvPciModeGet - Get PCI interface mode.*/
+MV_STATUS mvPciModeGet(MV_U32 pciIf, MV_PCI_MODE *pPciMode);
+
+/* mvPciRetrySet - Set PCI retry counters*/
+MV_STATUS mvPciRetrySet(MV_U32 pciIf, MV_U32 counter);
+
+/* mvPciDiscardTimerSet - Set PCI discard timer*/
+MV_STATUS mvPciDiscardTimerSet(MV_U32 pciIf, MV_U32 pClkCycles);
+
+/* mvPciArbEnable - PCI arbiter enable/disable*/
+MV_STATUS mvPciArbEnable(MV_U32 pciIf, MV_BOOL enable);
+
+/* mvPciArbParkDis - Disable arbiter parking on agent */
+MV_STATUS mvPciArbParkDis(MV_U32 pciIf, MV_U32 pciAgentMask);
+
+/* mvPciArbBrokDetectSet - Set PCI arbiter broken detection */
+MV_STATUS mvPciArbBrokDetectSet(MV_U32 pciIf, MV_U32 pClkCycles);
+
+/* mvPciConfigRead - Read from configuration space */
+MV_U32 mvPciConfigRead (MV_U32 pciIf, MV_U32 bus, MV_U32 dev,
+ MV_U32 func,MV_U32 regOff);
+
+/* mvPciConfigWrite - Write to configuration space */
+MV_STATUS mvPciConfigWrite(MV_U32 pciIf, MV_U32 bus, MV_U32 dev,
+ MV_U32 func, MV_U32 regOff, MV_U32 data);
+
+/* mvPciMasterEnable - Enable/disable PCI interface master transactions.*/
+MV_STATUS mvPciMasterEnable(MV_U32 pciIf, MV_BOOL enable);
+
+/* mvPciSlaveEnable - Enable/disable PCI interface slave transactions.*/
+MV_STATUS mvPciSlaveEnable(MV_U32 pciIf, MV_U32 bus, MV_U32 dev,MV_BOOL enable);
+
+/* mvPciLocalBusNumSet - Set PCI interface local bus number.*/
+MV_STATUS mvPciLocalBusNumSet(MV_U32 pciIf, MV_U32 busNum);
+
+/* mvPciLocalBusNumGet - Get PCI interface local bus number.*/
+MV_U32 mvPciLocalBusNumGet(MV_U32 pciIf);
+
+/* mvPciLocalDevNumSet - Set PCI interface local device number.*/
+MV_STATUS mvPciLocalDevNumSet(MV_U32 pciIf, MV_U32 devNum);
+
+/* mvPciLocalDevNumGet - Get PCI interface local device number.*/
+MV_U32 mvPciLocalDevNumGet(MV_U32 pciIf);
+
+
+#endif /* #ifndef __INCPCIH */
+
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPciRegs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPciRegs.h
new file mode 100644
index 000000000..89d0ef12d
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPciRegs.h
@@ -0,0 +1,411 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCPCIREGSH
+#define __INCPCIREGSH
+
+
+#include "pci-if/mvPciIfRegs.h"
+/* defines */
+#define MAX_PCI_DEVICES 32
+#define MAX_PCI_FUNCS 8
+#define MAX_PCI_BUSSES 128
+
+/* enumerators */
+
+/* This enumerator describes the possible PCI slave targets. */
+/* PCI slave targets are designated memory/IO address spaces that the */
+/* PCI slave can access. They are also referred to as "targets". */
+/* The enumerator order is determined by the content of the */
+/* PCI_BASE_ADDR_ENABLE_REG register. */
+
+
+/* register offset defines */
+
+
+
+/*************************/
+/* PCI control registers */
+/*************************/
+/* maen : should add new registers */
+#define PCI_CMD_REG(pciIf) (0x30c00 + ((pciIf) * 0x80))
+#define PCI_MODE_REG(pciIf) (0x30d00 + ((pciIf) * 0x80))
+#define PCI_RETRY_REG(pciIf) (0x30c04 + ((pciIf) * 0x80))
+#define PCI_DISCARD_TIMER_REG(pciIf) (0x30d04 + ((pciIf) * 0x80))
+#define PCI_ARBITER_CTRL_REG(pciIf) (0x31d00 + ((pciIf) * 0x80))
+#define PCI_P2P_CONFIG_REG(pciIf) (0x31d14 + ((pciIf) * 0x80))
+#define PCI_ACCESS_CTRL_BASEL_REG(pciIf, targetWin) \
+ (0x31e00 + ((pciIf) * 0x80) + ((targetWin) * 0x10))
+#define PCI_ACCESS_CTRL_BASEH_REG(pciIf, targetWin) \
+ (0x31e04 + ((pciIf) * 0x80) + ((targetWin) * 0x10))
+#define PCI_ACCESS_CTRL_SIZE_REG(pciIf, targetWin) \
+ (0x31e08 + ((pciIf) * 0x80) + ((targetWin) * 0x10))
+
+#define PCI_DLL_CTRL_REG(pciIf) (0x31d20 + ((pciIf) * 0x80))
+
+/* PCI Dll Control (PDC)*/
+#define PDC_DLL_EN BIT0
+
+
+/* PCI Command Register (PCR) */
+#define PCR_MASTER_BYTE_SWAP_EN BIT0
+#define PCR_MASTER_WR_COMBINE_EN BIT4
+#define PCR_MASTER_RD_COMBINE_EN BIT5
+#define PCR_MASTER_WR_TRIG_WHOLE BIT6
+#define PCR_MASTER_RD_TRIG_WHOLE BIT7
+#define PCR_MASTER_MEM_RD_LINE_EN BIT8
+#define PCR_MASTER_MEM_RD_MULT_EN BIT9
+#define PCR_MASTER_WORD_SWAP_EN BIT10
+#define PCR_SLAVE_WORD_SWAP_EN BIT11
+#define PCR_NS_ACCORDING_RCV_TRANS BIT14
+#define PCR_MASTER_PCIX_REQ64N_EN BIT15
+#define PCR_SLAVE_BYTE_SWAP_EN BIT16
+#define PCR_MASTER_DAC_EN BIT17
+#define PCR_MASTER_M64_ALLIGN BIT18
+#define PCR_ERRORS_PROPAGATION_EN BIT19
+#define PCR_SLAVE_SWAP_ENABLE BIT20
+#define PCR_MASTER_SWAP_ENABLE BIT21
+#define PCR_MASTER_INT_SWAP_EN BIT22
+#define PCR_LOOP_BACK_ENABLE BIT23
+#define PCR_SLAVE_INTREG_SWAP_OFFS 24
+#define PCR_SLAVE_INTREG_SWAP_MASK 0x3
+#define PCR_SLAVE_INTREG_BYTE_SWAP \
+ (MV_BYTE_SWAP << PCR_SLAVE_INTREG_SWAP_OFFS)
+#define PCR_SLAVE_INTREG_NO_SWAP \
+ (MV_NO_SWAP << PCR_SLAVE_INTREG_SWAP_OFFS)
+#define PCR_SLAVE_INTREG_BYTE_WORD \
+ (MV_BYTE_WORD_SWAP << PCR_SLAVE_INTREG_SWAP_OFFS)
+#define PCR_SLAVE_INTREG_WORD_SWAP \
+ (MV_WORD_SWAP << PCR_SLAVE_INTREG_SWAP_OFFS)
+#define PCR_RESET_REASSERTION_EN BIT26
+#define PCR_PCI_TO_CPU_REG_ORDER_EN BIT28
+#define PCR_CPU_TO_PCI_ORDER_EN BIT29
+#define PCR_PCI_TO_CPU_ORDER_EN BIT30
+
+/* PCI Mode Register (PMR) */
+#define PMR_PCI_ID_OFFS 0 /* PCI Interface ID */
+#define PMR_PCI_ID_MASK (0x1 << PMR_PCI_ID_OFFS)
+#define PMR_PCI_ID_PCI(pciNum) ((pciNum) << PMR_PCI_ID_OFFS)
+
+#define PMR_PCI_64_OFFS 2 /* 64-bit PCI Interface */
+#define PMR_PCI_64_MASK (0x1 << PMR_PCI_64_OFFS)
+#define PMR_PCI_64_64BIT (0x1 << PMR_PCI_64_OFFS)
+#define PMR_PCI_64_32BIT (0x0 << PMR_PCI_64_OFFS)
+
+#define PMR_PCI_MODE_OFFS 4 /* PCI interface mode of operation */
+#define PMR_PCI_MODE_MASK (0x3 << PMR_PCI_MODE_OFFS)
+#define PMR_PCI_MODE_CONV (0x0 << PMR_PCI_MODE_OFFS)
+#define PMR_PCI_MODE_PCIX_66MHZ (0x1 << PMR_PCI_MODE_OFFS)
+#define PMR_PCI_MODE_PCIX_100MHZ (0x2 << PMR_PCI_MODE_OFFS)
+#define PMR_PCI_MODE_PCIX_133MHZ (0x3 << PMR_PCI_MODE_OFFS)
+
+#define PMR_EXP_ROM_SUPPORT BIT8 /* Expansion ROM Active */
+
+#define PMR_PCI_RESET_OFFS 31 /* PCI Interface Reset Indication */
+#define PMR_PCI_RESET_MASK (0x1 << PMR_PCI_RESET_OFFS)
+#define PMR_PCI_RESET_PCIXRST (0x0 << PMR_PCI_RESET_OFFS)
+
+
+/* PCI Retry Register (PRR) */
+#define PRR_RETRY_CNTR_OFFS 16 /* Retry Counter */
+#define PRR_RETRY_CNTR_MAX 0xff
+#define PRR_RETRY_CNTR_MASK (PRR_RETRY_CNTR_MAX << PRR_RETRY_CNTR_OFFS)
+
+
+/* PCI Discard Timer Register (PDTR) */
+#define PDTR_TIMER_OFFS 0 /* Timer */
+#define PDTR_TIMER_MAX 0xffff
+#define PDTR_TIMER_MIN 0x7F
+#define PDTR_TIMER_MASK (PDTR_TIMER_MAX << PDTR_TIMER_OFFS)
+
+
+/* PCI Arbiter Control Register (PACR) */
+#define PACR_BROKEN_DETECT_EN BIT1 /* Broken Detection Enable */
+
+#define PACR_BROKEN_VAL_OFFS 3 /* Broken Value */
+#define PACR_BROKEN_VAL_MASK (0xf << PACR_BROKEN_VAL_OFFS)
+#define PACR_BROKEN_VAL_CONV_MIN 0x2
+#define PACR_BROKEN_VAL_PCIX_MIN 0x6
+
+#define PACR_PARK_DIS_OFFS 14 /* Parking Disable */
+#define PACR_PARK_DIS_MAX_AGENT 0x3f
+#define PACR_PARK_DIS_MASK (PACR_PARK_DIS_MAX_AGENT<<PACR_PARK_DIS_OFFS)
+#define PACR_PARK_DIS(agent) ((1 << (agent)) << PACR_PARK_DIS_OFFS)
+
+#define PACR_ARB_ENABLE BIT31 /* Enable Internal Arbiter */
+
+
+/* PCI P2P Configuration Register (PPCR) */
+#define PPCR_2ND_BUS_L_OFFS 0 /* 2nd PCI Interface Bus Range Lower */
+#define PPCR_2ND_BUS_L_MASK (0xff << PPCR_2ND_BUS_L_OFFS)
+
+#define PPCR_2ND_BUS_H_OFFS 8 /* 2nd PCI Interface Bus Range Upper */
+#define PPCR_2ND_BUS_H_MASK (0xff << PPCR_2ND_BUS_H_OFFS)
+
+#define PPCR_BUS_NUM_OFFS 16 /* The PCI interface's Bus number */
+#define PPCR_BUS_NUM_MASK (0xff << PPCR_BUS_NUM_OFFS)
+
+#define PPCR_DEV_NUM_OFFS 24 /* The PCI interface's Device number */
+#define PPCR_DEV_NUM_MASK (0xff << PPCR_DEV_NUM_OFFS)
+
+
+/* PCI Access Control Base Low Register (PACBLR) */
+#define PACBLR_EN BIT0 /* Access control window enable */
+
+#define PACBLR_ACCPROT BIT4 /* Access Protect */
+#define PACBLR_WRPROT BIT5 /* Write Protect */
+
+#define PACBLR_PCISWAP_OFFS 6 /* PCI slave Data Swap Control */
+#define PACBLR_PCISWAP_MASK (0x3 << PACBLR_PCISWAP_OFFS)
+#define PACBLR_PCISWAP_BYTE (0x0 << PACBLR_PCISWAP_OFFS)
+#define PACBLR_PCISWAP_NO_SWAP (0x1 << PACBLR_PCISWAP_OFFS)
+#define PACBLR_PCISWAP_BYTE_WORD (0x2 << PACBLR_PCISWAP_OFFS)
+#define PACBLR_PCISWAP_WORD (0x3 << PACBLR_PCISWAP_OFFS)
+
+#define PACBLR_RDMBURST_OFFS 8 /* Read Max Burst */
+#define PACBLR_RDMBURST_MASK (0x3 << PACBLR_RDMBURST_OFFS)
+#define PACBLR_RDMBURST_32BYTE (0x0 << PACBLR_RDMBURST_OFFS)
+#define PACBLR_RDMBURST_64BYTE (0x1 << PACBLR_RDMBURST_OFFS)
+#define PACBLR_RDMBURST_128BYTE (0x2 << PACBLR_RDMBURST_OFFS)
+
+#define PACBLR_RDSIZE_OFFS 10 /* Typical PCI read transaction Size. */
+#define PACBLR_RDSIZE_MASK (0x3 << PACBLR_RDSIZE_OFFS)
+#define PACBLR_RDSIZE_32BYTE (0x0 << PACBLR_RDSIZE_OFFS)
+#define PACBLR_RDSIZE_64BYTE (0x1 << PACBLR_RDSIZE_OFFS)
+#define PACBLR_RDSIZE_128BYTE (0x2 << PACBLR_RDSIZE_OFFS)
+#define PACBLR_RDSIZE_256BYTE (0x3 << PACBLR_RDSIZE_OFFS)
+
+#define PACBLR_BASE_L_OFFS 12 /* Corresponds to address bits [31:12] */
+#define PACBLR_BASE_L_MASK (0xfffff << PACBLR_BASE_L_OFFS)
+#define PACBLR_BASE_L_ALIGNMENT (1 << PACBLR_BASE_L_OFFS)
+#define PACBLR_BASE_ALIGN_UP(base) \
+ ((base+PACBLR_BASE_L_ALIGNMENT)&PACBLR_BASE_L_MASK)
+#define PACBLR_BASE_ALIGN_DOWN(base) (base & PACBLR_BASE_L_MASK)
+
+
+/* PCI Access Control Base High Register (PACBHR) */
+#define PACBHR_BASE_H_OFFS 0 /* Corresponds to address bits [63:32] */
+#define PACBHR_CTRL_BASE_H_MASK (0xffffffff << PACBHR_BASE_H_OFFS)
+
+/* PCI Access Control Size Register (PACSR) */
+#define PACSR_WRMBURST_OFFS 8 /* Write Max Burst */
+#define PACSR_WRMBURST_MASK (0x3 << PACSR_WRMBURST_OFFS)
+#define PACSR_WRMBURST_32BYTE (0x0 << PACSR_WRMBURST_OFFS)
+#define PACSR_WRMBURST_64BYTE (0x1 << PACSR_WRMBURST_OFFS)
+#define PACSR_WRMBURST_128BYTE (0x2 << PACSR_WRMBURST_OFFS)
+
+#define PACSR_PCI_ORDERING BIT11 /* PCI Ordering required */
+
+#define PACSR_SIZE_OFFS 12 /* PCI access window size */
+#define PACSR_SIZE_MASK (0xfffff << PACSR_SIZE_OFFS)
+#define PACSR_SIZE_ALIGNMENT (1 << PACSR_SIZE_OFFS)
+#define PACSR_SIZE_ALIGN_UP(size) \
+ ((size+PACSR_SIZE_ALIGNMENT)&PACSR_SIZE_MASK)
+#define PACSR_SIZE_ALIGN_DOWN(size) (size & PACSR_SIZE_MASK)
+
+
+/***************************************/
+/* PCI Configuration Access Registers */
+/***************************************/
+
+#define PCI_CONFIG_ADDR_REG(pciIf) (0x30C78 - ((pciIf) * 0x80) )
+#define PCI_CONFIG_DATA_REG(pciIf) (0x30C7C - ((pciIf) * 0x80) )
+#define PCI_INT_ACK_REG(pciIf) (0x30C34 + ((pciIf) * 0x80) )
+
+/* PCI Configuration Address Register (PCAR) */
+#define PCAR_REG_NUM_OFFS 2
+#define PCAR_REG_NUM_MASK (0x3F << PCAR_REG_NUM_OFFS)
+
+#define PCAR_FUNC_NUM_OFFS 8
+#define PCAR_FUNC_NUM_MASK (0x7 << PCAR_FUNC_NUM_OFFS)
+
+#define PCAR_DEVICE_NUM_OFFS 11
+#define PCAR_DEVICE_NUM_MASK (0x1F << PCAR_DEVICE_NUM_OFFS)
+
+#define PCAR_BUS_NUM_OFFS 16
+#define PCAR_BUS_NUM_MASK (0xFF << PCAR_BUS_NUM_OFFS)
+
+#define PCAR_CONFIG_EN BIT31
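+
+/* Worked example (illustrative, not part of the original header): the
+ * configuration address for bus 0, device 1, function 0, register 0x04 is
+ * composed the same way mvPciConfigRead()/mvPciConfigWrite() compose it:
+ *
+ * (0 << PCAR_BUS_NUM_OFFS) | (1 << PCAR_DEVICE_NUM_OFFS) |
+ * (0 << PCAR_FUNC_NUM_OFFS) | (0x04 & PCAR_REG_NUM_MASK) | PCAR_CONFIG_EN
+ * = 0x00000000 | 0x00000800 | 0x00000000 | 0x00000004 | 0x80000000
+ * = 0x80000804
+ *
+ * This value is written to PCI_CONFIG_ADDR_REG() before accessing
+ * PCI_CONFIG_DATA_REG().
+ */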
+
+
+/***************************************/
+/* PCI Configuration registers */
+/***************************************/
+
+/*********************************************/
+/* PCI Configuration, Function 0, Registers */
+/*********************************************/
+
+/* Marvell Specific */
+#define PCI_SCS0_BASE_ADDR_LOW 0x010
+#define PCI_SCS0_BASE_ADDR_HIGH 0x014
+#define PCI_SCS1_BASE_ADDR_LOW 0x018
+#define PCI_SCS1_BASE_ADDR_HIGH 0x01C
+#define PCI_INTER_REG_MEM_MAPPED_BASE_ADDR_L 0x020
+#define PCI_INTER_REG_MEM_MAPPED_BASE_ADDR_H 0x024
+
+/* capability list */
+#define PCI_POWER_MNG_CAPABILITY 0x040
+#define PCI_POWER_MNG_STATUS_CONTROL 0x044
+#define PCI_VPD_ADDRESS_REG 0x048
+#define PCI_VPD_DATA_REG 0x04c
+#define PCI_MSI_MESSAGE_CONTROL 0x050
+#define PCI_MSI_MESSAGE_ADDR 0x054
+#define PCI_MSI_MESSAGE_UPPER_ADDR 0x058
+#define PCI_MSI_MESSAGE_DATA 0x05c
+#define PCIX_COMMAND 0x060
+#define PCIX_STATUS 0x064
+#define PCI_COMPACT_PCI_HOT_SWAP 0x068
+
+
+/*********************************************/
+/* PCI Configuration, Function 1, Registers */
+/*********************************************/
+
+#define PCI_SCS2_BASE_ADDR_LOW 0x10
+#define PCI_SCS2_BASE_ADDR_HIGH 0x14
+#define PCI_SCS3_BASE_ADDR_LOW 0x18
+#define PCI_SCS3_BASE_ADDR_HIGH 0x1c
+
+
+/***********************************************/
+/* PCI Configuration, Function 2, Registers */
+/***********************************************/
+
+#define PCI_DEVCS0_BASE_ADDR_LOW 0x10
+#define PCI_DEVCS0_BASE_ADDR_HIGH 0x14
+#define PCI_DEVCS1_BASE_ADDR_LOW 0x18
+#define PCI_DEVCS1_BASE_ADDR_HIGH 0x1c
+#define PCI_DEVCS2_BASE_ADDR_LOW 0x20
+#define PCI_DEVCS2_BASE_ADDR_HIGH 0x24
+
+/***********************************************/
+/* PCI Configuration, Function 3, Registers */
+/***********************************************/
+
+#define PCI_BOOTCS_BASE_ADDR_LOW 0x18
+#define PCI_BOOTCS_BASE_ADDR_HIGH 0x1c
+
+/***********************************************/
+/* PCI Configuration, Function 4, Registers */
+/***********************************************/
+
+#define PCI_P2P_MEM0_BASE_ADDR_LOW 0x10
+#define PCI_P2P_MEM0_BASE_ADDR_HIGH 0x14
+#define PCI_P2P_IO_BASE_ADDR 0x20
+#define PCI_INTER_REGS_IO_MAPPED_BASE_ADDR 0x24
+
+/* PCIX_STATUS register fields (PXS) */
+
+#define PXS_FN_OFFS 0 /* Function Number */
+#define PXS_FN_MASK (0x7 << PXS_FN_OFFS)
+
+#define PXS_DN_OFFS 3 /* Device Number */
+#define PXS_DN_MASK (0x1f << PXS_DN_OFFS)
+
+#define PXS_BN_OFFS 8 /* Bus Number */
+#define PXS_BN_MASK (0xff << PXS_BN_OFFS)
+
+
+/* PCI Error Report Register Map */
+#define PCI_SERRN_MASK_REG(pciIf) (0x30c28 + (pciIf * 0x80))
+#define PCI_CAUSE_REG(pciIf) (0x31d58 + (pciIf * 0x80))
+#define PCI_MASK_REG(pciIf) (0x31d5C + (pciIf * 0x80))
+#define PCI_ERROR_ADDR_LOW_REG(pciIf) (0x31d40 + (pciIf * 0x80))
+#define PCI_ERROR_ADDR_HIGH_REG(pciIf) (0x31d44 + (pciIf * 0x80))
+#define PCI_ERROR_ATTRIBUTE_REG(pciIf) (0x31d48 + (pciIf * 0x80))
+#define PCI_ERROR_COMMAND_REG(pciIf) (0x31d50 + (pciIf * 0x80))
+
+/* PCI Interrupt Cause Register (PICR) */
+#define PICR_ERR_SEL_OFFS 27
+#define PICR_ERR_SEL_MASK (0x1f << PICR_ERR_SEL_OFFS)
+
+/* PCI Error Command Register (PECR) */
+#define PECR_ERR_CMD_OFFS 0
+#define PECR_ERR_CMD_MASK (0xf << PECR_ERR_CMD_OFFS)
+#define PECR_DAC BIT4
+
+
+/* defaults */
+/* Set bits mean the value is about to change according to the new value */
+#define PCI_COMMAND_DEFAULT_MASK 0xffffdff1
+#define PCI_COMMAND_DEFAULT \
+ (PCR_MASTER_WR_TRIG_WHOLE | \
+ PCR_MASTER_RD_TRIG_WHOLE | \
+ PCR_MASTER_MEM_RD_LINE_EN | \
+ PCR_MASTER_MEM_RD_MULT_EN | \
+ PCR_NS_ACCORDING_RCV_TRANS | \
+ PCR_MASTER_PCIX_REQ64N_EN | \
+ PCR_MASTER_DAC_EN | \
+ PCR_MASTER_M64_ALLIGN | \
+ PCR_ERRORS_PROPAGATION_EN)
+
+
+#define PCI_ARBITER_CTRL_DEFAULT_MASK 0x801fc07a
+#define PCI_ARBITER_CTRL_DEFAULT \
+ (PACR_BROKEN_VAL_PCIX_MIN << PACR_BROKEN_VAL_OFFS)
+
+
+#endif /* #ifndef __INCPCIREGSH */
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvCompVer.txt b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvCompVer.txt
new file mode 100644
index 000000000..38a926440
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvCompVer.txt
@@ -0,0 +1,4 @@
+Global HAL Version: FEROCEON_HAL_3_1_7
+Unit HAL Version: 3.1.4
+Description: This component includes an implementation of the unit HAL drivers
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.c
new file mode 100644
index 000000000..068aac2bf
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.c
@@ -0,0 +1,1143 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "pex/mvPex.h"
+
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+/* defines */
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+MV_STATUS mvPexHalInit(MV_U32 pexIf, MV_PEX_TYPE pexType)
+{
+ MV_PEX_MODE pexMode;
+ MV_U32 regVal;
+ MV_U32 status;
+
+ /* First implement Guideline (GL# PCI Express-2) Wrong Default Value */
+ /* to Transmitter Output Current (TXAMP) Relevant for: 88F5181-A1/B0/B1 */
+ /* and 88F5281-B0 and above, 88F5182, 88F5082, 88F5181L, 88F6082/L */
+
+ if ((mvCtrlModelGet() != MV_1281_DEV_ID) &&
+ (mvCtrlModelGet() != MV_6281_DEV_ID) &&
+ (mvCtrlModelGet() != MV_6192_DEV_ID) &&
+ (mvCtrlModelGet() != MV_6190_DEV_ID) &&
+ (mvCtrlModelGet() != MV_6180_DEV_ID) &&
+ (mvCtrlModelGet() != MV_6183_DEV_ID) &&
+ (mvCtrlModelGet() != MV_6183L_DEV_ID) &&
+ (mvCtrlModelGet() != MV_78100_DEV_ID) &&
+ (mvCtrlModelGet() != MV_78200_DEV_ID) &&
+ (mvCtrlModelGet() != MV_76100_DEV_ID) &&
+ (mvCtrlModelGet() != MV_78XX0_DEV_ID))
+ {
+
+ /* Read current value of TXAMP */
+ MV_REG_WRITE(0x41b00, 0x80820000); /* Write the read command */
+
+ regVal = MV_REG_READ(0x41b00); /* Extract the data */
+
+ /* Prepare new data for write */
+ regVal &= ~0x7; /* Clear bits [2:0] */
+ regVal |= 0x4; /* Set the new value */
+ regVal &= ~0x80000000; /* Set "write" command */
+ MV_REG_WRITE(0x41b00, regVal); /* Write the write command */
+
+ }
+ else
+ {
+ /* Implement 1.0V termination GL for 88F1281 device only */
+ /* BIT0 - Common mode feedback */
+ /* BIT3 - TxBuf, extra drive for 1.0V termination */
+ if (mvCtrlModelGet() == MV_1281_DEV_ID)
+ {
+ MV_REG_WRITE(0x41b00, 0x80860000); /* Write the read command */
+ regVal = MV_REG_READ(0x41b00); /* Extract the data */
+ regVal |= (BIT0 | BIT3);
+ regVal &= ~0x80000000; /* Set "write" command */
+ MV_REG_WRITE(0x41b00, regVal); /* Write the write command */
+
+ MV_REG_WRITE(0x31b00, 0x80860000); /* Write the read command */
+ regVal = MV_REG_READ(0x31b00); /* Extract the data */
+ regVal |= (BIT0 | BIT3);
+ regVal &= ~0x80000000; /* Set "write" command */
+ MV_REG_WRITE(0x31b00, regVal); /* Write the write command */
+ }
+ }
+
+ if( mvPexModeGet(pexIf, &pexMode) != MV_OK)
+ {
+ mvOsPrintf("PEX init ERR. mvPexModeGet failed (pexType=%d)\n",pexMode.pexType);
+ return MV_ERROR;
+ }
+
+ /* Check that required PEX type is the one set in reset time */
+ if (pexType != pexMode.pexType)
+ {
+ /* No Link. Shut down the Phy */
+ mvPexPowerDown(pexIf);
+ mvOsPrintf("PEX init ERR. PEX type sampled mismatch (%d,%d)\n",pexType,pexMode.pexType);
+ return MV_ERROR;
+ }
+
+ if (MV_PEX_ROOT_COMPLEX == pexType)
+ {
+ mvPexLocalBusNumSet(pexIf, PEX_HOST_BUS_NUM(pexIf));
+ mvPexLocalDevNumSet(pexIf, PEX_HOST_DEV_NUM(pexIf));
+
+ /* Local device master Enable */
+ mvPexMasterEnable(pexIf, MV_TRUE);
+
+ /* Local device slave Enable */
+ mvPexSlaveEnable(pexIf, mvPexLocalBusNumGet(pexIf),
+ mvPexLocalDevNumGet(pexIf), MV_TRUE);
+ /* Interrupt disable */
+ status = MV_REG_READ(PEX_CFG_DIRECT_ACCESS(pexIf, PEX_STATUS_AND_COMMAND));
+ status |= PXSAC_INT_DIS;
+ MV_REG_WRITE(PEX_CFG_DIRECT_ACCESS(pexIf, PEX_STATUS_AND_COMMAND), status);
+ }
+
+ /* now wait 500 ms to be sure the link is valid (spec compliant) */
+ mvOsDelay(500);
+ /* Check if we have link */
+ if (MV_REG_READ(PEX_STATUS_REG(pexIf)) & PXSR_DL_DOWN)
+ {
+ mvOsPrintf("PEX%d interface detected no Link.\n",pexIf);
+ return MV_NO_SUCH;
+ }
+
+ if (MV_PEX_WITDH_X1 == pexMode.pexWidth)
+ {
+ mvOsPrintf("PEX%d interface detected Link X1\n",pexIf);
+ }
+ else
+ {
+ mvOsPrintf("PEX%d interface detected Link X4\n",pexIf);
+ }
+
+#ifdef PCIE_VIRTUAL_BRIDGE_SUPPORT
+ mvPexVrtBrgInit(pexIf);
+#endif
+ return MV_OK;
+}
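+
+/* Illustrative call sequence (not part of the original Marvell HAL sources):
+ * bringing up PEX interface 0 as a Root Complex. The interface number and
+ * the error message are assumptions for the example only; the host bus and
+ * device numbers are programmed inside mvPexHalInit() itself.
+ *
+ * if (mvPexHalInit(0, MV_PEX_ROOT_COMPLEX) != MV_OK)
+ * mvOsPrintf("PEX0: init failed (no link or wrong strapping)\n");
+ */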
+
+/*******************************************************************************
+* mvPexModeGet - Get Pex Mode
+*
+* DESCRIPTION:
+* This function retrieves the mode of the given PEX interface: its type
+* (Root Complex or End Point), link state and negotiated link width.
+*
+* INPUT:
+* pexIf - PEX interface number.
+*
+* OUTPUT:
+* pexMode - Pex mode structure
+*
+* RETURN:
+* MV_OK on success , MV_ERROR otherwise
+*
+*******************************************************************************/
+MV_U32 mvPexModeGet(MV_U32 pexIf,MV_PEX_MODE *pexMode)
+{
+ MV_U32 pexData;
+
+ /* Parameter checking */
+ if (PEX_DEFAULT_IF != pexIf)
+ {
+ if (pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexModeGet: ERR. Invalid PEX interface %d\n",pexIf);
+ return MV_ERROR;
+ }
+ }
+
+ pexData = MV_REG_READ(PEX_CTRL_REG(pexIf));
+
+ switch (pexData & PXCR_DEV_TYPE_CTRL_MASK)
+ {
+ case PXCR_DEV_TYPE_CTRL_CMPLX:
+ pexMode->pexType = MV_PEX_ROOT_COMPLEX;
+ break;
+ case PXCR_DEV_TYPE_CTRL_POINT:
+ pexMode->pexType = MV_PEX_END_POINT;
+ break;
+
+ }
+
+ /* Check if we have link */
+ if (MV_REG_READ(PEX_STATUS_REG(pexIf)) & PXSR_DL_DOWN)
+ {
+ pexMode->pexLinkUp = MV_FALSE;
+
+ /* If there is no link, the auto negotiation data is worthless */
+ pexMode->pexWidth = MV_PEX_WITDH_INVALID;
+ }
+ else
+ {
+ pexMode->pexLinkUp = MV_TRUE;
+
+ /* We have link. The link width is now valid */
+ pexData = MV_REG_READ(PEX_CFG_DIRECT_ACCESS(pexIf, PEX_LINK_CTRL_STAT_REG));
+ pexMode->pexWidth = ((pexData & PXLCSR_NEG_LNK_WDTH_MASK) >>
+ PXLCSR_NEG_LNK_WDTH_OFFS);
+ }
+
+ return MV_OK;
+}
+
+
+/* PEX configuration space read write */
+
+/*******************************************************************************
+* mvPexConfigRead - Read from configuration space
+*
+* DESCRIPTION:
+* This function performs a 32 bit read from PEX configuration space.
+* It supports both type 0 and type 1 of Configuration Transactions
+* (local and over bridge). In order to read from the local bus segment, use
+* the bus number retrieved from mvPexLocalBusNumGet(). Other bus numbers
+* result in a type 1 configuration transaction (over bridge).
+*
+* INPUT:
+* pexIf - PEX interface number.
+* bus - PEX segment bus number.
+* dev - PEX device number.
+* func - Function number.
+* regOff - Register offset.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* 32bit register data, 0xffffffff on error
+*
+*******************************************************************************/
+MV_U32 mvPexConfigRead (MV_U32 pexIf, MV_U32 bus, MV_U32 dev, MV_U32 func,
+ MV_U32 regOff)
+{
+#if defined(PCIE_VIRTUAL_BRIDGE_SUPPORT)
+ return mvPexVrtBrgConfigRead (pexIf, bus, dev, func, regOff);
+}
+
+MV_U32 mvPexHwConfigRead (MV_U32 pexIf, MV_U32 bus, MV_U32 dev, MV_U32 func,
+ MV_U32 regOff)
+{
+#endif
+ MV_U32 pexData = 0;
+ MV_U32 localDev,localBus;
+
+ /* Parameter checking */
+ if (PEX_DEFAULT_IF != pexIf)
+ {
+ if (pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexConfigRead: ERR. Invalid PEX interface %d\n",pexIf);
+ return 0xFFFFFFFF;
+ }
+ }
+
+ if (dev >= MAX_PEX_DEVICES)
+ {
+ DB(mvOsPrintf("mvPexConfigRead: ERR. device number illigal %d\n", dev));
+ return 0xFFFFFFFF;
+ }
+
+ if (func >= MAX_PEX_FUNCS)
+ {
+ DB(mvOsPrintf("mvPexConfigRead: ERR. function num illigal %d\n", func));
+ return 0xFFFFFFFF;
+ }
+
+ if (bus >= MAX_PEX_BUSSES)
+ {
+ DB(mvOsPrintf("mvPexConfigRead: ERR. bus number illigal %d\n", bus));
+ return MV_ERROR;
+ }
+
+ DB(mvOsPrintf("mvPexConfigRead: pexIf %d, bus %d, dev %d, func %d, regOff 0x%x\n",
+ pexIf, bus, dev, func, regOff));
+
+ localDev = mvPexLocalDevNumGet(pexIf);
+ localBus = mvPexLocalBusNumGet(pexIf);
+
+ /* Speed up the process. In case of no link, return MV_ERROR */
+ if ((dev != localDev) || (bus != localBus))
+ {
+ pexData = MV_REG_READ(PEX_STATUS_REG(pexIf));
+
+ if ((pexData & PXSR_DL_DOWN))
+ {
+ return MV_ERROR;
+ }
+ }
+
+ /* In PCI Express there is only one device number besides the local device: */
+ /* it is the first device number we encounter other than localDev. */
+ /* The PEX spec defines what is returned on a config read/write to any */
+ /* other device. */
+ if (bus == localBus)
+ {
+ if (localDev == 0)
+ {
+ /* if local dev is 0 then the first number we encounter
+ after 0 is 1 */
+ if ((dev != 1)&&(dev != localDev))
+ {
+ return MV_ERROR;
+ }
+ }
+ else
+ {
+ /* if local dev is not 0 then the first number we encounter
+ is 0 */
+
+ if ((dev != 0)&&(dev != localDev))
+ {
+ return MV_ERROR;
+ }
+ }
+ if(func != 0 ) /* i.e bridge */
+ {
+ return MV_ERROR;
+ }
+ }
+
+
+ /* Creating PEX address to be passed */
+ pexData = (bus << PXCAR_BUS_NUM_OFFS);
+ pexData |= (dev << PXCAR_DEVICE_NUM_OFFS);
+ pexData |= (func << PXCAR_FUNC_NUM_OFFS);
+ pexData |= (regOff & PXCAR_REG_NUM_MASK); /* lgacy register space */
+ /* extended register space */
+ pexData |=(((regOff & PXCAR_REAL_EXT_REG_NUM_MASK) >>
+ PXCAR_REAL_EXT_REG_NUM_OFFS) << PXCAR_EXT_REG_NUM_OFFS);
+
+ pexData |= PXCAR_CONFIG_EN;
+
+ /* Write the address to the PEX configuration address register */
+ MV_REG_WRITE(PEX_CFG_ADDR_REG(pexIf), pexData);
+
+ DB(mvOsPrintf("mvPexConfigRead:address pexData=%x ",pexData));
+
+
+ /* In order to let the PEX controller absorb the address of the read */
+ /* transaction, we perform a validity check that the address was written */
+ if(pexData != MV_REG_READ(PEX_CFG_ADDR_REG(pexIf)))
+ {
+ return MV_ERROR;
+ }
+
+ /* cleaning Master Abort */
+ MV_REG_BIT_SET(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_STATUS_AND_COMMAND),
+ PXSAC_MABORT);
+#if 0
+ /* Guideline (GL# PCI Express-1) Erroneous Read Data on Configuration */
+ /* This guideline is relevant for all devices except of the following devices:
+ 88F5281-BO and above, 88F5181L-A0 and above, 88F1281 A0 and above
+ 88F6183 A0 and above, 88F6183L */
+ if ( ( (dev != localDev) || (bus != localBus) ) &&
+ (
+ !(MV_5281_DEV_ID == mvCtrlModelGet())&&
+ !((MV_5181_DEV_ID == mvCtrlModelGet())&& (mvCtrlRevGet() >= MV_5181L_A0_REV))&&
+ !(MV_1281_DEV_ID == mvCtrlModelGet())&&
+ !(MV_6183_DEV_ID == mvCtrlModelGet())&&
+ !(MV_6183L_DEV_ID == mvCtrlModelGet())&&
+ !(MV_6281_DEV_ID == mvCtrlModelGet())&&
+ !(MV_6192_DEV_ID == mvCtrlModelGet())&&
+ !(MV_6190_DEV_ID == mvCtrlModelGet())&&
+ !(MV_6180_DEV_ID == mvCtrlModelGet())&&
+ !(MV_78XX0_DEV_ID == mvCtrlModelGet())
+ ))
+ {
+
+ /* PCI-Express configuration read work-around */
+
+ /* we will use one of the Punit (AHBToMbus) windows to access the xbar
+ and read the data from there */
+ /*
+ Need to configure the 2 free Punit (AHB to MBus bridge)
+ address decoding windows:
+ Configure the flash Window to handle Configuration space requests
+ for PEX0/1:
+ 1. write 0x7931/0x7941 to the flash window and the size,
+ 79-xbar attr (pci cfg), 3/4-xbar target (pex0/1), 1-WinEn
+ 2. write base to flash window
+
+ Configuration transactions from the CPU should write/read the data
+ to/from address of the form:
+ addr[31:28] = 0x5 (for PEX0) or 0x6 (for PEX1)
+ addr[27:24] = extended register number
+ addr[23:16] = bus number
+ addr[15:11] = device number
+ addr[10:8] = function number
+ addr[7:0] = register number
+ */
+
+ #include "ctrlEnv/sys/mvAhbToMbus.h"
+ {
+ MV_U32 winNum;
+ MV_AHB_TO_MBUS_DEC_WIN originWin;
+ MV_U32 pciAddr=0;
+ MV_U32 remapLow=0,remapHigh=0;
+
+ /*
+ We will use the DEV_CS2/Flash window for this workaround
+ */
+
+ winNum = mvAhbToMbusWinTargetGet(PEX_CONFIG_RW_WA_TARGET);
+
+ /* save remap values if exist */
+ if ((1 == winNum)||(0 == winNum))
+ {
+ remapLow = MV_REG_READ(AHB_TO_MBUS_WIN_REMAP_LOW_REG(winNum));
+ remapHigh = MV_REG_READ(AHB_TO_MBUS_WIN_REMAP_HIGH_REG(winNum));
+
+ }
+
+
+ /* save the original window values */
+ mvAhbToMbusWinGet(winNum,&originWin);
+
+ if (PEX_CONFIG_RW_WA_USE_ORIGINAL_WIN_VALUES)
+ {
+ /* set the window as xbar window */
+ if (pexIf)
+ {
+ MV_REG_WRITE(AHB_TO_MBUS_WIN_CTRL_REG(winNum),
+ (0x7931 | (((originWin.addrWin.size >> 16)-1) ) << 16));
+ }
+ else
+ {
+ MV_REG_WRITE(AHB_TO_MBUS_WIN_CTRL_REG(winNum),
+ (0x7941 | (((originWin.addrWin.size >> 16)-1) ) << 16));
+ }
+
+ MV_REG_WRITE(AHB_TO_MBUS_WIN_BASE_REG(winNum),
+ originWin.addrWin.baseLow);
+
+ /*pciAddr = originWin.addrWin.baseLow;*/
+ pciAddr = (MV_U32)CPU_MEMIO_UNCACHED_ADDR(
+ (MV_U32)originWin.addrWin.baseLow);
+
+ }
+ else
+ {
+ /* set the window as xbar window */
+ if (pexIf)
+ {
+ MV_REG_WRITE(AHB_TO_MBUS_WIN_CTRL_REG(winNum),
+ (0x7931 | (((PEX_CONFIG_RW_WA_SIZE >> 16)-1) ) << 16));
+ }
+ else
+ {
+ MV_REG_WRITE(AHB_TO_MBUS_WIN_CTRL_REG(winNum),
+ (0x7941 | (((PEX_CONFIG_RW_WA_SIZE >> 16)-1) ) << 16));
+ }
+
+ MV_REG_WRITE(AHB_TO_MBUS_WIN_BASE_REG(winNum),
+ PEX_CONFIG_RW_WA_BASE);
+
+ pciAddr = (MV_U32)CPU_MEMIO_UNCACHED_ADDR(PEX_CONFIG_RW_WA_BASE);
+ }
+
+
+ /* remap should be as base */
+ if ((1 == winNum)||(0 == winNum))
+ {
+ MV_REG_WRITE(AHB_TO_MBUS_WIN_REMAP_LOW_REG(winNum),pciAddr);
+ MV_REG_WRITE(AHB_TO_MBUS_WIN_REMAP_HIGH_REG(winNum),0);
+
+ }
+
+ /* extended register space */
+ pciAddr |= (bus << 16);
+ pciAddr |= (dev << 11);
+ pciAddr |= (func << 8);
+ pciAddr |= (regOff & PXCAR_REG_NUM_MASK); /* lgacy register space */
+
+ pexData = *(MV_U32*)pciAddr;
+ pexData = MV_32BIT_LE(pexData); /* Data always in LE */
+
+ /* restore the original window values */
+ mvAhbToMbusWinSet(winNum,&originWin);
+
+ /* restore original remap values*/
+ if ((1 == winNum)||(0 == winNum))
+ {
+ MV_REG_WRITE(AHB_TO_MBUS_WIN_REMAP_LOW_REG(winNum),remapLow);
+ MV_REG_WRITE(AHB_TO_MBUS_WIN_REMAP_HIGH_REG(winNum),remapHigh);
+
+ }
+ }
+ }
+ else
+#endif
+ {
+ /* Read the Data returned in the PEX Data register */
+ pexData = MV_REG_READ(PEX_CFG_DATA_REG(pexIf));
+
+ }
+
+ DB(mvOsPrintf("mvPexConfigRead: got : %x \n",pexData));
+
+ return pexData;
+
+}
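+
+/* Illustrative usage sketch (not part of the original Marvell HAL sources):
+ * reading the local interface's own Device/Vendor ID (configuration offset
+ * 0x0) through mvPexConfigRead(), using the local bus/device numbers as the
+ * DESCRIPTION above recommends. Interface number 0 is an assumption.
+ *
+ * MV_U32 id = mvPexConfigRead(0, mvPexLocalBusNumGet(0),
+ * mvPexLocalDevNumGet(0), 0, 0x0);
+ *
+ * if (id == 0xFFFFFFFF)
+ * mvOsPrintf("PEX0: configuration read failed\n");
+ */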
+
+/*******************************************************************************
+* mvPexConfigWrite - Write to configuration space
+*
+* DESCRIPTION:
+* This function performs a 32 bit write to PEX configuration space.
+* It supports both type 0 and type 1 of Configuration Transactions
+* (local and over bridge). In order to write to the local bus segment, use
+* the bus number retrieved from mvPexLocalBusNumGet(). Other bus numbers
+* result in a type 1 configuration transaction (over bridge).
+*
+* INPUT:
+* pexIf - PEX interface number.
+* bus - PEX segment bus number.
+* dev - PEX device number.
+* func - Function number.
+* regOff - Register offset.
+* data - 32bit data.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPexConfigWrite(MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+ MV_U32 func, MV_U32 regOff, MV_U32 data)
+{
+#if defined(PCIE_VIRTUAL_BRIDGE_SUPPORT)
+ return mvPexVrtBrgConfigWrite (pexIf, bus, dev, func, regOff, data);
+}
+
+MV_STATUS mvPexHwConfigWrite(MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+ MV_U32 func, MV_U32 regOff, MV_U32 data)
+{
+#endif
+ MV_U32 pexData = 0;
+ MV_U32 localDev,localBus;
+
+ /* Parameter checking */
+ if (PEX_DEFAULT_IF != pexIf)
+ {
+ if (pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexConfigWrite: ERR. Invalid PEX interface %d\n",
+ pexIf);
+ return MV_ERROR;
+ }
+ }
+
+ if (dev >= MAX_PEX_DEVICES)
+ {
+ mvOsPrintf("mvPexConfigWrite: ERR. device number illigal %d\n",dev);
+ return MV_BAD_PARAM;
+ }
+
+ if (func >= MAX_PEX_FUNCS)
+ {
+ mvOsPrintf("mvPexConfigWrite: ERR. function number illigal %d\n", func);
+ return MV_ERROR;
+ }
+
+ if (bus >= MAX_PEX_BUSSES)
+ {
+ mvOsPrintf("mvPexConfigWrite: ERR. bus number illigal %d\n", bus);
+ return MV_ERROR;
+ }
+
+
+
+ localDev = mvPexLocalDevNumGet(pexIf);
+ localBus = mvPexLocalBusNumGet(pexIf);
+
+
+ /* In PCI Express there is only one device number other than ourselves: */
+ /* it is the first device number we encounter other than localDev (which */
+ /* can be any valid device number). The PEX spec defines what is returned */
+ /* on a config read/write to any other device. */
+ if (bus == localBus)
+ {
+
+ if (localDev == 0)
+ {
+ /* if local dev is 0 then the first number we encounter
+ after 0 is 1 */
+ if ((dev != 1)&&(dev != localDev))
+ {
+ return MV_ERROR;
+ }
+
+ }
+ else
+ {
+ /* if local dev is not 0 then the first number we encounter
+ is 0 */
+
+ if ((dev != 0)&&(dev != localDev))
+ {
+ return MV_ERROR;
+ }
+ }
+
+
+ }
+
+ /* if we are not accessing ourselves , then check the link */
+ if ((dev != localDev) || (bus != localBus) )
+ {
+ /* workaround: when there is no link, return MV_ERROR */
+
+ pexData = MV_REG_READ(PEX_STATUS_REG(pexIf));
+
+ if ((pexData & PXSR_DL_DOWN))
+ {
+ return MV_ERROR;
+ }
+
+ }
+
+ pexData =0;
+
+ /* Creating PEX address to be passed */
+ pexData |= (bus << PXCAR_BUS_NUM_OFFS);
+ pexData |= (dev << PXCAR_DEVICE_NUM_OFFS);
+ pexData |= (func << PXCAR_FUNC_NUM_OFFS);
+ pexData |= (regOff & PXCAR_REG_NUM_MASK); /* lgacy register space */
+ /* extended register space */
+ pexData |=(((regOff & PXCAR_REAL_EXT_REG_NUM_MASK) >>
+ PXCAR_REAL_EXT_REG_NUM_OFFS) << PXCAR_EXT_REG_NUM_OFFS);
+ pexData |= PXCAR_CONFIG_EN;
+
+ DB(mvOsPrintf("mvPexConfigWrite: If=%x bus=%x func=%x dev=%x regOff=%x data=%x \n",
+ pexIf,bus,func,dev,regOff,data,pexData) );
+
+ /* Write the address to the PEX configuration address register */
+ MV_REG_WRITE(PEX_CFG_ADDR_REG(pexIf), pexData);
+
+ /* Clear CPU pipe. Important where CPU can perform OOO execution */
+ CPU_PIPE_FLUSH;
+
+ /* In order to let the PEX controller absorb the address of the write */
+ /* transaction, we perform a validity check that the address was written */
+ if(pexData != MV_REG_READ(PEX_CFG_ADDR_REG(pexIf)))
+ {
+ return MV_ERROR;
+ }
+
+ /* Write the Data passed to the PEX Data register */
+ MV_REG_WRITE(PEX_CFG_DATA_REG(pexIf), data);
+
+ return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvPexMasterEnable - Enable/disable PEX interface master transactions.
+*
+* DESCRIPTION:
+* This function performs a read-modify-write to the PEX command/status
+* register (offset 0x4) to set/reset bit 2. After this bit is set, the PEX
+* master is allowed to gain ownership of the bus; otherwise it is
+* unable to do so.
+*
+* INPUT:
+* pexIf - PEX interface number.
+* enable - Enable/disable parameter.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPexMasterEnable(MV_U32 pexIf, MV_BOOL enable)
+{
+ MV_U32 pexCommandStatus;
+ MV_U32 localBus;
+ MV_U32 localDev;
+
+ /* Parameter checking */
+ if (pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexMasterEnable: ERR. Invalid PEX interface %d\n", pexIf);
+ return MV_ERROR;
+ }
+
+ localBus = mvPexLocalBusNumGet(pexIf);
+ localDev = mvPexLocalDevNumGet(pexIf);
+
+ pexCommandStatus = MV_REG_READ(PEX_CFG_DIRECT_ACCESS(pexIf,
+ PEX_STATUS_AND_COMMAND));
+
+
+ if (MV_TRUE == enable)
+ {
+ pexCommandStatus |= PXSAC_MASTER_EN;
+ }
+ else
+ {
+ pexCommandStatus &= ~PXSAC_MASTER_EN;
+ }
+
+
+ MV_REG_WRITE(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_STATUS_AND_COMMAND),
+ pexCommandStatus);
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPexSlaveEnable - Enable/disable PEX interface slave transactions.
+*
+* DESCRIPTION:
+* This function performs a read-modify-write of the PEX command/status
+* register (offset 0x4) to set/reset bits 0 and 1. When these bits are set,
+* the PEX slave is allowed to respond to PEX I/O space accesses (bit 0)
+* and PEX memory space accesses (bit 1).
+*
+* INPUT:
+* pexIf - PEX interface number.
+* bus - PEX bus number.
+* dev - PEX device number.
+* enable - Enable/disable parameter.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM for bad parameters, MV_ERROR on error, otherwise MV_OK.
+*
+*******************************************************************************/
+MV_STATUS mvPexSlaveEnable(MV_U32 pexIf, MV_U32 bus,MV_U32 dev, MV_BOOL enable)
+{
+ MV_U32 pexCommandStatus;
+ MV_U32 RegOffs;
+
+ /* Parameter checking */
+ if (pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexSlaveEnable: ERR. Invalid PEX interface %d\n", pexIf);
+ return MV_BAD_PARAM;
+ }
+ if (dev >= MAX_PEX_DEVICES)
+ {
+		mvOsPrintf("mvPexSlaveEnable: ERR. device number illegal %d\n", dev);
+ return MV_BAD_PARAM;
+
+ }
+
+
+ RegOffs = PEX_STATUS_AND_COMMAND;
+
+ pexCommandStatus = mvPexConfigRead(pexIf, bus, dev, 0, RegOffs);
+
+ if (MV_TRUE == enable)
+ {
+ pexCommandStatus |= (PXSAC_IO_EN | PXSAC_MEM_EN);
+ }
+ else
+ {
+ pexCommandStatus &= ~(PXSAC_IO_EN | PXSAC_MEM_EN);
+ }
+
+ mvPexConfigWrite(pexIf, bus, dev, 0, RegOffs, pexCommandStatus);
+
+ return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvPexLocalBusNumSet - Set PEX interface local bus number.
+*
+* DESCRIPTION:
+* This function sets the local bus number of the given PEX interface.
+* Note: In case the PEX interface is PEX-X, the information is read-only.
+*
+* INPUT:
+* pexIf - PEX interface number.
+* busNum - Bus number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_NOT_ALLOWED in case PEX interface is PEX-X.
+* MV_BAD_PARAM on bad parameters ,
+* otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPexLocalBusNumSet(MV_U32 pexIf, MV_U32 busNum)
+{
+ MV_U32 pexStatus;
+ MV_U32 localBus;
+ MV_U32 localDev;
+
+
+ /* Parameter checking */
+ if (pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexLocalBusNumSet: ERR. Invalid PEX interface %d\n",pexIf);
+ return MV_BAD_PARAM;
+ }
+ if (busNum >= MAX_PEX_BUSSES)
+ {
+		mvOsPrintf("mvPexLocalBusNumSet: ERR. bus number illegal %d\n", busNum);
+ return MV_ERROR;
+
+ }
+
+ localBus = mvPexLocalBusNumGet(pexIf);
+ localDev = mvPexLocalDevNumGet(pexIf);
+
+
+
+ pexStatus = MV_REG_READ(PEX_STATUS_REG(pexIf));
+
+ pexStatus &= ~PXSR_PEX_BUS_NUM_MASK;
+
+ pexStatus |= (busNum << PXSR_PEX_BUS_NUM_OFFS) & PXSR_PEX_BUS_NUM_MASK;
+
+ MV_REG_WRITE(PEX_STATUS_REG(pexIf), pexStatus);
+
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPexLocalBusNumGet - Get PEX interface local bus number.
+*
+* DESCRIPTION:
+* This function gets the local bus number of a given PEX interface.
+*
+* INPUT:
+* pexIf - PEX interface number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Local bus number, or 0xFFFFFFFF on error.
+*
+*******************************************************************************/
+MV_U32 mvPexLocalBusNumGet(MV_U32 pexIf)
+{
+ MV_U32 pexStatus;
+
+ /* Parameter checking */
+ if (PEX_DEFAULT_IF != pexIf)
+ {
+ if (pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexLocalBusNumGet: ERR. Invalid PEX interface %d\n",pexIf);
+ return 0xFFFFFFFF;
+ }
+ }
+
+
+ pexStatus = MV_REG_READ(PEX_STATUS_REG(pexIf));
+
+ pexStatus &= PXSR_PEX_BUS_NUM_MASK;
+
+ return (pexStatus >> PXSR_PEX_BUS_NUM_OFFS);
+
+}
+
+
+/*******************************************************************************
+* mvPexLocalDevNumSet - Set PEX interface local device number.
+*
+* DESCRIPTION:
+* This function sets the local device number of the given PEX interface.
+* Note: In case the PEX interface is PEX-X, the information is read-only.
+*
+* INPUT:
+* pexIf - PEX interface number.
+* devNum - Device number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_NOT_ALLOWED in case PEX interface is PEX-X.
+* MV_BAD_PARAM on bad parameters ,
+* otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPexLocalDevNumSet(MV_U32 pexIf, MV_U32 devNum)
+{
+ MV_U32 pexStatus;
+ MV_U32 localBus;
+ MV_U32 localDev;
+
+ /* Parameter checking */
+ if (pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexLocalDevNumSet: ERR. Invalid PEX interface %d\n",pexIf);
+ return MV_BAD_PARAM;
+ }
+ if (devNum >= MAX_PEX_DEVICES)
+ {
+		mvOsPrintf("mvPexLocalDevNumSet: ERR. device number illegal %d\n",
+ devNum);
+ return MV_BAD_PARAM;
+
+ }
+
+ localBus = mvPexLocalBusNumGet(pexIf);
+ localDev = mvPexLocalDevNumGet(pexIf);
+
+
+ pexStatus = MV_REG_READ(PEX_STATUS_REG(pexIf));
+
+ pexStatus &= ~PXSR_PEX_DEV_NUM_MASK;
+
+ pexStatus |= (devNum << PXSR_PEX_DEV_NUM_OFFS) & PXSR_PEX_DEV_NUM_MASK;
+
+ MV_REG_WRITE(PEX_STATUS_REG(pexIf), pexStatus);
+
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvPexLocalDevNumGet - Get PEX interface local device number.
+*
+* DESCRIPTION:
+* This function gets the local device number of a given PEX interface.
+*
+* INPUT:
+* pexIf - PEX interface number.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Local device number, or 0xFFFFFFFF on error.
+*
+*******************************************************************************/
+MV_U32 mvPexLocalDevNumGet(MV_U32 pexIf)
+{
+ MV_U32 pexStatus;
+
+ /* Parameter checking */
+
+ if (PEX_DEFAULT_IF != pexIf)
+ {
+ if (pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexLocalDevNumGet: ERR. Invalid PEX interface %d\n",
+ pexIf);
+ return 0xFFFFFFFF;
+ }
+ }
+
+ pexStatus = MV_REG_READ(PEX_STATUS_REG(pexIf));
+
+ pexStatus &= PXSR_PEX_DEV_NUM_MASK;
+
+ return (pexStatus >> PXSR_PEX_DEV_NUM_OFFS);
+}
+
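+/* mvPexPhyRegRead/mvPexPhyRegWrite access the PEX PHY through the indirect  */
+/* PEX_PHY_ACCESS_REG: bits [29:16] carry the PHY register offset and bits   */
+/* [15:0] carry the write data; BIT31 appears to select a read operation.    */
+/* This description is inferred from the code below, not from a register     */
+/* specification.                                                             */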
+MV_VOID mvPexPhyRegRead(MV_U32 pexIf, MV_U32 regOffset, MV_U16 *value)
+{
+
+ MV_U32 regAddr;
+ if (pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexPhyRegRead: ERR. Invalid PEX interface %d\n", pexIf);
+ return;
+ }
+ regAddr = (BIT31 | ((regOffset & 0x3fff) << 16));
+ MV_REG_WRITE(PEX_PHY_ACCESS_REG(pexIf), regAddr);
+ *value = MV_REG_READ(PEX_PHY_ACCESS_REG(pexIf));
+}
+
+
+MV_VOID mvPexPhyRegWrite(MV_U32 pexIf, MV_U32 regOffset, MV_U16 value)
+{
+
+ MV_U32 regAddr;
+ if(pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexPhyRegWrite: ERR. Invalid PEX interface %d\n", pexIf);
+ return;
+ }
+ regAddr = (((regOffset & 0x3fff) << 16) | value);
+ MV_REG_WRITE(PEX_PHY_ACCESS_REG(pexIf), regAddr);
+}
+
+/*******************************************************************************
+* mvPexActiveStateLinkPMEnable
+*
+* DESCRIPTION:
+* Enable/disable Active State Link Power Management (ASPM).
+*
+* INPUT:
+* pexIf - PEX interface number.
+* enable - MV_TRUE to enable ASPM, MV_FALSE to disable.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* MV_OK on success, MV_ERROR otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvPexActiveStateLinkPMEnable(MV_U32 pexIf, MV_BOOL enable)
+{
+ MV_U32 reg;
+
+ if(pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexActiveStateLinkPMEnable: ERR. Invalid PEX interface %d\n", pexIf);
+ return MV_ERROR;
+ }
+
+ reg = MV_REG_READ(PEX_PWR_MNG_EXT_REG(pexIf)) & ~PXPMER_L1_ASPM_EN_MASK;
+ if(enable == MV_TRUE)
+ reg |= PXPMER_L1_ASPM_EN_MASK;
+ MV_REG_WRITE(PEX_PWR_MNG_EXT_REG(pexIf), reg);
+
+ /* Enable / Disable L0/1 entry */
+ reg = MV_REG_READ(PEX_CFG_DIRECT_ACCESS(pexIf, PEX_LINK_CTRL_STAT_REG))
+ & ~PXLCSR_ASPM_CNT_MASK;
+ if(enable == MV_TRUE)
+ reg |= PXLCSR_ASPM_CNT_L0S_L1S_ENT_SUPP;
+ MV_REG_WRITE(PEX_CFG_DIRECT_ACCESS(pexIf, PEX_LINK_CTRL_STAT_REG), reg);
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPexForceX1
+*
+* DESCRIPTION:
+* Shut down lanes 1-3 when the attached end-point is recognized as x1.
+* INPUT:
+* pexIf - PEX interface number.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* MV_OK on success, MV_ERROR otherwise.
+*
+*******************************************************************************/
+MV_U32 mvPexForceX1(MV_U32 pexIf)
+{
+ MV_U32 regData = 0;
+ if(pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexForceX1: ERR. Invalid PEX interface %d\n", pexIf);
+ return MV_BAD_PARAM;
+ }
+
+ regData = MV_REG_READ(PEX_CTRL_REG(pexIf)) & ~(PXCR_CONF_LINK_MASK) ;
+ regData |= PXCR_CONF_LINK_X1;
+
+ MV_REG_WRITE(PEX_CTRL_REG(pexIf), regData);
+ return MV_OK;
+}
+
+MV_BOOL mvPexIsPowerUp(MV_U32 pexIf)
+{
+ if(pexIf >= mvCtrlPexMaxIfGet())
+ {
+ mvOsPrintf("mvPexIsPowerUp: ERR. Invalid PEX interface %d\n", pexIf);
+ return MV_FALSE;
+ }
+ return mvCtrlPwrClckGet(PEX_UNIT_ID, pexIf);
+}
+
+
+MV_VOID mvPexPowerDown(MV_U32 pexIf)
+{
+ if ( (mvCtrlModelGet() == MV_78XX0_DEV_ID) ||
+ (mvCtrlModelGet() == MV_76100_DEV_ID) ||
+ (mvCtrlModelGet() == MV_78100_DEV_ID) ||
+ (mvCtrlModelGet() == MV_78200_DEV_ID) )
+ {
+ mvCtrlPwrClckSet(PEX_UNIT_ID, pexIf, MV_FALSE);
+ }
+ else
+ {
+ MV_REG_WRITE((0x41B00 -(pexIf)*0x10000), 0x20800087);
+ }
+}
+
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.h
new file mode 100644
index 000000000..d8f1cdd9f
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.h
@@ -0,0 +1,168 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCPEXH
+#define __INCPEXH
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "pex/mvPexRegs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+
+
+
+/* NOTE not supported in this driver:*/
+
+
+/* defines */
+/* The number of supported PEX interfaces depends on the Marvell controller */
+/* device ID. This device ID is located in the PEX unit configuration */
+/* header, which creates a loop: calling the PEX configuration read/write */
+/* routine results in a call to get PEX configuration information, and so on. */
+/* This macro therefore defines a default PEX interface that is guaranteed */
+/* to exist. */
+#define PEX_DEFAULT_IF 0
+
+
+/* typedefs */
+/* The Marvell controller supports both root complex and end point devices */
+/* This enumeration describes the PEX type. */
+typedef enum _mvPexType
+{
+ MV_PEX_ROOT_COMPLEX, /* root complex device */
+ MV_PEX_END_POINT /* end point device */
+}MV_PEX_TYPE;
+
+typedef enum _mvPexWidth
+{
+ MV_PEX_WITDH_X1 = 1,
+ MV_PEX_WITDH_X2,
+ MV_PEX_WITDH_X3,
+ MV_PEX_WITDH_X4,
+ MV_PEX_WITDH_INVALID
+}MV_PEX_WIDTH;
+
+/* PEX Bar attributes */
+typedef struct _mvPexMode
+{
+ MV_PEX_TYPE pexType;
+ MV_PEX_WIDTH pexWidth;
+ MV_BOOL pexLinkUp;
+}MV_PEX_MODE;
+
+
+
+/* Global Functions prototypes */
+/* mvPexHalInit - Initialize PEX interface */
+MV_STATUS mvPexHalInit(MV_U32 pexIf, MV_PEX_TYPE pexType);
+
+/* mvPexModeGet - Get Pex If mode */
+MV_U32 mvPexModeGet(MV_U32 pexIf,MV_PEX_MODE *pexMode);
+
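+/* Illustrative usage sketch (an assumption for documentation purposes, not  */
+/* part of this HAL): query the interface mode, then initialize the HAL with */
+/* the detected type:                                                        */
+/*                                                                            */
+/*	MV_PEX_MODE pexMode;                                                  */
+/*	mvPexModeGet(PEX_DEFAULT_IF, &pexMode);                               */
+/*	if (pexMode.pexLinkUp == MV_TRUE)                                     */
+/*		mvPexHalInit(PEX_DEFAULT_IF, pexMode.pexType);                */
+/*                                                                            */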
+/* mvPexConfigRead - Read from configuration space */
+MV_U32 mvPexConfigRead (MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+ MV_U32 func,MV_U32 regOff);
+
+/* mvPexConfigWrite - Write to configuration space */
+MV_STATUS mvPexConfigWrite(MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+ MV_U32 func, MV_U32 regOff, MV_U32 data);
+
+/* mvPexMasterEnable - Enable/disable PEX interface master transactions.*/
+MV_STATUS mvPexMasterEnable(MV_U32 pexIf, MV_BOOL enable);
+
+/* mvPexSlaveEnable - Enable/disable PEX interface slave transactions.*/
+MV_STATUS mvPexSlaveEnable(MV_U32 pexIf, MV_U32 bus,MV_U32 dev, MV_BOOL enable);
+
+/* mvPexLocalBusNumSet - Set PEX interface local bus number.*/
+MV_STATUS mvPexLocalBusNumSet(MV_U32 pexIf, MV_U32 busNum);
+
+/* mvPexLocalBusNumGet - Get PEX interface local bus number.*/
+MV_U32 mvPexLocalBusNumGet(MV_U32 pexIf);
+
+/* mvPexLocalDevNumSet - Set PEX interface local device number.*/
+MV_STATUS mvPexLocalDevNumSet(MV_U32 pexIf, MV_U32 devNum);
+
+/* mvPexLocalDevNumGet - Get PEX interface local device number.*/
+MV_U32 mvPexLocalDevNumGet(MV_U32 pexIf);
+/* mvPexForceX1 - Force PEX interface to X1 mode. */
+MV_U32 mvPexForceX1(MV_U32 pexIf);
+
+/* mvPexIsPowerUp - Is PEX interface Power up? */
+MV_BOOL mvPexIsPowerUp(MV_U32 pexIf);
+
+/* mvPexPowerDown - Power Down */
+MV_VOID mvPexPowerDown(MV_U32 pexIf);
+
+/* mvPexPowerUp - Power Up */
+MV_VOID mvPexPowerUp(MV_U32 pexIf);
+
+/* mvPexPhyRegRead - Pex phy read */
+MV_VOID mvPexPhyRegRead(MV_U32 pexIf, MV_U32 regOffset, MV_U16 *value);
+
+/* mvPexPhyRegWrite - Pex phy write */
+MV_VOID mvPexPhyRegWrite(MV_U32 pexIf, MV_U32 regOffset, MV_U16 value);
+
+MV_STATUS mvPexActiveStateLinkPMEnable(MV_U32 pexIf, MV_BOOL enable);
+
+#endif /* #ifndef __INCPEXH */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPexRegs.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPexRegs.h
new file mode 100644
index 000000000..8ac169836
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPexRegs.h
@@ -0,0 +1,751 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCPEXREGSH
+#define __INCPEXREGSH
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* defines */
+#define MAX_PEX_DEVICES 32
+#define MAX_PEX_FUNCS 8
+#define MAX_PEX_BUSSES 256
+
+
+
+/*********************************************************/
+/* PCI Express Configuration Cycles Generation Registers */
+/*********************************************************/
+
+#define PEX_CFG_ADDR_REG(pexIf) ((PEX_IF_BASE(pexIf)) + 0x18F8)
+#define PEX_CFG_DATA_REG(pexIf) ((PEX_IF_BASE(pexIf)) + 0x18FC)
+#define PEX_PHY_ACCESS_REG(pexIf) ((PEX_IF_BASE(pexIf)) + 0x1B00)
+/* PCI Express Configuration Address Register */
+/* PEX_CFG_ADDR_REG (PXCAR)*/
+
+#define PXCAR_REG_NUM_OFFS 2
+#define PXCAR_REG_NUM_MAX 0x3F
+#define PXCAR_REG_NUM_MASK (PXCAR_REG_NUM_MAX << PXCAR_REG_NUM_OFFS)
+#define PXCAR_FUNC_NUM_OFFS 8
+#define PXCAR_FUNC_NUM_MAX 0x7
+#define PXCAR_FUNC_NUM_MASK (PXCAR_FUNC_NUM_MAX << PXCAR_FUNC_NUM_OFFS)
+#define PXCAR_DEVICE_NUM_OFFS 11
+#define PXCAR_DEVICE_NUM_MAX 0x1F
+#define PXCAR_DEVICE_NUM_MASK (PXCAR_DEVICE_NUM_MAX << PXCAR_DEVICE_NUM_OFFS)
+#define PXCAR_BUS_NUM_OFFS 16
+#define PXCAR_BUS_NUM_MAX 0xFF
+#define PXCAR_BUS_NUM_MASK (PXCAR_BUS_NUM_MAX << PXCAR_BUS_NUM_OFFS)
+#define PXCAR_EXT_REG_NUM_OFFS 24
+#define PXCAR_EXT_REG_NUM_MAX 0xF
+
+/* In PCI Express the register address consists of the legacy register address
+(8 bits) plus a new extended register address (4 more bits). Below is the mask
+of the upper 4 bits of the full register address. */
+
+#define PXCAR_REAL_EXT_REG_NUM_OFFS 8
+#define PXCAR_EXT_REG_NUM_MASK (PXCAR_EXT_REG_NUM_MAX << PXCAR_EXT_REG_NUM_OFFS)
+#define PXCAR_CONFIG_EN BIT31
+
+#define PXCAR_REAL_EXT_REG_NUM_MASK (0xF << PXCAR_REAL_EXT_REG_NUM_OFFS)
+
+/* The traditional PCI spec defined a 6-bit field to describe the register */
+/* offset. PCI Express extends the register offset by an extra 4 bits. */
+/* The macro below assigns a 10-bit register offset to the appropriate */
+/* fields in the CFG_ADDR_REG. */
+#define PXCAR_REG_OFFS_SET(regOff) \
+	( ((regOff) & PXCAR_REG_NUM_MASK) | \
+	( (((regOff) & PXCAR_REAL_EXT_REG_NUM_MASK) >> PXCAR_REAL_EXT_REG_NUM_OFFS) << PXCAR_EXT_REG_NUM_OFFS) )
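+/* Worked example (illustrative): for regOff = 0x104 the legacy bits [7:2]   */
+/* contribute 0x04 and the extended bits [11:8] contribute 0x1 << 24, so     */
+/* PXCAR_REG_OFFS_SET(0x104) evaluates to 0x01000004 (before the bus, dev,   */
+/* func and PXCAR_CONFIG_EN fields are ORed in by the caller).               */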
+
+/***********************************/
+/* PCI Express Interrupt registers */
+/***********************************/
+#define PEX_CAUSE_REG(pexIf) ((PEX_IF_BASE(pexIf)) + 0x1900)
+#define PEX_MASK_REG(pexIf) ((PEX_IF_BASE(pexIf)) + 0x1910)
+
+#define PXICR_TX_REQ_IN_DLDOWN_ERR	BIT0	/* Transmit request while the <DLDown> */
+						/* field of the PEX Status register is set */
+/* PCI Express Interrupt Cause */
+/* PEX_INT_CAUSE_REG (PXICR)*/
+/* PEX_INT_MASK_REG*/
+/*
+NOTE: All bits except bits[27:24] are Read/Write-Clear only. A cause bit is
+set upon an error event occurrence. A write of 0 clears the bit. A write of 1
+has no effect. Bits[27:24] are set and cleared upon reception of interrupt
+emulation messages.
+
+Mask bit per cause bit. If a bit is set to 1, the corresponding event is
+enabled. The mask does not affect the setting of the Interrupt Cause register
+bits; it only affects the assertion of the interrupt. */
+
+
+#define PXICR_MDIS_CAUSE BIT1 /* Attempt to generate PCI transaction
+ while master is disabled */
+#define PXICR_ERR_WRTO_REG_CAUSE BIT3 /* Erroneous write attempt to
+ PCI Express internal register*/
+#define PXICR_HIT_DFLT_WIN_ERR BIT4 /* Hit Default Window Error */
+#define PXICR_RX_RAM_PAR_ERR BIT6 /* Rx RAM Parity Error */
+#define PXICR_TX_RAM_PAR_ERR BIT7 /* Tx RAM Parity Error */
+#define PXICR_COR_ERR_DET BIT8 /* Correctable Error Detected*/
+#define PXICR_NF_ERR_DET BIT9 /* Non-Fatal Error Detected*/
+#define PXICR_FERR_DET BIT10 /* Fatal Error Detected*/
+#define PXICR_DSTATE_CHANGE BIT11 /* Dstate Change Indication*/
+#define PXICR_BIST BIT12 /* PCI-Express BIST activated*/
+#define PXICR_FLW_CTRL_PROT BIT14 /* Flow Control Protocol Error */
+
+#define PXICR_RCV_UR_CA_ERR BIT15 /* Received UR or CA status. */
+#define PXICR_RCV_ERR_FATAL BIT16 /* Received ERR_FATAL message.*/
+#define PXICR_RCV_ERR_NON_FATAL BIT17 /* Received ERR_NONFATAL message*/
+#define PXICR_RCV_ERR_COR BIT18 /* Received ERR_COR message.*/
+#define PXICR_RCV_CRS BIT19 /* Received CRS completion status*/
+#define PXICR_SLV_HOT_RESET BIT20 /* Received Hot Reset Indication*/
+#define PXICR_SLV_DIS_LINK BIT21 /* Slave Disable Link Indication*/
+#define PXICR_SLV_LB BIT22 /* Slave Loopback Indication*/
+#define PXICR_LINK_FAIL BIT23 /* Link Failure indication.*/
+#define PXICR_RCV_INTA BIT24 /* IntA status.*/
+#define PXICR_RCV_INTB BIT25 /* IntB status.*/
+#define PXICR_RCV_INTC BIT26 /* IntC status.*/
+#define PXICR_RCV_INTD BIT27 /* IntD status.*/
+#define PXICR_RCV_PM_PME BIT28 /* Received PM_PME message. */
+
+
+/********************************************/
+/* PCI Express Control and Status Registers */
+/********************************************/
+#define PEX_CTRL_REG(pexIf) ((PEX_IF_BASE(pexIf)) + 0x1A00)
+#define PEX_STATUS_REG(pexIf) ((PEX_IF_BASE(pexIf)) + 0x1A04)
+#define PEX_COMPLT_TMEOUT_REG(pexIf) ((PEX_IF_BASE(pexIf)) + 0x1A10)
+#define PEX_PWR_MNG_EXT_REG(pexIf) ((PEX_IF_BASE(pexIf)) + 0x1A18)
+#define PEX_FLOW_CTRL_REG(pexIf) ((PEX_IF_BASE(pexIf)) + 0x1A20)
+#define PEX_ACK_TMR_4X_REG(pexIf) ((PEX_IF_BASE(pexIf)) + 0x1A30)
+#define PEX_ACK_TMR_1X_REG(pexIf) ((PEX_IF_BASE(pexIf)) + 0x1A40)
+#define PEX_TL_CTRL_REG(pexIf) ((PEX_IF_BASE(pexIf)) + 0x1AB0)
+
+
+#define PEX_RAM_PARITY_CTRL_REG(pexIf) ((PEX_IF_BASE(pexIf)) + 0x1A50)
+/* PCI Express Control Register */
+/* PEX_CTRL_REG (PXCR) */
+
+#define PXCR_CONF_LINK_OFFS 0
+#define PXCR_CONF_LINK_MASK (1 << PXCR_CONF_LINK_OFFS)
+#define PXCR_CONF_LINK_X4 (0 << PXCR_CONF_LINK_OFFS)
+#define PXCR_CONF_LINK_X1 (1 << PXCR_CONF_LINK_OFFS)
+#define PXCR_DEV_TYPE_CTRL_OFFS		1	/* PCI Express Device Type Control */
+#define PXCR_DEV_TYPE_CTRL_MASK BIT1
+#define PXCR_DEV_TYPE_CTRL_CMPLX (1 << PXCR_DEV_TYPE_CTRL_OFFS)
+#define PXCR_DEV_TYPE_CTRL_POINT (0 << PXCR_DEV_TYPE_CTRL_OFFS)
+#define PXCR_CFG_MAP_TO_MEM_EN BIT2 /* Configuration Header Mapping
+ to Memory Space Enable */
+
+
+#define PXCR_RSRV1_OFFS 5
+#define PXCR_RSRV1_MASK (0x7 << PXCR_RSRV1_OFFS)
+#define PXCR_RSRV1_VAL (0x0 << PXCR_RSRV1_OFFS)
+
+#define PXCR_CONF_MAX_OUTSTND_OFFS 8 /*Maximum outstanding NP requests as a master*/
+#define PXCR_CONF_MAX_OUTSTND_MASK (0x3 << PXCR_CONF_MAX_OUTSTND_OFFS)
+
+
+#define PXCR_CONF_NFTS_OFFS 16 /*number of FTS Ordered-Sets*/
+#define PXCR_CONF_NFTS_MASK (0xff << PXCR_CONF_NFTS_OFFS)
+
+#define PXCR_CONF_MSTR_HOT_RESET BIT24 /*Master Hot-Reset.*/
+#define PXCR_CONF_MSTR_LB BIT26 /* Master Loopback */
+#define PXCR_CONF_MSTR_DIS_SCRMB BIT27 /* Master Disable Scrambling*/
+#define PXCR_CONF_DIRECT_DIS_SCRMB BIT28 /* Direct Disable Scrambling*/
+
+/* PCI Express Status Register */
+/* PEX_STATUS_REG (PXSR) */
+
+#define PXSR_DL_DOWN BIT0 /* DL_Down indication.*/
+
+#define PXSR_PEX_BUS_NUM_OFFS 8 /* Bus Number Indication */
+#define PXSR_PEX_BUS_NUM_MASK (0xff << PXSR_PEX_BUS_NUM_OFFS)
+
+#define PXSR_PEX_DEV_NUM_OFFS 16 /* Device Number Indication */
+#define PXSR_PEX_DEV_NUM_MASK (0x1f << PXSR_PEX_DEV_NUM_OFFS)
+
+#define PXSR_PEX_SLV_HOT_RESET BIT24 /* Slave Hot Reset Indication*/
+#define PXSR_PEX_SLV_DIS_LINK BIT25 /* Slave Disable Link Indication*/
+#define PXSR_PEX_SLV_LB BIT26 /* Slave Loopback Indication*/
+#define PXSR_PEX_SLV_DIS_SCRMB BIT27 /* Slave Disable Scrambling Indication*/
+
+
+/* PCI Express Completion Timeout Register */
+/* PEX_COMPLT_TMEOUT_REG (PXCTR)*/
+
+#define PXCTR_CMP_TO_THRSHLD_OFFS 0 /* Completion Timeout Threshold */
+#define PXCTR_CMP_TO_THRSHLD_MASK (0xffff << PXCTR_CMP_TO_THRSHLD_OFFS)
+
+/* PCI Express Power Management Extended Register */
+/* PEX_PWR_MNG_EXT_REG (PXPMER) */
+
+#define PXPMER_L1_ASPM_EN_OFFS 1
+#define PXPMER_L1_ASPM_EN_MASK (0x1 << PXPMER_L1_ASPM_EN_OFFS)
+
+/* PCI Express Flow Control Register */
+/* PEX_FLOW_CTRL_REG (PXFCR)*/
+
+#define PXFCR_PH_INIT_FC_OFFS 0 /*Posted Headers Flow Control Credit
+ Initial Value.*/
+#define PXFCR_PH_INIT_FC_MASK (0xff << PXFCR_PH_INIT_FC_OFFS)
+
+
+#define PXFCR_NPH_INIT_FC_OFFS 8 /* Classified Non-Posted Headers
+ Flow Control Credit Initial Value*/
+#define PXFCR_NPH_INIT_FC_MASK (0xff << PXFCR_NPH_INIT_FC_OFFS)
+
+#define PXFCR_CH_INIT_FC_OFFS 16 /* Completion Headers Flow Control
+ Credit Initial Value Infinite*/
+
+#define PXFCR_CH_INIT_FC_MASK (0xff << PXFCR_CH_INIT_FC_OFFS)
+
+#define PXFCR_FC_UPDATE_TO_OFFS 24 /* Flow Control Update Timeout */
+#define PXFCR_FC_UPDATE_TO_MASK (0xff << PXFCR_FC_UPDATE_TO_OFFS)
+
+/* PCI Express Acknowledge Timers (4X) Register */
+/* PEX_ACK_TMR_4X_REG (PXAT4R) */
+#define PXAT1R_ACK_LAT_TOX4_OFFS	0	/* Ack Latency Timer Timeout Value */
+#define PXAT1R_ACK_LAT_TOX4_MASK	(0xffff << PXAT1R_ACK_LAT_TOX4_OFFS)
+#define PXAT1R_ACK_RPLY_TOX4_OFFS	16	/* Ack Replay Timer Timeout Value */
+#define PXAT1R_ACK_RPLY_TOX4_MASK	(0xffff << PXAT1R_ACK_RPLY_TOX4_OFFS)
+
+/* PCI Express Acknowledge Timers (1X) Register */
+/* PEX_ACK_TMR_1X_REG (PXAT1R) */
+
+#define PXAT1R_ACK_LAT_TOX1_OFFS 0 /* Acknowledge Latency Timer Timeout
+ Value for 1X Link*/
+#define PXAT1R_ACK_LAT_TOX1_MASK (0xffff << PXAT1R_ACK_LAT_TOX1_OFFS)
+
+#define PXAT1R_ACK_RPLY_TOX1_OFFS 16 /* Acknowledge Replay Timer Timeout
+ Value for 1X*/
+#define PXAT1R_ACK_RPLY_TOX1_MASK (0xffff << PXAT1R_ACK_RPLY_TOX1_OFFS)
+
+
+/* PCI Express TL Control Register */
+/* PEX_TL_CTRL_REG (PXTCR) */
+
+#define PXTCR_TX_CMP_BUFF_NO_OFFS 8 /*Number of completion buffers in Tx*/
+#define PXTCR_TX_CMP_BUFF_NO_MASK (0xf << PXTCR_TX_CMP_BUFF_NO_OFFS)
+
+/* PCI Express Debug MAC Control Register */
+/* PEX_DEBUG_MAC_CTRL_REG (PXDMCR) */
+
+#define PXDMCR_LINKUP BIT4
+
+
+
+/**********************************************/
+/* PCI Express Configuration Header Registers */
+/**********************************************/
+#define PEX_CFG_DIRECT_ACCESS(pexIf,cfgReg) ((PEX_IF_BASE(pexIf)) + (cfgReg))
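+/* PEX_CFG_DIRECT_ACCESS maps a standard configuration-header offset of the  */
+/* local port into the PEX internal register space (PEX_IF_BASE + cfgReg),   */
+/* so the port's own header can be accessed with MV_REG_READ/MV_REG_WRITE    */
+/* rather than through configuration cycles (see mvPexMasterEnable above).   */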
+
+#define PEX_DEVICE_AND_VENDOR_ID 0x000
+#define PEX_STATUS_AND_COMMAND 0x004
+#define PEX_CLASS_CODE_AND_REVISION_ID 0x008
+#define PEX_BIST_HDR_TYPE_LAT_TMR_CACHE_LINE 0x00C
+#define PEX_MEMORY_BAR_BASE_ADDR(barNum) (0x010 + ((barNum) << 2))
+#define PEX_MV_BAR_BASE(barNum) (0x010 + (barNum) * 8)
+#define PEX_MV_BAR_BASE_HIGH(barNum) (0x014 + (barNum) * 8)
+#define PEX_BAR0_INTER_REG 0x010
+#define PEX_BAR0_INTER_REG_HIGH 0x014
+#define PEX_BAR1_REG 0x018
+#define PEX_BAR1_REG_HIGH 0x01C
+#define PEX_BAR2_REG 0x020
+#define PEX_BAR2_REG_HIGH 0x024
+
+#define PEX_SUBSYS_ID_AND_SUBSYS_VENDOR_ID 0x02C
+#define PEX_EXPANSION_ROM_BASE_ADDR_REG 0x030
+#define PEX_CAPABILTY_LIST_POINTER 0x034
+#define PEX_INTERRUPT_PIN_AND_LINE 0x03C
+
+/* capability list */
+#define PEX_POWER_MNG_CAPABILITY 0x040
+#define PEX_POWER_MNG_STATUS_CONTROL 0x044
+
+#define PEX_MSI_MESSAGE_CONTROL 0x050
+#define PEX_MSI_MESSAGE_ADDR 0x054
+#define PEX_MSI_MESSAGE_HIGH_ADDR 0x058
+#define PEX_MSI_MESSAGE_DATA 0x05C
+
+#define PEX_CAPABILITY_REG 0x60
+#define PEX_DEV_CAPABILITY_REG 0x64
+#define PEX_DEV_CTRL_STAT_REG 0x68
+#define PEX_LINK_CAPABILITY_REG 0x6C
+#define PEX_LINK_CTRL_STAT_REG 0x70
+
+#define PEX_ADV_ERR_RPRT_HDR_TRGT_REG 0x100
+#define PEX_UNCORRECT_ERR_STAT_REG 0x104
+#define PEX_UNCORRECT_ERR_MASK_REG 0x108
+#define PEX_UNCORRECT_ERR_SERVITY_REG 0x10C
+#define PEX_CORRECT_ERR_STAT_REG 0x110
+#define PEX_CORRECT_ERR_MASK_REG 0x114
+#define PEX_ADV_ERR_CAPABILITY_CTRL_REG 0x118
+#define PEX_HDR_LOG_FIRST_DWORD_REG 0x11C
+#define PEX_HDR_LOG_SECOND_DWORD_REG 0x120
+#define PEX_HDR_LOG_THIRD_DWORD_REG 0x124
+#define PEX_HDR_LOG_FOURTH_DWORD_REG 0x128
+
+
+
+/* PCI Express Device and Vendor ID Register*/
+/*PEX_DEVICE_AND_VENDOR_ID (PXDAVI)*/
+
+#define PXDAVI_VEN_ID_OFFS 0 /* Vendor ID */
+#define PXDAVI_VEN_ID_MASK (0xffff << PXDAVI_VEN_ID_OFFS)
+
+#define PXDAVI_DEV_ID_OFFS 16 /* Device ID */
+#define PXDAVI_DEV_ID_MASK (0xffff << PXDAVI_DEV_ID_OFFS)
+
+
+/* PCI Express Command and Status Register*/
+/*PEX_STATUS_AND_COMMAND (PXSAC)*/
+
+#define PXSAC_IO_EN BIT0 /* IO Enable */
+#define PXSAC_MEM_EN BIT1 /* Memory Enable */
+#define PXSAC_MASTER_EN BIT2 /* Master Enable */
+#define PXSAC_PERR_EN BIT6 /* Parity Errors Respond Enable */
+#define PXSAC_SERR_EN BIT8 /* Ability to assert SERR# line */
+#define PXSAC_INT_DIS BIT10 /* Interrupt Disable */
+#define PXSAC_INT_STAT BIT19 /* Interrupt Status */
+#define PXSAC_CAP_LIST BIT20 /* Capability List Support */
+#define PXSAC_MAS_DATA_PERR BIT24 /* Master Data Parity Error */
+#define PXSAC_SLAVE_TABORT BIT27 /* Signalled Target Abort */
+#define PXSAC_RT_ABORT		BIT28	/* Received Target Abort */
+#define PXSAC_MABORT		BIT29	/* Received Master Abort */
+#define PXSAC_SYSERR BIT30 /* Signalled system error */
+#define PXSAC_DET_PARERR BIT31 /* Detect Parity Error */
+
+
+/* PCI Express Class Code and Revision ID Register*/
+/*PEX_CLASS_CODE_AND_REVISION_ID (PXCCARI)*/
+
+#define PXCCARI_REVID_OFFS 0 /* Revision ID */
+#define PXCCARI_REVID_MASK (0xff << PXCCARI_REVID_OFFS)
+
+#define PXCCARI_FULL_CLASS_OFFS 8 /* Full Class Code */
+#define PXCCARI_FULL_CLASS_MASK (0xffffff << PXCCARI_FULL_CLASS_OFFS)
+
+#define PXCCARI_PROGIF_OFFS 8 /* Prog .I/F*/
+#define PXCCARI_PROGIF_MASK (0xff << PXCCARI_PROGIF_OFFS)
+
+#define PXCCARI_SUB_CLASS_OFFS 16 /* Sub Class*/
+#define PXCCARI_SUB_CLASS_MASK (0xff << PXCCARI_SUB_CLASS_OFFS)
+
+#define PXCCARI_BASE_CLASS_OFFS 24 /* Base Class*/
+#define PXCCARI_BASE_CLASS_MASK (0xff << PXCCARI_BASE_CLASS_OFFS)
+
+
+/* PCI Express BIST, Header Type and Cache Line Size Register*/
+/*PEX_BIST_HDR_TYPE_LAT_TMR_CACHE_LINE (PXBHTLTCL)*/
+
+#define PXBHTLTCL_CACHELINE_OFFS 0 /* Specifies the cache line size */
+#define PXBHTLTCL_CACHELINE_MASK (0xff << PXBHTLTCL_CACHELINE_OFFS)
+
+#define PXBHTLTCL_HEADTYPE_FULL_OFFS 16 /* Full Header Type */
+#define PXBHTLTCL_HEADTYPE_FULL_MASK (0xff << PXBHTLTCL_HEADTYPE_FULL_OFFS)
+
+#define PXBHTLTCL_MULTI_FUNC BIT23 /* Multi/Single function */
+
+#define PXBHTLTCL_HEADER_OFFS 16 /* Header type */
+#define PXBHTLTCL_HEADER_MASK (0x7f << PXBHTLTCL_HEADER_OFFS)
+#define PXBHTLTCL_HEADER_STANDARD (0x0 << PXBHTLTCL_HEADER_OFFS)
+#define PXBHTLTCL_HEADER_PCI2PCI_BRIDGE (0x1 << PXBHTLTCL_HEADER_OFFS)
+
+
+#define PXBHTLTCL_BISTCOMP_OFFS 24 /* BIST Completion Code */
+#define PXBHTLTCL_BISTCOMP_MASK (0xf << PXBHTLTCL_BISTCOMP_OFFS)
+
+#define PXBHTLTCL_BISTACT BIT30 /* BIST Activate bit */
+#define PXBHTLTCL_BISTCAP BIT31 /* BIST Capable Bit */
+#define PXBHTLTCL_BISTCAP_OFFS 31
+#define PXBHTLTCL_BISTCAP_MASK BIT31
+#define PXBHTLTCL_BISTCAP_VAL 0
+
+
+/* PCI Express Subsystem Device and Vendor ID */
+/*PEX_SUBSYS_ID_AND_SUBSYS_VENDOR_ID (PXSIASVI)*/
+
+#define PXSIASVI_VENID_OFFS 0 /* Subsystem Manufacturer Vendor ID Number */
+#define PXSIASVI_VENID_MASK (0xffff << PXSIASVI_VENID_OFFS)
+
+#define PXSIASVI_DEVID_OFFS 16 /* Subsystem Device ID Number */
+#define PXSIASVI_DEVID_MASK (0xffff << PXSIASVI_DEVID_OFFS)
+
+
+/* PCI Express Capability List Pointer Register*/
+/*PEX_CAPABILTY_LIST_POINTER (PXCLP)*/
+
+#define PXCLP_CAPPTR_OFFS 0 /* Capability List Pointer */
+#define PXCLP_CAPPTR_MASK (0xff << PXCLP_CAPPTR_OFFS)
+
+/* PCI Express Interrupt Pin and Line Register */
+/*PEX_INTERRUPT_PIN_AND_LINE (PXIPAL)*/
+
+#define PXIPAL_INTLINE_OFFS 0 /* Interrupt line (IRQ) */
+#define PXIPAL_INTLINE_MASK (0xff << PXIPAL_INTLINE_OFFS)
+
+#define PXIPAL_INTPIN_OFFS 8 /* interrupt pin (A,B,C,D) */
+#define PXIPAL_INTPIN_MASK (0xff << PXIPAL_INTPIN_OFFS)
+
+
+/* PCI Express Power Management Capability Header Register*/
+/*PEX_POWER_MNG_CAPABILITY (PXPMC)*/
+
+#define PXPMC_CAP_ID_OFFS 0 /* Capability ID */
+#define PXPMC_CAP_ID_MASK (0xff << PXPMC_CAP_ID_OFFS)
+
+#define PXPMC_NEXT_PTR_OFFS 8 /* Next Item Pointer */
+#define PXPMC_NEXT_PTR_MASK (0xff << PXPMC_NEXT_PTR_OFFS)
+
+#define PXPMC_PMC_VER_OFFS 16 /* PCI Power Management Capability Version*/
+#define PXPMC_PMC_VER_MASK (0x7 << PXPMC_PMC_VER_OFFS)
+
+#define PXPMC_DSI BIT21/* Device Specific Initialization */
+
+#define PXPMC_AUX_CUR_OFFS 22 /* Auxiliary Current Requirements */
+#define PXPMC_AUX_CUR_MASK (0x7 << PXPMC_AUX_CUR_OFFS)
+
+#define PXPMC_D1_SUP BIT25 /* D1 Power Management support*/
+
+#define PXPMC_D2_SUP BIT26 /* D2 Power Management support*/
+
+#define PXPMC_PME_SUP_OFFS 27 /* PM Event generation support*/
+#define PXPMC_PME_SUP_MASK (0x1f << PXPMC_PME_SUP_OFFS)
+
+/* PCI Express Power Management Control and Status Register*/
+/*PEX_POWER_MNG_STATUS_CONTROL (PXPMSC)*/
+
+#define PXPMSC_PM_STATE_OFFS 0 /* Power State */
+#define PXPMSC_PM_STATE_MASK (0x3 << PXPMSC_PM_STATE_OFFS)
+#define PXPMSC_PM_STATE_D0 (0x0 << PXPMSC_PM_STATE_OFFS)
+#define PXPMSC_PM_STATE_D1 (0x1 << PXPMSC_PM_STATE_OFFS)
+#define PXPMSC_PM_STATE_D2 (0x2 << PXPMSC_PM_STATE_OFFS)
+#define PXPMSC_PM_STATE_D3 (0x3 << PXPMSC_PM_STATE_OFFS)
+
+#define PXPMSC_PME_EN BIT8/* PM_PME Message Generation Enable */
+
+#define PXPMSC_PM_DATA_SEL_OFFS 9 /* Data Select*/
+#define PXPMSC_PM_DATA_SEL_MASK (0xf << PXPMSC_PM_DATA_SEL_OFFS)
+
+#define PXPMSC_PM_DATA_SCALE_OFFS 13 /* Data Scale */
+#define PXPMSC_PM_DATA_SCALE_MASK (0x3 << PXPMSC_PM_DATA_SCALE_OFFS)
+
+#define PXPMSC_PME_STAT BIT15/* PME Status */
+
+#define PXPMSC_PM_DATA_OFFS 24 /* State Data */
+#define PXPMSC_PM_DATA_MASK (0xff << PXPMSC_PM_DATA_OFFS)
+
+
+/* PCI Express MSI Message Control Register*/
+/*PEX_MSI_MESSAGE_CONTROL (PXMMC)*/
+
+#define PXMMC_CAP_ID_OFFS 0 /* Capability ID */
+#define PXMMC_CAP_ID_MASK (0xff << PXMMC_CAP_ID_OFFS)
+
+#define PXMMC_NEXT_PTR_OFFS 8 /* Next Item Pointer */
+#define PXMMC_NEXT_PTR_MASK (0xff << PXMMC_NEXT_PTR_OFFS)
+
+#define PXMMC_MSI_EN BIT18 /* MSI Enable */
+
+#define PXMMC_MULTI_CAP_OFFS 17 /* Multiple Message Capable */
+#define PXMMC_MULTI_CAP_MASK (0x7 << PXMMC_MULTI_CAP_OFFS)
+
+#define PXMMC_MULTI_EN_OFFS 20 /* Multiple Messages Enable */
+#define PXMMC_MULTI_EN_MASK (0x7 << PXMMC_MULTI_EN_OFFS)
+
+#define PXMMC_ADDR64 BIT23 /* 64-bit Addressing Capable */
+
+
+/* PCI Express MSI Message Address Register*/
+/*PEX_MSI_MESSAGE_ADDR (PXMMA)*/
+
+#define PXMMA_MSI_ADDR_OFFS 2 /* Message Address corresponds to
+ Address[31:2] of the MSI MWr TLP*/
+#define PXMMA_MSI_ADDR_MASK (0x3fffffff << PXMMA_MSI_ADDR_OFFS)
+
+
+/* PCI Express MSI Message Address (High) Register */
+/*PEX_MSI_MESSAGE_HIGH_ADDR (PXMMHA)*/
+
+#define PXMMA_MSI_ADDR_H_OFFS 0 /* Message Upper Address corresponds to
+ Address[63:32] of the MSI MWr TLP*/
+#define PXMMA_MSI_ADDR_H_MASK (0xffffffff << PXMMA_MSI_ADDR_H_OFFS )
+
+
+/* PCI Express MSI Message Data Register*/
+/*PEX_MSI_MESSAGE_DATA (PXMMD)*/
+
+#define PXMMD_MSI_DATA_OFFS 0 /* Message Data */
+#define PXMMD_MSI_DATA_MASK (0xffff << PXMMD_MSI_DATA_OFFS )
+
+
+/* PCI Express Capability Register*/
+/*PEX_CAPABILITY_REG (PXCR)*/
+
+#define PXCR_CAP_ID_OFFS 0 /* Capability ID*/
+#define PXCR_CAP_ID_MASK (0xff << PXCR_CAP_ID_OFFS)
+
+#define PXCR_NEXT_PTR_OFFS 8 /* Next Item Pointer*/
+#define PXCR_NEXT_PTR_MASK (0xff << PXCR_NEXT_PTR_OFFS)
+
+#define PXCR_CAP_VER_OFFS 16 /* Capability Version*/
+#define PXCR_CAP_VER_MASK (0xf << PXCR_CAP_VER_OFFS)
+
+#define PXCR_DEV_TYPE_OFFS 20 /* Device/Port Type*/
+#define PXCR_DEV_TYPE_MASK (0xf << PXCR_DEV_TYPE_OFFS)
+
+#define PXCR_SLOT_IMP BIT24 /* Slot Implemented*/
+
+#define PXCR_INT_MSG_NUM_OFFS 25 /* Interrupt Message Number*/
+#define PXCR_INT_MSG_NUM_MASK (0x1f << PXCR_INT_MSG_NUM_OFFS)
+
+
+/* PCI Express Device Capabilities Register */
+/*PEX_DEV_CAPABILITY_REG (PXDCR)*/
+
+#define PXDCR_MAX_PLD_SIZE_SUP_OFFS 0 /* Maximum Payload Size Supported*/
+#define PXDCR_MAX_PLD_SIZE_SUP_MASK (0x7 << PXDCR_MAX_PLD_SIZE_SUP_OFFS)
+
+#define PXDCR_EP_L0S_ACC_LAT_OFFS 6/* Endpoint L0s Acceptable Latency*/
+#define PXDCR_EP_L0S_ACC_LAT_MASK (0x7 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_64NS_LESS (0x0 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_64NS_128NS (0x1 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_128NS_256NS (0x2 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_256NS_512NS (0x3 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_512NS_1US (0x4 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_1US_2US (0x5 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_2US_4US (0x6 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_4US_MORE (0x7 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+
+#define PXDCR_EP_L1_ACC_LAT_OFFS 9 /* Endpoint L1 Acceptable Latency*/
+#define PXDCR_EP_L1_ACC_LAT_MASK (0x7 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_64NS_LESS (0x0 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_64NS_128NS (0x1 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_128NS_256NS (0x2 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_256NS_512NS (0x3 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_512NS_1US (0x4 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_1US_2US (0x5 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_2US_4US (0x6 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_4US_MORE (0x7 << PXDCR_EP_L1_ACC_LAT_OFFS)
+
+
+#define PXDCR_ATT_BUT_PRS_OFFS 12 /* Attention Button Present*/
+#define PXDCR_ATT_BUT_PRS_MASK BIT12
+#define PXDCR_ATT_BUT_PRS_IMPLEMENTED BIT12
+
+#define PXDCR_ATT_IND_PRS_OFFS 13 /* Attention Indicator Present*/
+#define PXDCR_ATT_IND_PRS_MASK BIT13
+#define PXDCR_ATT_IND_PRS_IMPLEMENTED BIT13
+
+#define PXDCR_PWR_IND_PRS_OFFS 14/* Power Indicator Present*/
+#define PXDCR_PWR_IND_PRS_MASK BIT14
+#define PXDCR_PWR_IND_PRS_IMPLEMENTED BIT14
+
+#define PXDCR_CAP_SPL_VAL_OFFS 18 /*Captured Slot Power Limit
+ Value*/
+#define PXDCR_CAP_SPL_VAL_MASK (0xff << PXDCR_CAP_SPL_VAL_OFFS)
+
+#define PXDCR_CAP_SP_LSCL_OFFS 26 /* Captured Slot Power Limit
+ Scale */
+#define PXDCR_CAP_SP_LSCL_MASK (0x3 << PXDCR_CAP_SP_LSCL_OFFS)
+
+/* PCI Express Device Control Status Register */
+/*PEX_DEV_CTRL_STAT_REG (PXDCSR)*/
+
+#define PXDCSR_COR_ERR_REP_EN BIT0 /* Correctable Error Reporting Enable*/
+#define PXDCSR_NF_ERR_REP_EN BIT1 /* Non-Fatal Error Reporting Enable*/
+#define PXDCSR_F_ERR_REP_EN BIT2 /* Fatal Error Reporting Enable*/
+#define PXDCSR_UR_REP_EN BIT3 /* Unsupported Request (UR)
+ Reporting Enable*/
+#define PXDCSR_EN_RO BIT4 /* Enable Relaxed Ordering*/
+
+#define PXDCSR_MAX_PLD_SZ_OFFS 5 /* Maximum Payload Size*/
+#define PXDCSR_MAX_PLD_SZ_MASK (0x7 << PXDCSR_MAX_PLD_SZ_OFFS)
+#define PXDCSR_MAX_PLD_SZ_128B (0x0 << PXDCSR_MAX_PLD_SZ_OFFS)
+#define PXDCSR_EN_NS BIT11 /* Enable No Snoop*/
+
+#define PXDCSR_MAX_RD_RQ_SZ_OFFS 12 /* Maximum Read Request Size*/
+#define PXDCSR_MAX_RD_RQ_SZ_MASK (0x7 << PXDCSR_MAX_RD_RQ_SZ_OFFS)
+#define PXDCSR_MAX_RD_RQ_SZ_128B (0x0 << PXDCSR_MAX_RD_RQ_SZ_OFFS)
+#define PXDCSR_MAX_RD_RQ_SZ_256B (0x1 << PXDCSR_MAX_RD_RQ_SZ_OFFS)
+#define PXDCSR_MAX_RD_RQ_SZ_512B (0x2 << PXDCSR_MAX_RD_RQ_SZ_OFFS)
+#define PXDCSR_MAX_RD_RQ_SZ_1KB (0x3 << PXDCSR_MAX_RD_RQ_SZ_OFFS)
+#define PXDCSR_MAX_RD_RQ_SZ_2KB (0x4 << PXDCSR_MAX_RD_RQ_SZ_OFFS)
+#define PXDCSR_MAX_RD_RQ_SZ_4KB (0x5 << PXDCSR_MAX_RD_RQ_SZ_OFFS)
+
+#define PXDCSR_COR_ERR_DET BIT16 /* Correctable Error Detected*/
+#define PXDCSR_NF_ERR_DET BIT17 /* Non-Fatal Error Detected.*/
+#define PXDCSR_F_ERR_DET BIT18 /* Fatal Error Detected.*/
+#define PXDCSR_UR_DET BIT19 /* Unsupported Request Detected */
+#define PXDCSR_AUX_PWR_DET BIT20 /* Reserved*/
+
+#define PXDCSR_TRANS_PEND_OFFS 21 /* Transactions Pending*/
+#define PXDCSR_TRANS_PEND_MASK BIT21
+#define PXDCSR_TRANS_PEND_NOT_COMPLETED (0x1 << PXDCSR_TRANS_PEND_OFFS)
+
+
+/* PCI Express Link Capabilities Register*/
+/*PEX_LINK_CAPABILITY_REG (PXLCR)*/
+
+#define PXLCR_MAX_LINK_SPD_OFFS 0 /* Maximum Link Speed*/
+#define PXLCR_MAX_LINK_SPD_MASK (0xf << PXLCR_MAX_LINK_SPD_OFFS)
+
+#define PXLCR_MAX_LNK_WDTH_OFFS 3 /* Maximum Link Width*/
+#define PXLCR_MAX_LNK_WDTH_MASK (0x3f << PXLCR_MAX_LNK_WDTH_OFFS)
+
+#define PXLCR_ASPM_SUP_OFFS 10 /* Active State Link PM Support*/
+#define PXLCR_ASPM_SUP_MASK (0x3 << PXLCR_ASPM_SUP_OFFS)
+
+#define PXLCR_L0S_EXT_LAT_OFFS 12 /* L0s Exit Latency*/
+#define PXLCR_L0S_EXT_LAT_MASK (0x7 << PXLCR_L0S_EXT_LAT_OFFS)
+#define PXLCR_L0S_EXT_LAT_64NS_LESS	(0x0 << PXLCR_L0S_EXT_LAT_OFFS)
+#define PXLCR_L0S_EXT_LAT_64NS_128NS	(0x1 << PXLCR_L0S_EXT_LAT_OFFS)
+#define PXLCR_L0S_EXT_LAT_128NS_256NS	(0x2 << PXLCR_L0S_EXT_LAT_OFFS)
+#define PXLCR_L0S_EXT_LAT_256NS_512NS	(0x3 << PXLCR_L0S_EXT_LAT_OFFS)
+#define PXLCR_L0S_EXT_LAT_512NS_1US	(0x4 << PXLCR_L0S_EXT_LAT_OFFS)
+#define PXLCR_L0S_EXT_LAT_1US_2US	(0x5 << PXLCR_L0S_EXT_LAT_OFFS)
+#define PXLCR_L0S_EXT_LAT_2US_4US	(0x6 << PXLCR_L0S_EXT_LAT_OFFS)
+
+#define PXLCR_POR_TNUM_OFFS 24 /* Port Number */
+#define PXLCR_POR_TNUM_MASK (0xff << PXLCR_POR_TNUM_OFFS)
+
+/* PCI Express Link Control Status Register */
+/*PEX_LINK_CTRL_STAT_REG (PXLCSR)*/
+
+#define PXLCSR_ASPM_CNT_OFFS 0 /* Active State Link PM Control */
+#define PXLCSR_ASPM_CNT_MASK (0x3 << PXLCSR_ASPM_CNT_OFFS)
+#define PXLCSR_ASPM_CNT_DISABLED (0x0 << PXLCSR_ASPM_CNT_OFFS)
+#define PXLCSR_ASPM_CNT_L0S_ENT_SUPP (0x1 << PXLCSR_ASPM_CNT_OFFS)
+#define PXLCSR_ASPM_CNT_L1S_ENT_SUPP (0x2 << PXLCSR_ASPM_CNT_OFFS)
+#define PXLCSR_ASPM_CNT_L0S_L1S_ENT_SUPP (0x3 << PXLCSR_ASPM_CNT_OFFS)
+
+#define PXLCSR_RCB_OFFS 3 /* Read Completion Boundary */
+#define PXLCSR_RCB_MASK BIT3
+#define PXLCSR_RCB_64B (0 << PXLCSR_RCB_OFFS)
+#define PXLCSR_RCB_128B (1 << PXLCSR_RCB_OFFS)
+
+#define PXLCSR_LNK_DIS BIT4 /* Link Disable */
+#define PXLCSR_RETRN_LNK BIT5 /* Retrain Link */
+#define PXLCSR_CMN_CLK_CFG BIT6 /* Common Clock Configuration */
+#define PXLCSR_EXTD_SNC BIT7 /* Extended Sync */
+
+#define PXLCSR_LNK_SPD_OFFS 16 /* Link Speed */
+#define PXLCSR_LNK_SPD_MASK (0xf << PXLCSR_LNK_SPD_OFFS)
+
+#define PXLCSR_NEG_LNK_WDTH_OFFS 20 /* Negotiated Link Width */
+#define PXLCSR_NEG_LNK_WDTH_MASK (0x3f << PXLCSR_NEG_LNK_WDTH_OFFS)
+#define PXLCSR_NEG_LNK_WDTH_X1 (0x1 << PXLCSR_NEG_LNK_WDTH_OFFS)
+
+#define PXLCSR_LNK_TRN BIT27 /* Link Training */
+
+#define PXLCSR_SLT_CLK_CFG_OFFS 28 /* Slot Clock Configuration */
+#define PXLCSR_SLT_CLK_CFG_MASK BIT28
+#define PXLCSR_SLT_CLK_CFG_INDPNT (0x0 << PXLCSR_SLT_CLK_CFG_OFFS)
+#define PXLCSR_SLT_CLK_CFG_REF (0x1 << PXLCSR_SLT_CLK_CFG_OFFS)
+
+/* PCI Express Advanced Error Report Header Register */
+/*PEX_ADV_ERR_RPRT_HDR_TRGT_REG (PXAERHTR)*/
+
+/* PCI Express Uncorrectable Error Status Register*/
+/*PEX_UNCORRECT_ERR_STAT_REG (PXUESR)*/
+
+/* PCI Express Uncorrectable Error Mask Register */
+/*PEX_UNCORRECT_ERR_MASK_REG (PXUEMR)*/
+
+/* PCI Express Uncorrectable Error Severity Register */
+/*PEX_UNCORRECT_ERR_SERVITY_REG (PXUESR)*/
+
+/* PCI Express Correctable Error Status Register */
+/*PEX_CORRECT_ERR_STAT_REG (PXCESR)*/
+
+/* PCI Express Correctable Error Mask Register */
+/*PEX_CORRECT_ERR_MASK_REG (PXCEMR)*/
+
+/* PCI Express Advanced Error Capability and Control Register*/
+/*PEX_ADV_ERR_CAPABILITY_CTRL_REG (PXAECCR)*/
+
+/* PCI Express Header Log First DWORD Register*/
+/*PEX_HDR_LOG_FIRST_DWORD_REG (PXHLFDR)*/
+
+/* PCI Express Header Log Second DWORD Register*/
+/*PEX_HDR_LOG_SECOND_DWORD_REG (PXHLSDR)*/
+
+/* PCI Express Header Log Third DWORD Register*/
+/*PEX_HDR_LOG_THIRD_DWORD_REG (PXHLTDR)*/
+
+/* PCI Express Header Log Fourth DWORD Register*/
+/*PEX_HDR_LOG_FOURTH_DWORD_REG (PXHLFDR)*/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* #ifndef __INCPEXREGSH */
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.c
new file mode 100644
index 000000000..19c871ae4
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.c
@@ -0,0 +1,313 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvPex.h"
+
+//#define MV_DEBUG
+/* defines */
+#ifdef MV_DEBUG
+ #define DB(x) x
+#else
+ #define DB(x)
+#endif
+
+/* locals */
+typedef struct
+{
+ MV_U32 data;
+ MV_U32 mask;
+}PEX_HEADER_DATA;
+
+/* local function forward declarations */
+MV_U32 mvPexHwConfigRead (MV_U32 pexIf, MV_U32 bus, MV_U32 dev, MV_U32 func,
+ MV_U32 regOff);
+MV_STATUS mvPexHwConfigWrite(MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+ MV_U32 func, MV_U32 regOff, MV_U32 data);
+void resetPexConfig(MV_U32 pexIf, MV_U32 bus, MV_U32 dev);
+
+
+PEX_HEADER_DATA configHdr[16] =
+{
+{0x888811ab, 0x00000000}, /*[device ID, vendor ID] */
+{0x00100007, 0x0000ffff}, /*[status register, command register] */
+{0x0604000e, 0x00000000}, /*[programming interface, sub class code, class code, revision ID] */
+{0x00010008, 0x00000000}, /*[BIST, header type, latency time, cache line] */
+{0x00000000, 0x00000000}, /*[base address 0] */
+{0x00000000, 0x00000000}, /*[base address 1] */
+{0x00000000, 0x00ffffff}, /*[secondary latency timer, subordinate bus number, secondary bus number, primary bus number] */
+{0x0000f101, 0x00000000}, /*[secondary status, IO limit, IO base] */
+{0x9ff0a000, 0x00000000}, /*[memory limit, memory base] */
+{0x0001fff1, 0x00000000}, /*[prefetch memory limit, prefetch memory base] */
+{0xffffffff, 0x00000000}, /*[prefetch memory base upper] */
+{0x00000000, 0x00000000}, /*[prefetch memory limit upper] */
+{0xeffff000, 0x00000000}, /*[IO limit upper 16 bits, IO base upper 16 bits] */
+{0x00000000, 0x00000000}, /*[reserved, capability pointer] */
+{0x00000000, 0x00000000}, /*[expansion ROM base address] */
+{0x00000000, 0x000000FF}, /*[bridge control, interrupt pin, interrupt line] */
+};
+
+
+#define HEADER_WRITE(data, offset) configHdr[offset/4].data = ((configHdr[offset/4].data & ~configHdr[offset/4].mask) | \
+ (data & configHdr[offset/4].mask))
+#define HEADER_READ(offset) configHdr[offset/4].data
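+/* configHdr emulates the type 1 (PCI-to-PCI bridge) configuration header of */
+/* the virtual bridge: for each dword, .data holds the current value and     */
+/* .mask marks the bits a configuration write may change. HEADER_WRITE       */
+/* updates only the masked bits; HEADER_READ returns the stored dword.       */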
+
+/*******************************************************************************
+* mvVrtBrgPexInit - Initialize PEX interfaces
+*
+* DESCRIPTION:
+*
+* This function is responsible for initialization of the PEX interface. It
+* configures the PEX BARs and windows in the following manner:
+*
+* Assumptions :
+* Bar0 is always internal registers bar
+* Bar1 is always the DRAM bar
+* Bar2 is always the Device bar
+*
+* 1) Sets the Internal registers bar base by obtaining the base from
+* the CPU Interface
+* 2) Sets the DRAM bar base and size by getting the base and size from
+* the CPU Interface when the size is the sum of all enabled DRAM
+* chip selects and the base is the base of CS0 .
+* 3) Sets the Device bar base and size by getting these values from the
+* CPU Interface when the base is the base of the lowest base of the
+* Device chip selects, and the
+*
+*
+* INPUT:
+*
+* pexIf - PEX interface number.
+*
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_OK on success, otherwise MV_ERROR or MV_BAD_PARAM.
+*
+*******************************************************************************/
+MV_STATUS mvPexVrtBrgInit(MV_U32 pexIf)
+{
+ /* reset PEX tree to recover previous U-boot/Boot configurations */
+ MV_U32 localBus = mvPexLocalBusNumGet(pexIf);
+
+
+ resetPexConfig(pexIf, localBus, 1);
+ return MV_OK;
+}
+
+
+MV_U32 mvPexVrtBrgConfigRead (MV_U32 pexIf, MV_U32 bus, MV_U32 dev, MV_U32 func,
+ MV_U32 regOff)
+{
+
+ MV_U32 localBus = mvPexLocalBusNumGet(pexIf);
+ MV_U32 localDev = mvPexLocalDevNumGet(pexIf);
+ MV_U32 val;
+ if(bus == localBus)
+ {
+ if(dev > 1)
+ {
+			/* on the local device allow only device #0 & #1 */
+ return 0xffffffff;
+ }
+ else
+ if (dev == localDev)
+ {
+			/* access our own (local device) configuration registers */
+ return mvPexHwConfigRead (pexIf, bus, dev, func, regOff);
+ }
+ else
+ {
+ /* access the virtual brg header */
+ return HEADER_READ(regOff);
+ }
+ }
+ else
+ if(bus == (localBus + 1))
+ {
+ /* access the device behind the virtual bridge */
+ if((dev == localDev) || (dev > 1))
+ {
+ return 0xffffffff;
+ }
+ else
+ {
+ /* access the device behind the virtual bridge, in this case
+ * change the bus number to the local bus number in order to
+ * generate type 0 config cycle
+ */
+ mvPexLocalBusNumSet(pexIf, bus);
+ mvPexLocalDevNumSet(pexIf, 1);
+ val = mvPexHwConfigRead (pexIf, bus, 0, func, regOff);
+ mvPexLocalBusNumSet(pexIf, localBus);
+ mvPexLocalDevNumSet(pexIf, localDev);
+ return val;
+ }
+ }
+ /* for all other devices use the HW function to get the
+ * requested registers
+ */
+ mvPexLocalDevNumSet(pexIf, 1);
+ val = mvPexHwConfigRead (pexIf, bus, dev, func, regOff);
+ mvPexLocalDevNumSet(pexIf, localDev);
+ return val;
+}
+
+
+MV_STATUS mvPexVrtBrgConfigWrite(MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+ MV_U32 func, MV_U32 regOff, MV_U32 data)
+{
+ MV_U32 localBus = mvPexLocalBusNumGet(pexIf);
+ MV_U32 localDev = mvPexLocalDevNumGet(pexIf);
+ MV_STATUS status;
+
+ if(bus == localBus)
+ {
+ if(dev > 1)
+ {
+ /* on the local device allow only device #0 & #1 */
+ return MV_ERROR;
+ }
+ else
+ if (dev == localDev)
+ {
+			/* write the memory controller registers */
+ return mvPexHwConfigWrite (pexIf, bus, dev, func, regOff, data);
+ }
+ else
+ {
+ /* access the virtual brg header */
+ HEADER_WRITE(data, regOff);
+ return MV_OK;
+ }
+ }
+ else
+ if(bus == (localBus + 1))
+ {
+ /* access the device behind the virtual bridge */
+ if((dev == localDev) || (dev > 1))
+ {
+ return MV_ERROR;
+ }
+ else
+ {
+ /* access the device behind the virtual bridge, in this case
+ * change the bus number to the local bus number in order to
+ * generate type 0 config cycle
+ */
+ //return mvPexHwConfigWrite (pexIf, localBus, dev, func, regOff, data);
+ mvPexLocalBusNumSet(pexIf, bus);
+ mvPexLocalDevNumSet(pexIf, 1);
+ status = mvPexHwConfigWrite (pexIf, bus, 0, func, regOff, data);
+ mvPexLocalBusNumSet(pexIf, localBus);
+ mvPexLocalDevNumSet(pexIf, localDev);
+ return status;
+
+ }
+ }
+ /* for all other devices use the HW function to get the
+ * requested registers
+ */
+ mvPexLocalDevNumSet(pexIf, 1);
+ status = mvPexHwConfigWrite (pexIf, bus, dev, func, regOff, data);
+ mvPexLocalDevNumSet(pexIf, localDev);
+ return status;
+}
+
+
+
+
+void resetPexConfig(MV_U32 pexIf, MV_U32 bus, MV_U32 dev)
+{
+ MV_U32 tData;
+ MV_U32 i;
+
+	/* restore the PEX configuration to its initialization state */
+	/* in case of a PEX P2P bridge, recurse into it and reset its config */
+ tData = mvPexHwConfigRead (pexIf, bus, dev, 0x0, 0x0);
+ if(tData != 0xffffffff)
+ {
+		/* an agent was found - check whether it is a P2P bridge */
+ tData = mvPexHwConfigRead (pexIf, bus, dev, 0x0, 0x8);
+ if((tData & 0xffff0000) == 0x06040000)
+ {/* P2P */
+ /* get the sec bus and the subordinate */
+ MV_U32 secBus;
+ tData = mvPexHwConfigRead (pexIf, bus, dev, 0x0, 0x18);
+ secBus = ((tData >> 8) & 0xff);
+ /* now scan on sec bus */
+ for(i = 0;i < 0xff;i++)
+ {
+ resetPexConfig(pexIf, secBus, i);
+ }
+ /* now reset this device */
+ DB(mvOsPrintf("Reset bus %d dev %d\n", bus, dev));
+ mvPexHwConfigWrite(pexIf, bus, dev, 0x0, 0x18, 0x0);
+ DB(mvOsPrintf("Reset bus %d dev %d\n", bus, dev));
+ }
+ }
+}
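+
+/*
+ * Illustrative note: resetPexConfig() walks the hierarchy depth first. For a
+ * hypothetical tree
+ *
+ *     bus 0, dev 1  (P2P bridge, secondary bus 1)
+ *         bus 1, dev 0  (endpoint)
+ *
+ * it first recurses over device numbers 0..254 of bus 1 and then clears the
+ * primary/secondary/subordinate bus-number dword (offset 0x18) of the bridge
+ * on bus 0, returning the tree to its power-on state.
+ */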
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.h
new file mode 100644
index 000000000..82eb72d50
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.h
@@ -0,0 +1,82 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCVRTBRGPEXH
+#define __INCVRTBRGPEXH
+
+
+/* Global Functions prototypes */
+/* mvPexVrtBrgInit - Initialize PEX interfaces */
+MV_STATUS mvPexVrtBrgInit(MV_U32 pexIf);
+
+/* mvPexVrtBrgConfigRead - Read from configuration space */
+MV_U32 mvPexVrtBrgConfigRead (MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+ MV_U32 func,MV_U32 regOff);
+
+/* mvPexVrtBrgConfigWrite - Write to configuration space */
+MV_STATUS mvPexVrtBrgConfigWrite(MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+ MV_U32 func, MV_U32 regOff, MV_U32 data);
+
+
+#endif /* __INCVRTBRGPEXH */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvCompVer.txt b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvCompVer.txt
new file mode 100644
index 000000000..85bfa612c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvCompVer.txt
@@ -0,0 +1,4 @@
+Global HAL Version: FEROCEON_HAL_3_1_7
+Unit HAL Version: 3.1.3
+Description: This component includes an implementation of the unit HAL drivers
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.c
new file mode 100644
index 000000000..6c5bc191c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.c
@@ -0,0 +1,1522 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "mvOs.h"
+#include "sflash/mvSFlash.h"
+#include "sflash/mvSFlashSpec.h"
+#include "spi/mvSpi.h"
+#include "spi/mvSpiCmnd.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+/*#define MV_DEBUG*/
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+/* Globals */
+static MV_SFLASH_DEVICE_PARAMS sflash[] = {
+ /* ST M25P32 SPI flash, 4MB, 64 sectors of 64K each */
+ {
+ MV_M25P_WREN_CMND_OPCD,
+ MV_M25P_WRDI_CMND_OPCD,
+ MV_M25P_RDID_CMND_OPCD,
+ MV_M25P_RDSR_CMND_OPCD,
+ MV_M25P_WRSR_CMND_OPCD,
+ MV_M25P_READ_CMND_OPCD,
+ MV_M25P_FAST_RD_CMND_OPCD,
+ MV_M25P_PP_CMND_OPCD,
+ MV_M25P_SE_CMND_OPCD,
+ MV_M25P_BE_CMND_OPCD,
+ MV_M25P_RES_CMND_OPCD,
+ MV_SFLASH_NO_SPECIFIC_OPCD, /* power save not supported */
+ MV_M25P32_SECTOR_SIZE,
+ MV_M25P32_SECTOR_NUMBER,
+ MV_M25P_PAGE_SIZE,
+ "ST M25P32",
+ MV_M25PXXX_ST_MANF_ID,
+ MV_M25P32_DEVICE_ID,
+ MV_M25P32_MAX_SPI_FREQ,
+ MV_M25P32_MAX_FAST_SPI_FREQ,
+ MV_M25P32_FAST_READ_DUMMY_BYTES
+ },
+ /* ST M25P64 SPI flash, 8MB, 128 sectors of 64K each */
+ {
+ MV_M25P_WREN_CMND_OPCD,
+ MV_M25P_WRDI_CMND_OPCD,
+ MV_M25P_RDID_CMND_OPCD,
+ MV_M25P_RDSR_CMND_OPCD,
+ MV_M25P_WRSR_CMND_OPCD,
+ MV_M25P_READ_CMND_OPCD,
+ MV_M25P_FAST_RD_CMND_OPCD,
+ MV_M25P_PP_CMND_OPCD,
+ MV_M25P_SE_CMND_OPCD,
+ MV_M25P_BE_CMND_OPCD,
+ MV_M25P_RES_CMND_OPCD,
+ MV_SFLASH_NO_SPECIFIC_OPCD, /* power save not supported */
+ MV_M25P64_SECTOR_SIZE,
+ MV_M25P64_SECTOR_NUMBER,
+ MV_M25P_PAGE_SIZE,
+ "ST M25P64",
+ MV_M25PXXX_ST_MANF_ID,
+ MV_M25P64_DEVICE_ID,
+ MV_M25P64_MAX_SPI_FREQ,
+ MV_M25P64_MAX_FAST_SPI_FREQ,
+ MV_M25P64_FAST_READ_DUMMY_BYTES
+ },
+ /* ST M25P128 SPI flash, 16MB, 64 sectors of 256K each */
+ {
+ MV_M25P_WREN_CMND_OPCD,
+ MV_M25P_WRDI_CMND_OPCD,
+ MV_M25P_RDID_CMND_OPCD,
+ MV_M25P_RDSR_CMND_OPCD,
+ MV_M25P_WRSR_CMND_OPCD,
+ MV_M25P_READ_CMND_OPCD,
+ MV_M25P_FAST_RD_CMND_OPCD,
+ MV_M25P_PP_CMND_OPCD,
+ MV_M25P_SE_CMND_OPCD,
+ MV_M25P_BE_CMND_OPCD,
+ MV_M25P_RES_CMND_OPCD,
+ MV_SFLASH_NO_SPECIFIC_OPCD, /* power save not supported */
+ MV_M25P128_SECTOR_SIZE,
+ MV_M25P128_SECTOR_NUMBER,
+ MV_M25P_PAGE_SIZE,
+ "ST M25P128",
+ MV_M25PXXX_ST_MANF_ID,
+ MV_M25P128_DEVICE_ID,
+ MV_M25P128_MAX_SPI_FREQ,
+ MV_M25P128_MAX_FAST_SPI_FREQ,
+ MV_M25P128_FAST_READ_DUMMY_BYTES
+ },
+ /* Macronix MXIC MX25L6405 SPI flash, 8MB, 128 sectors of 64K each */
+ {
+ MV_MX25L_WREN_CMND_OPCD,
+ MV_MX25L_WRDI_CMND_OPCD,
+ MV_MX25L_RDID_CMND_OPCD,
+ MV_MX25L_RDSR_CMND_OPCD,
+ MV_MX25L_WRSR_CMND_OPCD,
+ MV_MX25L_READ_CMND_OPCD,
+ MV_MX25L_FAST_RD_CMND_OPCD,
+ MV_MX25L_PP_CMND_OPCD,
+ MV_MX25L_SE_CMND_OPCD,
+ MV_MX25L_BE_CMND_OPCD,
+ MV_MX25L_RES_CMND_OPCD,
+ MV_MX25L_DP_CMND_OPCD,
+ MV_MX25L6405_SECTOR_SIZE,
+ MV_MX25L6405_SECTOR_NUMBER,
+ MV_MXIC_PAGE_SIZE,
+ "MXIC MX25L6405",
+ MV_MXIC_MANF_ID,
+ MV_MX25L6405_DEVICE_ID,
+ MV_MX25L6405_MAX_SPI_FREQ,
+ MV_MX25L6405_MAX_FAST_SPI_FREQ,
+ MV_MX25L6405_FAST_READ_DUMMY_BYTES
+ },
+ /* SPANSION S25FL128P SPI flash, 16MB, 64 sectors of 256K each */
+ {
+ MV_S25FL_WREN_CMND_OPCD,
+ MV_S25FL_WRDI_CMND_OPCD,
+ MV_S25FL_RDID_CMND_OPCD,
+ MV_S25FL_RDSR_CMND_OPCD,
+ MV_S25FL_WRSR_CMND_OPCD,
+ MV_S25FL_READ_CMND_OPCD,
+ MV_S25FL_FAST_RD_CMND_OPCD,
+ MV_S25FL_PP_CMND_OPCD,
+ MV_S25FL_SE_CMND_OPCD,
+ MV_S25FL_BE_CMND_OPCD,
+ MV_S25FL_RES_CMND_OPCD,
+ MV_S25FL_DP_CMND_OPCD,
+ MV_S25FL128_SECTOR_SIZE,
+ MV_S25FL128_SECTOR_NUMBER,
+ MV_S25FL_PAGE_SIZE,
+ "SPANSION S25FL128",
+ MV_SPANSION_MANF_ID,
+ MV_S25FL128_DEVICE_ID,
+ MV_S25FL128_MAX_SPI_FREQ,
+ MV_M25P128_MAX_FAST_SPI_FREQ,
+ MV_M25P128_FAST_READ_DUMMY_BYTES
+ }
+};
+
+/* Static Functions */
+static MV_STATUS mvWriteEnable (MV_SFLASH_INFO * pFlinfo);
+static MV_STATUS mvStatusRegGet (MV_SFLASH_INFO * pFlinfo, MV_U8 * pStatReg);
+static MV_STATUS mvStatusRegSet (MV_SFLASH_INFO * pFlinfo, MV_U8 sr);
+static MV_STATUS mvWaitOnWipClear(MV_SFLASH_INFO * pFlinfo);
+static MV_STATUS mvSFlashPageWr (MV_SFLASH_INFO * pFlinfo, MV_U32 offset, \
+ MV_U8* pPageBuff, MV_U32 buffSize);
+static MV_STATUS mvSFlashWithDefaultsIdGet (MV_SFLASH_INFO * pFlinfo, \
+ MV_U8* manId, MV_U16* devId);
+
+/*******************************************************************************
+* mvWriteEnable - serialize the write enable sequence
+*
+* DESCRIPTION:
+* transmit the sequence for write enable
+*
+********************************************************************************/
+static MV_STATUS mvWriteEnable(MV_SFLASH_INFO * pFlinfo)
+{
+ MV_U8 cmd[MV_SFLASH_WREN_CMND_LENGTH];
+
+
+ cmd[0] = sflash[pFlinfo->index].opcdWREN;
+
+ return mvSpiWriteThenRead(cmd, MV_SFLASH_WREN_CMND_LENGTH, NULL, 0, 0);
+}
+
+/*******************************************************************************
+* mvStatusRegGet - Retrieve the value of the status register
+*
+* DESCRIPTION:
+* perform the RDSR sequence to get the 8bit status register
+*
+********************************************************************************/
+static MV_STATUS mvStatusRegGet(MV_SFLASH_INFO * pFlinfo, MV_U8 * pStatReg)
+{
+ MV_STATUS ret;
+ MV_U8 cmd[MV_SFLASH_RDSR_CMND_LENGTH];
+ MV_U8 sr[MV_SFLASH_RDSR_REPLY_LENGTH];
+
+
+
+
+ cmd[0] = sflash[pFlinfo->index].opcdRDSR;
+
+ if ((ret = mvSpiWriteThenRead(cmd, MV_SFLASH_RDSR_CMND_LENGTH, sr,
+ MV_SFLASH_RDSR_REPLY_LENGTH,0)) != MV_OK)
+ return ret;
+
+ *pStatReg = sr[0];
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvWaitOnWipClear - Block waiting for the WIP (write in progress) to be cleared
+*
+* DESCRIPTION:
+* Block waiting for the WIP (write in progress) to be cleared
+*
+********************************************************************************/
+static MV_STATUS mvWaitOnWipClear(MV_SFLASH_INFO * pFlinfo)
+{
+ MV_STATUS ret;
+ MV_U32 i;
+ MV_U8 stat;
+
+ for (i=0; i<MV_SFLASH_MAX_WAIT_LOOP; i++)
+ {
+ if ((ret = mvStatusRegGet(pFlinfo, &stat)) != MV_OK)
+ return ret;
+
+ if ((stat & MV_SFLASH_STATUS_REG_WIP_MASK) == 0)
+ return MV_OK;
+ }
+
+ DB(mvOsPrintf("%s WARNING: Write Timeout!\n", __FUNCTION__);)
+ return MV_TIMEOUT;
+}
+
+/*******************************************************************************
+* mvWaitOnChipEraseDone - Block waiting for the WIP (write in progress) to be
+* cleared after a chip erase command which is supposed
+*                       to take about two and a half minutes
+*
+* DESCRIPTION:
+* Block waiting for the WIP (write in progress) to be cleared
+*
+********************************************************************************/
+static MV_STATUS mvWaitOnChipEraseDone(MV_SFLASH_INFO * pFlinfo)
+{
+ MV_STATUS ret;
+ MV_U32 i;
+ MV_U8 stat;
+
+ for (i=0; i<MV_SFLASH_CHIP_ERASE_MAX_WAIT_LOOP; i++)
+ {
+ if ((ret = mvStatusRegGet(pFlinfo, &stat)) != MV_OK)
+ return ret;
+
+ if ((stat & MV_SFLASH_STATUS_REG_WIP_MASK) == 0)
+ return MV_OK;
+ }
+
+ DB(mvOsPrintf("%s WARNING: Write Timeout!\n", __FUNCTION__);)
+ return MV_TIMEOUT;
+}
+
+/*******************************************************************************
+* mvStatusRegSet - Set the value of the 8bit status register
+*
+* DESCRIPTION:
+* Set the value of the 8bit status register
+*
+********************************************************************************/
+static MV_STATUS mvStatusRegSet(MV_SFLASH_INFO * pFlinfo, MV_U8 sr)
+{
+ MV_STATUS ret;
+ MV_U8 cmd[MV_SFLASH_WRSR_CMND_LENGTH];
+
+
+ /* Issue the Write enable command prior the WRSR command */
+ if ((ret = mvWriteEnable(pFlinfo)) != MV_OK)
+ return ret;
+
+ /* Write the SR with the new values */
+ cmd[0] = sflash[pFlinfo->index].opcdWRSR;
+ cmd[1] = sr;
+
+ if ((ret = mvSpiWriteThenRead(cmd, MV_SFLASH_WRSR_CMND_LENGTH, NULL, 0, 0)) != MV_OK)
+ return ret;
+
+ if ((ret = mvWaitOnWipClear(pFlinfo)) != MV_OK)
+ return ret;
+
+ mvOsDelay(1);
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvSFlashPageWr - Write up to 256 Bytes in the same page
+*
+* DESCRIPTION:
+*       Write a buffer of up to the page size in length, provided that the whole
+*       address range falls within a single page (aligned to page boundaries)
+*
+*******************************************************************************/
+static MV_STATUS mvSFlashPageWr (MV_SFLASH_INFO * pFlinfo, MV_U32 offset,
+ MV_U8* pPageBuff, MV_U32 buffSize)
+{
+ MV_STATUS ret;
+ MV_U8 cmd[MV_SFLASH_PP_CMND_LENGTH];
+
+
+ /* Protection - check if the model was detected */
+ if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+ {
+ DB(mvOsPrintf("%s WARNING: Invalid parameter device index!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+
+    /* check that we do not cross the page boundaries */
+ if (((offset & (sflash[pFlinfo->index].pageSize - 1)) + buffSize) >
+ sflash[pFlinfo->index].pageSize)
+ {
+        DB(mvOsPrintf("%s WARNING: Page alignment problem!\n", __FUNCTION__);)
+ return MV_OUT_OF_RANGE;
+ }
+
+ /* Issue the Write enable command prior the page program command */
+ if ((ret = mvWriteEnable(pFlinfo)) != MV_OK)
+ return ret;
+
+ cmd[0] = sflash[pFlinfo->index].opcdPP;
+ cmd[1] = ((offset >> 16) & 0xFF);
+ cmd[2] = ((offset >> 8) & 0xFF);
+ cmd[3] = (offset & 0xFF);
+
+ if ((ret = mvSpiWriteThenWrite(cmd, MV_SFLASH_PP_CMND_LENGTH, pPageBuff, buffSize)) != MV_OK)
+ return ret;
+
+ if ((ret = mvWaitOnWipClear(pFlinfo)) != MV_OK)
+ return ret;
+
+ return MV_OK;
+}
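+
+/*
+ * Worked example (illustrative, assuming a 256 byte page as used by the
+ * devices in the table above): a request at offset 0x1F0 with buffSize 32
+ * gives (0x1F0 & 0xFF) + 32 = 0xF0 + 0x20 = 0x110 > 0x100, so the write
+ * would cross a page boundary and mvSFlashPageWr() rejects it with
+ * MV_OUT_OF_RANGE; the caller (mvSFlashBlockWr below) is what splits such
+ * requests into page-sized chunks.
+ */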
+
+/*******************************************************************************
+* mvSFlashWithDefaultsIdGet - Try to read the manufacturer and Device IDs from
+* the device using the default RDID opcode and the default WREN opcode.
+*
+* DESCRIPTION:
+* This is used to detect a generic device that uses the default opcodes
+* for the WREN and RDID.
+*
+********************************************************************************/
+static MV_STATUS mvSFlashWithDefaultsIdGet (MV_SFLASH_INFO * pFlinfo, MV_U8* manId, MV_U16* devId)
+{
+ MV_STATUS ret;
+ MV_U8 cmdRDID[MV_SFLASH_RDID_CMND_LENGTH];
+ MV_U8 id[MV_SFLASH_RDID_REPLY_LENGTH];
+
+
+
+ /* Use the default RDID opcode to read the IDs */
+ cmdRDID[0] = MV_SFLASH_DEFAULT_RDID_OPCD; /* unknown model try default */
+ if ((ret = mvSpiWriteThenRead(cmdRDID, MV_SFLASH_RDID_CMND_LENGTH, id, MV_SFLASH_RDID_REPLY_LENGTH, 0)) != MV_OK)
+ return ret;
+
+ *manId = id[0];
+ *devId = 0;
+ *devId |= (id[1] << 8);
+ *devId |= id[2];
+
+ return MV_OK;
+}
+
+/*
+#####################################################################################
+#####################################################################################
+*/
+
+/*******************************************************************************
+* mvSFlashInit - Initialize the serial flash device
+*
+* DESCRIPTION:
+*       Perform the necessary initialization and configuration
+*
+* INPUT:
+* pFlinfo: pointer to the Flash information structure
+* pFlinfo->baseAddr: base address in fast mode.
+*       pFlinfo->index: Index of the flash in the sflash table. If the SPI
+*                       flash device does not support the Read ID command with
+*                       the standard opcode, then the user should supply this
+*                       as an input to skip the autodetection process.
+*
+* OUTPUT:
+* pFlinfo: pointer to the Flash information structure after detection
+* pFlinfo->manufacturerId: Manufacturer ID
+* pFlinfo->deviceId: Device ID
+* pFlinfo->sectorSize: size of the sector (all sectors are the same).
+* pFlinfo->sectorNumber: number of sectors.
+* pFlinfo->pageSize: size of the page.
+*       pFlinfo->index: Index of the detected flash in the sflash table
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashInit (MV_SFLASH_INFO * pFlinfo)
+{
+ MV_STATUS ret;
+ MV_U8 manf;
+ MV_U16 dev;
+ MV_U32 indx;
+ MV_BOOL detectFlag = MV_FALSE;
+
+ /* check for NULL pointer */
+ if (pFlinfo == NULL)
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ /* Initialize the SPI interface with low frequency to make sure that the read ID succeeds */
+ if ((ret = mvSpiInit(MV_SFLASH_BASIC_SPI_FREQ)) != MV_OK)
+ {
+ mvOsPrintf("%s ERROR: Failed to initialize the SPI interface!\n", __FUNCTION__);
+ return ret;
+ }
+
+ /* First try to read the Manufacturer and Device IDs */
+ if ((ret = mvSFlashIdGet(pFlinfo, &manf, &dev)) != MV_OK)
+ {
+ mvOsPrintf("%s ERROR: Failed to get the SFlash ID!\n", __FUNCTION__);
+ return ret;
+ }
+
+ /* loop over the whole table and look for the appropriate SFLASH */
+ for (indx=0; indx<MV_ARRAY_SIZE(sflash); indx++)
+ {
+ if ((manf == sflash[indx].manufacturerId) && (dev == sflash[indx].deviceId))
+ {
+ pFlinfo->manufacturerId = manf;
+ pFlinfo->deviceId = dev;
+ pFlinfo->index = indx;
+ detectFlag = MV_TRUE;
+ }
+ }
+
+ if(!detectFlag)
+ {
+ mvOsPrintf("%s ERROR: Unknown SPI flash device!\n", __FUNCTION__);
+ return MV_FAIL;
+ }
+
+ /* fill the info based on the model detected */
+ pFlinfo->sectorSize = sflash[pFlinfo->index].sectorSize;
+ pFlinfo->sectorNumber = sflash[pFlinfo->index].sectorNumber;
+ pFlinfo->pageSize = sflash[pFlinfo->index].pageSize;
+
+ /* Set the SPI frequency to the MAX allowed for the device for best performance */
+ if ((ret = mvSpiBaudRateSet(sflash[pFlinfo->index].spiMaxFreq)) != MV_OK)
+ {
+ mvOsPrintf("%s ERROR: Failed to set the SPI frequency!\n", __FUNCTION__);
+ return ret;
+ }
+
+ /* As default lock the SR */
+ if ((ret = mvSFlashStatRegLock(pFlinfo, MV_TRUE)) != MV_OK)
+ return ret;
+
+ return MV_OK;
+}
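+
+#if 0
+/* Usage sketch (illustrative, not built): typical autodetect-and-read
+ * sequence. Setting index out of range (MV_INVALID_DEVICE_NUMBER) makes
+ * mvSFlashIdGet() fall back to the default RDID opcode; the base address
+ * here is an assumption for the example only.
+ */
+static void mvSFlashInitExample(void)
+{
+	MV_SFLASH_INFO flinfo;
+	MV_U8 buf[64];
+
+	flinfo.baseAddr = 0;
+	flinfo.index = MV_INVALID_DEVICE_NUMBER;	/* force autodetection */
+
+	if (mvSFlashInit(&flinfo) != MV_OK)
+		return;
+
+	mvOsPrintf("detected %s, %u bytes\n",
+		   mvSFlashModelGet(&flinfo), mvSFlashSizeGet(&flinfo));
+
+	/* read 64 bytes from the beginning of the flash */
+	mvSFlashBlockRd(&flinfo, 0, buf, sizeof(buf));
+}
+#endif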
+
+/*******************************************************************************
+* mvSFlashSectorErase - Erase a single sector of the serial flash
+*
+* DESCRIPTION:
+* Issue the erase sector command and address
+*
+* INPUT:
+* pFlinfo: pointer to the Flash information structure
+* secNumber: sector Number to erase (0 -> (sectorNumber-1))
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashSectorErase (MV_SFLASH_INFO * pFlinfo, MV_U32 secNumber)
+{
+ MV_STATUS ret;
+ MV_U8 cmd[MV_SFLASH_SE_CMND_LENGTH];
+
+ MV_U32 secAddr = (secNumber * pFlinfo->sectorSize);
+#if 0
+ MV_U32 i;
+ MV_U32 * pW = (MV_U32*) (secAddr + pFlinfo->baseAddr);
+ MV_U32 erasedWord = 0xFFFFFFFF;
+ MV_U32 wordsPerSector = (pFlinfo->sectorSize / sizeof(MV_U32));
+ MV_BOOL eraseNeeded = MV_FALSE;
+#endif
+ /* check for NULL pointer */
+ if (pFlinfo == NULL)
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ /* Protection - check if the model was detected */
+ if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+ {
+        DB(mvOsPrintf("%s WARNING: Invalid parameter index!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+
+ /* check that the sector number is valid */
+ if (secNumber >= pFlinfo->sectorNumber)
+ {
+        DB(mvOsPrintf("%s WARNING: Invalid parameter sector number!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+
+	/* we don't want to access SPI in direct mode from the indirect API,
+	   because of timing issues between CS asserts. */
+#if 0
+ /* First compare to FF and check if erase is needed */
+ for (i=0; i<wordsPerSector; i++)
+ {
+ if (memcmp(pW, &erasedWord, sizeof(MV_U32)) != 0)
+ {
+ eraseNeeded = MV_TRUE;
+ break;
+ }
+
+ ++pW;
+ }
+ if (!eraseNeeded)
+ return MV_OK;
+#endif
+
+ cmd[0] = sflash[pFlinfo->index].opcdSE;
+ cmd[1] = ((secAddr >> 16) & 0xFF);
+ cmd[2] = ((secAddr >> 8) & 0xFF);
+ cmd[3] = (secAddr & 0xFF);
+
+ /* Issue the Write enable command prior the sector erase command */
+ if ((ret = mvWriteEnable(pFlinfo)) != MV_OK)
+ return ret;
+
+ if ((ret = mvSpiWriteThenWrite(cmd, MV_SFLASH_SE_CMND_LENGTH, NULL, 0)) != MV_OK)
+ return ret;
+
+ if ((ret = mvWaitOnWipClear(pFlinfo)) != MV_OK)
+ return ret;
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvSFlashChipErase - Erase the whole serial flash
+*
+* DESCRIPTION:
+* Issue the bulk (chip) erase command
+*
+* INPUT:
+* pFlinfo: pointer to the Flash information structure
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashChipErase (MV_SFLASH_INFO * pFlinfo)
+{
+ MV_STATUS ret;
+ MV_U8 cmd[MV_SFLASH_BE_CMND_LENGTH];
+
+
+ /* check for NULL pointer */
+ if (pFlinfo == NULL)
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ /* Protection - check if the model was detected */
+ if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+ {
+        DB(mvOsPrintf("%s WARNING: Invalid parameter index!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+
+ cmd[0] = sflash[pFlinfo->index].opcdBE;
+
+ /* Issue the Write enable command prior the Bulk erase command */
+ if ((ret = mvWriteEnable(pFlinfo)) != MV_OK)
+ return ret;
+
+ if ((ret = mvSpiWriteThenWrite(cmd, MV_SFLASH_BE_CMND_LENGTH, NULL, 0)) != MV_OK)
+ return ret;
+
+ if ((ret = mvWaitOnChipEraseDone(pFlinfo)) != MV_OK)
+ return ret;
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvSFlashBlockRd - Read from the serial flash
+*
+* DESCRIPTION:
+*       Issue the read command and address, then perform the needed read
+*
+* INPUT:
+* pFlinfo: pointer to the Flash information structure
+*       offset: byte offset within the flash to start reading from
+* pReadBuff: pointer to the buffer to read the data in
+* buffSize: size of the buffer to read.
+*
+* OUTPUT:
+* pReadBuff: pointer to the buffer containing the read data
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashBlockRd (MV_SFLASH_INFO * pFlinfo, MV_U32 offset,
+ MV_U8* pReadBuff, MV_U32 buffSize)
+{
+ MV_U8 cmd[MV_SFLASH_READ_CMND_LENGTH];
+
+
+ /* check for NULL pointer */
+ if ((pFlinfo == NULL) || (pReadBuff == NULL))
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ /* Protection - check if the model was detected */
+ if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+ {
+        DB(mvOsPrintf("%s WARNING: Invalid parameter index!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+
+ cmd[0] = sflash[pFlinfo->index].opcdREAD;
+ cmd[1] = ((offset >> 16) & 0xFF);
+ cmd[2] = ((offset >> 8) & 0xFF);
+ cmd[3] = (offset & 0xFF);
+
+ return mvSpiWriteThenRead(cmd, MV_SFLASH_READ_CMND_LENGTH, pReadBuff, buffSize, 0);
+}
+
+/*******************************************************************************
+* mvSFlashFastBlockRd - Fast read from the serial flash
+*
+* DESCRIPTION:
+*       Issue the fast read command and address, then perform the needed read
+*
+* INPUT:
+* pFlinfo: pointer to the Flash information structure
+*       offset: byte offset within the flash to start reading from
+* pReadBuff: pointer to the buffer to read the data in
+* buffSize: size of the buffer to read.
+*
+* OUTPUT:
+* pReadBuff: pointer to the buffer containing the read data
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashFastBlockRd (MV_SFLASH_INFO * pFlinfo, MV_U32 offset,
+ MV_U8* pReadBuff, MV_U32 buffSize)
+{
+ MV_U8 cmd[MV_SFLASH_READ_CMND_LENGTH];
+ MV_STATUS ret;
+
+ /* check for NULL pointer */
+ if ((pFlinfo == NULL) || (pReadBuff == NULL))
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ /* Protection - check if the model was detected */
+ if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+ {
+        DB(mvOsPrintf("%s WARNING: Invalid parameter index!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+
+ /* Set the SPI frequency to the MAX allowed for fast-read operations */
+ mvOsPrintf("Setting freq to %d.\n",sflash[pFlinfo->index].spiMaxFastFreq);
+ if ((ret = mvSpiBaudRateSet(sflash[pFlinfo->index].spiMaxFastFreq)) != MV_OK)
+ {
+ mvOsPrintf("%s ERROR: Failed to set the SPI fast frequency!\n", __FUNCTION__);
+ return ret;
+ }
+
+ cmd[0] = sflash[pFlinfo->index].opcdFSTRD;
+ cmd[1] = ((offset >> 16) & 0xFF);
+ cmd[2] = ((offset >> 8) & 0xFF);
+ cmd[3] = (offset & 0xFF);
+
+
+ ret = mvSpiWriteThenRead(cmd, MV_SFLASH_READ_CMND_LENGTH, pReadBuff, buffSize,
+ sflash[pFlinfo->index].spiFastRdDummyBytes);
+
+ /* Reset the SPI frequency to the MAX allowed for the device for best performance */
+ if ((ret = mvSpiBaudRateSet(sflash[pFlinfo->index].spiMaxFreq)) != MV_OK)
+ {
+ mvOsPrintf("%s ERROR: Failed to set the SPI frequency!\n", __FUNCTION__);
+ return ret;
+ }
+
+ return ret;
+}
+
+
+/*******************************************************************************
+* mvSFlashBlockWr - Write a buffer with any size
+*
+* DESCRIPTION:
+* write regardless of the page boundaries and size limit per Page
+* program command
+*
+* INPUT:
+* pFlinfo: pointer to the Flash information structure
+* offset: byte offset within the flash region
+* pWriteBuff: pointer to the buffer holding the data to program
+* buffSize: size of the buffer to write
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashBlockWr (MV_SFLASH_INFO * pFlinfo, MV_U32 offset,
+ MV_U8* pWriteBuff, MV_U32 buffSize)
+{
+ MV_STATUS ret;
+ MV_U32 data2write = buffSize;
+ MV_U32 preAllOffset = (offset & MV_SFLASH_PAGE_ALLIGN_MASK(MV_M25P_PAGE_SIZE));
+ MV_U32 preAllSz = (preAllOffset ? (MV_M25P_PAGE_SIZE - preAllOffset) : 0);
+ MV_U32 writeOffset = offset;
+
+ /* check for NULL pointer */
+#ifndef CONFIG_MARVELL
+ if(NULL == pWriteBuff)
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+#endif
+
+ if (pFlinfo == NULL)
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ /* Protection - check if the model was detected */
+ if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+ {
+        DB(mvOsPrintf("%s WARNING: Invalid parameter index!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+
+ /* check that the buffer size does not exceed the flash size */
+ if ((offset + buffSize) > mvSFlashSizeGet(pFlinfo))
+ {
+ DB(mvOsPrintf("%s WARNING: Write exceeds flash size!\n", __FUNCTION__);)
+ return MV_OUT_OF_RANGE;
+ }
+
+ /* check if the total block size is less than the first chunk remainder */
+ if (data2write < preAllSz)
+ preAllSz = data2write;
+
+    /* check if programming does not start at a page-aligned offset */
+ if (preAllSz)
+ {
+ if ((ret = mvSFlashPageWr(pFlinfo, writeOffset, pWriteBuff, preAllSz)) != MV_OK)
+ return ret;
+
+ /* increment pointers and counters */
+ writeOffset += preAllSz;
+ data2write -= preAllSz;
+ pWriteBuff += preAllSz;
+ }
+
+ /* program the data that fits in complete page chunks */
+ while (data2write >= sflash[pFlinfo->index].pageSize)
+ {
+ if ((ret = mvSFlashPageWr(pFlinfo, writeOffset, pWriteBuff, sflash[pFlinfo->index].pageSize)) != MV_OK)
+ return ret;
+
+ /* increment pointers and counters */
+ writeOffset += sflash[pFlinfo->index].pageSize;
+ data2write -= sflash[pFlinfo->index].pageSize;
+ pWriteBuff += sflash[pFlinfo->index].pageSize;
+ }
+
+ /* program the last partial chunk */
+ if (data2write)
+ {
+ if ((ret = mvSFlashPageWr(pFlinfo, writeOffset, pWriteBuff, data2write)) != MV_OK)
+ return ret;
+ }
+
+ return MV_OK;
+}
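+
+/*
+ * Worked example (illustrative, assuming the 256 byte M25P page size used in
+ * the alignment mask above): writing 600 bytes at offset 0x130 is split into
+ *   - a head chunk of 0x100 - 0x30 = 208 bytes to reach the next page
+ *     boundary (offsets 0x130..0x1FF),
+ *   - one full 256 byte page at offset 0x200,
+ *   - a tail chunk of 600 - 208 - 256 = 136 bytes at offset 0x300,
+ * each programmed with a separate mvSFlashPageWr() call.
+ */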
+
+/*******************************************************************************
+* mvSFlashIdGet - Get the manufacturer and device IDs.
+*
+* DESCRIPTION:
+*       Get the manufacturer and device IDs from the serial flash by
+*       writing the RDID command and then reading 3 bytes of data. When
+*       this is called for the first time, in order to detect the
+*       manufacturer and device IDs, the default RDID opcode is used
+*       unless the device index is supplied by the user (for SPI flashes
+*       that do not use the default RDID opcode).
+*
+* INPUT:
+* pFlinfo: pointer to the Flash information structure
+* pManId: pointer to the 8bit variable to hold the manufacturing ID
+* pDevId: pointer to the 16bit variable to hold the device ID
+*
+* OUTPUT:
+* pManId: pointer to the 8bit variable holding the manufacturing ID
+* pDevId: pointer to the 16bit variable holding the device ID
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashIdGet (MV_SFLASH_INFO * pFlinfo, MV_U8* pManId, MV_U16* pDevId)
+{
+ MV_STATUS ret;
+ MV_U8 cmd[MV_SFLASH_RDID_CMND_LENGTH];
+ MV_U8 id[MV_SFLASH_RDID_REPLY_LENGTH];
+
+
+
+ /* check for NULL pointer */
+ if ((pFlinfo == NULL) || (pManId == NULL) || (pDevId == NULL))
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+ return mvSFlashWithDefaultsIdGet(pFlinfo, pManId, pDevId);
+ else
+ cmd[0] = sflash[pFlinfo->index].opcdRDID;
+
+ if ((ret = mvSpiWriteThenRead(cmd, MV_SFLASH_RDID_CMND_LENGTH, id, MV_SFLASH_RDID_REPLY_LENGTH, 0)) != MV_OK)
+ return ret;
+
+ *pManId = id[0];
+ *pDevId = 0;
+ *pDevId |= (id[1] << 8);
+ *pDevId |= id[2];
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvSFlashWpRegionSet - Set the Write-Protected region
+*
+* DESCRIPTION:
+* Set the Write-Protected region
+*
+* INPUT:
+* pFlinfo: pointer to the Flash information structure
+* wpRegion: which region will be protected
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashWpRegionSet (MV_SFLASH_INFO * pFlinfo, MV_SFLASH_WP_REGION wpRegion)
+{
+ MV_U8 wpMask;
+
+ /* check for NULL pointer */
+ if (pFlinfo == NULL)
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ /* Protection - check if the model was detected */
+ if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+ {
+        DB(mvOsPrintf("%s WARNING: Invalid parameter index!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+
+ /* Check if the chip is an ST flash; then WP supports only 3 bits */
+ if (pFlinfo->manufacturerId == MV_M25PXXX_ST_MANF_ID)
+ {
+ switch (wpRegion)
+ {
+ case MV_WP_NONE:
+ wpMask = MV_M25P_STATUS_BP_NONE;
+ break;
+
+ case MV_WP_UPR_1OF128:
+            DB(mvOsPrintf("%s WARNING: Invalid option for this flash chip!\n", __FUNCTION__);)
+ return MV_NOT_SUPPORTED;
+
+ case MV_WP_UPR_1OF64:
+ wpMask = MV_M25P_STATUS_BP_1_OF_64;
+ break;
+
+ case MV_WP_UPR_1OF32:
+ wpMask = MV_M25P_STATUS_BP_1_OF_32;
+ break;
+
+ case MV_WP_UPR_1OF16:
+ wpMask = MV_M25P_STATUS_BP_1_OF_16;
+ break;
+
+ case MV_WP_UPR_1OF8:
+ wpMask = MV_M25P_STATUS_BP_1_OF_8;
+ break;
+
+ case MV_WP_UPR_1OF4:
+ wpMask = MV_M25P_STATUS_BP_1_OF_4;
+ break;
+
+ case MV_WP_UPR_1OF2:
+ wpMask = MV_M25P_STATUS_BP_1_OF_2;
+ break;
+
+ case MV_WP_ALL:
+ wpMask = MV_M25P_STATUS_BP_ALL;
+ break;
+
+ default:
+            DB(mvOsPrintf("%s WARNING: Invalid parameter WP region!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+ }
+    /* check if the manufacturer is MXIC; the WP field is then 4 bits */
+ else if (pFlinfo->manufacturerId == MV_MXIC_MANF_ID)
+ {
+ switch (wpRegion)
+ {
+ case MV_WP_NONE:
+ wpMask = MV_MX25L_STATUS_BP_NONE;
+ break;
+
+ case MV_WP_UPR_1OF128:
+ wpMask = MV_MX25L_STATUS_BP_1_OF_128;
+ break;
+
+ case MV_WP_UPR_1OF64:
+ wpMask = MV_MX25L_STATUS_BP_1_OF_64;
+ break;
+
+ case MV_WP_UPR_1OF32:
+ wpMask = MV_MX25L_STATUS_BP_1_OF_32;
+ break;
+
+ case MV_WP_UPR_1OF16:
+ wpMask = MV_MX25L_STATUS_BP_1_OF_16;
+ break;
+
+ case MV_WP_UPR_1OF8:
+ wpMask = MV_MX25L_STATUS_BP_1_OF_8;
+ break;
+
+ case MV_WP_UPR_1OF4:
+ wpMask = MV_MX25L_STATUS_BP_1_OF_4;
+ break;
+
+ case MV_WP_UPR_1OF2:
+ wpMask = MV_MX25L_STATUS_BP_1_OF_2;
+ break;
+
+ case MV_WP_ALL:
+ wpMask = MV_MX25L_STATUS_BP_ALL;
+ break;
+
+ default:
+            DB(mvOsPrintf("%s WARNING: Invalid parameter WP region!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+ }
+    /* check if the manufacturer is SPANSION; use the SPANSION WP encoding */
+ else if (pFlinfo->manufacturerId == MV_SPANSION_MANF_ID)
+ {
+ switch (wpRegion)
+ {
+ case MV_WP_NONE:
+ wpMask = MV_S25FL_STATUS_BP_NONE;
+ break;
+
+ case MV_WP_UPR_1OF128:
+            DB(mvOsPrintf("%s WARNING: Invalid option for this flash chip!\n", __FUNCTION__);)
+ return MV_NOT_SUPPORTED;
+
+ case MV_WP_UPR_1OF64:
+ wpMask = MV_S25FL_STATUS_BP_1_OF_64;
+ break;
+
+ case MV_WP_UPR_1OF32:
+ wpMask = MV_S25FL_STATUS_BP_1_OF_32;
+ break;
+
+ case MV_WP_UPR_1OF16:
+ wpMask = MV_S25FL_STATUS_BP_1_OF_16;
+ break;
+
+ case MV_WP_UPR_1OF8:
+ wpMask = MV_S25FL_STATUS_BP_1_OF_8;
+ break;
+
+ case MV_WP_UPR_1OF4:
+ wpMask = MV_S25FL_STATUS_BP_1_OF_4;
+ break;
+
+ case MV_WP_UPR_1OF2:
+ wpMask = MV_S25FL_STATUS_BP_1_OF_2;
+ break;
+
+ case MV_WP_ALL:
+ wpMask = MV_S25FL_STATUS_BP_ALL;
+ break;
+
+
+ default:
+            DB(mvOsPrintf("%s WARNING: Invalid parameter WP region!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+ }
+ else
+ {
+        DB(mvOsPrintf("%s WARNING: Invalid parameter Manufacturer ID!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+
+ /* Verify that the SRWD bit is always set - register is s/w locked */
+ wpMask |= MV_SFLASH_STATUS_REG_SRWD_MASK;
+
+ return mvStatusRegSet(pFlinfo, wpMask);
+}
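+
+#if 0
+/* Usage sketch (illustrative, not built): protect the upper half of the flash
+ * and read the setting back. Assumes the status register is not hardware
+ * protected (W/Vpp high), so that the WRSR issued by mvStatusRegSet() takes
+ * effect.
+ */
+static void mvSFlashWpExample(MV_SFLASH_INFO *pFlinfo)
+{
+	MV_SFLASH_WP_REGION region;
+
+	if (mvSFlashWpRegionSet(pFlinfo, MV_WP_UPR_1OF2) != MV_OK)
+		return;
+
+	if (mvSFlashWpRegionGet(pFlinfo, &region) == MV_OK)
+		mvOsPrintf("WP region: %d\n", region);
+}
+#endif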
+
+/*******************************************************************************
+* mvSFlashWpRegionGet - Get the Write-Protected region configured
+*
+* DESCRIPTION:
+* Get from the chip the Write-Protected region configured
+*
+* INPUT:
+* pFlinfo: pointer to the Flash information structure
+* pWpRegion: pointer to the variable to return the WP region in
+*
+* OUTPUT:
+* wpRegion: pointer to the variable holding the WP region configured
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashWpRegionGet (MV_SFLASH_INFO * pFlinfo, MV_SFLASH_WP_REGION * pWpRegion)
+{
+ MV_STATUS ret;
+ MV_U8 reg;
+
+ /* check for NULL pointer */
+ if ((pFlinfo == NULL) || (pWpRegion == NULL))
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ /* Protection - check if the model was detected */
+ if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+ {
+        DB(mvOsPrintf("%s WARNING: Invalid parameter index!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+
+ if ((ret = mvStatusRegGet(pFlinfo, &reg)) != MV_OK)
+ return ret;
+
+ /* Check if the chip is an ST flash; then WP supports only 3 bits */
+ if (pFlinfo->manufacturerId == MV_M25PXXX_ST_MANF_ID)
+ {
+ switch ((reg & MV_M25P_STATUS_REG_WP_MASK))
+ {
+ case MV_M25P_STATUS_BP_NONE:
+ *pWpRegion = MV_WP_NONE;
+ break;
+
+ case MV_M25P_STATUS_BP_1_OF_64:
+ *pWpRegion = MV_WP_UPR_1OF64;
+ break;
+
+ case MV_M25P_STATUS_BP_1_OF_32:
+ *pWpRegion = MV_WP_UPR_1OF32;
+ break;
+
+ case MV_M25P_STATUS_BP_1_OF_16:
+ *pWpRegion = MV_WP_UPR_1OF16;
+ break;
+
+ case MV_M25P_STATUS_BP_1_OF_8:
+ *pWpRegion = MV_WP_UPR_1OF8;
+ break;
+
+ case MV_M25P_STATUS_BP_1_OF_4:
+ *pWpRegion = MV_WP_UPR_1OF4;
+ break;
+
+ case MV_M25P_STATUS_BP_1_OF_2:
+ *pWpRegion = MV_WP_UPR_1OF2;
+ break;
+
+ case MV_M25P_STATUS_BP_ALL:
+ *pWpRegion = MV_WP_ALL;
+ break;
+
+ default:
+ DB(mvOsPrintf("%s WARNING: Unidentified WP region in h/w!\n", __FUNCTION__);)
+ return MV_BAD_VALUE;
+ }
+ }
+    /* check if the manufacturer is MXIC; the WP field is then 4 bits */
+ else if (pFlinfo->manufacturerId == MV_MXIC_MANF_ID)
+ {
+ switch ((reg & MV_MX25L_STATUS_REG_WP_MASK))
+ {
+ case MV_MX25L_STATUS_BP_NONE:
+ *pWpRegion = MV_WP_NONE;
+ break;
+
+ case MV_MX25L_STATUS_BP_1_OF_128:
+ *pWpRegion = MV_WP_UPR_1OF128;
+ break;
+
+ case MV_MX25L_STATUS_BP_1_OF_64:
+ *pWpRegion = MV_WP_UPR_1OF64;
+ break;
+
+ case MV_MX25L_STATUS_BP_1_OF_32:
+ *pWpRegion = MV_WP_UPR_1OF32;
+ break;
+
+ case MV_MX25L_STATUS_BP_1_OF_16:
+ *pWpRegion = MV_WP_UPR_1OF16;
+ break;
+
+ case MV_MX25L_STATUS_BP_1_OF_8:
+ *pWpRegion = MV_WP_UPR_1OF8;
+ break;
+
+ case MV_MX25L_STATUS_BP_1_OF_4:
+ *pWpRegion = MV_WP_UPR_1OF4;
+ break;
+
+ case MV_MX25L_STATUS_BP_1_OF_2:
+ *pWpRegion = MV_WP_UPR_1OF2;
+ break;
+
+ case MV_MX25L_STATUS_BP_ALL:
+ *pWpRegion = MV_WP_ALL;
+ break;
+
+ default:
+ DB(mvOsPrintf("%s WARNING: Unidentified WP region in h/w!\n", __FUNCTION__);)
+ return MV_BAD_VALUE;
+ }
+ }
+    /* Check if the chip is a SPANSION flash; then WP supports only 3 bits */
+ else if (pFlinfo->manufacturerId == MV_SPANSION_MANF_ID)
+ {
+ switch ((reg & MV_S25FL_STATUS_REG_WP_MASK))
+ {
+ case MV_S25FL_STATUS_BP_NONE:
+ *pWpRegion = MV_WP_NONE;
+ break;
+
+ case MV_S25FL_STATUS_BP_1_OF_64:
+ *pWpRegion = MV_WP_UPR_1OF64;
+ break;
+
+ case MV_S25FL_STATUS_BP_1_OF_32:
+ *pWpRegion = MV_WP_UPR_1OF32;
+ break;
+
+ case MV_S25FL_STATUS_BP_1_OF_16:
+ *pWpRegion = MV_WP_UPR_1OF16;
+ break;
+
+ case MV_S25FL_STATUS_BP_1_OF_8:
+ *pWpRegion = MV_WP_UPR_1OF8;
+ break;
+
+ case MV_S25FL_STATUS_BP_1_OF_4:
+ *pWpRegion = MV_WP_UPR_1OF4;
+ break;
+
+ case MV_S25FL_STATUS_BP_1_OF_2:
+ *pWpRegion = MV_WP_UPR_1OF2;
+ break;
+
+ case MV_S25FL_STATUS_BP_ALL:
+ *pWpRegion = MV_WP_ALL;
+ break;
+
+ default:
+ DB(mvOsPrintf("%s WARNING: Unidentified WP region in h/w!\n", __FUNCTION__);)
+ return MV_BAD_VALUE;
+ }
+ }
+ else
+ {
+        DB(mvOsPrintf("%s WARNING: Invalid parameter Manufacturer ID!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvSFlashStatRegLock - Lock the status register for writing - W/Vpp
+* pin should be low to take effect
+*
+* DESCRIPTION:
+* Lock the access to the Status Register for writing. This will
+* cause the flash to enter the hardware protection mode if the W/Vpp
+*       is low. If W/Vpp is high, the chip will be in soft protection mode, but
+*       the register will continue to be writable if the WREN sequence is used.
+*
+* INPUT:
+* pFlinfo: pointer to the Flash information structure
+*       srLock: enable/disable (MV_TRUE/MV_FALSE) status register lock mechanism
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashStatRegLock (MV_SFLASH_INFO * pFlinfo, MV_BOOL srLock)
+{
+ MV_STATUS ret;
+ MV_U8 reg;
+
+ /* check for NULL pointer */
+ if (pFlinfo == NULL)
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ /* Protection - check if the model was detected */
+ if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+ {
+        DB(mvOsPrintf("%s WARNING: Invalid parameter index!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+
+ if ((ret = mvStatusRegGet(pFlinfo, &reg)) != MV_OK)
+ return ret;
+
+ if (srLock)
+ reg |= MV_SFLASH_STATUS_REG_SRWD_MASK;
+ else
+ reg &= ~MV_SFLASH_STATUS_REG_SRWD_MASK;
+
+ return mvStatusRegSet(pFlinfo, reg);
+}
+
+/*******************************************************************************
+* mvSFlashSizeGet - Get the size of the SPI flash
+*
+* DESCRIPTION:
+* based on the sector number and size of each sector calculate the total
+* size of the flash memory.
+*
+* INPUT:
+* pFlinfo: pointer to the Flash information structure
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Size of the flash in bytes.
+*
+*
+*******************************************************************************/
+MV_U32 mvSFlashSizeGet (MV_SFLASH_INFO * pFlinfo)
+{
+ /* check for NULL pointer */
+ if (pFlinfo == NULL)
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return 0;
+ }
+
+ return (pFlinfo->sectorSize * pFlinfo->sectorNumber);
+}
+
+/*******************************************************************************
+* mvSFlashPowerSaveEnter - Cause the flash device to go into power save mode
+*
+* DESCRIPTION:
+* Enter a special power save mode.
+*
+* INPUT:
+* pFlinfo: pointer to the Flash information structure
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashPowerSaveEnter(MV_SFLASH_INFO * pFlinfo)
+{
+ MV_STATUS ret;
+ MV_U8 cmd[MV_SFLASH_DP_CMND_LENGTH];
+
+
+ /* check for NULL pointer */
+ if (pFlinfo == NULL)
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+ }
+
+ /* Protection - check if the model was detected */
+ if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+ {
+        DB(mvOsPrintf("%s WARNING: Invalid parameter index!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+
+ /* check that power save mode is supported in the specific device */
+ if (sflash[pFlinfo->index].opcdPwrSave == MV_SFLASH_NO_SPECIFIC_OPCD)
+ {
+ DB(mvOsPrintf("%s WARNING: Power save not supported for this device!\n", __FUNCTION__);)
+ return MV_NOT_SUPPORTED;
+ }
+
+ cmd[0] = sflash[pFlinfo->index].opcdPwrSave;
+
+ if ((ret = mvSpiWriteThenWrite(cmd, MV_SFLASH_DP_CMND_LENGTH, NULL, 0)) != MV_OK)
+ return ret;
+
+ return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvSFlashPowerSaveExit - Cause the flash device to exit the power save mode
+*
+* DESCRIPTION:
+* Exit the deep power save mode.
+*
+* INPUT:
+* pFlinfo: pointer to the Flash information structure
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashPowerSaveExit (MV_SFLASH_INFO * pFlinfo)
+{
+ MV_STATUS ret;
+ MV_U8 cmd[MV_SFLASH_RES_CMND_LENGTH];
+
+
+ /* check for NULL pointer */
+ if (pFlinfo == NULL)
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+ }
+
+ /* Protection - check if the model was detected */
+ if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+ {
+        DB(mvOsPrintf("%s WARNING: Invalid parameter index!\n", __FUNCTION__);)
+ return MV_BAD_PARAM;
+ }
+
+ /* check that power save mode is supported in the specific device */
+ if (sflash[pFlinfo->index].opcdRES == MV_SFLASH_NO_SPECIFIC_OPCD)
+ {
+ DB(mvOsPrintf("%s WARNING: Read Electronic Signature not supported for this device!\n", __FUNCTION__);)
+ return MV_NOT_SUPPORTED;
+ }
+
+ cmd[0] = sflash[pFlinfo->index].opcdRES;
+
+ if ((ret = mvSpiWriteThenWrite(cmd, MV_SFLASH_RES_CMND_LENGTH, NULL, 0)) != MV_OK)
+ return ret;
+
+ /* add the delay needed for the device to wake up */
+ mvOsDelay(MV_MXIC_DP_EXIT_DELAY); /* 30 ms */
+
+ return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvSFlashModelGet - Retrieve the string with the device manufacturer and model
+*
+* DESCRIPTION:
+*       Retrieve the string with the device manufacturer and model
+*
+* INPUT:
+* pFlinfo: pointer to the Flash information structure
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* pointer to the string indicating the device manufacturer and model
+*
+*
+*******************************************************************************/
+const MV_8 * mvSFlashModelGet (MV_SFLASH_INFO * pFlinfo)
+{
+ static const MV_8 * unknModel = (const MV_8 *)"Unknown";
+
+ /* check for NULL pointer */
+ if (pFlinfo == NULL)
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return unknModel;
+ }
+
+ /* Protection - check if the model was detected */
+ if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+ {
+        DB(mvOsPrintf("%s WARNING: Invalid parameter index!\n", __FUNCTION__);)
+ return unknModel;
+ }
+
+ return sflash[pFlinfo->index].deviceModel;
+}
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.h
new file mode 100644
index 000000000..f441a5cf5
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.h
@@ -0,0 +1,166 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSFlashH
+#define __INCmvSFlashH
+
+#include "mvTypes.h"
+
+/* MCAROS */
+#define MV_SFLASH_PAGE_ALLIGN_MASK(pgSz) (pgSz-1)
+#define MV_ARRAY_SIZE(a) ((sizeof(a)) / (sizeof(a[0])))
+
+/* Constants */
+#define MV_INVALID_DEVICE_NUMBER 0xFFFFFFFF
+/* 10 MHz is the minimum possible SPI frequency when tclk is set to 200MHz */
+#define MV_SFLASH_BASIC_SPI_FREQ 10000000
+/* enumerations */
+typedef enum
+{
+ MV_WP_NONE, /* Unprotect the whole chip */
+ MV_WP_UPR_1OF128, /* Write protect the upper 1/128 part */
+ MV_WP_UPR_1OF64, /* Write protect the upper 1/64 part */
+ MV_WP_UPR_1OF32, /* Write protect the upper 1/32 part */
+ MV_WP_UPR_1OF16, /* Write protect the upper 1/16 part */
+ MV_WP_UPR_1OF8, /* Write protect the upper 1/8 part */
+ MV_WP_UPR_1OF4, /* Write protect the upper 1/4 part */
+ MV_WP_UPR_1OF2, /* Write protect the upper 1/2 part */
+ MV_WP_ALL /* Write protect the whole chip */
+} MV_SFLASH_WP_REGION;
+
+/* Type Definitions */
+typedef struct
+{
+ MV_U8 opcdWREN; /* Write enable opcode */
+ MV_U8 opcdWRDI; /* Write disable opcode */
+ MV_U8 opcdRDID; /* Read ID opcode */
+ MV_U8 opcdRDSR; /* Read Status Register opcode */
+ MV_U8 opcdWRSR; /* Write Status register opcode */
+ MV_U8 opcdREAD; /* Read opcode */
+ MV_U8 opcdFSTRD; /* Fast Read opcode */
+ MV_U8 opcdPP; /* Page program opcode */
+ MV_U8 opcdSE; /* Sector erase opcode */
+ MV_U8 opcdBE; /* Bulk erase opcode */
+ MV_U8 opcdRES; /* Read electronic signature */
+ MV_U8 opcdPwrSave; /* Go into power save mode */
+ MV_U32 sectorSize; /* Size of each sector */
+ MV_U32 sectorNumber; /* Number of sectors */
+ MV_U32 pageSize; /* size of each page */
+ const char * deviceModel; /* string with the device model */
+ MV_U32 manufacturerId; /* The manufacturer ID */
+ MV_U32 deviceId; /* Device ID */
+ MV_U32 spiMaxFreq; /* The MAX frequency that can be used with the device */
+ MV_U32 spiMaxFastFreq; /* The MAX frequency that can be used with the device for fast reads */
+    MV_U32	spiFastRdDummyBytes; /* Number of dummy bytes to read before real data when working in fast read mode. */
+} MV_SFLASH_DEVICE_PARAMS;
+
+typedef struct
+{
+ MV_U32 baseAddr; /* Flash Base Address used in fast mode */
+ MV_U8 manufacturerId; /* Manufacturer ID */
+ MV_U16 deviceId; /* Device ID */
+ MV_U32 sectorSize; /* Size of each sector - all the same */
+ MV_U32 sectorNumber; /* Number of sectors */
+	MV_U32	pageSize;		/* Page size - affects alignment */
+ MV_U32 index; /* index of the device in the sflash table (internal parameter) */
+} MV_SFLASH_INFO;
+
+/* Function Prototypes */
+/* Init */
+MV_STATUS mvSFlashInit (MV_SFLASH_INFO * pFlinfo);
+
+/* erase */
+MV_STATUS mvSFlashSectorErase (MV_SFLASH_INFO * pFlinfo, MV_U32 secNumber);
+MV_STATUS mvSFlashChipErase (MV_SFLASH_INFO * pFlinfo);
+
+/* Read */
+MV_STATUS mvSFlashBlockRd (MV_SFLASH_INFO * pFlinfo, MV_U32 offset,
+ MV_U8* pReadBuff, MV_U32 buffSize);
+MV_STATUS mvSFlashFastBlockRd (MV_SFLASH_INFO * pFlinfo, MV_U32 offset,
+ MV_U8* pReadBuff, MV_U32 buffSize);
+
+/* Write a buffer regardless of page boundaries and of the size limit per Page Program command */
+MV_STATUS mvSFlashBlockWr (MV_SFLASH_INFO * pFlinfo, MV_U32 offset,
+ MV_U8* pWriteBuff, MV_U32 buffSize);
+/* Get IDs */
+MV_STATUS mvSFlashIdGet (MV_SFLASH_INFO * pFlinfo, MV_U8* pManId, MV_U16* pDevId);
+
+/* Set and Get the Write Protection region - if the Status register is not locked */
+MV_STATUS mvSFlashWpRegionSet (MV_SFLASH_INFO * pFlinfo, MV_SFLASH_WP_REGION wpRegion);
+MV_STATUS mvSFlashWpRegionGet (MV_SFLASH_INFO * pFlinfo, MV_SFLASH_WP_REGION * pWpRegion);
+
+/* Lock the status register for writing - W/Vpp pin should be low to take effect */
+MV_STATUS mvSFlashStatRegLock (MV_SFLASH_INFO * pFlinfo, MV_BOOL srLock);
+
+/* Get the region sizes */
+MV_U32 mvSFlashSizeGet (MV_SFLASH_INFO * pFlinfo);
+
+/* Cause the flash device to go into power save mode */
+MV_STATUS mvSFlashPowerSaveEnter(MV_SFLASH_INFO * pFlinfo);
+MV_STATUS mvSFlashPowerSaveExit (MV_SFLASH_INFO * pFlinfo);
+
+/* Retrieve the string with the device manufacturer and model */
+const MV_8 * mvSFlashModelGet (MV_SFLASH_INFO * pFlinfo);
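+
+/*
+ * Usage sketch (assumed board values): the caller zeroes an MV_SFLASH_INFO,
+ * fills in the board-specific flash base address and lets mvSFlashInit()
+ * probe the device and complete the geometry fields, e.g.:
+ *
+ *  MV_SFLASH_INFO flInfo;
+ *  MV_U8 buff[256];
+ *
+ *  memset(&flInfo, 0, sizeof(flInfo));
+ *  flInfo.baseAddr = 0xF8000000;  // hypothetical base address
+ *  if (mvSFlashInit(&flInfo) == MV_OK)
+ *      mvSFlashBlockRd(&flInfo, 0, buff, sizeof(buff));
+ */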
+
+#endif /* __INCmvSFlashH */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlashSpec.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlashSpec.h
new file mode 100644
index 000000000..eeb44262e
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlashSpec.h
@@ -0,0 +1,233 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSFlashSpecH
+#define __INCmvSFlashSpecH
+
+/* Constants */
+#define MV_SFLASH_READ_CMND_LENGTH 4 /* 1B opcode + 3B address */
+#define MV_SFLASH_SE_CMND_LENGTH 4 /* 1B opcode + 3B address */
+#define MV_SFLASH_BE_CMND_LENGTH 1 /* 1B opcode */
+#define MV_SFLASH_PP_CMND_LENGTH 4 /* 1B opcode + 3B address */
+#define MV_SFLASH_WREN_CMND_LENGTH 1 /* 1B opcode */
+#define MV_SFLASH_WRDI_CMND_LENGTH 1 /* 1B opcode */
+#define MV_SFLASH_RDID_CMND_LENGTH 1 /* 1B opcode */
+#define MV_SFLASH_RDID_REPLY_LENGTH 3 /* 1B manf ID and 2B device ID */
+#define MV_SFLASH_RDSR_CMND_LENGTH 1 /* 1B opcode */
+#define MV_SFLASH_RDSR_REPLY_LENGTH 1 /* 1B status */
+#define MV_SFLASH_WRSR_CMND_LENGTH 2 /* 1B opcode + 1B status value */
+#define MV_SFLASH_DP_CMND_LENGTH 1 /* 1B opcode */
+#define MV_SFLASH_RES_CMND_LENGTH 1 /* 1B opcode */
+
+/* Status Register Bit Masks */
+#define MV_SFLASH_STATUS_REG_WIP_OFFSET 0 /* bit 0; write in progress */
+#define MV_SFLASH_STATUS_REG_WP_OFFSET      2 /* bits 2-5 (device dependent); write protect option */
+#define MV_SFLASH_STATUS_REG_SRWD_OFFSET 7 /* bit 7; lock status register write */
+#define MV_SFLASH_STATUS_REG_WIP_MASK (0x1 << MV_SFLASH_STATUS_REG_WIP_OFFSET)
+#define MV_SFLASH_STATUS_REG_SRWD_MASK (0x1 << MV_SFLASH_STATUS_REG_SRWD_OFFSET)
+
+#define MV_SFLASH_MAX_WAIT_LOOP 1000000
+#define MV_SFLASH_CHIP_ERASE_MAX_WAIT_LOOP 0x50000000
+
+#define MV_SFLASH_DEFAULT_RDID_OPCD 0x9F /* Default Read ID */
+#define MV_SFLASH_DEFAULT_WREN_OPCD 0x06 /* Default Write Enable */
+#define MV_SFLASH_NO_SPECIFIC_OPCD 0x00
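+
+/*
+ * Illustrative sketch: the WIP mask and wait-loop limits above are typically
+ * used to poll the status register until a program/erase finishes, roughly as
+ * below (mvSpiWriteThenRead() comes from the SPI HAL; the RDSR opcode is the
+ * device-specific one, e.g. MV_M25P_RDSR_CMND_OPCD defined further down):
+ *
+ *  MV_U8 cmd = MV_M25P_RDSR_CMND_OPCD;
+ *  MV_U8 status;
+ *  MV_U32 i;
+ *
+ *  for (i = 0; i < MV_SFLASH_MAX_WAIT_LOOP; i++)
+ *  {
+ *      mvSpiWriteThenRead(&cmd, MV_SFLASH_RDSR_CMND_LENGTH, &status,
+ *                         MV_SFLASH_RDSR_REPLY_LENGTH, 0);
+ *      if (!(status & MV_SFLASH_STATUS_REG_WIP_MASK))
+ *          break;  // program/erase no longer in progress
+ *  }
+ */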
+
+/********************************/
+/* ST M25Pxxx Device Specific */
+/********************************/
+
+/* Manufacturer IDs and Device IDs for SFLASHs supported by the driver */
+#define MV_M25PXXX_ST_MANF_ID 0x20
+#define MV_M25P32_DEVICE_ID 0x2016
+#define MV_M25P32_MAX_SPI_FREQ 20000000 /* 20MHz */
+#define MV_M25P32_MAX_FAST_SPI_FREQ 50000000 /* 50MHz */
+#define MV_M25P32_FAST_READ_DUMMY_BYTES 1
+#define MV_M25P64_DEVICE_ID 0x2017
+#define MV_M25P64_MAX_SPI_FREQ 20000000 /* 20MHz */
+#define MV_M25P64_MAX_FAST_SPI_FREQ 50000000 /* 50MHz */
+#define MV_M25P64_FAST_READ_DUMMY_BYTES 1
+#define MV_M25P128_DEVICE_ID 0x2018
+#define MV_M25P128_MAX_SPI_FREQ 20000000 /* 20MHz */
+#define MV_M25P128_MAX_FAST_SPI_FREQ 50000000 /* 50MHz */
+#define MV_M25P128_FAST_READ_DUMMY_BYTES 1
+
+
+/* Sector Sizes and population per device model*/
+#define MV_M25P32_SECTOR_SIZE 0x10000 /* 64K */
+#define MV_M25P64_SECTOR_SIZE 0x10000 /* 64K */
+#define MV_M25P128_SECTOR_SIZE 0x40000 /* 256K */
+#define MV_M25P32_SECTOR_NUMBER 64
+#define MV_M25P64_SECTOR_NUMBER 128
+#define MV_M25P128_SECTOR_NUMBER 64
+#define MV_M25P_PAGE_SIZE 0x100 /* 256 byte */
+
+#define MV_M25P_WREN_CMND_OPCD 0x06 /* Write Enable */
+#define MV_M25P_WRDI_CMND_OPCD 0x04 /* Write Disable */
+#define MV_M25P_RDID_CMND_OPCD 0x9F /* Read ID */
+#define MV_M25P_RDSR_CMND_OPCD 0x05 /* Read Status Register */
+#define MV_M25P_WRSR_CMND_OPCD 0x01 /* Write Status Register */
+#define MV_M25P_READ_CMND_OPCD 0x03 /* Sequential Read */
+#define MV_M25P_FAST_RD_CMND_OPCD 0x0B /* Fast Read */
+#define MV_M25P_PP_CMND_OPCD 0x02 /* Page Program */
+#define MV_M25P_SE_CMND_OPCD 0xD8 /* Sector Erase */
+#define MV_M25P_BE_CMND_OPCD 0xC7 /* Bulk Erase */
+#define MV_M25P_RES_CMND_OPCD 0xAB /* Read Electronic Signature */
+
+/* Status Register Write Protect Bit Masks - 3bits */
+#define MV_M25P_STATUS_REG_WP_MASK (0x07 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_M25P_STATUS_BP_NONE (0x00 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_M25P_STATUS_BP_1_OF_64 (0x01 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_M25P_STATUS_BP_1_OF_32 (0x02 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_M25P_STATUS_BP_1_OF_16 (0x03 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_M25P_STATUS_BP_1_OF_8 (0x04 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_M25P_STATUS_BP_1_OF_4 (0x05 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_M25P_STATUS_BP_1_OF_2 (0x06 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_M25P_STATUS_BP_ALL (0x07 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+
+/************************************/
+/* MXIC MX25L6405 Device Specific */
+/************************************/
+
+/* Manufacturer IDs and Device IDs for SFLASHs supported by the driver */
+#define MV_MXIC_MANF_ID 0xC2
+#define MV_MX25L6405_DEVICE_ID 0x2017
+#define MV_MX25L6405_MAX_SPI_FREQ 20000000 /* 20MHz */
+#define MV_MX25L6405_MAX_FAST_SPI_FREQ 50000000 /* 50MHz */
+#define MV_MX25L6405_FAST_READ_DUMMY_BYTES 1
+#define MV_MXIC_DP_EXIT_DELAY 30 /* 30 ms */
+
+/* Sector Sizes and population per device model*/
+#define MV_MX25L6405_SECTOR_SIZE 0x10000 /* 64K */
+#define MV_MX25L6405_SECTOR_NUMBER 128
+#define MV_MXIC_PAGE_SIZE 0x100 /* 256 byte */
+
+#define MV_MX25L_WREN_CMND_OPCD 0x06 /* Write Enable */
+#define MV_MX25L_WRDI_CMND_OPCD 0x04 /* Write Disable */
+#define MV_MX25L_RDID_CMND_OPCD 0x9F /* Read ID */
+#define MV_MX25L_RDSR_CMND_OPCD 0x05 /* Read Status Register */
+#define MV_MX25L_WRSR_CMND_OPCD 0x01 /* Write Status Register */
+#define MV_MX25L_READ_CMND_OPCD 0x03 /* Sequential Read */
+#define MV_MX25L_FAST_RD_CMND_OPCD 0x0B /* Fast Read */
+#define MV_MX25L_PP_CMND_OPCD 0x02 /* Page Program */
+#define MV_MX25L_SE_CMND_OPCD 0xD8 /* Sector Erase */
+#define MV_MX25L_BE_CMND_OPCD 0xC7 /* Bulk Erase */
+#define MV_MX25L_DP_CMND_OPCD 0xB9 /* Deep Power Down */
+#define MV_MX25L_RES_CMND_OPCD 0xAB /* Read Electronic Signature */
+
+/* Status Register Write Protect Bit Masks - 4bits */
+#define MV_MX25L_STATUS_REG_WP_MASK (0x0F << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_MX25L_STATUS_BP_NONE (0x00 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_MX25L_STATUS_BP_1_OF_128 (0x01 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_MX25L_STATUS_BP_1_OF_64 (0x02 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_MX25L_STATUS_BP_1_OF_32 (0x03 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_MX25L_STATUS_BP_1_OF_16 (0x04 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_MX25L_STATUS_BP_1_OF_8 (0x05 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_MX25L_STATUS_BP_1_OF_4 (0x06 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_MX25L_STATUS_BP_1_OF_2 (0x07 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_MX25L_STATUS_BP_ALL (0x0F << MV_SFLASH_STATUS_REG_WP_OFFSET)
+
+/************************************/
+/* SPANSION S25FL128P Device Specific */
+/************************************/
+
+/* Manufacturer IDs and Device IDs for SFLASHs supported by the driver */
+#define MV_SPANSION_MANF_ID 0x01
+#define MV_S25FL128_DEVICE_ID 0x2018
+#define MV_S25FL128_MAX_SPI_FREQ 33000000 /* 33MHz */
+#define MV_S25FL128_MAX_FAST_SPI_FREQ 104000000 /* 104MHz */
+#define MV_S25FL128_FAST_READ_DUMMY_BYTES 1
+
+/* Sector Sizes and population per device model*/
+#define MV_S25FL128_SECTOR_SIZE 0x40000 /* 256K */
+#define MV_S25FL128_SECTOR_NUMBER 64
+#define MV_S25FL_PAGE_SIZE 0x100 /* 256 byte */
+
+#define MV_S25FL_WREN_CMND_OPCD 0x06 /* Write Enable */
+#define MV_S25FL_WRDI_CMND_OPCD 0x04 /* Write Disable */
+#define MV_S25FL_RDID_CMND_OPCD 0x9F /* Read ID */
+#define MV_S25FL_RDSR_CMND_OPCD 0x05 /* Read Status Register */
+#define MV_S25FL_WRSR_CMND_OPCD 0x01 /* Write Status Register */
+#define MV_S25FL_READ_CMND_OPCD 0x03 /* Sequential Read */
+#define MV_S25FL_FAST_RD_CMND_OPCD 0x0B /* Fast Read */
+#define MV_S25FL_PP_CMND_OPCD 0x02 /* Page Program */
+#define MV_S25FL_SE_CMND_OPCD 0xD8 /* Sector Erase */
+#define MV_S25FL_BE_CMND_OPCD 0xC7 /* Bulk Erase */
+#define MV_S25FL_DP_CMND_OPCD 0xB9 /* Deep Power Down */
+#define MV_S25FL_RES_CMND_OPCD 0xAB /* Read Electronic Signature */
+
+/* Status Register Write Protect Bit Masks - 4bits */
+#define MV_S25FL_STATUS_REG_WP_MASK (0x0F << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_S25FL_STATUS_BP_NONE (0x00 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_S25FL_STATUS_BP_1_OF_128 (0x01 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_S25FL_STATUS_BP_1_OF_64 (0x02 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_S25FL_STATUS_BP_1_OF_32 (0x03 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_S25FL_STATUS_BP_1_OF_16 (0x04 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_S25FL_STATUS_BP_1_OF_8 (0x05 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_S25FL_STATUS_BP_1_OF_4 (0x06 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_S25FL_STATUS_BP_1_OF_2 (0x07 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define MV_S25FL_STATUS_BP_ALL (0x0F << MV_SFLASH_STATUS_REG_WP_OFFSET)
+
+#endif /* __INCmvSFlashSpecH */
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvCompVer.txt b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvCompVer.txt
new file mode 100644
index 000000000..85bfa612c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvCompVer.txt
@@ -0,0 +1,4 @@
+Global HAL Version: FEROCEON_HAL_3_1_7
+Unit HAL Version: 3.1.3
+Description: This component includes an implementation of the unit HAL drivers
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.c
new file mode 100644
index 000000000..39e0b720d
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.c
@@ -0,0 +1,576 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "spi/mvSpi.h"
+#include "spi/mvSpiSpec.h"
+
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+/* #define MV_DEBUG */
+#ifdef MV_DEBUG
+#define DB(x) x
+#define mvOsPrintf printf
+#else
+#define DB(x)
+#endif
+
+
+/*******************************************************************************
+* mvSpi16bitDataTxRx - Transmit and receive data
+*
+* DESCRIPTION:
+*       Transmit data and block until the transfer completes
+*
+********************************************************************************/
+static MV_STATUS mvSpi16bitDataTxRx (MV_U16 txData, MV_U16 * pRxData)
+{
+ MV_U32 i;
+ MV_BOOL ready = MV_FALSE;
+
+ /* First clear the bit in the interrupt cause register */
+ MV_REG_WRITE(MV_SPI_INT_CAUSE_REG, 0x0);
+
+ /* Transmit data */
+ MV_REG_WRITE(MV_SPI_DATA_OUT_REG, MV_16BIT_LE(txData));
+
+ /* wait with timeout for memory ready */
+ for (i=0; i<MV_SPI_WAIT_RDY_MAX_LOOP; i++)
+ {
+ if (MV_REG_READ(MV_SPI_INT_CAUSE_REG))
+ {
+ ready = MV_TRUE;
+ break;
+ }
+#ifdef MV_SPI_SLEEP_ON_WAIT
+ mvOsSleep(1);
+#endif /* MV_SPI_SLEEP_ON_WAIT */
+ }
+
+ if (!ready)
+ return MV_TIMEOUT;
+
+ /* check that the RX data is needed */
+ if (pRxData)
+ {
+        if ((MV_U32)pRxData & 0x1) /* check if address is not aligned to 16bit */
+ {
+#if defined(MV_CPU_LE)
+ /* perform the data write to the buffer in two stages with 8bit each */
+ MV_U8 * bptr = (MV_U8*)pRxData;
+ MV_U16 data = MV_16BIT_LE(MV_REG_READ(MV_SPI_DATA_IN_REG));
+ *bptr = (data & 0xFF);
+ ++bptr;
+ *bptr = ((data >> 8) & 0xFF);
+
+#elif defined(MV_CPU_BE)
+
+ /* perform the data write to the buffer in two stages with 8bit each */
+ MV_U8 * bptr = (MV_U8 *)pRxData;
+ MV_U16 data = MV_16BIT_LE(MV_REG_READ(MV_SPI_DATA_IN_REG));
+ *bptr = ((data >> 8) & 0xFF);
+ ++bptr;
+ *bptr = (data & 0xFF);
+
+#else
+    #error "CPU endianness isn't defined!\n"
+#endif
+
+ }
+ else
+ *pRxData = MV_16BIT_LE(MV_REG_READ(MV_SPI_DATA_IN_REG));
+ }
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvSpi8bitDataTxRx - Transmit and receive data (8bits)
+*
+* DESCRIPTION:
+*       Transmit data and block until the transfer completes
+*
+********************************************************************************/
+static MV_STATUS mvSpi8bitDataTxRx (MV_U8 txData, MV_U8 * pRxData)
+{
+ MV_U32 i;
+ MV_BOOL ready = MV_FALSE;
+
+ /* First clear the bit in the interrupt cause register */
+ MV_REG_WRITE(MV_SPI_INT_CAUSE_REG, 0x0);
+
+ /* Transmit data */
+ MV_REG_WRITE(MV_SPI_DATA_OUT_REG, txData);
+
+ /* wait with timeout for memory ready */
+ for (i=0; i<MV_SPI_WAIT_RDY_MAX_LOOP; i++)
+ {
+ if (MV_REG_READ(MV_SPI_INT_CAUSE_REG))
+ {
+ ready = MV_TRUE;
+ break;
+ }
+#ifdef MV_SPI_SLEEP_ON_WAIT
+ mvOsSleep(1);
+#endif /* MV_SPI_SLEEP_ON_WAIT */
+ }
+
+ if (!ready)
+ return MV_TIMEOUT;
+
+ /* check that the RX data is needed */
+ if (pRxData)
+ *pRxData = MV_REG_READ(MV_SPI_DATA_IN_REG);
+
+ return MV_OK;
+}
+
+/*
+#####################################################################################
+#####################################################################################
+*/
+
+/*******************************************************************************
+* mvSpiInit - Initialize the SPI controller
+*
+* DESCRIPTION:
+*       Perform the necessary initialization in order to be able to send and
+*       receive over the SPI interface.
+*
+* INPUT:
+* serialBaudRate: Baud rate (SPI clock frequency)
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSpiInit (MV_U32 serialBaudRate)
+{
+ MV_STATUS ret;
+
+ /* Set the serial clock */
+ if ((ret = mvSpiBaudRateSet(serialBaudRate)) != MV_OK)
+ return ret;
+
+ /* For devices in which the SPI is muxed on the MPP with other interfaces*/
+ mvMPPConfigToSPI();
+
+ /* Configure the default SPI mode to be 16bit */
+ MV_REG_BIT_SET(MV_SPI_IF_CONFIG_REG, MV_SPI_BYTE_LENGTH_MASK);
+
+ /* Fix ac timing on SPI in 6183, 6183L and 78x00 only */
+ if ( (mvCtrlModelGet() == MV_6183_DEV_ID) ||
+ (mvCtrlModelGet() == MV_6183L_DEV_ID) ||
+ (mvCtrlModelGet() == MV_78100_DEV_ID) ||
+ (mvCtrlModelGet() == MV_78200_DEV_ID) ||
+ (mvCtrlModelGet() == MV_76100_DEV_ID))
+ MV_REG_BIT_SET(MV_SPI_IF_CONFIG_REG, BIT14);
+
+ /* Verify that the CS is deasserted */
+ mvSpiCsDeassert();
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvSpiBaudRateSet - Set the Frequency of the SPI clock
+*
+* DESCRIPTION:
+*       Set the Prescale bits to adapt to the requested baud rate (the clock
+*       used for the SPI).
+*
+* INPUT:
+* serialBaudRate: Baud rate (SPI clock frequency)
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSpiBaudRateSet (MV_U32 serialBaudRate)
+{
+ MV_U8 i;
+ /* MV_U8 preScale[32] = {1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 2, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30};
+ */
+ MV_U8 preScale[14] = { 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30};
+ MV_U8 bestPrescaleIndx = 100;
+ MV_U32 minBaudOffset = 0xFFFFFFFF;
+ MV_U32 cpuClk = mvBoardTclkGet(); /*mvCpuPclkGet();*/
+ MV_U32 tempReg;
+
+ /* Find the best prescale configuration - less or equal */
+ for (i=0; i<14; i++)
+ {
+        /* check for higher - irrelevant */
+ if ((cpuClk / preScale[i]) > serialBaudRate)
+ continue;
+
+ /* check for exact fit */
+ if ((cpuClk / preScale[i]) == serialBaudRate)
+ {
+ bestPrescaleIndx = i;
+ break;
+ }
+
+ /* check if this is better than the previous one */
+ if ((serialBaudRate - (cpuClk / preScale[i])) < minBaudOffset)
+ {
+ minBaudOffset = (serialBaudRate - (cpuClk / preScale[i]));
+ bestPrescaleIndx = i;
+ }
+ }
+
+ if (bestPrescaleIndx > 14)
+ {
+ mvOsPrintf("%s ERROR: SPI baud rate prescale error!\n", __FUNCTION__);
+ return MV_OUT_OF_RANGE;
+ }
+
+ /* configure the Prescale */
+ tempReg = MV_REG_READ(MV_SPI_IF_CONFIG_REG);
+ tempReg = ((tempReg & ~MV_SPI_CLK_PRESCALE_MASK) | (bestPrescaleIndx + 0x12));
+ MV_REG_WRITE(MV_SPI_IF_CONFIG_REG, tempReg);
+
+ return MV_OK;
+}
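+
+/*
+ * Worked example: with tclk = 200MHz and a requested serialBaudRate of 20MHz,
+ * the loop above finds an exact fit at preScale[3] = 10 (200MHz / 10 = 20MHz),
+ * so bestPrescaleIndx = 3 and the value 3 + 0x12 = 0x15 is written to the
+ * prescale field - which, per the full 32-entry table commented out above,
+ * selects a divider of 10.
+ */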
+
+/*******************************************************************************
+* mvSpiCsAssert - Assert the Chip Select pin indicating a new transfer
+*
+* DESCRIPTION:
+* Assert The chip select - used to select an external SPI device
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Success or Error code.
+*
+********************************************************************************/
+MV_VOID mvSpiCsAssert(MV_VOID)
+{
+ /* For devices in which the SPI is muxed on the MPP with other interfaces*/
+ mvMPPConfigToSPI();
+ mvOsUDelay(1);
+ MV_REG_BIT_SET(MV_SPI_IF_CTRL_REG, MV_SPI_CS_ENABLE_MASK);
+}
+
+/*******************************************************************************
+* mvSpiCsDeassert - DeAssert the Chip Select pin indicating the end of a
+* SPI transfer sequence
+*
+* DESCRIPTION:
+* DeAssert the chip select pin
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Success or Error code.
+*
+********************************************************************************/
+MV_VOID mvSpiCsDeassert(MV_VOID)
+{
+ MV_REG_BIT_RESET(MV_SPI_IF_CTRL_REG, MV_SPI_CS_ENABLE_MASK);
+
+ /* For devices in which the SPI is muxed on the MPP with other interfaces*/
+ mvMPPConfigToDefault();
+}
+
+/*******************************************************************************
+* mvSpiRead - Read a buffer over the SPI interface
+*
+* DESCRIPTION:
+*       Receive (read) a buffer over the SPI interface in 16bit chunks. If the
+*       buffer size or address is not 16bit aligned, the whole transfer falls
+*       back to 8bit chunks. Chip select is not handled at this level.
+*
+* INPUT:
+* pRxBuff: Pointer to the buffer to hold the received data
+* buffSize: length of the pRxBuff
+*
+* OUTPUT:
+* pRxBuff: Pointer to the buffer with the received data
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSpiRead (MV_U8* pRxBuff, MV_U32 buffSize)
+{
+ MV_STATUS ret;
+ MV_U32 bytesLeft = buffSize;
+ MV_U16* rxPtr = (MV_U16*)pRxBuff;
+
+ /* check for null parameters */
+ if (pRxBuff == NULL)
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ /* Check that the buffer pointer and the buffer size are 16bit aligned */
+ if ((((MV_U32)buffSize & 1) == 0) && (((MV_U32)pRxBuff & 1) == 0))
+ {
+ /* Verify that the SPI mode is in 16bit mode */
+ MV_REG_BIT_SET(MV_SPI_IF_CONFIG_REG, MV_SPI_BYTE_LENGTH_MASK);
+
+ /* TX/RX as long we have complete 16bit chunks */
+ while (bytesLeft >= MV_SPI_16_BIT_CHUNK_SIZE)
+ {
+            /* Transmit and wait for the transfer to complete */
+ if ((ret = mvSpi16bitDataTxRx(MV_SPI_DUMMY_WRITE_16BITS, rxPtr)) != MV_OK)
+ return ret;
+
+ /* increment the pointers */
+ rxPtr++;
+ bytesLeft -= MV_SPI_16_BIT_CHUNK_SIZE;
+ }
+
+ }
+ else
+ {
+ /* Verify that the SPI mode is in 8bit mode */
+ MV_REG_BIT_RESET(MV_SPI_IF_CONFIG_REG, MV_SPI_BYTE_LENGTH_MASK);
+
+        /* TX/RX in 8bit chunks */
+ while (bytesLeft > 0)
+ {
+            /* Transmit and wait for the transfer to complete */
+ if ((ret = mvSpi8bitDataTxRx(MV_SPI_DUMMY_WRITE_8BITS, pRxBuff)) != MV_OK)
+ return ret;
+ /* increment the pointers */
+ pRxBuff++;
+ bytesLeft--;
+ }
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* mvSpiWrite - Transmit a buffer over the SPI interface
+*
+* DESCRIPTION:
+*       Transmit a buffer over the SPI interface in 16bit chunks. If the
+*       buffer size or address is not 16bit aligned, the whole transfer falls
+*       back to 8bit chunks. No chip select action is taken.
+*
+* INPUT:
+* pTxBuff: Pointer to the buffer holding the TX data
+* buffSize: length of the pTxBuff
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSpiWrite(MV_U8* pTxBuff, MV_U32 buffSize)
+{
+ MV_STATUS ret;
+ MV_U32 bytesLeft = buffSize;
+ MV_U16* txPtr = (MV_U16*)pTxBuff;
+
+ /* check for null parameters */
+ if (pTxBuff == NULL)
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ /* Check that the buffer pointer and the buffer size are 16bit aligned */
+ if ((((MV_U32)buffSize & 1) == 0) && (((MV_U32)pTxBuff & 1) == 0))
+ {
+ /* Verify that the SPI mode is in 16bit mode */
+ MV_REG_BIT_SET(MV_SPI_IF_CONFIG_REG, MV_SPI_BYTE_LENGTH_MASK);
+
+ /* TX/RX as long we have complete 16bit chunks */
+ while (bytesLeft >= MV_SPI_16_BIT_CHUNK_SIZE)
+ {
+            /* Transmit and wait for the transfer to complete */
+ if ((ret = mvSpi16bitDataTxRx(*txPtr, NULL)) != MV_OK)
+ return ret;
+
+ /* increment the pointers */
+ txPtr++;
+ bytesLeft -= MV_SPI_16_BIT_CHUNK_SIZE;
+ }
+ }
+ else
+ {
+
+ /* Verify that the SPI mode is in 8bit mode */
+ MV_REG_BIT_RESET(MV_SPI_IF_CONFIG_REG, MV_SPI_BYTE_LENGTH_MASK);
+
+        /* TX/RX in 8bit chunks */
+ while (bytesLeft > 0)
+ {
+            /* Transmit and wait for the transfer to complete */
+ if ((ret = mvSpi8bitDataTxRx(*pTxBuff, NULL)) != MV_OK)
+ return ret;
+
+ /* increment the pointers */
+ pTxBuff++;
+ bytesLeft--;
+ }
+ }
+
+ return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvSpiReadWrite - Read and Write a buffer simultaneously
+*
+* DESCRIPTION:
+*       Transmit and receive a buffer over the SPI in 16bit chunks. If the
+*       buffer size or address is not 16bit aligned, the whole transfer falls
+*       back to 8bit chunks. The SPI chip select is not handled implicitly.
+*
+* INPUT:
+* pRxBuff: Pointer to the buffer to write the RX info in
+* pTxBuff: Pointer to the buffer holding the TX info
+* buffSize: length of both the pTxBuff and pRxBuff
+*
+* OUTPUT:
+* pRxBuff: Pointer of the buffer holding the RX data
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSpiReadWrite(MV_U8* pRxBuff, MV_U8* pTxBuff, MV_U32 buffSize)
+{
+ MV_STATUS ret;
+ MV_U32 bytesLeft = buffSize;
+ MV_U16* txPtr = (MV_U16*)pTxBuff;
+ MV_U16* rxPtr = (MV_U16*)pRxBuff;
+
+ /* check for null parameters */
+ if ((pRxBuff == NULL) || (pTxBuff == NULL))
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ /* Check that the buffer pointer and the buffer size are 16bit aligned */
+ if ((((MV_U32)buffSize & 1) == 0) && (((MV_U32)pTxBuff & 1) == 0) && (((MV_U32)pRxBuff & 1) == 0))
+ {
+ /* Verify that the SPI mode is in 16bit mode */
+ MV_REG_BIT_SET(MV_SPI_IF_CONFIG_REG, MV_SPI_BYTE_LENGTH_MASK);
+
+ /* TX/RX as long we have complete 16bit chunks */
+ while (bytesLeft >= MV_SPI_16_BIT_CHUNK_SIZE)
+ {
+            /* Transmit and wait for the transfer to complete */
+ if ((ret = mvSpi16bitDataTxRx(*txPtr, rxPtr)) != MV_OK)
+ return ret;
+
+ /* increment the pointers */
+ txPtr++;
+ rxPtr++;
+ bytesLeft -= MV_SPI_16_BIT_CHUNK_SIZE;
+ }
+ }
+ else
+ {
+ /* Verify that the SPI mode is in 8bit mode */
+ MV_REG_BIT_RESET(MV_SPI_IF_CONFIG_REG, MV_SPI_BYTE_LENGTH_MASK);
+
+        /* TX/RX in 8bit chunks */
+ while (bytesLeft > 0)
+ {
+            /* Transmit and wait for the transfer to complete */
+ if ( (ret = mvSpi8bitDataTxRx(*pTxBuff, pRxBuff) ) != MV_OK)
+ return ret;
+ pRxBuff++;
+ pTxBuff++;
+ bytesLeft--;
+ }
+ }
+
+ return MV_OK;
+}
+
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.h
new file mode 100644
index 000000000..74859f03e
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.h
@@ -0,0 +1,94 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSpihH
+#define __INCmvSpihH
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+
+/* Function Prototypes */
+/* Init */
+MV_STATUS mvSpiInit (MV_U32 serialBaudRate);
+
+/* Set the Frequency of the Spi clock */
+MV_STATUS mvSpiBaudRateSet(MV_U32 serialBaudRate);
+
+/* Assert the SPI chip select */
+MV_VOID mvSpiCsAssert (MV_VOID);
+
+/* De-assert the SPI chip select */
+MV_VOID mvSpiCsDeassert (MV_VOID);
+
+/* Simultaneous Read and write */
+MV_STATUS mvSpiReadWrite (MV_U8* pRxBuff, MV_U8* pTxBuff, MV_U32 buffSize);
+
+/* Serialize a buffer on the TX line - RX is ignored */
+MV_STATUS mvSpiWrite (MV_U8* pTxBuff, MV_U32 buffSize);
+
+/* read from the RX line by writing dummy values to the TX line */
+MV_STATUS mvSpiRead (MV_U8* pRxBuff, MV_U32 buffSize);
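+
+/*
+ * Usage sketch (opcode is an assumed example): a raw transaction brackets the
+ * transfer with the chip-select calls, e.g. reading a flash status register:
+ *
+ *  MV_U8 cmd = 0x05;   // RDSR opcode of the attached device (assumed)
+ *  MV_U8 status;
+ *
+ *  mvSpiCsAssert();
+ *  mvSpiWrite(&cmd, 1);
+ *  mvSpiRead(&status, 1);
+ *  mvSpiCsDeassert();
+ *
+ * The helpers in mvSpiCmnd.h perform this bracketing automatically.
+ */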
+
+#endif /* __INCmvSpihH */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.c
new file mode 100644
index 000000000..a5d5a6478
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.c
@@ -0,0 +1,249 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "spi/mvSpi.h"
+#include "spi/mvSpiSpec.h"
+
+/*#define MV_DEBUG*/
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+
+/*******************************************************************************
+* mvSpiReadAndWrite - Read and Write a buffer simultaneously
+*
+* DESCRIPTION:
+*       Transmit and receive a buffer over the SPI in 16bit chunks. If the
+*       buffer size or address is not 16bit aligned, the transfer is done in 8bit chunks.
+*
+* INPUT:
+* pRxBuff: Pointer to the buffer to write the RX info in
+* pTxBuff: Pointer to the buffer holding the TX info
+* buffSize: length of both the pTxBuff and pRxBuff
+*
+* OUTPUT:
+* pRxBuff: Pointer of the buffer holding the RX data
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSpiReadAndWrite(MV_U8* pRxBuff, MV_U8* pTxBuff, MV_U32 buffSize)
+{
+ MV_STATUS ret;
+
+ /* check for null parameters */
+ if ((pRxBuff == NULL) || (pTxBuff == NULL) || (buffSize == 0))
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ /* First assert the chip select */
+ mvSpiCsAssert();
+
+ ret = mvSpiReadWrite(pRxBuff, pTxBuff, buffSize);
+
+ /* Finally deassert the chip select */
+ mvSpiCsDeassert();
+
+ return ret;
+}
+
+/*******************************************************************************
+* mvSpiWriteThenWrite - Serialize a command followed by the data over the TX line
+*
+* DESCRIPTION:
+* Assert the chip select line. Transmit the command buffer followed by
+* the data buffer. Then deassert the CS line.
+*
+* INPUT:
+* pCmndBuff: Pointer to the command buffer to transmit
+* cmndSize: length of the command size
+* pTxDataBuff: Pointer to the data buffer to transmit
+* txDataSize: length of the data buffer
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSpiWriteThenWrite (MV_U8* pCmndBuff, MV_U32 cmndSize, MV_U8* pTxDataBuff,
+ MV_U32 txDataSize)
+{
+ MV_STATUS ret = MV_OK, tempRet;
+
+ /* check for null parameters */
+#ifndef CONFIG_MARVELL
+ if(NULL == pTxDataBuff)
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+#endif
+
+ if (pCmndBuff == NULL)
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ /* First assert the chip select */
+ mvSpiCsAssert();
+
+ /* first write the command */
+ if ((cmndSize) && (pCmndBuff != NULL))
+ {
+ if ((tempRet = mvSpiWrite(pCmndBuff, cmndSize)) != MV_OK)
+ ret = tempRet;
+ }
+
+ /* Then write the data buffer */
+#ifndef CONFIG_MARVELL
+ if (txDataSize)
+#else
+ if ((txDataSize) && (pTxDataBuff != NULL))
+#endif
+ {
+ if ((tempRet = mvSpiWrite(pTxDataBuff, txDataSize)) != MV_OK)
+ ret = tempRet;
+ }
+
+ /* Finally deassert the chip select */
+ mvSpiCsDeassert();
+
+ return ret;
+}
+
+/*******************************************************************************
+* mvSpiWriteThenRead - Serialize a command then read a data buffer
+*
+* DESCRIPTION:
+* Assert the chip select line. Transmit the command buffer then read
+* the data buffer. Then deassert the CS line.
+*
+* INPUT:
+* pCmndBuff: Pointer to the command buffer to transmit
+* cmndSize: length of the command size
+* pRxDataBuff: Pointer to the buffer to read the data in
+*       rxDataSize: length of the data buffer
+*       dummyBytesToRead: number of dummy bytes to read before the real data
+*
+* OUTPUT:
+* pRxDataBuff: Pointer to the buffer holding the data
+*
+* RETURN:
+* Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSpiWriteThenRead (MV_U8* pCmndBuff, MV_U32 cmndSize, MV_U8* pRxDataBuff,
+ MV_U32 rxDataSize,MV_U32 dummyBytesToRead)
+{
+ MV_STATUS ret = MV_OK, tempRet;
+ MV_U8 dummyByte;
+
+ /* check for null parameters */
+ if ((pCmndBuff == NULL) && (pRxDataBuff == NULL))
+ {
+ mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+ return MV_BAD_PARAM;
+ }
+
+ /* First assert the chip select */
+ mvSpiCsAssert();
+
+ /* first write the command */
+ if ((cmndSize) && (pCmndBuff != NULL))
+ {
+ if ((tempRet = mvSpiWrite(pCmndBuff, cmndSize)) != MV_OK)
+ ret = tempRet;
+ }
+
+ /* Read dummy bytes before real data. */
+ while(dummyBytesToRead)
+ {
+ mvSpiRead(&dummyByte,1);
+ dummyBytesToRead--;
+ }
+
+    /* Then read the data buffer */
+ if ((rxDataSize) && (pRxDataBuff != NULL))
+ {
+ if ((tempRet = mvSpiRead(pRxDataBuff, rxDataSize)) != MV_OK)
+ ret = tempRet;
+ }
+
+ /* Finally deassert the chip select */
+ mvSpiCsDeassert();
+
+ return ret;
+}
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.h
new file mode 100644
index 000000000..329e26b7c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.h
@@ -0,0 +1,82 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSpiCmndhH
+#define __INCmvSpiCmndhH
+
+#include "mvTypes.h"
+
+/* Function Prototypes */
+
+/* Simultaneous Read and write */
+MV_STATUS mvSpiReadAndWrite (MV_U8* pRxBuff, MV_U8* pTxBuff, MV_U32 buffSize);
+
+/* write command - write a command and then write data */
+MV_STATUS mvSpiWriteThenWrite (MV_U8* pCmndBuff, MV_U32 cmndSize, MV_U8* pTxDataBuff, MV_U32 txDataSize);
+
+/* read command - write a command and then read data by writing dummy data */
+MV_STATUS mvSpiWriteThenRead (MV_U8* pCmndBuff, MV_U32 cmndSize, MV_U8* pRxDataBuff,
+ MV_U32 rxDataSize,MV_U32 dummyBytesToRead);
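+
+/*
+ * Usage sketch (0x9F is the common JEDEC RDID opcode; the exact opcode is
+ * device-specific): read the 3-byte flash ID in one command/response cycle:
+ *
+ *  MV_U8 rdidCmd = 0x9F;
+ *  MV_U8 id[3];
+ *
+ *  if (mvSpiWriteThenRead(&rdidCmd, 1, id, sizeof(id), 0) == MV_OK)
+ *  {
+ *      // id[0] = manufacturer ID, id[1..2] = device ID
+ *  }
+ */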
+
+#endif /* __INCmvSpiCmndhH */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiSpec.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiSpec.h
new file mode 100644
index 000000000..658159abb
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiSpec.h
@@ -0,0 +1,98 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSpiSpecH
+#define __INCmvSpiSpecH
+
+/* Constants */
+#define MV_SPI_WAIT_RDY_MAX_LOOP 100000
+#define MV_SPI_16_BIT_CHUNK_SIZE 2
+#define MV_SPI_DUMMY_WRITE_16BITS 0xFFFF
+#define MV_SPI_DUMMY_WRITE_8BITS 0xFF
+
+/* Marvell Flash Device Controller Registers */
+#define MV_SPI_CTRLR_OFST 0x10600
+#define MV_SPI_IF_CTRL_REG (MV_SPI_CTRLR_OFST + 0x00)
+#define MV_SPI_IF_CONFIG_REG (MV_SPI_CTRLR_OFST + 0x04)
+#define MV_SPI_DATA_OUT_REG (MV_SPI_CTRLR_OFST + 0x08)
+#define MV_SPI_DATA_IN_REG (MV_SPI_CTRLR_OFST + 0x0c)
+#define MV_SPI_INT_CAUSE_REG (MV_SPI_CTRLR_OFST + 0x10)
+#define MV_SPI_INT_CAUSE_MASK_REG (MV_SPI_CTRLR_OFST + 0x14)
+
+/* Serial Memory Interface Control Register Masks */
+#define MV_SPI_CS_ENABLE_OFFSET 0 /* bit 0 */
+#define MV_SPI_MEMORY_READY_OFFSET 1 /* bit 1 */
+#define MV_SPI_CS_ENABLE_MASK (0x1 << MV_SPI_CS_ENABLE_OFFSET)
+#define MV_SPI_MEMORY_READY_MASK (0x1 << MV_SPI_MEMORY_READY_OFFSET)
+
+/* Serial Memory Interface Configuration Register Masks */
+#define MV_SPI_CLK_PRESCALE_OFFSET 0 /* bit 0-4 */
+#define MV_SPI_BYTE_LENGTH_OFFSET 5 /* bit 5 */
+#define MV_SPI_ADDRESS_BURST_LENGTH_OFFSET 8 /* bit 8-9 */
+#define MV_SPI_CLK_PRESCALE_MASK (0x1F << MV_SPI_CLK_PRESCALE_OFFSET)
+#define MV_SPI_BYTE_LENGTH_MASK (0x1 << MV_SPI_BYTE_LENGTH_OFFSET)
+#define MV_SPI_ADDRESS_BURST_LENGTH_MASK (0x3 << MV_SPI_ADDRESS_BURST_LENGTH_OFFSET)
+
+#endif /* __INCmvSpiSpecH */
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvCompVer.txt b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvCompVer.txt
new file mode 100644
index 000000000..40531164c
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvCompVer.txt
@@ -0,0 +1,4 @@
+Global HAL Version: FEROCEON_HAL_3_1_7
+Unit HAL Version: 3.1.5
+Description: This component includes an implementation of the unit HAL drivers
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.c b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.c
new file mode 100644
index 000000000..0bf8b7571
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.c
@@ -0,0 +1,1023 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#include "mvTwsi.h"
+#include "mvTwsiSpec.h"
+#include "cpu/mvCpu.h"
+
+
+/*#define MV_DEBUG*/
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+static MV_VOID twsiIntFlgClr(MV_U8 chanNum);
+static MV_BOOL twsiMainIntGet(MV_U8 chanNum);
+static MV_VOID twsiAckBitSet(MV_U8 chanNum);
+static MV_U32 twsiStsGet(MV_U8 chanNum);
+static MV_VOID twsiReset(MV_U8 chanNum);
+static MV_STATUS twsiAddr7BitSet(MV_U8 chanNum, MV_U32 deviceAddress,MV_TWSI_CMD command);
+static MV_STATUS twsiAddr10BitSet(MV_U8 chanNum, MV_U32 deviceAddress,MV_TWSI_CMD command);
+static MV_STATUS twsiDataTransmit(MV_U8 chanNum, MV_U8 *pBlock, MV_U32 blockSize);
+static MV_STATUS twsiDataReceive(MV_U8 chanNum, MV_U8 *pBlock, MV_U32 blockSize);
+static MV_STATUS twsiTargetOffsSet(MV_U8 chanNum, MV_U32 offset,MV_BOOL moreThen256);
+
+
+static MV_BOOL twsiTimeoutChk(MV_U32 timeout, const MV_8 *pString)
+{
+ if(timeout >= TWSI_TIMEOUT_VALUE)
+ {
+ DB(mvOsPrintf("%s",pString));
+ return MV_TRUE;
+ }
+ return MV_FALSE;
+
+}
+/*******************************************************************************
+* mvTwsiStartBitSet - Set start bit on the bus
+*
+* DESCRIPTION:
+* This routine sets the start bit on the TWSI bus.
+* The routine first checks for interrupt flag condition, then it sets
+* the start bit in the TWSI Control register.
+* If the interrupt flag was already set, the function
+* will clear it.
+* The function then waits for the start bit to be cleared by the HW.
+* Then it waits for the interrupt flag to be set and, eventually, the
+* TWSI status is checked to be 0x8 or 0x10 (repeated start bit).
+*
+* INPUT:
+* chanNum - TWSI channel.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_OK if the start bit was set successfully on the bus.
+* MV_FAIL if interrupt flag was set before setting start bit.
+*
+*******************************************************************************/
+MV_STATUS mvTwsiStartBitSet(MV_U8 chanNum)
+{
+ MV_BOOL isIntFlag = MV_FALSE;
+ MV_U32 timeout, temp;
+
+ DB(mvOsPrintf("TWSI: mvTwsiStartBitSet \n"));
+ /* check Int flag */
+ if(twsiMainIntGet(chanNum))
+ isIntFlag = MV_TRUE;
+ /* set start Bit */
+ temp = MV_REG_READ(TWSI_CONTROL_REG(chanNum));
+ MV_REG_WRITE(TWSI_CONTROL_REG(chanNum), temp | TWSI_CONTROL_START_BIT);
+
+ /* in case that the int flag was set before i.e. repeated start bit */
+ if(isIntFlag){
+ DB(mvOsPrintf("TWSI: mvTwsiStartBitSet repeated start Bit\n"));
+ twsiIntFlgClr(chanNum);
+ }
+
+ /* wait for interrupt */
+ timeout = 0;
+ while(!twsiMainIntGet(chanNum) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+ /* check for timeout */
+ if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: mvTwsiStartBitSet ERROR - Start Clear bit TimeOut .\n"))
+ return MV_TIMEOUT;
+
+
+ /* check that start bit went down */
+ if((MV_REG_READ(TWSI_CONTROL_REG(chanNum)) & TWSI_CONTROL_START_BIT) != 0)
+ {
+ mvOsPrintf("TWSI: mvTwsiStartBitSet ERROR - start bit didn't go down\n");
+ return MV_FAIL;
+ }
+
+ /* check the status */
+ temp = twsiStsGet(chanNum);
+ if(( temp != TWSI_START_CON_TRA ) && ( temp != TWSI_REPEATED_START_CON_TRA ))
+ {
+ mvOsPrintf("TWSI: mvTwsiStartBitSet ERROR - status %x after Set Start Bit. \n",temp);
+ return MV_FAIL;
+ }
+
+ return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvTwsiStopBitSet - Set stop bit on the bus
+*
+* DESCRIPTION:
+* This routine sets the stop bit on the TWSI bus.
+* The function then waits for the stop bit to be cleared by the HW.
+* Finally, the function checks for a status of 0xF8.
+*
+* INPUT:
+* chanNum - TWSI channel
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_OK if the stop bit was set successfully on the bus.
+*
+*******************************************************************************/
+MV_STATUS mvTwsiStopBitSet(MV_U8 chanNum)
+{
+ MV_U32 timeout, temp;
+
+ /* Generate stop bit */
+ temp = MV_REG_READ(TWSI_CONTROL_REG(chanNum));
+ MV_REG_WRITE(TWSI_CONTROL_REG(chanNum), temp | TWSI_CONTROL_STOP_BIT);
+
+ twsiIntFlgClr(chanNum);
+
+ /* wait for stop bit to come down */
+ timeout = 0;
+ while( ((MV_REG_READ(TWSI_CONTROL_REG(chanNum)) & TWSI_CONTROL_STOP_BIT) != 0) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+ /* check for timeout */
+ if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: mvTwsiStopBitSet ERROR - Stop bit TimeOut .\n"))
+ return MV_TIMEOUT;
+
+ /* check that the stop bit went down */
+ if((MV_REG_READ(TWSI_CONTROL_REG(chanNum)) & TWSI_CONTROL_STOP_BIT) != 0)
+ {
+ mvOsPrintf("TWSI: mvTwsiStopBitSet ERROR - stop bit didn't go down.\n");
+ return MV_FAIL;
+ }
+
+ /* check the status */
+ temp = twsiStsGet(chanNum);
+ if( temp != TWSI_NO_REL_STS_INT_FLAG_IS_KEPT_0){
+ mvOsPrintf("TWSI: mvTwsiStopBitSet ERROR - status %x after Stop Bit. \n", temp);
+ return MV_FAIL;
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* twsiMainIntGet - Get twsi bit from main Interrupt cause.
+*
+* DESCRIPTION:
+* This routine returns the twsi interrupt flag value.
+*
+* INPUT:
+* chanNum - TWSI channel.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_TRUE if the interrupt flag is set, MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_BOOL twsiMainIntGet(MV_U8 chanNum)
+{
+ MV_U32 temp;
+
+ /* get the int flag bit */
+
+ temp = MV_REG_READ(TWSI_CPU_MAIN_INT_CAUSE_REG);
+ if (temp & (TWSI0_CPU_MAIN_INT_BIT << chanNum))
+ return MV_TRUE;
+
+ return MV_FALSE;
+}
+/*******************************************************************************
+* twsiIntFlgClr - Clear Interrupt flag.
+*
+* DESCRIPTION:
+* This routine clears the interrupt flag. It does NOT poll the interrupt
+* flag to make sure it is cleared. After clearing the interrupt flag, it waits
+* for at least 1 millisecond.
+*
+* INPUT:
+* chanNum - TWSI channel
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+static MV_VOID twsiIntFlgClr(MV_U8 chanNum)
+{
+ MV_U32 temp;
+
+ /* wait for 1 millisecond to prevent TWSI register write-after-write problems */
+ mvOsDelay(1);
+ /* clear the int flag bit */
+ temp = MV_REG_READ(TWSI_CONTROL_REG(chanNum));
+ MV_REG_WRITE(TWSI_CONTROL_REG(chanNum),temp & ~(TWSI_CONTROL_INT_FLAG_SET));
+
+ /* wait for 1 millisecond for the clear to take effect */
+ mvOsDelay(1);
+
+ return;
+}
+
+
+/*******************************************************************************
+* twsiAckBitSet - Set acknowledge bit on the bus
+*
+* DESCRIPTION:
+* This routine sets the acknowledge bit on the TWSI bus.
+*
+* INPUT:
+* chanNum - TWSI channel.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+static MV_VOID twsiAckBitSet(MV_U8 chanNum)
+{
+ MV_U32 temp;
+
+ /*Set the Ack bit */
+ temp = MV_REG_READ(TWSI_CONTROL_REG(chanNum));
+ MV_REG_WRITE(TWSI_CONTROL_REG(chanNum), temp | TWSI_CONTROL_ACK);
+
+ /* Add delay of 1ms */
+ mvOsDelay(1);
+ return;
+}
+
+
+/*******************************************************************************
+* mvTwsiInit - Initialize TWSI interface
+*
+* DESCRIPTION:
+* This routine:
+* - Resets the TWSI.
+* - Initializes the TWSI clock baud rate according to the given frequency
+* parameter, based on the Tclk frequency, and enables the TWSI slave.
+* - Sets the ack bit.
+* - Assigns the TWSI slave address according to the TWSI address type.
+*
+*
+* INPUT:
+* chanNum - TWSI channel
+* frequancy - requested TWSI frequency in Hz (up to 100 kHz).
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* Actual frequency.
+*
+*******************************************************************************/
+MV_U32 mvTwsiInit(MV_U8 chanNum, MV_HZ frequancy, MV_U32 Tclk, MV_TWSI_ADDR *pTwsiAddr, MV_BOOL generalCallEnable)
+{
+ MV_U32 n,m,freq,margin,minMargin = 0xffffffff;
+ MV_U32 power;
+ MV_U32 actualFreq = 0,actualN = 0,actualM = 0,val;
+
+ if(frequancy > 100000)
+ {
+ mvOsPrintf("Warning: TWSI frequency is too high, please use up to 100 kHz.\n");
+ }
+
+ DB(mvOsPrintf("TWSI: mvTwsiInit - Tclk = %d freq = %d\n",Tclk,frequancy));
+ /* Calculate N and M for the TWSI clock baud rate */
+ for(n = 0 ; n < 8 ; n++)
+ {
+ for(m = 0 ; m < 16 ; m++)
+ {
+ power = 2 << n; /* power = 2^(n+1) */
+ freq = Tclk/(10*(m+1)*power);
+ margin = MV_ABS(frequancy - freq);
+ if(margin < minMargin)
+ {
+ minMargin = margin;
+ actualFreq = freq;
+ actualN = n;
+ actualM = m;
+ }
+ }
+ }
+ DB(mvOsPrintf("TWSI: mvTwsiInit - actN %d actM %d actFreq %d\n",actualN , actualM, actualFreq));
+ /* Reset the TWSI logic */
+ twsiReset(chanNum);
+
+ /* Set the baud rate */
+ val = ((actualM<< TWSI_BAUD_RATE_M_OFFS) | actualN << TWSI_BAUD_RATE_N_OFFS);
+ MV_REG_WRITE(TWSI_STATUS_BAUDE_RATE_REG(chanNum),val);
+
+ /* Enable the TWSI and slave */
+ MV_REG_WRITE(TWSI_CONTROL_REG(chanNum), TWSI_CONTROL_ENA | TWSI_CONTROL_ACK);
+
+ /* set the TWSI slave address */
+ if( pTwsiAddr->type == ADDR10_BIT )/* 10 Bit deviceAddress */
+ {
+ /* writing the 2 most significant bits of the 10 bit address*/
+ val = ((pTwsiAddr->address & TWSI_SLAVE_ADDR_10BIT_MASK) >> TWSI_SLAVE_ADDR_10BIT_OFFS );
+ /* bits 7:3 must be 0x11110 */
+ val |= TWSI_SLAVE_ADDR_10BIT_CONST;
+ /* set GCE bit */
+ if(generalCallEnable)
+ val |= TWSI_SLAVE_ADDR_GCE_ENA;
+ /* write slave address */
+ MV_REG_WRITE(TWSI_SLAVE_ADDR_REG(chanNum),val);
+
+ /* writing the 8 least significant bits of the 10 bit address*/
+ val = (pTwsiAddr->address << TWSI_EXTENDED_SLAVE_OFFS) & TWSI_EXTENDED_SLAVE_MASK;
+ MV_REG_WRITE(TWSI_EXTENDED_SLAVE_ADDR_REG(chanNum), val);
+ }
+ else /*7 bit address*/
+ {
+ /* set the 7 Bits address */
+ MV_REG_WRITE(TWSI_EXTENDED_SLAVE_ADDR_REG(chanNum),0x0);
+ val = (pTwsiAddr->address << TWSI_SLAVE_ADDR_7BIT_OFFS) & TWSI_SLAVE_ADDR_7BIT_MASK;
+ MV_REG_WRITE(TWSI_SLAVE_ADDR_REG(chanNum), val);
+ }
+
+ /* unmask twsi int */
+ val = MV_REG_READ(TWSI_CONTROL_REG(chanNum));
+ MV_REG_WRITE(TWSI_CONTROL_REG(chanNum), val | TWSI_CONTROL_INT_ENA);
+ /* Add delay of 1ms */
+ mvOsDelay(1);
+
+ return actualFreq;
+}
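+
+/*
+ * Illustrative note, not part of the original Marvell sources: the loop
+ * above searches for the M/N pair whose output frequency
+ * freq = Tclk / (10 * (M + 1) * 2^(N + 1)) is closest to the requested one.
+ * For example, assuming Tclk = 166 MHz and a requested 100 kHz:
+ *   M = 9, N = 3  =>  166000000 / (10 * 10 * 16) = 103750 Hz (~104 kHz),
+ * which is the closest achievable setting, so mvTwsiInit would program
+ * M = 9, N = 3 and return 103750.
+ */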
+
+
+/*******************************************************************************
+* twsiStsGet - Get the TWSI status value.
+*
+* DESCRIPTION:
+* This routine returns the TWSI status value.
+*
+* INPUT:
+* chanNum - TWSI channel
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_U32 - the TWSI status.
+*
+*******************************************************************************/
+static MV_U32 twsiStsGet(MV_U8 chanNum)
+{
+ return MV_REG_READ(TWSI_STATUS_BAUDE_RATE_REG(chanNum));
+
+}
+
+/*******************************************************************************
+* twsiReset - Reset the TWSI.
+*
+* DESCRIPTION:
+* Resets the TWSI logic and sets all TWSI registers to their reset values.
+*
+* INPUT:
+* chanNum - TWSI channel
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None
+*
+*******************************************************************************/
+static MV_VOID twsiReset(MV_U8 chanNum)
+{
+ /* Reset the TWSI logic */
+ MV_REG_WRITE(TWSI_SOFT_RESET_REG(chanNum),0);
+
+ /* wait for 2 mili sec */
+ mvOsDelay(2);
+
+ return;
+}
+
+
+
+
+/******************************* POLICY ****************************************/
+
+
+
+/*******************************************************************************
+* mvTwsiAddrSet - Set address on TWSI bus.
+*
+* DESCRIPTION:
+* This function sets an address (7-bit or 10-bit) on the TWSI bus.
+*
+* INPUT:
+* chanNum - TWSI channel
+* pTwsiAddr - twsi address.
+* command - read / write .
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_OK - if setting the address completed successfully.
+* MV_FAIL otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvTwsiAddrSet(MV_U8 chanNum, MV_TWSI_ADDR *pTwsiAddr, MV_TWSI_CMD command)
+{
+ DB(mvOsPrintf("TWSI: mvTwsiAddr7BitSet addr %x , type %d, cmd is %s\n",pTwsiAddr->address,\
+ pTwsiAddr->type, ((command==MV_TWSI_WRITE)?"Write":"Read") ));
+ /* 10 Bit address */
+ if(pTwsiAddr->type == ADDR10_BIT)
+ {
+ return twsiAddr10BitSet(chanNum, pTwsiAddr->address,command);
+ }
+ /* 7 Bit address */
+ else
+ {
+ return twsiAddr7BitSet(chanNum, pTwsiAddr->address,command);
+ }
+
+}
+
+/*******************************************************************************
+* twsiAddr10BitSet - Set 10 Bit address on TWSI bus.
+*
+* DESCRIPTION:
+* There are two address phases:
+* 1) Write '11110' to data register bits [7:3] and 10-bit address MSB
+* (bits [9:8]) to data register bits [2:1] plus a write(0) or read(1) bit
+* to the Data register. Then it clears the interrupt flag, which drives
+* the address on the TWSI bus. The function then waits for the interrupt
+* flag to be active and status 0x18 (write) or 0x40 (read) to be set.
+* 2) Write the rest of the 10-bit address to the data register and clear the
+* interrupt flag, which drives the address on the TWSI bus. The
+* function then waits for the interrupt flag to be active and status
+* 0xD0 (write) or 0xE0 (read) to be set.
+*
+* INPUT:
+* chanNum - TWSI channel
+* deviceAddress - twsi address.
+* command - read / write .
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_OK - if setting the address completed successfully.
+* MV_FAIL otherwise.
+*
+*******************************************************************************/
+static MV_STATUS twsiAddr10BitSet(MV_U8 chanNum, MV_U32 deviceAddress,MV_TWSI_CMD command)
+{
+ MV_U32 val,timeout;
+
+ /* writing the 2 most significant bits of the 10 bit address*/
+ val = ((deviceAddress & TWSI_DATA_ADDR_10BIT_MASK) >> TWSI_DATA_ADDR_10BIT_OFFS );
+ /* bits 7:3 must be 0x11110 */
+ val |= TWSI_DATA_ADDR_10BIT_CONST;
+ /* set command */
+ val |= command;
+ MV_REG_WRITE(TWSI_DATA_REG(chanNum), val);
+ /* workaround: add a delay */
+ mvOsDelay(1);
+
+ /* clear Int flag */
+ twsiIntFlgClr(chanNum);
+
+ /* wait for Int to be Set */
+ timeout = 0;
+ while( !twsiMainIntGet(chanNum) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+ /* check for timeout */
+ if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: twsiAddr10BitSet ERROR - 1st addr (10Bit) Int TimeOut.\n"))
+ return MV_TIMEOUT;
+
+ /* check the status */
+ val = twsiStsGet(chanNum);
+ if(( (val != TWSI_AD_PLS_RD_BIT_TRA_ACK_REC) && (command == MV_TWSI_READ ) ) ||
+ ( (val != TWSI_AD_PLS_WR_BIT_TRA_ACK_REC) && (command == MV_TWSI_WRITE) ))
+ {
+ mvOsPrintf("TWSI: twsiAddr10BitSet ERROR - status %x 1st addr (10 Bit) in %s mode.\n"\
+ ,val, ((command==MV_TWSI_WRITE)?"Write":"Read") );
+ return MV_FAIL;
+ }
+
+ /* set 8 LSB of the address */
+ val = (deviceAddress << TWSI_DATA_ADDR_7BIT_OFFS) & TWSI_DATA_ADDR_7BIT_MASK;
+ MV_REG_WRITE(TWSI_DATA_REG(chanNum), val);
+
+ /* clear Int flag */
+ twsiIntFlgClr(chanNum);
+
+ /* wait for Int to be Set */
+ timeout = 0;
+ while( !twsiMainIntGet(chanNum) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+ /* check for timeout */
+ if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: twsiAddr10BitSet ERROR - 2nd (10 Bit) Int TimOut.\n"))
+ return MV_TIMEOUT;
+
+ /* check the status */
+ val = twsiStsGet(chanNum);
+ if(( (val != TWSI_SEC_AD_PLS_RD_BIT_TRA_ACK_REC) && (command == MV_TWSI_READ ) ) ||
+ ( (val != TWSI_SEC_AD_PLS_WR_BIT_TRA_ACK_REC) && (command == MV_TWSI_WRITE) ))
+ {
+ mvOsPrintf("TWSI: twsiAddr10BitSet ERROR - status %x 2nd addr(10 Bit) in %s mode.\n"\
+ ,val, ((command==MV_TWSI_WRITE)?"Write":"Read") );
+ return MV_FAIL;
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* twsiAddr7BitSet - Set 7 Bit address on TWSI bus.
+*
+* DESCRIPTION:
+* This function writes a 7-bit address plus a write or read bit to the
+* Data register. Then it clears the interrupt flag, which drives the address on
+* the TWSI bus. The function then waits for the interrupt flag to be active
+* and status 0x18 (write) or 0x40 (read) to be set.
+*
+* INPUT:
+* chanNum - TWSI channel
+* deviceAddress - twsi address.
+* command - read / write .
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_OK - if setting the address completed successfully.
+* MV_FAIL otherwise.
+*
+*******************************************************************************/
+static MV_STATUS twsiAddr7BitSet(MV_U8 chanNum, MV_U32 deviceAddress,MV_TWSI_CMD command)
+{
+ MV_U32 val,timeout;
+
+ /* set the address */
+ val = (deviceAddress << TWSI_DATA_ADDR_7BIT_OFFS) & TWSI_DATA_ADDR_7BIT_MASK;
+ /* set command */
+ val |= command;
+ MV_REG_WRITE(TWSI_DATA_REG(chanNum), val);
+ /* workaround: add a delay */
+ mvOsDelay(1);
+
+ /* clear Int flag */
+ twsiIntFlgClr(chanNum);
+
+ /* wait for Int to be Set */
+ timeout = 0;
+ while( !twsiMainIntGet(chanNum) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+ /* check for timeout */
+ if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: twsiAddr7BitSet ERROR - Addr (7 Bit) int TimeOut.\n"))
+ return MV_TIMEOUT;
+
+ /* check the status */
+ val = twsiStsGet(chanNum);
+ if(( (val != TWSI_AD_PLS_RD_BIT_TRA_ACK_REC) && (command == MV_TWSI_READ ) ) ||
+ ( (val != TWSI_AD_PLS_WR_BIT_TRA_ACK_REC) && (command == MV_TWSI_WRITE) ))
+ {
+ /* only in debug, since at boot we try to read the SPD of both DRAMs, and we don't
+ want error messages in case a DIMM doesn't exist. */
+ DB(mvOsPrintf("TWSI: twsiAddr7BitSet ERROR - status %x addr (7 Bit) in %s mode.\n"\
+ ,val,((command==MV_TWSI_WRITE)?"Write":"Read") ));
+ return MV_FAIL;
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* twsiDataTransmit - Transmit a data block over the TWSI bus.
+*
+* DESCRIPTION:
+* This function writes a given data block to the TWSI bus in 8-bit granularity.
+* First, the function waits for the interrupt flag to be active. Then,
+* for each 8-bit data byte:
+* The function writes the data to the data register. It then clears the
+* interrupt flag, which drives the data on the TWSI bus.
+* The function then waits for the interrupt flag to be active and status
+* 0x28 to be set.
+*
+*
+* INPUT:
+* chanNum - TWSI channel
+* pBlock - Data block.
+* blockSize - number of chars in pBlock.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_OK - if transmitting the block completed successfully,
+* MV_BAD_PARAM - if pBlock is NULL,
+* MV_FAIL otherwise.
+*
+*******************************************************************************/
+static MV_STATUS twsiDataTransmit(MV_U8 chanNum, MV_U8 *pBlock, MV_U32 blockSize)
+{
+ MV_U32 timeout, temp, blockSizeWr = blockSize;
+
+ if(NULL == pBlock)
+ return MV_BAD_PARAM;
+
+ /* wait for Int to be Set */
+ timeout = 0;
+ while( !twsiMainIntGet(chanNum) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+ /* check for timeout */
+ if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: twsiDataTransmit ERROR - Write Data Int TimeOut.\n"))
+ return MV_TIMEOUT;
+
+ while(blockSizeWr)
+ {
+ /* write the data*/
+ MV_REG_WRITE(TWSI_DATA_REG(chanNum),(MV_U32)*pBlock);
+ DB(mvOsPrintf("TWSI: twsiDataTransmit place = %d write %x \n",\
+ blockSize - blockSizeWr, *pBlock));
+ pBlock++;
+ blockSizeWr--;
+
+ twsiIntFlgClr(chanNum);
+
+ /* wait for Int to be Set */
+ timeout = 0;
+ while( !twsiMainIntGet(chanNum) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+ /* check for timeout */
+ if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: twsiDataTransmit ERROR - Write Data Int TimeOut.\n"))
+ return MV_TIMEOUT;
+
+ /* check the status */
+ temp = twsiStsGet(chanNum);
+ if(temp != TWSI_M_TRAN_DATA_BYTE_ACK_REC)
+ {
+ mvOsPrintf("TWSI: twsiDataTransmit ERROR - status %x in write trans\n",temp);
+ return MV_FAIL;
+ }
+
+ }
+
+ return MV_OK;
+}
+
+/*******************************************************************************
+* twsiDataReceive - Receive data block from TWSI bus.
+*
+* DESCRIPTION:
+* This function receives a data block from the TWSI bus in 8-bit granularity
+* into the pBlock buffer.
+* First, the function waits for the interrupt flag to be active. Then,
+* for each 8-bit data byte:
+* It clears the interrupt flag, which allows the next data to be
+* received from the TWSI bus.
+* The function waits for the interrupt flag to be active
+* and the status register to be 0x50.
+* Then the function reads the data from the data register and copies it to
+* the given buffer.
+*
+* INPUT:
+* chanNum - TWSI channel
+* blockSize - number of bytes to read.
+*
+* OUTPUT:
+* pBlock - Data block.
+*
+* RETURN:
+* MV_OK - if receive transaction completed successfully,
+* MV_BAD_PARAM - if pBlock is NULL,
+* MV_FAIL otherwise.
+*
+*******************************************************************************/
+static MV_STATUS twsiDataReceive(MV_U8 chanNum, MV_U8 *pBlock, MV_U32 blockSize)
+{
+ MV_U32 timeout, temp, blockSizeRd = blockSize;
+ if(NULL == pBlock)
+ return MV_BAD_PARAM;
+
+ /* wait for Int to be Set */
+ timeout = 0;
+ while( !twsiMainIntGet(chanNum) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+ /* check for timeout */
+ if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: twsiDataReceive ERROR - Read Data int Time out .\n"))
+ return MV_TIMEOUT;
+
+ while(blockSizeRd)
+ {
+ if(blockSizeRd == 1)
+ {
+ /* clear ack and Int flag */
+ temp = MV_REG_READ(TWSI_CONTROL_REG(chanNum));
+ temp &= ~(TWSI_CONTROL_ACK);
+ MV_REG_WRITE(TWSI_CONTROL_REG(chanNum), temp);
+ }
+ twsiIntFlgClr(chanNum);
+ /* wait for Int to be Set */
+ timeout = 0;
+ while( (!twsiMainIntGet(chanNum)) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+ /* check for timeout */
+ if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: twsiDataReceive ERROR - Read Data Int Time out .\n"))
+ return MV_TIMEOUT;
+
+ /* check the status */
+ temp = twsiStsGet(chanNum);
+ if((temp != TWSI_M_REC_RD_DATA_ACK_TRA) && (blockSizeRd !=1))
+ {
+ mvOsPrintf("TWSI: twsiDataReceive ERROR - status %x in read trans \n",temp);
+ return MV_FAIL;
+ }
+ else if((temp != TWSI_M_REC_RD_DATA_ACK_NOT_TRA) && (blockSizeRd ==1))
+ {
+ mvOsPrintf("TWSI: twsiDataReceive ERROR - status %x in Rd Terminate\n",temp);
+ return MV_FAIL;
+ }
+
+ /* read the data*/
+ *pBlock = (MV_U8)MV_REG_READ(TWSI_DATA_REG(chanNum));
+ DB(mvOsPrintf("TWSI: twsiDataReceive place %d read %x \n",\
+ blockSize - blockSizeRd,*pBlock));
+ pBlock++;
+ blockSizeRd--;
+ }
+
+ return MV_OK;
+}
+
+
+
+/*******************************************************************************
+* twsiTargetOffsSet - Set TWSI target offset on TWSI bus.
+*
+* DESCRIPTION:
+* The function supports TWSI targets that have an internal address space (for
+* example EEPROMs). The function:
+* 1) Converts the given offset into a data block and size.
+* In case the offset is set on a TWSI slave that supports
+* more than a 256-byte offset, the offset is sent as
+* 2 bytes.
+* 2) Uses twsiDataTransmit to place those on the bus.
+*
+* INPUT:
+* chanNum - TWSI channel
+* offset - offset to be set on the EEPROM device.
+* moreThen256 - whether the EEPROM device supports more than a 256-byte offset.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_OK - if setting the offset completed successfully.
+* MV_FAIL otherwise.
+*
+*******************************************************************************/
+static MV_STATUS twsiTargetOffsSet(MV_U8 chanNum, MV_U32 offset, MV_BOOL moreThen256)
+{
+ MV_U8 offBlock[2];
+ MV_U32 offSize;
+
+ if(moreThen256 == MV_TRUE)
+ {
+ offBlock[0] = (offset >> 8) & 0xff;
+ offBlock[1] = offset & 0xff;
+ offSize = 2;
+ }
+ else
+ {
+ offBlock[0] = offset & 0xff;
+ offSize = 1;
+ }
+ DB(mvOsPrintf("TWSI: twsiTargetOffsSet offSize = %x addr1 = %x addr2 = %x\n",\
+ offSize,offBlock[0],offBlock[1]));
+ return twsiDataTransmit(chanNum, offBlock, offSize);
+
+}
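+
+/*
+ * Illustrative note, not part of the original Marvell sources: for an
+ * EEPROM larger than 256 bytes (moreThen256 == MV_TRUE), an offset of
+ * 0x1A5 is split into two bytes sent MSB first:
+ *   offBlock[0] = 0x01, offBlock[1] = 0xA5, offSize = 2.
+ * For a smaller device the offset is sent as a single byte:
+ *   offBlock[0] = offset & 0xff, offSize = 1.
+ */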
+
+/*******************************************************************************
+* mvTwsiRead - Read data block from a TWSI Slave.
+*
+* DESCRIPTION:
+* The function calls the following functions:
+* -) mvTwsiStartBitSet();
+* if(EEPROM device)
+* -) mvTwsiAddrSet(w);
+* -) twsiTargetOffsSet();
+* -) mvTwsiStartBitSet();
+* -) mvTwsiAddrSet(r);
+* -) twsiDataReceive();
+* -) mvTwsiStopBitSet();
+*
+* INPUT:
+* chanNum - TWSI channel
+* pTwsiSlave - Twsi Slave structure.
+* blockSize - number of bytes to read.
+*
+* OUTPUT:
+* pBlock - Data block.
+*
+* RETURN:
+* MV_OK - if EEPROM read transaction completed successfully,
+* MV_BAD_PARAM - if pBlock is NULL,
+* MV_FAIL otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvTwsiRead(MV_U8 chanNum, MV_TWSI_SLAVE *pTwsiSlave, MV_U8 *pBlock, MV_U32 blockSize)
+{
+ if((NULL == pBlock) || (NULL == pTwsiSlave))
+ return MV_BAD_PARAM;
+ if(MV_OK != mvTwsiStartBitSet(chanNum))
+ {
+ mvTwsiStopBitSet(chanNum);
+ return MV_FAIL;
+ }
+
+ DB(mvOsPrintf("TWSI: mvTwsiEepromRead after mvTwsiStartBitSet\n"));
+
+ /* in case an offset exists (i.e. EEPROM) */
+ if(MV_TRUE == pTwsiSlave->validOffset)
+ {
+ if(MV_OK != mvTwsiAddrSet(chanNum, &(pTwsiSlave->slaveAddr), MV_TWSI_WRITE))
+ {
+ mvTwsiStopBitSet(chanNum);
+ return MV_FAIL;
+ }
+ DB(mvOsPrintf("TWSI: mvTwsiEepromRead after mvTwsiAddrSet\n"));
+ if(MV_OK != twsiTargetOffsSet(chanNum, pTwsiSlave->offset, pTwsiSlave->moreThen256))
+ {
+ mvTwsiStopBitSet(chanNum);
+ return MV_FAIL;
+ }
+ DB(mvOsPrintf("TWSI: mvTwsiEepromRead after twsiTargetOffsSet\n"));
+ if(MV_OK != mvTwsiStartBitSet(chanNum))
+ {
+ mvTwsiStopBitSet(chanNum);
+ return MV_FAIL;
+ }
+ DB(mvOsPrintf("TWSI: mvTwsiEepromRead after mvTwsiStartBitSet\n"));
+ }
+ if(MV_OK != mvTwsiAddrSet(chanNum, &(pTwsiSlave->slaveAddr), MV_TWSI_READ))
+ {
+ mvTwsiStopBitSet(chanNum);
+ return MV_FAIL;
+ }
+ DB(mvOsPrintf("TWSI: mvTwsiEepromRead after mvTwsiAddrSet\n"));
+ if(MV_OK != twsiDataReceive(chanNum, pBlock, blockSize))
+ {
+ mvTwsiStopBitSet(chanNum);
+ return MV_FAIL;
+ }
+ DB(mvOsPrintf("TWSI: mvTwsiEepromRead after twsiDataReceive\n"));
+
+ if(MV_OK != mvTwsiStopBitSet(chanNum))
+ {
+ return MV_FAIL;
+ }
+
+ twsiAckBitSet(chanNum);
+
+ DB(mvOsPrintf("TWSI: mvTwsiEepromRead after mvTwsiStopBitSet\n"));
+
+ return MV_OK;
+}
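+
+/*
+ * Illustrative usage sketch, not part of the original Marvell sources.
+ * It assumes a hypothetical 7-bit EEPROM at slave address 0x50 on TWSI
+ * channel 0; the channel, address and offset are placeholders only.
+ *
+ *	MV_U8 buf[8];
+ *	MV_TWSI_SLAVE slave;
+ *
+ *	slave.slaveAddr.type = ADDR7_BIT;
+ *	slave.slaveAddr.address = 0x50;
+ *	slave.validOffset = MV_TRUE;
+ *	slave.offset = 0;
+ *	slave.moreThen256 = MV_FALSE;
+ *	if (mvTwsiRead(0, &slave, buf, sizeof(buf)) != MV_OK)
+ *		mvOsPrintf("TWSI: example read failed\n");
+ */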
+
+/*******************************************************************************
+* mvTwsiWrite - Write data block to a TWSI Slave.
+*
+* DESCRIPTION:
+* The function calls the following functions:
+* -) mvTwsiStartBitSet();
+* -) mvTwsiAddrSet();
+* -)if(EEPROM device)
+* -) twsiTargetOffsSet();
+* -) twsiDataTransmit();
+* -) mvTwsiStopBitSet();
+*
+* INPUT:
+* chanNum - TWSI channel
+* pTwsiSlave - TWSI slave structure.
+* blockSize - number of bytes to write.
+* pBlock - Data block.
+*
+* OUTPUT:
+* None
+*
+* RETURN:
+* MV_OK - if EEPROM write transaction completed successfully,
+* MV_BAD_PARAM - if pBlock is NULL,
+* MV_FAIL otherwise.
+*
+* NOTE: Some EEPROM devices require that the offset be aligned to the
+* max write burst supported.
+*******************************************************************************/
+MV_STATUS mvTwsiWrite(MV_U8 chanNum, MV_TWSI_SLAVE *pTwsiSlave, MV_U8 *pBlock, MV_U32 blockSize)
+{
+ if((NULL == pBlock) || (NULL == pTwsiSlave))
+ return MV_BAD_PARAM;
+
+ if(MV_OK != mvTwsiStartBitSet(chanNum))
+ {
+ mvTwsiStopBitSet(chanNum);
+ return MV_FAIL;
+ }
+
+ DB(mvOsPrintf("TWSI: mvTwsiEepromWrite after mvTwsiStartBitSet\n"));
+ if(MV_OK != mvTwsiAddrSet(chanNum, &(pTwsiSlave->slaveAddr), MV_TWSI_WRITE))
+ {
+ mvTwsiStopBitSet(chanNum);
+ return MV_FAIL;
+ }
+ DB(mvOsPrintf("TWSI: mvTwsiEepromWrite after mvTwsiAddrSet\n"));
+
+ /* in case an offset exists (i.e. EEPROM) */
+ if(MV_TRUE == pTwsiSlave->validOffset)
+ {
+ if(MV_OK != twsiTargetOffsSet(chanNum, pTwsiSlave->offset, pTwsiSlave->moreThen256))
+ {
+ mvTwsiStopBitSet(chanNum);
+ return MV_FAIL;
+ }
+ DB(mvOsPrintf("TWSI: mvTwsiEepromWrite after twsiTargetOffsSet\n"));
+ }
+ if(MV_OK != twsiDataTransmit(chanNum, pBlock, blockSize))
+ {
+ mvTwsiStopBitSet(chanNum);
+ return MV_FAIL;
+ }
+ DB(mvOsPrintf("TWSI: mvTwsiEepromWrite after twsiDataTransmit\n"));
+ if(MV_OK != mvTwsiStopBitSet(chanNum))
+ {
+ return MV_FAIL;
+ }
+ DB(mvOsPrintf("TWSI: mvTwsiEepromWrite after mvTwsiStopBitSet\n"));
+
+ return MV_OK;
+}
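+
+/*
+ * Illustrative usage sketch, not part of the original Marvell sources.
+ * Per the NOTE above, the offset is kept aligned to the device write-page
+ * size; the channel, slave address, offset and page size (8 bytes) are
+ * hypothetical placeholders.
+ *
+ *	MV_U8 page[8] = { 0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x11, 0x22, 0x33 };
+ *	MV_TWSI_SLAVE slave;
+ *
+ *	slave.slaveAddr.type = ADDR7_BIT;
+ *	slave.slaveAddr.address = 0x50;
+ *	slave.validOffset = MV_TRUE;
+ *	slave.offset = 8;
+ *	slave.moreThen256 = MV_FALSE;
+ *	if (mvTwsiWrite(0, &slave, page, sizeof(page)) != MV_OK)
+ *		mvOsPrintf("TWSI: example write failed\n");
+ */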
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.h
new file mode 100644
index 000000000..bd5b6d009
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.h
@@ -0,0 +1,121 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __INCmvTwsiH
+#define __INCmvTwsiH
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* need to update these includes */
+#include "twsi/mvTwsiSpec.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+
+/* The TWSI interface supports both 7-bit and 10-bit addressing. */
+/* This enumerator describes addressing type. */
+typedef enum _mvTwsiAddrType
+{
+ ADDR7_BIT, /* 7 bit address */
+ ADDR10_BIT /* 10 bit address */
+}MV_TWSI_ADDR_TYPE;
+
+/* This structure describes TWSI address. */
+typedef struct _mvTwsiAddr
+{
+ MV_U32 address; /* address */
+ MV_TWSI_ADDR_TYPE type; /* Address type */
+}MV_TWSI_ADDR;
+
+/* This structure describes a TWSI slave. */
+typedef struct _mvTwsiSlave
+{
+ MV_TWSI_ADDR slaveAddr;
+ MV_BOOL validOffset; /* whether the slave has an offset (e.g. an EEPROM) */
+ MV_U32 offset; /* offset in the slave. */
+ MV_BOOL moreThen256; /* whether the offset is bigger than 256 */
+}MV_TWSI_SLAVE;
+
+/* This enumerator describes TWSI protocol commands. */
+typedef enum _mvTwsiCmd
+{
+ MV_TWSI_WRITE, /* TWSI write command - 0 according to spec */
+ MV_TWSI_READ /* TWSI read command - 1 according to spec */
+}MV_TWSI_CMD;
+
+MV_STATUS mvTwsiStartBitSet(MV_U8 chanNum);
+MV_STATUS mvTwsiStopBitSet(MV_U8 chanNum);
+MV_STATUS mvTwsiAddrSet(MV_U8 chanNum, MV_TWSI_ADDR *twsiAddr, MV_TWSI_CMD command);
+
+MV_U32 mvTwsiInit(MV_U8 chanNum, MV_KHZ frequancy, MV_U32 Tclk, MV_TWSI_ADDR *twsiAddr, MV_BOOL generalCallEnable);
+MV_STATUS mvTwsiRead (MV_U8 chanNum, MV_TWSI_SLAVE *twsiSlave, MV_U8 *pBlock, MV_U32 blockSize);
+MV_STATUS mvTwsiWrite(MV_U8 chanNum, MV_TWSI_SLAVE *twsiSlave, MV_U8 *pBlock, MV_U32 blockSize);
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvTwsiH */
+
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiEeprom.S b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiEeprom.S
new file mode 100644
index 000000000..9d81ef2a6
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiEeprom.S
@@ -0,0 +1,457 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/* includes */
+#define MV_ASMLANGUAGE
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "boardEnv/mvBoardEnvSpec.h"
+#include "mvOsAsm.h"
+#include "mvTwsiSpec.h"
+#include "mvSysHwConfig.h"
+#include "ctrlEnv/sys/mvCpuIfRegs.h"
+#include "mvCommon.h"
+
+#define I2C_CH MV_BOARD_DIMM_I2C_CHANNEL
+
+/* defines */
+
+
+ .data
+ .global _i2cInit
+ .global _i2cRead
+
+ .text
+
+/*******************************************************************************
+* _i2cInit - Initialize TWSI interface
+*
+* DESCRIPTION:
+* The function performs TWSI interface initialization. It resets the
+* TWSI state machine and initializes its clock to ~100KHz, assuming a Tclock
+* of 166MHz (matching the baud-rate value programmed below).
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+_i2cInit:
+ mov r9, LR /* Save link register */
+ mov r0, #0 /* Make sure r0 is zero */
+
+ /* Reset the i2c Mechanism first */
+ MV_REG_WRITE_ASM (r0, r1, TWSI_SOFT_RESET_REG(I2C_CH))
+
+ bl _twsiDelay
+ bl _twsiDelay
+
+ /* Initialize the I2C mechanism, assuming a Tclock frequency */
+ /* of 166MHz. The I2C frequency in that case will be ~100KHz. */
+ /* For these settings, M = 9 and N = 3. Set the baud rate with */
+ /* the value 0x4b ((9 << 3) | 3), giving a frequency of ~100KHz */
+ /* (see the spec for details about the calculation of this value). */
+ mov r6, #(9 << 3 | 3)
+ MV_REG_WRITE_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+
+ /* Enable the I2C master */
+ /* Enable TWSI interrupt in main mask reg */
+ mov r6, #0xC4
+ MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+
+ /* Let the slow TWSI machine get used to the idea that it is enabled */
+ bl _twsiDelay
+
+
+ mov PC, r9 /* r9 is saved link register */
+
+/*******************************************************************************
+* _twsiDelay - Perform delay.
+*
+* DESCRIPTION:
+* The function performs a delay to allow the TWSI logic to stabilize.
+*
+* INPUT:
+* None.
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* None.
+*
+*******************************************************************************/
+_twsiDelay:
+ mov r10, #0x100000 /*was 0x400*/
+
+_twsiDelayLoop:
+ subs r10, r10, #1
+ bne _twsiDelayLoop
+
+ mov PC, LR
+
+/*******************************************************************************
+* _i2cRead - Read byte from I2C EEPROM device.
+*
+* DESCRIPTION:
+* The function returns a byte from I2C EEPROM device.
+* The EEPROM device is 7-bit address type.
+*
+* INPUT:
+* r4 has the DIMM0 base address with shift 1 bit to the left
+* r7 has the EEPROM offset
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* r4 returns '0' if address can not be read.
+* r7 has byte value in case read is successful.
+*
+*******************************************************************************/
+_i2cRead:
+ mov r9, LR /* Save link register */
+
+ /* Transmit the device address and desired offset within the EEPROM. */
+
+ /* Generate Start Bit */
+ MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ orr r6, r6, #TWSI_CONTROL_START_BIT
+ MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+
+ /* Wait for the interrupt flag (bit3) to be set */
+ mov r10, #0x50000
+loop_1:
+ subs r10, r10, #1
+ beq loop_1_timeout
+#ifdef MV78XX0
+ MV_REG_READ_ASM (r6, r1, CPU_INT_LOW_REG(I2C_CH))
+ tst r6, #BIT2
+#else
+ MV_REG_READ_ASM (r6, r1, CPU_MAIN_INT_CAUSE_REG)
+ tst r6, #BIT5
+#endif
+ beq loop_1
+
+loop_1_timeout:
+
+ /* Wait for the start bit to be reset by HW */
+ mov r10, #0x50000
+loop_2:
+ subs r10, r10, #1
+ beq loop_2_timeout
+ MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ tst r6, #TWSI_CONTROL_START_BIT
+ bne loop_2
+
+loop_2_timeout:
+
+ /* Wait for the status TWSI_START_CONDITION_TRA = 0x8 */
+ mov r10, #0x50000
+loop_3:
+ subs r10, r10, #1
+ beq loop_3_timeout
+ MV_REG_READ_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+ cmp r6, #0x08
+ bne loop_3
+
+loop_3_timeout:
+
+ /* writing the address of (DIMM0/1 << 1) with write indication */
+ mov r6, r4, LSL #1 /* Write operation address bit 0 must be 0 */
+ MV_REG_WRITE_ASM (r6, r1, TWSI_DATA_REG(I2C_CH))
+
+ bl _twsiDelay
+ /* Clear the interrupt flag */
+ MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ bic r6, r6, #TWSI_CONTROL_INT_FLAG_SET
+ MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ bl _twsiDelay
+
+ /* Waiting for the interrupt flag to be set which means that the
+ address has been transmitted */
+loop_4:
+#ifdef MV78XX0
+ MV_REG_READ_ASM (r6, r1, CPU_INT_LOW_REG(I2C_CH))
+ tst r6, #BIT2
+#else
+ MV_REG_READ_ASM (r6, r1, CPU_MAIN_INT_CAUSE_REG)
+ tst r6, #BIT5
+#endif
+ beq loop_4 /* if tst = 0, then the bit is not set yet */
+
+ /* Wait for status TWSI_ADDR_PLUS_WRITE_BIT_TRA_ACK_REC = 0x18 */
+ mov r10, #0x50000 /* Set r10 to 0x50000 =~ 328,000 */
+
+loop_5:
+ subs r10, r10, #1 /* timeout count down */
+ bne testStatus
+ mov r4, #0 /* r4 = 0 -> operation failed */
+ b exit_i2cRead /* Exit if timeout (No DIMM) */
+
+testStatus:
+ MV_REG_READ_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+ cmp r6, #0x18
+ bne loop_5
+
+
+ /* check if the offset is bigger than 256 bytes */
+ tst r7, #0x80000000
+ bne great_than_256
+
+ /* Write the offset to be read from the DIMM EEPROM */
+ MV_REG_WRITE_ASM (r7, r1, TWSI_DATA_REG(I2C_CH))
+
+ b after_offset
+
+great_than_256:
+ mov r10, r7, LSR #8
+ and r10, r10, #0xff
+ /* Write the offset0 to be read from the EEPROM */
+ MV_REG_WRITE_ASM (r10, r1, TWSI_DATA_REG(I2C_CH))
+
+ /* Clear the interrupt flag ==> signaling that the address can now
+ be transmitted */
+
+ bl _twsiDelay
+ MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ bic r6, r6, #TWSI_CONTROL_INT_FLAG_SET
+ MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ bl _twsiDelay
+
+ /* Wait for the interrupt to be set again ==> address has been transmitted */
+loop_6_1:
+#ifdef MV78XX0
+ MV_REG_READ_ASM (r6, r1, CPU_INT_LOW_REG(I2C_CH))
+ tst r6, #BIT2
+#else
+ MV_REG_READ_ASM (r6, r1, CPU_MAIN_INT_CAUSE_REG)
+ tst r6, #BIT5
+#endif
+ beq loop_6_1
+
+ /* Wait for status TWSI_MAS_TRAN_DATA_BYTE_ACK_REC = 0x28 */
+loop_7_1:
+ MV_REG_READ_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+ cmp r6, #0x28
+ bne loop_7_1
+
+
+ mov r10, r7
+ and r10, r10, #0xff
+ /* Write the offset1 to be read from the EEPROM */
+ MV_REG_WRITE_ASM (r10, r1, TWSI_DATA_REG(I2C_CH))
+
+
+
+after_offset:
+
+ /* Clear the interrupt flag ==> signaling that the address can now
+ be transmitted */
+
+ bl _twsiDelay
+ MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ bic r6, r6, #TWSI_CONTROL_INT_FLAG_SET
+ MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ bl _twsiDelay
+
+ /* Wait for the interrupt to be set again ==> address has been transmitted */
+loop_6:
+#ifdef MV78XX0
+ MV_REG_READ_ASM (r6, r1, CPU_INT_LOW_REG(I2C_CH))
+ tst r6, #BIT2
+#else
+ MV_REG_READ_ASM (r6, r1, CPU_MAIN_INT_CAUSE_REG)
+ tst r6, #BIT5
+#endif
+ beq loop_6
+
+ /* Wait for status TWSI_MAS_TRAN_DATA_BYTE_ACK_REC = 0x28 */
+loop_7:
+ MV_REG_READ_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+ cmp r6, #0x28
+ bne loop_7
+
+ /* Retransmit the device address with read indication to get the data */
+
+ /* generate a repeated start bit */
+ MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ orr r6, r6, #TWSI_CONTROL_START_BIT
+ MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+
+
+ /* Clear the interrupt flag ==> the start bit will be transmitted. */
+ bl _twsiDelay
+ MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ bic r6, r6, #TWSI_CONTROL_INT_FLAG_SET
+ MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ bl _twsiDelay
+
+ /* Wait for the interrupt flag (bit3) to be set */
+loop_9:
+#ifdef MV78XX0
+ MV_REG_READ_ASM (r6, r1, CPU_INT_LOW_REG(I2C_CH))
+ tst r6, #BIT2
+#else
+ MV_REG_READ_ASM (r6, r1, CPU_MAIN_INT_CAUSE_REG)
+ tst r6, #BIT5
+#endif
+ beq loop_9
+
+ /* Wait for the start bit to be reset by HW */
+loop_8:
+ MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ tst r6, #TWSI_CONTROL_START_BIT
+ bne loop_8
+
+ /* Wait for status TWSI_REPEATED_START_CONDITION_TRA = 0x10 */
+loop_10:
+ MV_REG_READ_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+ cmp r6, #0x10
+ bne loop_10
+
+ /* Writing the address of (DIMM0<<1) with read indication (bit0 is 1) */
+ mov r6, r4, LSL #1
+ orr r6, r6, #1 /* Read operation address bit 0 must be 1 */
+ MV_REG_WRITE_ASM (r6, r1, TWSI_DATA_REG(I2C_CH))
+
+ /* Clear the interrupt flag ==> the address will be transmitted */
+ bl _twsiDelay
+ MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ bic r6, r6, #TWSI_CONTROL_INT_FLAG_SET
+ MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ bl _twsiDelay
+
+ /* Wait for the interrupt flag (bit3) to be set as a result of
+ transmitting the address. */
+loop_11:
+#ifdef MV78XX0
+ MV_REG_READ_ASM (r6, r1, CPU_INT_LOW_REG(I2C_CH))
+ tst r6, #BIT2
+#else
+ MV_REG_READ_ASM (r6, r1, CPU_MAIN_INT_CAUSE_REG)
+ tst r6, #BIT5
+#endif
+ beq loop_11
+
+ /* Wait for status TWSI_ADDR_PLUS_READ_BIT_TRA_ACK_REC = 0x40 */
+loop_12:
+ MV_REG_READ_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+ cmp r6, #0x40
+ bne loop_12
+
+ /* Clear the interrupt flag and the Acknowledge bit */
+ bl _twsiDelay
+ MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ bic r6, r6, #(TWSI_CONTROL_INT_FLAG_SET | TWSI_CONTROL_ACK)
+ MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ bl _twsiDelay
+
+ /* Wait for the interrupt flag (bit3) to be set */
+loop_14:
+#ifdef MV78XX0
+ MV_REG_READ_ASM (r6, r1, CPU_INT_LOW_REG(I2C_CH))
+ tst r6, #BIT2
+#else
+ MV_REG_READ_ASM (r6, r1, CPU_MAIN_INT_CAUSE_REG)
+ tst r6, #BIT5
+#endif
+ beq loop_14
+
+ /* Wait for status TWSI_MAS_REC_READ_DATA_ACK_NOT_TRA = 0x58 */
+loop_15:
+ MV_REG_READ_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+ cmp r6, #0x58
+ bne loop_15
+
+ /* Store the data in r7. */
+ MV_REG_READ_ASM (r7, r1, TWSI_DATA_REG(I2C_CH))
+
+ /* Generate stop bit */
+ MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ orr r6, r6, #TWSI_CONTROL_STOP_BIT
+ MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+
+
+ /* Clear the interrupt flag */
+ bl _twsiDelay
+ MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ bic r6, r6, #TWSI_CONTROL_INT_FLAG_SET
+ MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ bl _twsiDelay
+
+ /* Wait for the stop bit to be reset by HW */
+loop_16:
+ MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+ tst r6, #TWSI_CONTROL_INT_FLAG_SET
+ bne loop_16
+
+exit_i2cRead:
+ mov PC, r9 /* r9 is saved link register */
diff --git a/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiSpec.h b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiSpec.h
new file mode 100644
index 000000000..d0c2b9e7b
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiSpec.h
@@ -0,0 +1,160 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+/****************************************/
+/* TWSI Registers */
+/****************************************/
+#ifndef __INCmvTwsiSpech
+#define __INCmvTwsiSpech
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* defines */
+#define TWSI_SLAVE_ADDR_REG(chanNum) (TWSI_SLAVE_BASE(chanNum)+ 0x00)
+
+#define TWSI_SLAVE_ADDR_GCE_ENA BIT0
+#define TWSI_SLAVE_ADDR_7BIT_OFFS 0x1
+#define TWSI_SLAVE_ADDR_7BIT_MASK (0xFF << TWSI_SLAVE_ADDR_7BIT_OFFS)
+#define TWSI_SLAVE_ADDR_10BIT_OFFS 0x7
+#define TWSI_SLAVE_ADDR_10BIT_MASK 0x300
+#define TWSI_SLAVE_ADDR_10BIT_CONST 0xF0
+
+
+#define TWSI_EXTENDED_SLAVE_ADDR_REG(chanNum) (TWSI_SLAVE_BASE(chanNum) + 0x10)
+#define TWSI_EXTENDED_SLAVE_OFFS 0
+#define TWSI_EXTENDED_SLAVE_MASK (0xFF << TWSI_EXTENDED_SLAVE_OFFS)
+
+
+#define TWSI_DATA_REG(chanNum) (TWSI_SLAVE_BASE(chanNum) + 0x04)
+#define TWSI_DATA_COMMAND_OFFS 0x0
+#define TWSI_DATA_COMMAND_MASK (0x1 << TWSI_DATA_COMMAND_OFFS)
+#define TWSI_DATA_COMMAND_WR (0x1 << TWSI_DATA_COMMAND_OFFS)
+#define TWSI_DATA_COMMAND_RD (0x0 << TWSI_DATA_COMMAND_OFFS)
+#define TWSI_DATA_ADDR_7BIT_OFFS 0x1
+#define TWSI_DATA_ADDR_7BIT_MASK (0xFF << TWSI_DATA_ADDR_7BIT_OFFS)
+#define TWSI_DATA_ADDR_10BIT_OFFS 0x7
+#define TWSI_DATA_ADDR_10BIT_MASK 0x300
+#define TWSI_DATA_ADDR_10BIT_CONST 0xF0
+
+
+#define TWSI_CONTROL_REG(chanNum) (TWSI_SLAVE_BASE(chanNum) + 0x08)
+#define TWSI_CONTROL_ACK BIT2
+#define TWSI_CONTROL_INT_FLAG_SET BIT3
+#define TWSI_CONTROL_STOP_BIT BIT4
+#define TWSI_CONTROL_START_BIT BIT5
+#define TWSI_CONTROL_ENA BIT6
+#define TWSI_CONTROL_INT_ENA BIT7
+
+
+#define TWSI_STATUS_BAUDE_RATE_REG(chanNum) (TWSI_SLAVE_BASE(chanNum) + 0x0c)
+#define TWSI_BAUD_RATE_N_OFFS 0
+#define TWSI_BAUD_RATE_N_MASK (0x7 << TWSI_BAUD_RATE_N_OFFS)
+#define TWSI_BAUD_RATE_M_OFFS 3
+#define TWSI_BAUD_RATE_M_MASK (0xF << TWSI_BAUD_RATE_M_OFFS)
+
+#define TWSI_SOFT_RESET_REG(chanNum) (TWSI_SLAVE_BASE(chanNum) + 0x1c)
+
+/* defines */
+#define TWSI_TIMEOUT_VALUE 0x500
+
+/* TWSI status codes */
+#define TWSI_BUS_ERROR 0x00
+#define TWSI_START_CON_TRA 0x08
+#define TWSI_REPEATED_START_CON_TRA 0x10
+#define TWSI_AD_PLS_WR_BIT_TRA_ACK_REC 0x18
+#define TWSI_AD_PLS_WR_BIT_TRA_ACK_NOT_REC 0x20
+#define TWSI_M_TRAN_DATA_BYTE_ACK_REC 0x28
+#define TWSI_M_TRAN_DATA_BYTE_ACK_NOT_REC 0x30
+#define TWSI_M_LOST_ARB_DUR_AD_OR_DATA_TRA 0x38
+#define TWSI_AD_PLS_RD_BIT_TRA_ACK_REC 0x40
+#define TWSI_AD_PLS_RD_BIT_TRA_ACK_NOT_REC 0x48
+#define TWSI_M_REC_RD_DATA_ACK_TRA 0x50
+#define TWSI_M_REC_RD_DATA_ACK_NOT_TRA 0x58
+#define TWSI_SLA_REC_AD_PLS_WR_BIT_ACK_TRA 0x60
+#define TWSI_M_LOST_ARB_DUR_AD_TRA_AD_IS_TRGT_TO_SLA_ACK_TRA_W 0x68
+#define TWSI_GNL_CALL_REC_ACK_TRA 0x70
+#define TWSI_M_LOST_ARB_DUR_AD_TRA_GNL_CALL_AD_REC_ACK_TRA 0x78
+#define TWSI_SLA_REC_WR_DATA_AF_REC_SLA_AD_ACK_TRAN 0x80
+#define TWSI_SLA_REC_WR_DATA_AF_REC_SLA_AD_ACK_NOT_TRAN 0x88
+#define TWSI_SLA_REC_WR_DATA_AF_REC_GNL_CALL_ACK_TRAN 0x90
+#define TWSI_SLA_REC_WR_DATA_AF_REC_GNL_CALL_ACK_NOT_TRAN 0x98
+#define TWSI_SLA_REC_STOP_OR_REPEATED_STRT_CON 0xA0
+#define TWSI_SLA_REC_AD_PLS_RD_BIT_ACK_TRA 0xA8
+#define TWSI_M_LOST_ARB_DUR_AD_TRA_AD_IS_TRGT_TO_SLA_ACK_TRA_R 0xB0
+#define TWSI_SLA_TRA_RD_DATA_ACK_REC 0xB8
+#define TWSI_SLA_TRA_RD_DATA_ACK_NOT_REC 0xC0
+#define TWSI_SLA_TRA_LAST_RD_DATA_ACK_REC 0xC8
+#define TWSI_SEC_AD_PLS_WR_BIT_TRA_ACK_REC 0xD0
+#define TWSI_SEC_AD_PLS_WR_BIT_TRA_ACK_NOT_REC 0xD8
+#define TWSI_SEC_AD_PLS_RD_BIT_TRA_ACK_REC 0xE0
+#define TWSI_SEC_AD_PLS_RD_BIT_TRA_ACK_NOT_REC 0xE8
+#define TWSI_NO_REL_STS_INT_FLAG_IS_KEPT_0 0xF8
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvTwsiSpech */
diff --git a/target/linux/generic/files/crypto/ocf/ocf-bench.c b/target/linux/generic/files/crypto/ocf/ocf-bench.c
new file mode 100644
index 000000000..f3fe9d0e9
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ocf-bench.c
@@ -0,0 +1,514 @@
+/*
+ * A loadable module that benchmarks the OCF crypto speed from kernel space.
+ *
+ * Copyright (C) 2004-2010 David McCullough <david_mccullough@mcafee.com>
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ * 1. distributions of this source code include the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ *
+ * 2. distributions in binary form include the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other associated materials;
+ *
+ * 3. the copyright holder's name is not used to endorse products
+ * built using this software without specific written permission.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this product
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ */
+
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <cryptodev.h>
+
+#ifdef I_HAVE_AN_XSCALE_WITH_INTEL_SDK
+#define BENCH_IXP_ACCESS_LIB 1
+#endif
+#ifdef BENCH_IXP_ACCESS_LIB
+#include <IxTypes.h>
+#include <IxOsBuffMgt.h>
+#include <IxNpeDl.h>
+#include <IxCryptoAcc.h>
+#include <IxQMgr.h>
+#include <IxOsServices.h>
+#include <IxOsCacheMMU.h>
+#endif
+
+/*
+ * support for access lib version 1.4
+ */
+#ifndef IX_MBUF_PRIV
+#define IX_MBUF_PRIV(x) ((x)->priv)
+#endif
+
+/*
+ * the number of simultaneously active requests
+ */
+static int request_q_len = 40;
+module_param(request_q_len, int, 0);
+MODULE_PARM_DESC(request_q_len, "Number of outstanding requests");
+
+/*
+ * how many requests we want to have processed
+ */
+static int request_num = 1024;
+module_param(request_num, int, 0);
+MODULE_PARM_DESC(request_num, "run for at least this many requests");
+
+/*
+ * the size of each request
+ */
+static int request_size = 1488;
+module_param(request_size, int, 0);
+MODULE_PARM_DESC(request_size, "size of each request");
+
+/*
+ * OCF batching of requests
+ */
+static int request_batch = 1;
+module_param(request_batch, int, 0);
+MODULE_PARM_DESC(request_batch, "enable OCF request batching");
+
+/*
+ * OCF immediate callback on completion
+ */
+static int request_cbimm = 1;
+module_param(request_cbimm, int, 0);
+MODULE_PARM_DESC(request_cbimm, "enable OCF immediate callback on completion");
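+
+/*
+ * Typical usage (a sketch; the module object name depends on the build and
+ * "ocf-bench.ko" is assumed here): load the module with the parameters above
+ * and read the results from the kernel log, e.g.
+ *
+ *   insmod ocf-bench.ko request_size=1024 request_num=4096
+ *   dmesg | tail
+ *
+ * The module deliberately fails to load (see the end of ocfbench_init) so the
+ * benchmark can be re-run without an rmmod in between.
+ */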
+
+/*
+ * a structure for each request
+ */
+typedef struct {
+ struct work_struct work;
+#ifdef BENCH_IXP_ACCESS_LIB
+ IX_MBUF mbuf;
+#endif
+ unsigned char *buffer;
+} request_t;
+
+static request_t *requests;
+
+static spinlock_t ocfbench_counter_lock;
+static int outstanding;
+static int total;
+
+/*************************************************************************/
+/*
+ * OCF benchmark routines
+ */
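+
+/*
+ * Flow of the benchmark (derived from the code below): ocfbench_init() primes
+ * request_q_len requests via ocf_request(); each completion comes back through
+ * ocf_cb(), which re-queues the same request_t on the workqueue until at least
+ * request_num requests have completed and at least one second has elapsed.
+ * "outstanding" counts requests still in flight, "total" counts completions;
+ * both are protected by ocfbench_counter_lock.
+ */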
+
+static uint64_t ocf_cryptoid;
+static unsigned long jstart, jstop;
+
+static int ocf_init(void);
+static int ocf_cb(struct cryptop *crp);
+static void ocf_request(void *arg);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void ocf_request_wq(struct work_struct *work);
+#endif
+
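+/*
+ * Open a single OCF session that chains AES-CBC encryption with SHA1-HMAC
+ * authentication, allowing either a hardware or a software provider.
+ */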
+static int
+ocf_init(void)
+{
+ int error;
+ struct cryptoini crie, cria;
+ struct cryptodesc crda, crde;
+
+ memset(&crie, 0, sizeof(crie));
+ memset(&cria, 0, sizeof(cria));
+ memset(&crde, 0, sizeof(crde));
+ memset(&crda, 0, sizeof(crda));
+
+ cria.cri_alg = CRYPTO_SHA1_HMAC;
+ cria.cri_klen = 20 * 8;
+ cria.cri_key = "0123456789abcdefghij";
+
+ //crie.cri_alg = CRYPTO_3DES_CBC;
+ crie.cri_alg = CRYPTO_AES_CBC;
+ crie.cri_klen = 24 * 8;
+ crie.cri_key = "0123456789abcdefghijklmn";
+
+ crie.cri_next = &cria;
+
+ error = crypto_newsession(&ocf_cryptoid, &crie,
+ CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
+ if (error) {
+ printk("crypto_newsession failed %d\n", error);
+ return -1;
+ }
+ return 0;
+}
+
+static int
+ocf_cb(struct cryptop *crp)
+{
+ request_t *r = (request_t *) crp->crp_opaque;
+ unsigned long flags;
+
+ if (crp->crp_etype)
+ printk("Error in OCF processing: %d\n", crp->crp_etype);
+ crypto_freereq(crp);
+ crp = NULL;
+
+ /* do all requests but take at least 1 second */
+ spin_lock_irqsave(&ocfbench_counter_lock, flags);
+ total++;
+ if (total > request_num && jstart + HZ < jiffies) {
+ outstanding--;
+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+
+ schedule_work(&r->work);
+ return 0;
+}
+
+
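+/*
+ * Build and dispatch one OCF request: crypto_getreq(2) returns a chain of two
+ * descriptors; the first (crde) is used for AES-CBC encryption and the second
+ * (crda) for an SHA1-HMAC over the same request_size bytes, with the digest
+ * injected right after the payload (hence the extra 64 bytes in crp_ilen).
+ */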
+static void
+ocf_request(void *arg)
+{
+ request_t *r = arg;
+ struct cryptop *crp = crypto_getreq(2);
+ struct cryptodesc *crde, *crda;
+ unsigned long flags;
+
+ if (!crp) {
+ spin_lock_irqsave(&ocfbench_counter_lock, flags);
+ outstanding--;
+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+ return;
+ }
+
+ crde = crp->crp_desc;
+ crda = crde->crd_next;
+
+ crda->crd_skip = 0;
+ crda->crd_flags = 0;
+ crda->crd_len = request_size;
+ crda->crd_inject = request_size;
+ crda->crd_alg = CRYPTO_SHA1_HMAC;
+ crda->crd_key = "0123456789abcdefghij";
+ crda->crd_klen = 20 * 8;
+
+ crde->crd_skip = 0;
+ crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT;
+ crde->crd_len = request_size;
+ crde->crd_inject = request_size;
+ //crde->crd_alg = CRYPTO_3DES_CBC;
+ crde->crd_alg = CRYPTO_AES_CBC;
+ crde->crd_key = "0123456789abcdefghijklmn";
+ crde->crd_klen = 24 * 8;
+
+ crp->crp_ilen = request_size + 64;
+ crp->crp_flags = 0;
+ if (request_batch)
+ crp->crp_flags |= CRYPTO_F_BATCH;
+ if (request_cbimm)
+ crp->crp_flags |= CRYPTO_F_CBIMM;
+ crp->crp_buf = (caddr_t) r->buffer;
+ crp->crp_callback = ocf_cb;
+ crp->crp_sid = ocf_cryptoid;
+ crp->crp_opaque = (caddr_t) r;
+ crypto_dispatch(crp);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void
+ocf_request_wq(struct work_struct *work)
+{
+ request_t *r = container_of(work, request_t, work);
+ ocf_request(r);
+}
+#endif
+
+static void
+ocf_done(void)
+{
+ crypto_freesession(ocf_cryptoid);
+}
+
+/*************************************************************************/
+#ifdef BENCH_IXP_ACCESS_LIB
+/*************************************************************************/
+/*
+ * CryptoAcc benchmark routines
+ */
+
+static IxCryptoAccCtx ixp_ctx;
+static UINT32 ixp_ctx_id;
+static IX_MBUF ixp_pri;
+static IX_MBUF ixp_sec;
+static int ixp_registered = 0;
+
+static void ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp,
+ IxCryptoAccStatus status);
+static void ixp_perform_cb(UINT32 ctx_id, IX_MBUF *sbufp, IX_MBUF *dbufp,
+ IxCryptoAccStatus status);
+static void ixp_request(void *arg);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void ixp_request_wq(struct work_struct *work);
+#endif
+
+static int
+ixp_init(void)
+{
+ IxCryptoAccStatus status;
+
+ ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
+ ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
+ ixp_ctx.cipherCtx.cipherKeyLen = 24;
+ ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
+ ixp_ctx.cipherCtx.cipherInitialVectorLen = IX_CRYPTO_ACC_DES_IV_64;
+ memcpy(ixp_ctx.cipherCtx.key.cipherKey, "0123456789abcdefghijklmn", 24);
+
+ ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
+ ixp_ctx.authCtx.authDigestLen = 12;
+ ixp_ctx.authCtx.aadLen = 0;
+ ixp_ctx.authCtx.authKeyLen = 20;
+ memcpy(ixp_ctx.authCtx.key.authKey, "0123456789abcdefghij", 20);
+
+ ixp_ctx.useDifferentSrcAndDestMbufs = 0;
+ ixp_ctx.operation = IX_CRYPTO_ACC_OP_ENCRYPT_AUTH ;
+
+ IX_MBUF_MLEN(&ixp_pri) = IX_MBUF_PKT_LEN(&ixp_pri) = 128;
+ IX_MBUF_MDATA(&ixp_pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
+ IX_MBUF_MLEN(&ixp_sec) = IX_MBUF_PKT_LEN(&ixp_sec) = 128;
+ IX_MBUF_MDATA(&ixp_sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
+
+ status = ixCryptoAccCtxRegister(&ixp_ctx, &ixp_pri, &ixp_sec,
+ ixp_register_cb, ixp_perform_cb, &ixp_ctx_id);
+
+ if (IX_CRYPTO_ACC_STATUS_SUCCESS == status) {
+ while (!ixp_registered)
+ schedule();
+ return ixp_registered < 0 ? -1 : 0;
+ }
+
+ printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
+ return -1;
+}
+
+static void
+ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
+{
+ if (bufp) {
+ IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
+ kfree(IX_MBUF_MDATA(bufp));
+ IX_MBUF_MDATA(bufp) = NULL;
+ }
+
+ if (IX_CRYPTO_ACC_STATUS_WAIT == status)
+ return;
+ if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
+ ixp_registered = 1;
+ else
+ ixp_registered = -1;
+}
+
+static void
+ixp_perform_cb(
+ UINT32 ctx_id,
+ IX_MBUF *sbufp,
+ IX_MBUF *dbufp,
+ IxCryptoAccStatus status)
+{
+ request_t *r = NULL;
+ unsigned long flags;
+
+ /* do all requests but take at least 1 second */
+ spin_lock_irqsave(&ocfbench_counter_lock, flags);
+ total++;
+ if (total > request_num && jstart + HZ < jiffies) {
+ outstanding--;
+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+ return;
+ }
+
+ if (!sbufp || !(r = IX_MBUF_PRIV(sbufp))) {
+		printk("ixp_perform_cb: missing source mbuf or private data %p %p\n", sbufp, r);
+ outstanding--;
+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+
+ schedule_work(&r->work);
+}
+
+static void
+ixp_request(void *arg)
+{
+ request_t *r = arg;
+ IxCryptoAccStatus status;
+ unsigned long flags;
+
+ memset(&r->mbuf, 0, sizeof(r->mbuf));
+ IX_MBUF_MLEN(&r->mbuf) = IX_MBUF_PKT_LEN(&r->mbuf) = request_size + 64;
+ IX_MBUF_MDATA(&r->mbuf) = r->buffer;
+ IX_MBUF_PRIV(&r->mbuf) = r;
+ status = ixCryptoAccAuthCryptPerform(ixp_ctx_id, &r->mbuf, NULL,
+ 0, request_size, 0, request_size, request_size, r->buffer);
+ if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
+ printk("status1 = %d\n", status);
+ spin_lock_irqsave(&ocfbench_counter_lock, flags);
+ outstanding--;
+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+ return;
+ }
+ return;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void
+ixp_request_wq(struct work_struct *work)
+{
+ request_t *r = container_of(work, request_t, work);
+ ixp_request(r);
+}
+#endif
+
+static void
+ixp_done(void)
+{
+ /* we should free the session here but I am lazy :-) */
+}
+
+/*************************************************************************/
+#endif /* BENCH_IXP_ACCESS_LIB */
+/*************************************************************************/
+
+int
+ocfbench_init(void)
+{
+ int i;
+ unsigned long mbps;
+ unsigned long flags;
+
+ printk("Crypto Speed tests\n");
+
+ requests = kmalloc(sizeof(request_t) * request_q_len, GFP_KERNEL);
+ if (!requests) {
+ printk("malloc failed\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < request_q_len; i++) {
+		/* each buffer gets extra room after the payload for the returned HMAC digest (64 bytes used, 128 allocated) */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ INIT_WORK(&requests[i].work, ocf_request_wq);
+#else
+ INIT_WORK(&requests[i].work, ocf_request, &requests[i]);
+#endif
+ requests[i].buffer = kmalloc(request_size + 128, GFP_DMA);
+ if (!requests[i].buffer) {
+ printk("malloc failed\n");
+ return -EINVAL;
+ }
+ memset(requests[i].buffer, '0' + i, request_size + 128);
+ }
+
+ /*
+ * OCF benchmark
+ */
+ printk("OCF: testing ...\n");
+ if (ocf_init() == -1)
+ return -EINVAL;
+
+ spin_lock_init(&ocfbench_counter_lock);
+ total = outstanding = 0;
+ jstart = jiffies;
+ for (i = 0; i < request_q_len; i++) {
+ spin_lock_irqsave(&ocfbench_counter_lock, flags);
+ outstanding++;
+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+ ocf_request(&requests[i]);
+ }
+ while (outstanding > 0)
+ schedule();
+ jstop = jiffies;
+
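+	/*
+	 * mbps ends up in kbit/s: total bits transferred divided by the elapsed
+	 * time in milliseconds; the printk below renders it as Mbit/s with
+	 * three decimal places.
+	 */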
+ mbps = 0;
+ if (jstop > jstart) {
+ mbps = (unsigned long) total * (unsigned long) request_size * 8;
+ mbps /= ((jstop - jstart) * 1000) / HZ;
+ }
+ printk("OCF: %d requests of %d bytes in %d jiffies (%d.%03d Mbps)\n",
+ total, request_size, (int)(jstop - jstart),
+ ((int)mbps) / 1000, ((int)mbps) % 1000);
+ ocf_done();
+
+#ifdef BENCH_IXP_ACCESS_LIB
+ /*
+ * IXP benchmark
+ */
+ printk("IXP: testing ...\n");
+ ixp_init();
+ total = outstanding = 0;
+ jstart = jiffies;
+ for (i = 0; i < request_q_len; i++) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ INIT_WORK(&requests[i].work, ixp_request_wq);
+#else
+ INIT_WORK(&requests[i].work, ixp_request, &requests[i]);
+#endif
+ spin_lock_irqsave(&ocfbench_counter_lock, flags);
+ outstanding++;
+ spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+ ixp_request(&requests[i]);
+ }
+ while (outstanding > 0)
+ schedule();
+ jstop = jiffies;
+
+ mbps = 0;
+ if (jstop > jstart) {
+ mbps = (unsigned long) total * (unsigned long) request_size * 8;
+ mbps /= ((jstop - jstart) * 1000) / HZ;
+ }
+ printk("IXP: %d requests of %d bytes in %d jiffies (%d.%03d Mbps)\n",
+	       total, request_size, (int)(jstop - jstart),
+ ((int)mbps) / 1000, ((int)mbps) % 1000);
+ ixp_done();
+#endif /* BENCH_IXP_ACCESS_LIB */
+
+ for (i = 0; i < request_q_len; i++)
+ kfree(requests[i].buffer);
+ kfree(requests);
+ return -EINVAL; /* always fail to load so it can be re-run quickly ;-) */
+}
+
+static void __exit ocfbench_exit(void)
+{
+}
+
+module_init(ocfbench_init);
+module_exit(ocfbench_exit);
+
+MODULE_LICENSE("BSD");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("Benchmark various in-kernel crypto speeds");
diff --git a/target/linux/generic/files/crypto/ocf/ocf-compat.h b/target/linux/generic/files/crypto/ocf/ocf-compat.h
new file mode 100644
index 000000000..4ad12232b
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ocf-compat.h
@@ -0,0 +1,372 @@
+#ifndef _BSD_COMPAT_H_
+#define _BSD_COMPAT_H_ 1
+/****************************************************************************/
+/*
+ * Provide compat routines for older linux kernels and BSD kernels
+ *
+ * Written by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2010 David McCullough <david_mccullough@mcafee.com>
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ * 1. distributions of this source code include the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ *
+ * 2. distributions in binary form include the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other associated materials;
+ *
+ * 3. the copyright holder's name is not used to endorse products
+ * built using this software without specific written permission.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this file
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ */
+/****************************************************************************/
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+
+/*
+ * fake some BSD driver interface stuff specifically for OCF use
+ */
+
+typedef struct ocf_device *device_t;
+
+typedef struct {
+ int (*cryptodev_newsession)(device_t dev, u_int32_t *sidp, struct cryptoini *cri);
+ int (*cryptodev_freesession)(device_t dev, u_int64_t tid);
+ int (*cryptodev_process)(device_t dev, struct cryptop *crp, int hint);
+ int (*cryptodev_kprocess)(device_t dev, struct cryptkop *krp, int hint);
+} device_method_t;
+#define DEVMETHOD(id, func) id: func
+
+struct ocf_device {
+ char name[32]; /* the driver name */
+ char nameunit[32]; /* the driver name + HW instance */
+ int unit;
+ device_method_t methods;
+ void *softc;
+};
+
+#define CRYPTODEV_NEWSESSION(dev, sid, cri) \
+ ((*(dev)->methods.cryptodev_newsession)(dev,sid,cri))
+#define CRYPTODEV_FREESESSION(dev, sid) \
+ ((*(dev)->methods.cryptodev_freesession)(dev, sid))
+#define CRYPTODEV_PROCESS(dev, crp, hint) \
+ ((*(dev)->methods.cryptodev_process)(dev, crp, hint))
+#define CRYPTODEV_KPROCESS(dev, krp, hint) \
+ ((*(dev)->methods.cryptodev_kprocess)(dev, krp, hint))
+
+#define device_get_name(dev) ((dev)->name)
+#define device_get_nameunit(dev) ((dev)->nameunit)
+#define device_get_unit(dev) ((dev)->unit)
+#define device_get_softc(dev) ((dev)->softc)
+
+#define softc_device_decl \
+ struct ocf_device _device; \
+ device_t
+
+#define softc_device_init(_sc, _name, _unit, _methods) \
+ if (1) {\
+ strncpy((_sc)->_device.name, _name, sizeof((_sc)->_device.name) - 1); \
+		snprintf((_sc)->_device.nameunit, sizeof((_sc)->_device.nameunit), "%s%d", _name, _unit); \
+ (_sc)->_device.unit = _unit; \
+ (_sc)->_device.methods = _methods; \
+ (_sc)->_device.softc = (void *) _sc; \
+ *(device_t *)((softc_get_device(_sc))+1) = &(_sc)->_device; \
+ } else
+
+#define softc_get_device(_sc) (&(_sc)->_device)
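+
+/*
+ * Minimal usage sketch (see ocfnull.c elsewhere in this patch for a real
+ * example): a driver embeds softc_device_decl in its softc, fills a
+ * device_method_t table with DEVMETHOD() entries, calls softc_device_init()
+ * once, and then passes softc_get_device() to crypto_get_driverid() to
+ * register itself with OCF.
+ */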
+
+/*
+ * iomem support for 2.4 and 2.6 kernels
+ */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define ocf_iomem_t unsigned long
+
+/*
+ * implement simple workqueue like support for older kernels
+ */
+
+#include <linux/tqueue.h>
+
+#define work_struct tq_struct
+
+#define INIT_WORK(wp, fp, ap) \
+ do { \
+ (wp)->sync = 0; \
+ (wp)->routine = (fp); \
+ (wp)->data = (ap); \
+ } while (0)
+
+#define schedule_work(wp) \
+ do { \
+ queue_task((wp), &tq_immediate); \
+ mark_bh(IMMEDIATE_BH); \
+ } while (0)
+
+#define flush_scheduled_work() run_task_queue(&tq_immediate)
+
+#else
+#define ocf_iomem_t void __iomem *
+
+#include <linux/workqueue.h>
+
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/fdtable.h>
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
+#define files_fdtable(files) (files)
+#endif
+
+#ifdef MODULE_PARM
+#undef module_param /* just in case */
+#define module_param(a,b,c) MODULE_PARM(a,"i")
+#endif
+
+#define bzero(s,l) memset(s,0,l)
+#define bcopy(s,d,l) memcpy(d,s,l)
+#define bcmp(x, y, l) memcmp(x,y,l)
+
+#define MIN(x,y) ((x) < (y) ? (x) : (y))
+
+#define device_printf(dev, a...) ({ \
+ printk("%s: ", device_get_nameunit(dev)); printk(a); \
+ })
+
+#undef printf
+#define printf(fmt...) printk(fmt)
+
+#define KASSERT(c,p) if (!(c)) { printk p ; } else
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define ocf_daemonize(str) \
+ daemonize(); \
+ spin_lock_irq(&current->sigmask_lock); \
+ sigemptyset(&current->blocked); \
+ recalc_sigpending(current); \
+ spin_unlock_irq(&current->sigmask_lock); \
+ sprintf(current->comm, str);
+#else
+#define ocf_daemonize(str) daemonize(str);
+#endif
+
+#define TAILQ_INSERT_TAIL(q,d,m) list_add_tail(&(d)->m, (q))
+#define TAILQ_EMPTY(q) list_empty(q)
+#define TAILQ_FOREACH(v, q, m) list_for_each_entry(v, q, m)
+
+#define read_random(p,l) get_random_bytes(p,l)
+
+#define DELAY(x) ((x) > 2000 ? mdelay((x)/1000) : udelay(x))
+#define strtoul simple_strtoul
+
+#define pci_get_vendor(dev) ((dev)->vendor)
+#define pci_get_device(dev) ((dev)->device)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define pci_set_consistent_dma_mask(dev, mask) (0)
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
+#define pci_dma_sync_single_for_cpu pci_dma_sync_single
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0x00000000ffffffffULL
+#endif
+
+#ifndef htole32
+#define htole32(x) cpu_to_le32(x)
+#endif
+#ifndef htobe32
+#define htobe32(x) cpu_to_be32(x)
+#endif
+#ifndef htole16
+#define htole16(x) cpu_to_le16(x)
+#endif
+#ifndef htobe16
+#define htobe16(x) cpu_to_be16(x)
+#endif
+
+/* older kernels don't have these */
+
+#include <asm/irq.h>
+#if !defined(IRQ_NONE) && !defined(IRQ_RETVAL)
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_WAKE_THREAD
+#define IRQ_RETVAL
+#define irqreturn_t void
+typedef irqreturn_t (*irq_handler_t)(int irq, void *arg, struct pt_regs *regs);
+#endif
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+# define strlcpy(dest,src,len) \
+ ({strncpy(dest,src,(len)-1); ((char *)dest)[(len)-1] = '\0'; })
+#endif
+
+#ifndef MAX_ERRNO
+#define MAX_ERRNO 4095
+#endif
+#ifndef IS_ERR_VALUE
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,5)
+#include <linux/err.h>
+#endif
+#ifndef IS_ERR_VALUE
+#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
+#endif
+#endif
+
+/*
+ * common debug for all
+ */
+#if 1
+#define dprintk(a...) do { if (debug) printk(a); } while(0)
+#else
+#define dprintk(a...)
+#endif
+
+#ifndef SLAB_ATOMIC
+/* Changed in 2.6.20, must use GFP_ATOMIC now */
+#define SLAB_ATOMIC GFP_ATOMIC
+#endif
+
+/*
+ * need some additional support for older kernels
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,2)
+#define pci_register_driver_compat(driver, rc) \
+ do { \
+ if ((rc) > 0) { \
+ (rc) = 0; \
+ } else if (rc == 0) { \
+ (rc) = -ENODEV; \
+ } else { \
+ pci_unregister_driver(driver); \
+ } \
+ } while (0)
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
+#define pci_register_driver_compat(driver,rc) ((rc) = (rc) < 0 ? (rc) : 0)
+#else
+#define pci_register_driver_compat(driver,rc)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+
+#include <linux/mm.h>
+#include <asm/scatterlist.h>
+
+static inline void sg_set_page(struct scatterlist *sg, struct page *page,
+ unsigned int len, unsigned int offset)
+{
+ sg->page = page;
+ sg->offset = offset;
+ sg->length = len;
+}
+
+static inline void *sg_virt(struct scatterlist *sg)
+{
+ return page_address(sg->page) + sg->offset;
+}
+
+#define sg_init_table(sg, n)
+
+#define sg_mark_end(sg)
+
+#endif
+
+#ifndef late_initcall
+#define late_initcall(init) module_init(init)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) || !defined(CONFIG_SMP)
+#define ocf_for_each_cpu(cpu) for ((cpu) = 0; (cpu) == 0; (cpu)++)
+#else
+#define ocf_for_each_cpu(cpu) for_each_present_cpu(cpu)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+#include <linux/sched.h>
+#define kill_proc(p,s,v) send_sig(s,find_task_by_vpid(p),0)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
+
+struct ocf_thread {
+ struct task_struct *task;
+ int (*func)(void *arg);
+ void *arg;
+};
+
+/* thread startup helper func */
+static inline int ocf_run_thread(void *arg)
+{
+ struct ocf_thread *t = (struct ocf_thread *) arg;
+ if (!t)
+ return -1; /* very bad */
+ t->task = current;
+ daemonize();
+ spin_lock_irq(&current->sigmask_lock);
+ sigemptyset(&current->blocked);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+ return (*t->func)(t->arg);
+}
+
+#define kthread_create(f,a,fmt...) \
+ ({ \
+ struct ocf_thread t; \
+ pid_t p; \
+ t.task = NULL; \
+ t.func = (f); \
+ t.arg = (a); \
+ p = kernel_thread(ocf_run_thread, &t, CLONE_FS|CLONE_FILES); \
+ while (p != (pid_t) -1 && t.task == NULL) \
+ schedule(); \
+ if (t.task) \
+ snprintf(t.task->comm, sizeof(t.task->comm), fmt); \
+ (t.task); \
+ })
+
+#define kthread_bind(t,cpu) /**/
+
+#define kthread_should_stop() (strcmp(current->comm, "stopping") == 0)
+
+#define kthread_stop(t) \
+ ({ \
+ strcpy((t)->comm, "stopping"); \
+ kill_proc((t)->pid, SIGTERM, 1); \
+ do { \
+ schedule(); \
+ } while (kill_proc((t)->pid, SIGTERM, 1) == 0); \
+ })
+
+#else
+#include <linux/kthread.h>
+#endif
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
+#define skb_frag_page(x) ((x)->page)
+#endif
+
+#endif /* __KERNEL__ */
+
+/****************************************************************************/
+#endif /* _BSD_COMPAT_H_ */
diff --git a/target/linux/generic/files/crypto/ocf/ocfnull/Makefile b/target/linux/generic/files/crypto/ocf/ocfnull/Makefile
new file mode 100644
index 000000000..044bcacb7
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ocfnull/Makefile
@@ -0,0 +1,12 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_OCFNULL) += ocfnull.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/..
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
+
diff --git a/target/linux/generic/files/crypto/ocf/ocfnull/ocfnull.c b/target/linux/generic/files/crypto/ocf/ocfnull/ocfnull.c
new file mode 100644
index 000000000..9cf3f6e02
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ocfnull/ocfnull.c
@@ -0,0 +1,204 @@
+/*
+ * An OCF module for determining the cost of crypto versus the cost of
+ * IPSec processing outside of OCF. This modules gives us the effect of
+ * zero cost encryption, of course you will need to run it at both ends
+ * since it does no crypto at all.
+ *
+ * Written by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ * 1. distributions of this source code include the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ *
+ * 2. distributions in binary form include the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other associated materials;
+ *
+ * 3. the copyright holder's name is not used to endorse products
+ * built using this software without specific written permission.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this product
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+
+#include <cryptodev.h>
+#include <uio.h>
+
+static int32_t null_id = -1;
+static u_int32_t null_sesnum = 0;
+
+static int null_process(device_t, struct cryptop *, int);
+static int null_newsession(device_t, u_int32_t *, struct cryptoini *);
+static int null_freesession(device_t, u_int64_t);
+
+#define debug ocfnull_debug
+int ocfnull_debug = 0;
+module_param(ocfnull_debug, int, 0644);
+MODULE_PARM_DESC(ocfnull_debug, "Enable debug");
+
+/*
+ * dummy device structure
+ */
+
+static struct {
+ softc_device_decl sc_dev;
+} nulldev;
+
+static device_method_t null_methods = {
+ /* crypto device methods */
+ DEVMETHOD(cryptodev_newsession, null_newsession),
+ DEVMETHOD(cryptodev_freesession,null_freesession),
+ DEVMETHOD(cryptodev_process, null_process),
+};
+
+/*
+ * Generate a new software session.
+ */
+static int
+null_newsession(device_t arg, u_int32_t *sid, struct cryptoini *cri)
+{
+ dprintk("%s()\n", __FUNCTION__);
+ if (sid == NULL || cri == NULL) {
+ dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ if (null_sesnum == 0)
+ null_sesnum++;
+ *sid = null_sesnum++;
+ return 0;
+}
+
+
+/*
+ * Free a session.
+ */
+static int
+null_freesession(device_t arg, u_int64_t tid)
+{
+ u_int32_t sid = CRYPTO_SESID2LID(tid);
+
+ dprintk("%s()\n", __FUNCTION__);
+ if (sid > null_sesnum) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ /* Silently accept and return */
+ if (sid == 0)
+ return 0;
+ return 0;
+}
+
+
+/*
+ * Process a request.
+ */
+static int
+null_process(device_t arg, struct cryptop *crp, int hint)
+{
+ unsigned int lid;
+
+ dprintk("%s()\n", __FUNCTION__);
+
+ /* Sanity check */
+ if (crp == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ crp->crp_etype = 0;
+
+ if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+
+ /*
+ * find the session we are using
+ */
+
+ lid = crp->crp_sid & 0xffffffff;
+ if (lid >= null_sesnum || lid == 0) {
+ crp->crp_etype = ENOENT;
+ dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
+ goto done;
+ }
+
+done:
+ crypto_done(crp);
+ return 0;
+}
+
+
+/*
+ * our driver startup and shutdown routines
+ */
+
+static int
+null_init(void)
+{
+ dprintk("%s(%p)\n", __FUNCTION__, null_init);
+
+ memset(&nulldev, 0, sizeof(nulldev));
+ softc_device_init(&nulldev, "ocfnull", 0, null_methods);
+
+ null_id = crypto_get_driverid(softc_get_device(&nulldev),
+ CRYPTOCAP_F_HARDWARE);
+ if (null_id < 0)
+ panic("ocfnull: crypto device cannot initialize!");
+
+#define REGISTER(alg) \
+ crypto_register(null_id,alg,0,0)
+ REGISTER(CRYPTO_DES_CBC);
+ REGISTER(CRYPTO_3DES_CBC);
+ REGISTER(CRYPTO_RIJNDAEL128_CBC);
+ REGISTER(CRYPTO_MD5);
+ REGISTER(CRYPTO_SHA1);
+ REGISTER(CRYPTO_MD5_HMAC);
+ REGISTER(CRYPTO_SHA1_HMAC);
+#undef REGISTER
+
+ return 0;
+}
+
+static void
+null_exit(void)
+{
+ dprintk("%s()\n", __FUNCTION__);
+ crypto_unregister_all(null_id);
+ null_id = -1;
+}
+
+module_init(null_init);
+module_exit(null_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("ocfnull - claims a lot but does nothing");
diff --git a/target/linux/generic/files/crypto/ocf/pasemi/Makefile b/target/linux/generic/files/crypto/ocf/pasemi/Makefile
new file mode 100644
index 000000000..b0a3980f2
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/pasemi/Makefile
@@ -0,0 +1,12 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_PASEMI) += pasemi.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
+
diff --git a/target/linux/generic/files/crypto/ocf/pasemi/pasemi.c b/target/linux/generic/files/crypto/ocf/pasemi/pasemi.c
new file mode 100644
index 000000000..1b4333cdd
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/pasemi/pasemi.c
@@ -0,0 +1,1007 @@
+/*
+ * Copyright (C) 2007 PA Semi, Inc
+ *
+ * Driver for the PA Semi PWRficient DMA Crypto Engine
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <asm/scatterlist.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <cryptodev.h>
+#include <uio.h>
+#include "pasemi_fnu.h"
+
+#define DRV_NAME "pasemi"
+
+#define TIMER_INTERVAL 1000
+
+static void __devexit pasemi_dma_remove(struct pci_dev *pdev);
+static struct pasdma_status volatile * dma_status;
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Enable debug");
+
+static void pasemi_desc_start(struct pasemi_desc *desc, u64 hdr)
+{
+ desc->postop = 0;
+ desc->quad[0] = hdr;
+ desc->quad_cnt = 1;
+ desc->size = 1;
+}
+
+static void pasemi_desc_build(struct pasemi_desc *desc, u64 val)
+{
+ desc->quad[desc->quad_cnt++] = val;
+ desc->size = (desc->quad_cnt + 1) / 2;
+}
+
+static void pasemi_desc_hdr(struct pasemi_desc *desc, u64 hdr)
+{
+ desc->quad[0] |= hdr;
+}
+
+static int pasemi_desc_size(struct pasemi_desc *desc)
+{
+ return desc->size;
+}
+
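+/*
+ * Copy a software descriptor into the hardware TX ring.  Each ring slot holds
+ * two u64 quads (16 bytes), so desc->size (computed as (quad_cnt + 1) / 2 in
+ * pasemi_desc_build()) is the number of ring slots the descriptor occupies;
+ * an odd trailing quad is zero-padded.
+ */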
+static void pasemi_ring_add_desc(
+ struct pasemi_fnu_txring *ring,
+ struct pasemi_desc *desc,
+ struct cryptop *crp) {
+ int i;
+ int ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
+
+ TX_DESC_INFO(ring, ring->next_to_fill).desc_size = desc->size;
+ TX_DESC_INFO(ring, ring->next_to_fill).desc_postop = desc->postop;
+ TX_DESC_INFO(ring, ring->next_to_fill).cf_crp = crp;
+
+ for (i = 0; i < desc->quad_cnt; i += 2) {
+ ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
+ ring->desc[ring_index] = desc->quad[i];
+ ring->desc[ring_index + 1] = desc->quad[i + 1];
+ ring->next_to_fill++;
+ }
+
+ if (desc->quad_cnt & 1)
+ ring->desc[ring_index + 1] = 0;
+}
+
+static void pasemi_ring_incr(struct pasemi_softc *sc, int chan_index, int incr)
+{
+ out_le32(sc->dma_regs + PAS_DMA_TXCHAN_INCR(sc->base_chan + chan_index),
+ incr);
+}
+
+/*
+ * Generate a new software session.
+ */
+static int
+pasemi_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
+{
+ struct cryptoini *c, *encini = NULL, *macini = NULL;
+ struct pasemi_softc *sc = device_get_softc(dev);
+ struct pasemi_session *ses = NULL, **sespp;
+ int sesn, blksz = 0;
+ u64 ccmd = 0;
+ unsigned long flags;
+ struct pasemi_desc init_desc;
+ struct pasemi_fnu_txring *txring;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+ if (sidp == NULL || cri == NULL || sc == NULL) {
+ DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
+ return -EINVAL;
+ }
+ for (c = cri; c != NULL; c = c->cri_next) {
+ if (ALG_IS_SIG(c->cri_alg)) {
+ if (macini)
+ return -EINVAL;
+ macini = c;
+ } else if (ALG_IS_CIPHER(c->cri_alg)) {
+ if (encini)
+ return -EINVAL;
+ encini = c;
+ } else {
+ DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
+ return -EINVAL;
+ }
+ }
+ if (encini == NULL && macini == NULL)
+ return -EINVAL;
+ if (encini) {
+ /* validate key length */
+ switch (encini->cri_alg) {
+ case CRYPTO_DES_CBC:
+ if (encini->cri_klen != 64)
+ return -EINVAL;
+ ccmd = DMA_CALGO_DES;
+ break;
+ case CRYPTO_3DES_CBC:
+ if (encini->cri_klen != 192)
+ return -EINVAL;
+ ccmd = DMA_CALGO_3DES;
+ break;
+ case CRYPTO_AES_CBC:
+ if (encini->cri_klen != 128 &&
+ encini->cri_klen != 192 &&
+ encini->cri_klen != 256)
+ return -EINVAL;
+ ccmd = DMA_CALGO_AES;
+ break;
+ case CRYPTO_ARC4:
+ if (encini->cri_klen != 128)
+ return -EINVAL;
+ ccmd = DMA_CALGO_ARC;
+ break;
+ default:
+ DPRINTF("UNKNOWN encini->cri_alg %d\n",
+ encini->cri_alg);
+ return -EINVAL;
+ }
+ }
+
+ if (macini) {
+ switch (macini->cri_alg) {
+ case CRYPTO_MD5:
+ case CRYPTO_MD5_HMAC:
+ blksz = 16;
+ break;
+ case CRYPTO_SHA1:
+ case CRYPTO_SHA1_HMAC:
+ blksz = 20;
+ break;
+ default:
+ DPRINTF("UNKNOWN macini->cri_alg %d\n",
+ macini->cri_alg);
+ return -EINVAL;
+ }
+ if (((macini->cri_klen + 7) / 8) > blksz) {
+ DPRINTF("key length %d bigger than blksize %d not supported\n",
+ ((macini->cri_klen + 7) / 8), blksz);
+ return -EINVAL;
+ }
+ }
+
+ for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
+ if (sc->sc_sessions[sesn] == NULL) {
+ sc->sc_sessions[sesn] = (struct pasemi_session *)
+ kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
+ ses = sc->sc_sessions[sesn];
+ break;
+ } else if (sc->sc_sessions[sesn]->used == 0) {
+ ses = sc->sc_sessions[sesn];
+ break;
+ }
+ }
+
+ if (ses == NULL) {
+ sespp = (struct pasemi_session **)
+ kzalloc(sc->sc_nsessions * 2 *
+ sizeof(struct pasemi_session *), GFP_ATOMIC);
+ if (sespp == NULL)
+ return -ENOMEM;
+ memcpy(sespp, sc->sc_sessions,
+ sc->sc_nsessions * sizeof(struct pasemi_session *));
+ kfree(sc->sc_sessions);
+ sc->sc_sessions = sespp;
+ sesn = sc->sc_nsessions;
+ ses = sc->sc_sessions[sesn] = (struct pasemi_session *)
+ kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
+ if (ses == NULL)
+ return -ENOMEM;
+ sc->sc_nsessions *= 2;
+ }
+
+ ses->used = 1;
+
+ ses->dma_addr = pci_map_single(sc->dma_pdev, (void *) ses->civ,
+ sizeof(struct pasemi_session), DMA_TO_DEVICE);
+
+ /* enter the channel scheduler */
+ spin_lock_irqsave(&sc->sc_chnlock, flags);
+
+ /* ARC4 has to be processed by the even channel */
+ if (encini && (encini->cri_alg == CRYPTO_ARC4))
+ ses->chan = sc->sc_lastchn & ~1;
+ else
+ ses->chan = sc->sc_lastchn;
+ sc->sc_lastchn = (sc->sc_lastchn + 1) % sc->sc_num_channels;
+
+ spin_unlock_irqrestore(&sc->sc_chnlock, flags);
+
+ txring = &sc->tx[ses->chan];
+
+ if (encini) {
+ ses->ccmd = ccmd;
+ ses->keysz = (encini->cri_klen - 63) / 64;
+ memcpy(ses->key, encini->cri_key, (ses->keysz + 1) * 8);
+
+ pasemi_desc_start(&init_desc,
+ XCT_CTRL_HDR(ses->chan, (encini && macini) ? 0x68 : 0x40, DMA_FN_CIV0));
+ pasemi_desc_build(&init_desc,
+ XCT_FUN_SRC_PTR((encini && macini) ? 0x68 : 0x40, ses->dma_addr));
+ }
+ if (macini) {
+ if (macini->cri_alg == CRYPTO_MD5_HMAC ||
+ macini->cri_alg == CRYPTO_SHA1_HMAC)
+ memcpy(ses->hkey, macini->cri_key, blksz);
+ else {
+			/* Load initialization constants (RFC 1321, 3174) */
+ ses->hiv[0] = 0x67452301efcdab89ULL;
+ ses->hiv[1] = 0x98badcfe10325476ULL;
+ ses->hiv[2] = 0xc3d2e1f000000000ULL;
+ }
+ ses->hseq = 0ULL;
+ }
+
+ spin_lock_irqsave(&txring->fill_lock, flags);
+
+ if (((txring->next_to_fill + pasemi_desc_size(&init_desc)) -
+ txring->next_to_clean) > TX_RING_SIZE) {
+ spin_unlock_irqrestore(&txring->fill_lock, flags);
+ return ERESTART;
+ }
+
+ if (encini) {
+ pasemi_ring_add_desc(txring, &init_desc, NULL);
+ pasemi_ring_incr(sc, ses->chan,
+ pasemi_desc_size(&init_desc));
+ }
+
+ txring->sesn = sesn;
+ spin_unlock_irqrestore(&txring->fill_lock, flags);
+
+ *sidp = PASEMI_SID(sesn);
+ return 0;
+}
+
+/*
+ * Deallocate a session.
+ */
+static int
+pasemi_freesession(device_t dev, u_int64_t tid)
+{
+ struct pasemi_softc *sc = device_get_softc(dev);
+ int session;
+ u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ if (sc == NULL)
+ return -EINVAL;
+ session = PASEMI_SESSION(sid);
+ if (session >= sc->sc_nsessions || !sc->sc_sessions[session])
+ return -EINVAL;
+
+ pci_unmap_single(sc->dma_pdev,
+ sc->sc_sessions[session]->dma_addr,
+ sizeof(struct pasemi_session), DMA_TO_DEVICE);
+ memset(sc->sc_sessions[session], 0,
+ sizeof(struct pasemi_session));
+
+ return 0;
+}
+
+static int
+pasemi_process(device_t dev, struct cryptop *crp, int hint)
+{
+
+ int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
+ struct pasemi_softc *sc = device_get_softc(dev);
+ struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
+ caddr_t ivp;
+ struct pasemi_desc init_desc, work_desc;
+ struct pasemi_session *ses;
+ struct sk_buff *skb;
+ struct uio *uiop;
+ unsigned long flags;
+ struct pasemi_fnu_txring *txring;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
+ return -EINVAL;
+
+ crp->crp_etype = 0;
+ if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
+ return -EINVAL;
+
+ ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];
+
+ crd1 = crp->crp_desc;
+ if (crd1 == NULL) {
+ err = -EINVAL;
+ goto errout;
+ }
+ crd2 = crd1->crd_next;
+
+ if (ALG_IS_SIG(crd1->crd_alg)) {
+ maccrd = crd1;
+ if (crd2 == NULL)
+ enccrd = NULL;
+ else if (ALG_IS_CIPHER(crd2->crd_alg) &&
+ (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
+ enccrd = crd2;
+ else
+ goto erralg;
+ } else if (ALG_IS_CIPHER(crd1->crd_alg)) {
+ enccrd = crd1;
+ if (crd2 == NULL)
+ maccrd = NULL;
+ else if (ALG_IS_SIG(crd2->crd_alg) &&
+ (crd1->crd_flags & CRD_F_ENCRYPT))
+ maccrd = crd2;
+ else
+ goto erralg;
+ } else
+ goto erralg;
+
+ chsel = ses->chan;
+
+ txring = &sc->tx[chsel];
+
+ if (enccrd && !maccrd) {
+ if (enccrd->crd_alg == CRYPTO_ARC4)
+ reinit = 1;
+ reinit_size = 0x40;
+ srclen = crp->crp_ilen;
+
+ pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
+ | XCT_FUN_FUN(chsel));
+ if (enccrd->crd_flags & CRD_F_ENCRYPT)
+ pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
+ else
+ pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
+ } else if (enccrd && maccrd) {
+ if (enccrd->crd_alg == CRYPTO_ARC4)
+ reinit = 1;
+ reinit_size = 0x68;
+
+ if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+ /* Encrypt -> Authenticate */
+ pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG
+ | XCT_FUN_A | XCT_FUN_FUN(chsel));
+ srclen = maccrd->crd_skip + maccrd->crd_len;
+ } else {
+ /* Authenticate -> Decrypt */
+ pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC
+ | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
+ pasemi_desc_build(&work_desc, 0);
+ pasemi_desc_build(&work_desc, 0);
+ pasemi_desc_build(&work_desc, 0);
+ work_desc.postop = PASEMI_CHECK_SIG;
+ srclen = crp->crp_ilen;
+ }
+
+ pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
+ pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
+ } else if (!enccrd && maccrd) {
+ srclen = maccrd->crd_len;
+
+ pasemi_desc_start(&init_desc,
+ XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
+ pasemi_desc_build(&init_desc,
+ XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey));
+
+ pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG
+ | XCT_FUN_A | XCT_FUN_FUN(chsel));
+ }
+
+ if (enccrd) {
+ switch (enccrd->crd_alg) {
+ case CRYPTO_3DES_CBC:
+ pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
+ XCT_FUN_BCM_CBC);
+ ivsize = sizeof(u64);
+ break;
+ case CRYPTO_DES_CBC:
+ pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
+ XCT_FUN_BCM_CBC);
+ ivsize = sizeof(u64);
+ break;
+ case CRYPTO_AES_CBC:
+ pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
+ XCT_FUN_BCM_CBC);
+ ivsize = 2 * sizeof(u64);
+ break;
+ case CRYPTO_ARC4:
+ pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
+ ivsize = 0;
+ break;
+ default:
+ printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
+ enccrd->crd_alg);
+ err = -EINVAL;
+ goto errout;
+ }
+
+ ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
+ if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+ if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+ memcpy(ivp, enccrd->crd_iv, ivsize);
+ else
+ read_random(ivp, ivsize);
+ /* If IV is not present in the buffer already, it has to be copied there */
+ if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ enccrd->crd_inject, ivsize, ivp);
+ } else {
+ if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+				/* IV is provided explicitly in the descriptor */
+ memcpy(ivp, enccrd->crd_iv, ivsize);
+ else
+ /* IV is provided in the packet */
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ enccrd->crd_inject, ivsize,
+ ivp);
+ }
+ }
+
+ if (maccrd) {
+ switch (maccrd->crd_alg) {
+ case CRYPTO_MD5:
+ pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
+ XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
+ break;
+ case CRYPTO_SHA1:
+ pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
+ XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
+ break;
+ case CRYPTO_MD5_HMAC:
+ pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
+ XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
+ break;
+ case CRYPTO_SHA1_HMAC:
+ pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
+ XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
+ break;
+ default:
+ printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
+ maccrd->crd_alg);
+ err = -EINVAL;
+ goto errout;
+ }
+ }
+
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ /* using SKB buffers */
+ skb = (struct sk_buff *)crp->crp_buf;
+ if (skb_shinfo(skb)->nr_frags) {
+ printk(DRV_NAME ": skb frags unimplemented\n");
+ err = -EINVAL;
+ goto errout;
+ }
+ pasemi_desc_build(
+ &work_desc,
+ XCT_FUN_DST_PTR(skb->len, pci_map_single(
+ sc->dma_pdev, skb->data,
+ skb->len, DMA_TO_DEVICE)));
+ pasemi_desc_build(
+ &work_desc,
+ XCT_FUN_SRC_PTR(
+ srclen, pci_map_single(
+ sc->dma_pdev, skb->data,
+ srclen, DMA_TO_DEVICE)));
+ pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ /* using IOV buffers */
+ uiop = (struct uio *)crp->crp_buf;
+ if (uiop->uio_iovcnt > 1) {
+ printk(DRV_NAME ": iov frags unimplemented\n");
+ err = -EINVAL;
+ goto errout;
+ }
+
+ /* crp_olen is never set; always use crp_ilen */
+ pasemi_desc_build(
+ &work_desc,
+ XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
+ sc->dma_pdev,
+ uiop->uio_iov->iov_base,
+ crp->crp_ilen, DMA_TO_DEVICE)));
+ pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
+
+ pasemi_desc_build(
+ &work_desc,
+ XCT_FUN_SRC_PTR(srclen, pci_map_single(
+ sc->dma_pdev,
+ uiop->uio_iov->iov_base,
+ srclen, DMA_TO_DEVICE)));
+ } else {
+ /* using contig buffers */
+ pasemi_desc_build(
+ &work_desc,
+ XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
+ sc->dma_pdev,
+ crp->crp_buf,
+ crp->crp_ilen, DMA_TO_DEVICE)));
+ pasemi_desc_build(
+ &work_desc,
+ XCT_FUN_SRC_PTR(srclen, pci_map_single(
+ sc->dma_pdev,
+ crp->crp_buf, srclen,
+ DMA_TO_DEVICE)));
+ pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
+ }
+
+ spin_lock_irqsave(&txring->fill_lock, flags);
+
+ if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
+ txring->sesn = PASEMI_SESSION(crp->crp_sid);
+ reinit = 1;
+ }
+
+ if (enccrd) {
+ pasemi_desc_start(&init_desc,
+ XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
+ pasemi_desc_build(&init_desc,
+ XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
+ }
+
+ if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
+ pasemi_desc_size(&work_desc)) -
+ txring->next_to_clean) > TX_RING_SIZE) {
+ spin_unlock_irqrestore(&txring->fill_lock, flags);
+ err = ERESTART;
+ goto errout;
+ }
+
+ pasemi_ring_add_desc(txring, &init_desc, NULL);
+ pasemi_ring_add_desc(txring, &work_desc, crp);
+
+ pasemi_ring_incr(sc, chsel,
+ pasemi_desc_size(&init_desc) +
+ pasemi_desc_size(&work_desc));
+
+ spin_unlock_irqrestore(&txring->fill_lock, flags);
+
+ mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);
+
+ return 0;
+
+erralg:
+ printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n",
+ crd1->crd_alg, crd2->crd_alg);
+ err = -EINVAL;
+
+errout:
+ if (err != ERESTART) {
+ crp->crp_etype = err;
+ crypto_done(crp);
+ }
+ return err;
+}
+
+static int pasemi_clean_tx(struct pasemi_softc *sc, int chan)
+{
+ int i, j, ring_idx;
+ struct pasemi_fnu_txring *ring = &sc->tx[chan];
+ u16 delta_cnt;
+	unsigned long flags;
+	int loops = 10;
+ int desc_size;
+ struct cryptop *crp;
+
+ spin_lock_irqsave(&ring->clean_lock, flags);
+
+ while ((delta_cnt = (dma_status->tx_sta[sc->base_chan + chan]
+ & PAS_STATUS_PCNT_M) - ring->total_pktcnt)
+ && loops--) {
+
+ for (i = 0; i < delta_cnt; i++) {
+ desc_size = TX_DESC_INFO(ring, ring->next_to_clean).desc_size;
+ crp = TX_DESC_INFO(ring, ring->next_to_clean).cf_crp;
+ if (crp) {
+ ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
+ if (TX_DESC_INFO(ring, ring->next_to_clean).desc_postop & PASEMI_CHECK_SIG) {
+ /* Need to make sure signature matched,
+ * if not - return error */
+ if (!(ring->desc[ring_idx + 1] & (1ULL << 63)))
+ crp->crp_etype = -EINVAL;
+ }
+ crypto_done(TX_DESC_INFO(ring,
+ ring->next_to_clean).cf_crp);
+ TX_DESC_INFO(ring, ring->next_to_clean).cf_crp = NULL;
+ pci_unmap_single(
+ sc->dma_pdev,
+ XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]),
+ PCI_DMA_TODEVICE);
+
+ ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0;
+
+ ring->next_to_clean++;
+ for (j = 1; j < desc_size; j++) {
+ ring_idx = 2 *
+ (ring->next_to_clean &
+ (TX_RING_SIZE-1));
+ pci_unmap_single(
+ sc->dma_pdev,
+ XCT_PTR_ADDR_LEN(ring->desc[ring_idx]),
+ PCI_DMA_TODEVICE);
+ if (ring->desc[ring_idx + 1])
+ pci_unmap_single(
+ sc->dma_pdev,
+ XCT_PTR_ADDR_LEN(
+ ring->desc[
+ ring_idx + 1]),
+ PCI_DMA_TODEVICE);
+ ring->desc[ring_idx] =
+ ring->desc[ring_idx + 1] = 0;
+ ring->next_to_clean++;
+ }
+ } else {
+ for (j = 0; j < desc_size; j++) {
+ ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
+ ring->desc[ring_idx] =
+ ring->desc[ring_idx + 1] = 0;
+ ring->next_to_clean++;
+ }
+ }
+ }
+
+ ring->total_pktcnt += delta_cnt;
+ }
+ spin_unlock_irqrestore(&ring->clean_lock, flags);
+
+ return 0;
+}
+
+static void sweepup_tx(struct pasemi_softc *sc)
+{
+ int i;
+
+ for (i = 0; i < sc->sc_num_channels; i++)
+ pasemi_clean_tx(sc, i);
+}
+
+static irqreturn_t pasemi_intr(int irq, void *arg, struct pt_regs *regs)
+{
+ struct pasemi_softc *sc = arg;
+ unsigned int reg;
+ int chan = irq - sc->base_irq;
+ int chan_index = sc->base_chan + chan;
+ u64 stat = dma_status->tx_sta[chan_index];
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ if (!(stat & PAS_STATUS_CAUSE_M))
+ return IRQ_NONE;
+
+ pasemi_clean_tx(sc, chan);
+
+ stat = dma_status->tx_sta[chan_index];
+
+ reg = PAS_IOB_DMA_TXCH_RESET_PINTC |
+ PAS_IOB_DMA_TXCH_RESET_PCNT(sc->tx[chan].total_pktcnt);
+
+ if (stat & PAS_STATUS_SOFT)
+ reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
+
+ out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), reg);
+
+
+ return IRQ_HANDLED;
+}
+
+static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan)
+{
+ u32 val;
+ int chan_index = chan + sc->base_chan;
+ int ret;
+ struct pasemi_fnu_txring *ring;
+
+ ring = &sc->tx[chan];
+
+ spin_lock_init(&ring->fill_lock);
+ spin_lock_init(&ring->clean_lock);
+
+ ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) *
+ TX_RING_SIZE, GFP_KERNEL);
+ if (!ring->desc_info)
+ return -ENOMEM;
+
+ /* Allocate descriptors */
+ ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev,
+ TX_RING_SIZE *
+ 2 * sizeof(u64),
+ &ring->dma, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64));
+
+ out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30);
+
+ ring->total_pktcnt = 0;
+
+ out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index),
+ PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
+
+ val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
+ val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);
+
+ out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val);
+
+ out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index),
+ PAS_DMA_TXCHAN_CFG_TY_FUNC |
+ PAS_DMA_TXCHAN_CFG_TATTR(chan) |
+ PAS_DMA_TXCHAN_CFG_WT(2));
+
+ /* enable tx channel */
+ out_le32(sc->dma_regs +
+ PAS_DMA_TXCHAN_TCMDSTA(chan_index),
+ PAS_DMA_TXCHAN_TCMDSTA_EN);
+
+ out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index),
+ PAS_IOB_DMA_TXCH_CFG_CNTTH(1000));
+
+ ring->next_to_fill = 0;
+ ring->next_to_clean = 0;
+
+ snprintf(ring->irq_name, sizeof(ring->irq_name),
+ "%s%d", "crypto", chan);
+
+ ring->irq = irq_create_mapping(NULL, sc->base_irq + chan);
+ ret = request_irq(ring->irq, (irq_handler_t)
+ pasemi_intr, IRQF_DISABLED, ring->irq_name, sc);
+ if (ret) {
+ printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n",
+ ring->irq, ret);
+ ring->irq = -1;
+ return ret;
+ }
+
+ setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc);
+
+ return 0;
+}
+
+static device_method_t pasemi_methods = {
+ /* crypto device methods */
+ DEVMETHOD(cryptodev_newsession, pasemi_newsession),
+ DEVMETHOD(cryptodev_freesession, pasemi_freesession),
+ DEVMETHOD(cryptodev_process, pasemi_process),
+};
+
+/* Set up the crypto device structure, private data,
+ * and anything else we need before we start */
+
+static int __devinit
+pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct pasemi_softc *sc;
+ int ret, i;
+
+ DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);
+
+ sc = kzalloc(sizeof(*sc), GFP_KERNEL);
+ if (!sc)
+ return -ENOMEM;
+
+ softc_device_init(sc, DRV_NAME, 1, pasemi_methods);
+
+ pci_set_drvdata(pdev, sc);
+
+ spin_lock_init(&sc->sc_chnlock);
+
+ sc->sc_sessions = (struct pasemi_session **)
+ kzalloc(PASEMI_INITIAL_SESSIONS *
+ sizeof(struct pasemi_session *), GFP_ATOMIC);
+ if (sc->sc_sessions == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
+ sc->sc_lastchn = 0;
+ sc->base_irq = pdev->irq + 6;
+ sc->base_chan = 6;
+ sc->sc_cid = -1;
+ sc->dma_pdev = pdev;
+
+ sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
+ if (!sc->iob_pdev) {
+ dev_err(&pdev->dev, "Can't find I/O Bridge\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* This is hardcoded and ugly, but we have some firmware versions
+	 * that don't provide the register space in the device tree. Luckily
+ * they are at well-known locations so we can just do the math here.
+ */
+ sc->dma_regs =
+ ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
+ sc->iob_regs =
+ ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
+ if (!sc->dma_regs || !sc->iob_regs) {
+ dev_err(&pdev->dev, "Can't map registers\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ dma_status = __ioremap(0xfd800000, 0x1000, 0);
+ if (!dma_status) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "Can't map dmastatus space\n");
+ goto out;
+ }
+
+ sc->tx = (struct pasemi_fnu_txring *)
+ kzalloc(sizeof(struct pasemi_fnu_txring)
+ * 8, GFP_KERNEL);
+ if (!sc->tx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Initialize the h/w */
+ out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
+ (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
+ PAS_DMA_COM_CFG_FWF));
+ out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);
+
+ for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
+ sc->sc_num_channels++;
+ ret = pasemi_dma_setup_tx_resources(sc, i);
+ if (ret)
+ goto out;
+ }
+
+ sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
+ CRYPTOCAP_F_HARDWARE);
+ if (sc->sc_cid < 0) {
+ printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /* register algorithms with the framework */
+ printk(DRV_NAME ":");
+
+ crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
+
+ return 0;
+
+out:
+ pasemi_dma_remove(pdev);
+ return ret;
+}
+
+#define MAX_RETRIES 5000
+
+static void pasemi_free_tx_resources(struct pasemi_softc *sc, int chan)
+{
+ struct pasemi_fnu_txring *ring = &sc->tx[chan];
+ int chan_index = chan + sc->base_chan;
+ int retries;
+ u32 stat;
+
+ /* Stop the channel */
+ out_le32(sc->dma_regs +
+ PAS_DMA_TXCHAN_TCMDSTA(chan_index),
+ PAS_DMA_TXCHAN_TCMDSTA_ST);
+
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ stat = in_le32(sc->dma_regs +
+ PAS_DMA_TXCHAN_TCMDSTA(chan_index));
+ if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
+ dev_err(&sc->dma_pdev->dev, "Failed to stop tx channel %d\n",
+ chan_index);
+
+ /* Disable the channel */
+ out_le32(sc->dma_regs +
+ PAS_DMA_TXCHAN_TCMDSTA(chan_index),
+ 0);
+
+ if (ring->desc_info)
+ kfree((void *) ring->desc_info);
+ if (ring->desc)
+ dma_free_coherent(&sc->dma_pdev->dev,
+ TX_RING_SIZE *
+ 2 * sizeof(u64),
+ (void *) ring->desc, ring->dma);
+ if (ring->irq != -1)
+ free_irq(ring->irq, sc);
+
+ del_timer(&ring->crypto_timer);
+}
+
+static void __devexit pasemi_dma_remove(struct pci_dev *pdev)
+{
+ struct pasemi_softc *sc = pci_get_drvdata(pdev);
+ int i;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ if (sc->sc_cid >= 0) {
+ crypto_unregister_all(sc->sc_cid);
+ }
+
+ if (sc->tx) {
+ for (i = 0; i < sc->sc_num_channels; i++)
+ pasemi_free_tx_resources(sc, i);
+
+ kfree(sc->tx);
+ }
+ if (sc->sc_sessions) {
+ for (i = 0; i < sc->sc_nsessions; i++)
+ kfree(sc->sc_sessions[i]);
+ kfree(sc->sc_sessions);
+ }
+ if (sc->iob_pdev)
+ pci_dev_put(sc->iob_pdev);
+ if (sc->dma_regs)
+ iounmap(sc->dma_regs);
+ if (sc->iob_regs)
+ iounmap(sc->iob_regs);
+ kfree(sc);
+}
+
+static struct pci_device_id pasemi_dma_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa007) },
+};
+
+MODULE_DEVICE_TABLE(pci, pasemi_dma_pci_tbl);
+
+static struct pci_driver pasemi_dma_driver = {
+ .name = "pasemi_dma",
+ .id_table = pasemi_dma_pci_tbl,
+ .probe = pasemi_dma_probe,
+ .remove = __devexit_p(pasemi_dma_remove),
+};
+
+static void __exit pasemi_dma_cleanup_module(void)
+{
+ pci_unregister_driver(&pasemi_dma_driver);
+ __iounmap(dma_status);
+ dma_status = NULL;
+}
+
+int pasemi_dma_init_module(void)
+{
+ return pci_register_driver(&pasemi_dma_driver);
+}
+
+module_init(pasemi_dma_init_module);
+module_exit(pasemi_dma_cleanup_module);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Egor Martovetsky egor@pasemi.com");
+MODULE_DESCRIPTION("OCF driver for PA Semi PWRficient DMA Crypto Engine");
diff --git a/target/linux/generic/files/crypto/ocf/pasemi/pasemi_fnu.h b/target/linux/generic/files/crypto/ocf/pasemi/pasemi_fnu.h
new file mode 100644
index 000000000..1a0dcc8bb
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/pasemi/pasemi_fnu.h
@@ -0,0 +1,410 @@
+/*
+ * Copyright (C) 2007 PA Semi, Inc
+ *
+ * Driver for the PA Semi PWRficient DMA Crypto Engine, soft state and
+ * hardware register layouts.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef PASEMI_FNU_H
+#define PASEMI_FNU_H
+
+#include <linux/spinlock.h>
+
+#define PASEMI_SESSION(sid) ((sid) & 0xffffffff)
+#define PASEMI_SID(sesn) ((sesn) & 0xffffffff)
+#define DPRINTF(a...) if (debug) { printk(DRV_NAME ": " a); }
+
+/* Must be a power of two */
+#define RX_RING_SIZE 512
+#define TX_RING_SIZE 512
+#define TX_DESC(ring, num) ((ring)->desc[2 * (num & (TX_RING_SIZE-1))])
+#define TX_DESC_INFO(ring, num) ((ring)->desc_info[(num) & (TX_RING_SIZE-1)])
+#define MAX_DESC_SIZE 8
+#define PASEMI_INITIAL_SESSIONS 10
+#define PASEMI_FNU_CHANNELS 8
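+
+/*
+ * Illustrative note (not from the original source): because TX_RING_SIZE
+ * is a power of two, the ring index in TX_DESC() wraps by masking, e.g.
+ * TX_DESC(ring, 513) expands to ring->desc[2 * (513 & 511)], i.e.
+ * ring->desc[2], the descriptor pair for slot 1.
+ */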
+
+/* DMA descriptor */
+struct pasemi_desc {
+ u64 quad[2*MAX_DESC_SIZE];
+ int quad_cnt;
+ int size;
+ int postop;
+};
+
+/*
+ * Holds per descriptor data
+ */
+struct pasemi_desc_info {
+ int desc_size;
+ int desc_postop;
+#define PASEMI_CHECK_SIG 0x1
+
+ struct cryptop *cf_crp;
+};
+
+/*
+ * Holds per channel data
+ */
+struct pasemi_fnu_txring {
+ volatile u64 *desc;
+ volatile struct
+ pasemi_desc_info *desc_info;
+ dma_addr_t dma;
+ struct timer_list crypto_timer;
+ spinlock_t fill_lock;
+ spinlock_t clean_lock;
+ unsigned int next_to_fill;
+ unsigned int next_to_clean;
+ u16 total_pktcnt;
+ int irq;
+ int sesn;
+ char irq_name[10];
+};
+
+/*
+ * Holds data specific to a single pasemi device.
+ */
+struct pasemi_softc {
+ softc_device_decl sc_cdev;
+ struct pci_dev *dma_pdev; /* device backpointer */
+ struct pci_dev *iob_pdev; /* device backpointer */
+ void __iomem *dma_regs;
+ void __iomem *iob_regs;
+ int base_irq;
+ int base_chan;
+ int32_t sc_cid; /* crypto tag */
+ int sc_nsessions;
+ struct pasemi_session **sc_sessions;
+ int sc_num_channels;/* number of crypto channels */
+
+	/* pointer to the array of txring data structures, one txring per channel */
+ struct pasemi_fnu_txring *tx;
+
+ /*
+ * mutual exclusion for the channel scheduler
+ */
+ spinlock_t sc_chnlock;
+ /* last channel used, for now use round-robin to allocate channels */
+ int sc_lastchn;
+};
+
+struct pasemi_session {
+ u64 civ[2];
+ u64 keysz;
+ u64 key[4];
+ u64 ccmd;
+ u64 hkey[4];
+ u64 hseq;
+ u64 giv[2];
+ u64 hiv[4];
+
+ int used;
+ dma_addr_t dma_addr;
+ int chan;
+};
+
+/* status register layout in IOB region, at 0xfd800000 */
+struct pasdma_status {
+ u64 rx_sta[64];
+ u64 tx_sta[20];
+};
+
+#define ALG_IS_CIPHER(alg) ((alg == CRYPTO_DES_CBC) || \
+ (alg == CRYPTO_3DES_CBC) || \
+ (alg == CRYPTO_AES_CBC) || \
+ (alg == CRYPTO_ARC4) || \
+ (alg == CRYPTO_NULL_CBC))
+
+#define ALG_IS_SIG(alg) ((alg == CRYPTO_MD5) || \
+ (alg == CRYPTO_MD5_HMAC) || \
+ (alg == CRYPTO_SHA1) || \
+ (alg == CRYPTO_SHA1_HMAC) || \
+ (alg == CRYPTO_NULL_HMAC))
+
+enum {
+ PAS_DMA_COM_TXCMD = 0x100, /* Transmit Command Register */
+ PAS_DMA_COM_TXSTA = 0x104, /* Transmit Status Register */
+ PAS_DMA_COM_RXCMD = 0x108, /* Receive Command Register */
+ PAS_DMA_COM_RXSTA = 0x10c, /* Receive Status Register */
+ PAS_DMA_COM_CFG = 0x114, /* DMA Configuration Register */
+};
+
+/* All these registers live in the PCI configuration space for the DMA PCI
+ * device. Use the normal PCI config access functions for them.
+ */
+
+#define PAS_DMA_COM_CFG_FWF 0x18000000
+
+#define PAS_DMA_COM_TXCMD_EN 0x00000001 /* enable */
+#define PAS_DMA_COM_TXSTA_ACT 0x00000001 /* active */
+#define PAS_DMA_COM_RXCMD_EN 0x00000001 /* enable */
+#define PAS_DMA_COM_RXSTA_ACT 0x00000001 /* active */
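+
+/*
+ * Minimal sketch (illustrative only, not used by this driver as written):
+ * reading one of the common DMA registers above through the normal PCI
+ * config accessors, as the comment above suggests; "pdev" is assumed to
+ * be the DMA PCI device.
+ */
+#if 0
+static u32 pas_dma_com_read(struct pci_dev *pdev, int reg)
+{
+	u32 val;
+
+	pci_read_config_dword(pdev, reg, &val);
+	return val;
+}
+#endif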
+
+#define _PAS_DMA_TXCHAN_STRIDE 0x20 /* Size per channel */
+#define _PAS_DMA_TXCHAN_TCMDSTA 0x300 /* Command / Status */
+#define _PAS_DMA_TXCHAN_CFG 0x304 /* Configuration */
+#define _PAS_DMA_TXCHAN_DSCRBU 0x308 /* Descriptor BU Allocation */
+#define _PAS_DMA_TXCHAN_INCR 0x310 /* Descriptor increment */
+#define _PAS_DMA_TXCHAN_CNT 0x314 /* Descriptor count/offset */
+#define _PAS_DMA_TXCHAN_BASEL 0x318 /* Descriptor ring base (low) */
+#define _PAS_DMA_TXCHAN_BASEU 0x31c /* (high) */
+#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_TCMDSTA_EN 0x00000001 /* Enabled */
+#define PAS_DMA_TXCHAN_TCMDSTA_ST 0x00000002 /* Stop interface */
+#define PAS_DMA_TXCHAN_TCMDSTA_ACT 0x00010000 /* Active */
+#define PAS_DMA_TXCHAN_CFG(c) (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_CFG_TY_FUNC	0x00000002	/* Type = function */
+#define PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000 /* Type = interface */
+#define PAS_DMA_TXCHAN_CFG_TATTR_M 0x0000003c
+#define PAS_DMA_TXCHAN_CFG_TATTR_S 2
+#define PAS_DMA_TXCHAN_CFG_TATTR(x) (((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
+ PAS_DMA_TXCHAN_CFG_TATTR_M)
+#define PAS_DMA_TXCHAN_CFG_WT_M 0x000001c0
+#define PAS_DMA_TXCHAN_CFG_WT_S 6
+#define PAS_DMA_TXCHAN_CFG_WT(x) (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
+ PAS_DMA_TXCHAN_CFG_WT_M)
+#define PAS_DMA_TXCHAN_CFG_LPSQ_FAST 0x00000400
+#define PAS_DMA_TXCHAN_CFG_LPDQ_FAST 0x00000800
+#define PAS_DMA_TXCHAN_CFG_CF 0x00001000 /* Clean first line */
+#define PAS_DMA_TXCHAN_CFG_CL 0x00002000 /* Clean last line */
+#define PAS_DMA_TXCHAN_CFG_UP 0x00004000 /* update tx descr when sent */
+#define PAS_DMA_TXCHAN_INCR(c) (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEL(c) (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEL_BRBL_M 0xffffffc0
+#define PAS_DMA_TXCHAN_BASEL_BRBL_S 0
+#define PAS_DMA_TXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
+ PAS_DMA_TXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_TXCHAN_BASEU(c) (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEU_BRBH_M 0x00000fff
+#define PAS_DMA_TXCHAN_BASEU_BRBH_S 0
+#define PAS_DMA_TXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
+ PAS_DMA_TXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define PAS_DMA_TXCHAN_BASEU_SIZ_M 0x3fff0000
+#define PAS_DMA_TXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */
+#define PAS_DMA_TXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
+ PAS_DMA_TXCHAN_BASEU_SIZ_M)
+
+#define PAS_STATUS_PCNT_M 0x000000000000ffffull
+#define PAS_STATUS_PCNT_S 0
+#define PAS_STATUS_DCNT_M 0x00000000ffff0000ull
+#define PAS_STATUS_DCNT_S 16
+#define PAS_STATUS_BPCNT_M 0x0000ffff00000000ull
+#define PAS_STATUS_BPCNT_S 32
+#define PAS_STATUS_CAUSE_M 0xf000000000000000ull
+#define PAS_STATUS_TIMER 0x1000000000000000ull
+#define PAS_STATUS_ERROR 0x2000000000000000ull
+#define PAS_STATUS_SOFT 0x4000000000000000ull
+#define PAS_STATUS_INT 0x8000000000000000ull
+
+#define PAS_IOB_DMA_RXCH_CFG(i) (0x1100 + (i)*4)
+#define PAS_IOB_DMA_RXCH_CFG_CNTTH_M 0x00000fff
+#define PAS_IOB_DMA_RXCH_CFG_CNTTH_S 0
+#define PAS_IOB_DMA_RXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
+ PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_TXCH_CFG(i) (0x1200 + (i)*4)
+#define PAS_IOB_DMA_TXCH_CFG_CNTTH_M 0x00000fff
+#define PAS_IOB_DMA_TXCH_CFG_CNTTH_S 0
+#define PAS_IOB_DMA_TXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
+ PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_RXCH_STAT(i) (0x1300 + (i)*4)
+#define PAS_IOB_DMA_RXCH_STAT_INTGEN 0x00001000
+#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_M 0x00000fff
+#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_S 0
+#define PAS_IOB_DMA_RXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
+ PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_TXCH_STAT(i) (0x1400 + (i)*4)
+#define PAS_IOB_DMA_TXCH_STAT_INTGEN 0x00001000
+#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_M 0x00000fff
+#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_S 0
+#define PAS_IOB_DMA_TXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
+ PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_RXCH_RESET(i) (0x1500 + (i)*4)
+#define PAS_IOB_DMA_RXCH_RESET_PCNT_M 0xffff0000
+#define PAS_IOB_DMA_RXCH_RESET_PCNT_S 16
+#define PAS_IOB_DMA_RXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
+ PAS_IOB_DMA_RXCH_RESET_PCNT_M)
+#define PAS_IOB_DMA_RXCH_RESET_PCNTRST 0x00000020
+#define PAS_IOB_DMA_RXCH_RESET_DCNTRST 0x00000010
+#define PAS_IOB_DMA_RXCH_RESET_TINTC 0x00000008
+#define PAS_IOB_DMA_RXCH_RESET_DINTC 0x00000004
+#define PAS_IOB_DMA_RXCH_RESET_SINTC 0x00000002
+#define PAS_IOB_DMA_RXCH_RESET_PINTC 0x00000001
+#define PAS_IOB_DMA_TXCH_RESET(i) (0x1600 + (i)*4)
+#define PAS_IOB_DMA_TXCH_RESET_PCNT_M 0xffff0000
+#define PAS_IOB_DMA_TXCH_RESET_PCNT_S 16
+#define PAS_IOB_DMA_TXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
+ PAS_IOB_DMA_TXCH_RESET_PCNT_M)
+#define PAS_IOB_DMA_TXCH_RESET_PCNTRST 0x00000020
+#define PAS_IOB_DMA_TXCH_RESET_DCNTRST 0x00000010
+#define PAS_IOB_DMA_TXCH_RESET_TINTC 0x00000008
+#define PAS_IOB_DMA_TXCH_RESET_DINTC 0x00000004
+#define PAS_IOB_DMA_TXCH_RESET_SINTC 0x00000002
+#define PAS_IOB_DMA_TXCH_RESET_PINTC 0x00000001
+
+#define PAS_IOB_DMA_COM_TIMEOUTCFG 0x1700
+#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M 0x00ffffff
+#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S 0
+#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x) (((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
+ PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
+
+/* Transmit descriptor fields */
+#define XCT_MACTX_T 0x8000000000000000ull
+#define XCT_MACTX_ST 0x4000000000000000ull
+#define XCT_MACTX_NORES 0x0000000000000000ull
+#define XCT_MACTX_8BRES 0x1000000000000000ull
+#define XCT_MACTX_24BRES 0x2000000000000000ull
+#define XCT_MACTX_40BRES 0x3000000000000000ull
+#define XCT_MACTX_I 0x0800000000000000ull
+#define XCT_MACTX_O 0x0400000000000000ull
+#define XCT_MACTX_E 0x0200000000000000ull
+#define XCT_MACTX_VLAN_M 0x0180000000000000ull
+#define XCT_MACTX_VLAN_NOP 0x0000000000000000ull
+#define XCT_MACTX_VLAN_REMOVE 0x0080000000000000ull
+#define XCT_MACTX_VLAN_INSERT 0x0100000000000000ull
+#define XCT_MACTX_VLAN_REPLACE 0x0180000000000000ull
+#define XCT_MACTX_CRC_M 0x0060000000000000ull
+#define XCT_MACTX_CRC_NOP 0x0000000000000000ull
+#define XCT_MACTX_CRC_INSERT 0x0020000000000000ull
+#define XCT_MACTX_CRC_PAD 0x0040000000000000ull
+#define XCT_MACTX_CRC_REPLACE 0x0060000000000000ull
+#define XCT_MACTX_SS 0x0010000000000000ull
+#define XCT_MACTX_LLEN_M 0x00007fff00000000ull
+#define XCT_MACTX_LLEN_S 32ull
+#define XCT_MACTX_LLEN(x) ((((long)(x)) << XCT_MACTX_LLEN_S) & \
+ XCT_MACTX_LLEN_M)
+#define XCT_MACTX_IPH_M 0x00000000f8000000ull
+#define XCT_MACTX_IPH_S 27ull
+#define XCT_MACTX_IPH(x) ((((long)(x)) << XCT_MACTX_IPH_S) & \
+ XCT_MACTX_IPH_M)
+#define XCT_MACTX_IPO_M 0x0000000007c00000ull
+#define XCT_MACTX_IPO_S 22ull
+#define XCT_MACTX_IPO(x) ((((long)(x)) << XCT_MACTX_IPO_S) & \
+ XCT_MACTX_IPO_M)
+#define XCT_MACTX_CSUM_M 0x0000000000000060ull
+#define XCT_MACTX_CSUM_NOP 0x0000000000000000ull
+#define XCT_MACTX_CSUM_TCP 0x0000000000000040ull
+#define XCT_MACTX_CSUM_UDP 0x0000000000000060ull
+#define XCT_MACTX_V6 0x0000000000000010ull
+#define XCT_MACTX_C 0x0000000000000004ull
+#define XCT_MACTX_AL2 0x0000000000000002ull
+
+#define XCT_PTR_T 0x8000000000000000ull
+#define XCT_PTR_LEN_M 0x7ffff00000000000ull
+#define XCT_PTR_LEN_S 44
+#define XCT_PTR_LEN(x) ((((long)(x)) << XCT_PTR_LEN_S) & \
+ XCT_PTR_LEN_M)
+#define XCT_PTR_ADDR_M 0x00000fffffffffffull
+#define XCT_PTR_ADDR_S 0
+#define XCT_PTR_ADDR(x) ((((long)(x)) << XCT_PTR_ADDR_S) & \
+ XCT_PTR_ADDR_M)
+
+/* Function descriptor fields */
+#define XCT_FUN_T 0x8000000000000000ull
+#define XCT_FUN_ST 0x4000000000000000ull
+#define XCT_FUN_NORES 0x0000000000000000ull
+#define XCT_FUN_8BRES 0x1000000000000000ull
+#define XCT_FUN_24BRES 0x2000000000000000ull
+#define XCT_FUN_40BRES 0x3000000000000000ull
+#define XCT_FUN_I 0x0800000000000000ull
+#define XCT_FUN_O 0x0400000000000000ull
+#define XCT_FUN_E 0x0200000000000000ull
+#define XCT_FUN_FUN_S 54
+#define XCT_FUN_FUN_M 0x01c0000000000000ull
+#define XCT_FUN_FUN(num) ((((long)(num)) << XCT_FUN_FUN_S) & \
+ XCT_FUN_FUN_M)
+#define XCT_FUN_CRM_NOP 0x0000000000000000ull
+#define XCT_FUN_CRM_SIG 0x0008000000000000ull
+#define XCT_FUN_CRM_ENC 0x0010000000000000ull
+#define XCT_FUN_CRM_DEC 0x0018000000000000ull
+#define XCT_FUN_CRM_SIG_ENC 0x0020000000000000ull
+#define XCT_FUN_CRM_ENC_SIG 0x0028000000000000ull
+#define XCT_FUN_CRM_SIG_DEC 0x0030000000000000ull
+#define XCT_FUN_CRM_DEC_SIG 0x0038000000000000ull
+#define XCT_FUN_LLEN_M 0x0007ffff00000000ull
+#define XCT_FUN_LLEN_S 32ULL
+#define XCT_FUN_LLEN(x) ((((long)(x)) << XCT_FUN_LLEN_S) & \
+ XCT_FUN_LLEN_M)
+#define XCT_FUN_SHL_M 0x00000000f8000000ull
+#define XCT_FUN_SHL_S 27ull
+#define XCT_FUN_SHL(x) ((((long)(x)) << XCT_FUN_SHL_S) & \
+ XCT_FUN_SHL_M)
+#define XCT_FUN_CHL_M 0x0000000007c00000ull
+#define XCT_FUN_CHL_S 22ull
+#define XCT_FUN_CHL(x) ((((long)(x)) << XCT_FUN_CHL_S) & \
+ XCT_FUN_CHL_M)
+#define XCT_FUN_HSZ_M 0x00000000003c0000ull
+#define XCT_FUN_HSZ_S 18ull
+#define XCT_FUN_HSZ(x) ((((long)(x)) << XCT_FUN_HSZ_S) & \
+ XCT_FUN_HSZ_M)
+#define XCT_FUN_ALG_DES 0x0000000000000000ull
+#define XCT_FUN_ALG_3DES 0x0000000000008000ull
+#define XCT_FUN_ALG_AES 0x0000000000010000ull
+#define XCT_FUN_ALG_ARC 0x0000000000018000ull
+#define XCT_FUN_ALG_KASUMI 0x0000000000020000ull
+#define XCT_FUN_BCM_ECB 0x0000000000000000ull
+#define XCT_FUN_BCM_CBC 0x0000000000001000ull
+#define XCT_FUN_BCM_CFB 0x0000000000002000ull
+#define XCT_FUN_BCM_OFB 0x0000000000003000ull
+#define XCT_FUN_BCM_CNT 0x0000000000003800ull
+#define XCT_FUN_BCM_KAS_F8 0x0000000000002800ull
+#define XCT_FUN_BCM_KAS_F9 0x0000000000001800ull
+#define XCT_FUN_BCP_NO_PAD 0x0000000000000000ull
+#define XCT_FUN_BCP_ZRO 0x0000000000000200ull
+#define XCT_FUN_BCP_PL 0x0000000000000400ull
+#define XCT_FUN_BCP_INCR 0x0000000000000600ull
+#define XCT_FUN_SIG_MD5 (0ull << 4)
+#define XCT_FUN_SIG_SHA1 (2ull << 4)
+#define XCT_FUN_SIG_HMAC_MD5 (8ull << 4)
+#define XCT_FUN_SIG_HMAC_SHA1 (10ull << 4)
+#define XCT_FUN_A 0x0000000000000008ull
+#define XCT_FUN_C 0x0000000000000004ull
+#define XCT_FUN_AL2 0x0000000000000002ull
+#define XCT_FUN_SE 0x0000000000000001ull
+
+#define XCT_FUN_SRC_PTR(len, addr) (XCT_PTR_LEN(len) | XCT_PTR_ADDR(addr))
+#define XCT_FUN_DST_PTR(len, addr) (XCT_FUN_SRC_PTR(len, addr) | \
+ 0x8000000000000000ull)
+
+#define XCT_CTRL_HDR_FUN_NUM_M 0x01c0000000000000ull
+#define XCT_CTRL_HDR_FUN_NUM_S 54
+#define XCT_CTRL_HDR_LEN_M 0x0007ffff00000000ull
+#define XCT_CTRL_HDR_LEN_S 32
+#define XCT_CTRL_HDR_REG_M 0x00000000000000ffull
+#define XCT_CTRL_HDR_REG_S 0
+
+#define XCT_CTRL_HDR(funcN,len,reg) (0x9400000000000000ull | \
+ ((((long)(funcN)) << XCT_CTRL_HDR_FUN_NUM_S) \
+ & XCT_CTRL_HDR_FUN_NUM_M) | \
+ ((((long)(len)) << \
+ XCT_CTRL_HDR_LEN_S) & XCT_CTRL_HDR_LEN_M) | \
+ ((((long)(reg)) << \
+ XCT_CTRL_HDR_REG_S) & XCT_CTRL_HDR_REG_M))
+
+/* Function config command options */
+#define DMA_CALGO_DES 0x00
+#define DMA_CALGO_3DES 0x01
+#define DMA_CALGO_AES 0x02
+#define DMA_CALGO_ARC 0x03
+
+#define DMA_FN_CIV0 0x02
+#define DMA_FN_CIV1 0x03
+#define DMA_FN_HKEY0 0x0a
+
+#define XCT_PTR_ADDR_LEN(ptr) ((ptr) & XCT_PTR_ADDR_M), \
+ (((ptr) & XCT_PTR_LEN_M) >> XCT_PTR_LEN_S)
+
+#endif /* PASEMI_FNU_H */
diff --git a/target/linux/generic/files/crypto/ocf/random.c b/target/linux/generic/files/crypto/ocf/random.c
new file mode 100644
index 000000000..4bb773ffb
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/random.c
@@ -0,0 +1,317 @@
+/*
+ * A system independent way of adding entropy to the kernel's pool;
+ * this way the drivers can focus on the real work and we can take
+ * care of pushing it to the appropriate place in the kernel.
+ *
+ * This should be fast and callable from timers/interrupts
+ *
+ * Written by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ * 1. distributions of this source code include the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ *
+ * 2. distributions in binary form include the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other associated materials;
+ *
+ * 3. the copyright holder's name is not used to endorse products
+ * built using this software without specific written permission.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this product
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/unistd.h>
+#include <linux/poll.h>
+#include <linux/random.h>
+#include <cryptodev.h>
+
+#ifdef CONFIG_OCF_FIPS
+#include "rndtest.h"
+#endif
+
+#ifndef HAS_RANDOM_INPUT_WAIT
+#error "Please do not enable OCF_RANDOMHARVEST unless you have applied patches"
+#endif
+
+/*
+ * a hack to access the debug levels from the crypto driver
+ */
+extern int crypto_debug;
+#define debug crypto_debug
+
+/*
+ * a list of all registered random providers
+ */
+static LIST_HEAD(random_ops);
+static int started = 0;
+static int initted = 0;
+
+struct random_op {
+ struct list_head random_list;
+ u_int32_t driverid;
+ int (*read_random)(void *arg, u_int32_t *buf, int len);
+ void *arg;
+};
+
+static int random_proc(void *arg);
+
+static pid_t randomproc = (pid_t) -1;
+static spinlock_t random_lock;
+
+/*
+ * just init the spin locks
+ */
+static int
+crypto_random_init(void)
+{
+ spin_lock_init(&random_lock);
+ initted = 1;
+ return(0);
+}
+
+/*
+ * Add the given random reader to our list (if not present)
+ * and start the thread (if not already started)
+ *
+ * we have to assume that driver id is ok for now
+ */
+int
+crypto_rregister(
+ u_int32_t driverid,
+ int (*read_random)(void *arg, u_int32_t *buf, int len),
+ void *arg)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct random_op *rops, *tmp;
+
+ dprintk("%s,%d: %s(0x%x, %p, %p)\n", __FILE__, __LINE__,
+ __FUNCTION__, driverid, read_random, arg);
+
+ if (!initted)
+ crypto_random_init();
+
+#if 0
+ struct cryptocap *cap;
+
+ cap = crypto_checkdriver(driverid);
+ if (!cap)
+ return EINVAL;
+#endif
+
+ list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
+ if (rops->driverid == driverid && rops->read_random == read_random)
+ return EEXIST;
+ }
+
+ rops = (struct random_op *) kmalloc(sizeof(*rops), GFP_KERNEL);
+ if (!rops)
+ return ENOMEM;
+
+ rops->driverid = driverid;
+ rops->read_random = read_random;
+ rops->arg = arg;
+
+ spin_lock_irqsave(&random_lock, flags);
+ list_add_tail(&rops->random_list, &random_ops);
+ if (!started) {
+ randomproc = kernel_thread(random_proc, NULL, CLONE_FS|CLONE_FILES);
+ if (randomproc < 0) {
+ ret = randomproc;
+ printk("crypto: crypto_rregister cannot start random thread; "
+ "error %d", ret);
+ } else
+ started = 1;
+ }
+ spin_unlock_irqrestore(&random_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(crypto_rregister);
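+
+/*
+ * Usage sketch (illustrative only; the driver names are hypothetical):
+ * a hardware driver that can supply random data registers a callback
+ * roughly like
+ *
+ *	static int mydrv_read_random(void *arg, u_int32_t *buf, int len)
+ *	{
+ *		... fill buf with up to len 32-bit words, return the
+ *		    number written, or -1 on failure ...
+ *	}
+ *
+ *	crypto_rregister(sc->sc_cid, mydrv_read_random, sc);
+ *
+ * and is dropped again via crypto_runregister_all(sc->sc_cid) on detach
+ * (or automatically by random_proc() if the callback returns -1).
+ */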
+
+int
+crypto_runregister_all(u_int32_t driverid)
+{
+ struct random_op *rops, *tmp;
+ unsigned long flags;
+
+ dprintk("%s,%d: %s(0x%x)\n", __FILE__, __LINE__, __FUNCTION__, driverid);
+
+ list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
+ if (rops->driverid == driverid) {
+ list_del(&rops->random_list);
+ kfree(rops);
+ }
+ }
+
+ spin_lock_irqsave(&random_lock, flags);
+ if (list_empty(&random_ops) && started)
+ kill_proc(randomproc, SIGKILL, 1);
+ spin_unlock_irqrestore(&random_lock, flags);
+ return(0);
+}
+EXPORT_SYMBOL(crypto_runregister_all);
+
+/*
+ * while we can add entropy to random.c, continue to read random data from
+ * the drivers and push it to random.
+ */
+static int
+random_proc(void *arg)
+{
+ int n;
+ int wantcnt;
+ int bufcnt = 0;
+ int retval = 0;
+ int *buf = NULL;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ daemonize();
+ spin_lock_irq(&current->sigmask_lock);
+ sigemptyset(&current->blocked);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+ sprintf(current->comm, "ocf-random");
+#else
+ daemonize("ocf-random");
+ allow_signal(SIGKILL);
+#endif
+
+ (void) get_fs();
+ set_fs(get_ds());
+
+#ifdef CONFIG_OCF_FIPS
+#define NUM_INT (RNDTEST_NBYTES/sizeof(int))
+#else
+#define NUM_INT 32
+#endif
+
+ /*
+	 * some devices can transfer their RNG data directly into memory,
+	 * so make sure the buffer is device (DMA) friendly
+ */
+ buf = kmalloc(NUM_INT * sizeof(int), GFP_DMA);
+ if (NULL == buf) {
+ printk("crypto: RNG could not allocate memory\n");
+ retval = -ENOMEM;
+ goto bad_alloc;
+ }
+
+ wantcnt = NUM_INT; /* start by adding some entropy */
+
+ /*
+	 * it's possible, due to errors or driver removal, that we no longer
+	 * have anything to do; if so, exit or we will consume all the CPU
+	 * doing nothing
+ */
+ while (!list_empty(&random_ops)) {
+ struct random_op *rops, *tmp;
+
+#ifdef CONFIG_OCF_FIPS
+ if (wantcnt)
+			wantcnt = NUM_INT; /* FIPS mode can do 20000 bits or none */
+#endif
+
+ /* see if we can get enough entropy to make the world
+ * a better place.
+ */
+ while (bufcnt < wantcnt && bufcnt < NUM_INT) {
+ list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
+
+ n = (*rops->read_random)(rops->arg, &buf[bufcnt],
+ NUM_INT - bufcnt);
+
+ /* on failure remove the random number generator */
+ if (n == -1) {
+ list_del(&rops->random_list);
+ printk("crypto: RNG (driverid=0x%x) failed, disabling\n",
+ rops->driverid);
+ kfree(rops);
+ } else if (n > 0)
+ bufcnt += n;
+ }
+ /* give up CPU for a bit, just in case as this is a loop */
+ schedule();
+ }
+
+
+#ifdef CONFIG_OCF_FIPS
+ if (bufcnt > 0 && rndtest_buf((unsigned char *) &buf[0])) {
+ dprintk("crypto: buffer had fips errors, discarding\n");
+ bufcnt = 0;
+ }
+#endif
+
+ /*
+ * if we have a certified buffer, we can send some data
+ * to /dev/random and move along
+ */
+ if (bufcnt > 0) {
+ /* add what we have */
+ random_input_words(buf, bufcnt, bufcnt*sizeof(int)*8);
+ bufcnt = 0;
+ }
+
+ /* give up CPU for a bit so we don't hog while filling */
+ schedule();
+
+		/* wait until more entropy is needed */
+ wantcnt = random_input_wait();
+
+ if (wantcnt <= 0)
+ wantcnt = 0; /* try to get some info again */
+ else
+ /* round up to one word or we can loop forever */
+ wantcnt = (wantcnt + (sizeof(int)*8)) / (sizeof(int)*8);
+ if (wantcnt > NUM_INT) {
+ wantcnt = NUM_INT;
+ }
+
+ if (signal_pending(current)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ spin_lock_irq(&current->sigmask_lock);
+#endif
+ flush_signals(current);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ spin_unlock_irq(&current->sigmask_lock);
+#endif
+ }
+ }
+
+ kfree(buf);
+
+bad_alloc:
+ spin_lock_irq(&random_lock);
+ randomproc = (pid_t) -1;
+ started = 0;
+ spin_unlock_irq(&random_lock);
+
+ return retval;
+}
+
diff --git a/target/linux/generic/files/crypto/ocf/rndtest.c b/target/linux/generic/files/crypto/ocf/rndtest.c
new file mode 100644
index 000000000..7bed6a193
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/rndtest.c
@@ -0,0 +1,300 @@
+/* $OpenBSD$ */
+
+/*
+ * OCF/Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ * The license and original author are listed below.
+ *
+ * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Jason L. Wright
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/unistd.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <cryptodev.h>
+#include "rndtest.h"
+
+static struct rndtest_stats rndstats;
+
+static void rndtest_test(struct rndtest_state *);
+
+/* The tests themselves */
+static int rndtest_monobit(struct rndtest_state *);
+static int rndtest_runs(struct rndtest_state *);
+static int rndtest_longruns(struct rndtest_state *);
+static int rndtest_chi_4(struct rndtest_state *);
+
+static int rndtest_runs_check(struct rndtest_state *, int, int *);
+static void rndtest_runs_record(struct rndtest_state *, int, int *);
+
+static const struct rndtest_testfunc {
+ int (*test)(struct rndtest_state *);
+} rndtest_funcs[] = {
+ { rndtest_monobit },
+ { rndtest_runs },
+ { rndtest_chi_4 },
+ { rndtest_longruns },
+};
+
+#define RNDTEST_NTESTS (sizeof(rndtest_funcs)/sizeof(rndtest_funcs[0]))
+
+static void
+rndtest_test(struct rndtest_state *rsp)
+{
+ int i, rv = 0;
+
+ rndstats.rst_tests++;
+ for (i = 0; i < RNDTEST_NTESTS; i++)
+ rv |= (*rndtest_funcs[i].test)(rsp);
+ rsp->rs_discard = (rv != 0);
+}
+
+
+extern int crypto_debug;
+#define rndtest_verbose 2
+#define rndtest_report(rsp, failure, fmt, a...) \
+ { if (failure || crypto_debug) { printk("rng_test: " fmt "\n", a); } else; }
+
+#define RNDTEST_MONOBIT_MINONES 9725
+#define RNDTEST_MONOBIT_MAXONES 10275
+
+static int
+rndtest_monobit(struct rndtest_state *rsp)
+{
+ int i, ones = 0, j;
+ u_int8_t r;
+
+ for (i = 0; i < RNDTEST_NBYTES; i++) {
+ r = rsp->rs_buf[i];
+ for (j = 0; j < 8; j++, r <<= 1)
+ if (r & 0x80)
+ ones++;
+ }
+ if (ones > RNDTEST_MONOBIT_MINONES &&
+ ones < RNDTEST_MONOBIT_MAXONES) {
+ if (rndtest_verbose > 1)
+ rndtest_report(rsp, 0, "monobit pass (%d < %d < %d)",
+ RNDTEST_MONOBIT_MINONES, ones,
+ RNDTEST_MONOBIT_MAXONES);
+ return (0);
+ } else {
+ if (rndtest_verbose)
+ rndtest_report(rsp, 1,
+ "monobit failed (%d ones)", ones);
+ rndstats.rst_monobit++;
+ return (-1);
+ }
+}
+
+#define RNDTEST_RUNS_NINTERVAL 6
+
+static const struct rndtest_runs_tabs {
+ u_int16_t min, max;
+} rndtest_runs_tab[] = {
+ { 2343, 2657 },
+ { 1135, 1365 },
+ { 542, 708 },
+ { 251, 373 },
+ { 111, 201 },
+ { 111, 201 },
+};
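+
+/*
+ * For reference (not in the original source): rndtest_runs_record() maps a
+ * run of length n to entry n-1, capping at the last entry, so entry i holds
+ * the allowed count interval for runs of length i+1 and the final entry
+ * covers runs of length 6 or more; the bounds match the FIPS 140-2 runs
+ * test intervals for a 20000 bit sample.
+ */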
+
+static int
+rndtest_runs(struct rndtest_state *rsp)
+{
+ int i, j, ones, zeros, rv = 0;
+ int onei[RNDTEST_RUNS_NINTERVAL], zeroi[RNDTEST_RUNS_NINTERVAL];
+ u_int8_t c;
+
+ bzero(onei, sizeof(onei));
+ bzero(zeroi, sizeof(zeroi));
+ ones = zeros = 0;
+ for (i = 0; i < RNDTEST_NBYTES; i++) {
+ c = rsp->rs_buf[i];
+ for (j = 0; j < 8; j++, c <<= 1) {
+ if (c & 0x80) {
+ ones++;
+ rndtest_runs_record(rsp, zeros, zeroi);
+ zeros = 0;
+ } else {
+ zeros++;
+ rndtest_runs_record(rsp, ones, onei);
+ ones = 0;
+ }
+ }
+ }
+ rndtest_runs_record(rsp, ones, onei);
+ rndtest_runs_record(rsp, zeros, zeroi);
+
+ rv |= rndtest_runs_check(rsp, 0, zeroi);
+ rv |= rndtest_runs_check(rsp, 1, onei);
+
+ if (rv)
+ rndstats.rst_runs++;
+
+ return (rv);
+}
+
+static void
+rndtest_runs_record(struct rndtest_state *rsp, int len, int *intrv)
+{
+ if (len == 0)
+ return;
+ if (len > RNDTEST_RUNS_NINTERVAL)
+ len = RNDTEST_RUNS_NINTERVAL;
+ len -= 1;
+ intrv[len]++;
+}
+
+static int
+rndtest_runs_check(struct rndtest_state *rsp, int val, int *src)
+{
+ int i, rv = 0;
+
+ for (i = 0; i < RNDTEST_RUNS_NINTERVAL; i++) {
+ if (src[i] < rndtest_runs_tab[i].min ||
+ src[i] > rndtest_runs_tab[i].max) {
+ rndtest_report(rsp, 1,
+ "%s interval %d failed (%d, %d-%d)",
+ val ? "ones" : "zeros",
+ i + 1, src[i], rndtest_runs_tab[i].min,
+ rndtest_runs_tab[i].max);
+ rv = -1;
+ } else {
+ rndtest_report(rsp, 0,
+ "runs pass %s interval %d (%d < %d < %d)",
+ val ? "ones" : "zeros",
+ i + 1, rndtest_runs_tab[i].min, src[i],
+ rndtest_runs_tab[i].max);
+ }
+ }
+ return (rv);
+}
+
+static int
+rndtest_longruns(struct rndtest_state *rsp)
+{
+ int i, j, ones = 0, zeros = 0, maxones = 0, maxzeros = 0;
+ u_int8_t c;
+
+ for (i = 0; i < RNDTEST_NBYTES; i++) {
+ c = rsp->rs_buf[i];
+ for (j = 0; j < 8; j++, c <<= 1) {
+ if (c & 0x80) {
+ zeros = 0;
+ ones++;
+ if (ones > maxones)
+ maxones = ones;
+ } else {
+ ones = 0;
+ zeros++;
+ if (zeros > maxzeros)
+ maxzeros = zeros;
+ }
+ }
+ }
+
+ if (maxones < 26 && maxzeros < 26) {
+ rndtest_report(rsp, 0, "longruns pass (%d ones, %d zeros)",
+ maxones, maxzeros);
+ return (0);
+ } else {
+ rndtest_report(rsp, 1, "longruns fail (%d ones, %d zeros)",
+ maxones, maxzeros);
+ rndstats.rst_longruns++;
+ return (-1);
+ }
+}
+
+/*
+ * chi^2 test over 4 bits: this is called the poker test in FIPS 140-2,
+ * but it is really the chi^2 test over 4 bits (the poker test as described
+ * by Knuth vol 2 is something different, and I take him as authoritative
+ * on nomenclature over NIST).
+ */
+#define RNDTEST_CHI4_K 16
+#define RNDTEST_CHI4_K_MASK (RNDTEST_CHI4_K - 1)
+
+/*
+ * The unnormalized values are used so that we don't have to worry about
+ * fractional precision. The "real" value is found by:
+ * (V - 1562500) * (16 / 5000) = Vn (where V is the unnormalized value)
+ */
+#define RNDTEST_CHI4_VMIN 1563181 /* 2.1792 */
+#define RNDTEST_CHI4_VMAX 1576929 /* 46.1728 */
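+
+/*
+ * Worked check (illustrative only): applying the normalization above,
+ * (1563181 - 1562500) * 16 / 5000 = 2.1792 and
+ * (1576929 - 1562500) * 16 / 5000 = 46.1728,
+ * which are the chi^2 acceptance bounds noted beside the defines.
+ */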
+
+static int
+rndtest_chi_4(struct rndtest_state *rsp)
+{
+ unsigned int freq[RNDTEST_CHI4_K], i, sum;
+
+ for (i = 0; i < RNDTEST_CHI4_K; i++)
+ freq[i] = 0;
+
+	/* Get number of occurrences of each 4 bit pattern */
+ for (i = 0; i < RNDTEST_NBYTES; i++) {
+ freq[(rsp->rs_buf[i] >> 4) & RNDTEST_CHI4_K_MASK]++;
+ freq[(rsp->rs_buf[i] >> 0) & RNDTEST_CHI4_K_MASK]++;
+ }
+
+ for (i = 0, sum = 0; i < RNDTEST_CHI4_K; i++)
+ sum += freq[i] * freq[i];
+
+	if (sum >= RNDTEST_CHI4_VMIN && sum <= RNDTEST_CHI4_VMAX) {
+ rndtest_report(rsp, 0, "chi^2(4): pass (sum %u)", sum);
+ return (0);
+ } else {
+ rndtest_report(rsp, 1, "chi^2(4): failed (sum %u)", sum);
+ rndstats.rst_chi++;
+ return (-1);
+ }
+}
+
+int
+rndtest_buf(unsigned char *buf)
+{
+ struct rndtest_state rsp;
+
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.rs_buf = buf;
+ rndtest_test(&rsp);
+ return(rsp.rs_discard);
+}
+
diff --git a/target/linux/generic/files/crypto/ocf/rndtest.h b/target/linux/generic/files/crypto/ocf/rndtest.h
new file mode 100644
index 000000000..e9d8ec8d3
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/rndtest.h
@@ -0,0 +1,54 @@
+/* $FreeBSD: src/sys/dev/rndtest/rndtest.h,v 1.1 2003/03/11 22:54:44 sam Exp $ */
+/* $OpenBSD$ */
+
+/*
+ * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Jason L. Wright
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/* Some of the tests depend on these values */
+#define RNDTEST_NBYTES 2500
+#define RNDTEST_NBITS (8 * RNDTEST_NBYTES)
+
+struct rndtest_state {
+ int rs_discard; /* discard/accept random data */
+ u_int8_t *rs_buf;
+};
+
+struct rndtest_stats {
+ u_int32_t rst_discard; /* number of bytes discarded */
+ u_int32_t rst_tests; /* number of test runs */
+ u_int32_t rst_monobit; /* monobit test failures */
+ u_int32_t rst_runs; /* 0/1 runs failures */
+ u_int32_t rst_longruns; /* longruns failures */
+ u_int32_t rst_chi; /* chi^2 failures */
+};
+
+extern int rndtest_buf(unsigned char *buf);
diff --git a/target/linux/generic/files/crypto/ocf/safe/Makefile b/target/linux/generic/files/crypto/ocf/safe/Makefile
new file mode 100644
index 000000000..9a36b081e
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/safe/Makefile
@@ -0,0 +1,12 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_SAFE) += safe.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
+
diff --git a/target/linux/generic/files/crypto/ocf/safe/hmachack.h b/target/linux/generic/files/crypto/ocf/safe/hmachack.h
new file mode 100644
index 000000000..598c95856
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/safe/hmachack.h
@@ -0,0 +1,37 @@
+/*
+ * until we find a cleaner way, include the BSD md5/sha1 code
+ * here
+ */
+#ifdef HMAC_HACK
+#define LITTLE_ENDIAN 1234
+#define BIG_ENDIAN 4321
+#ifdef __LITTLE_ENDIAN
+#define BYTE_ORDER LITTLE_ENDIAN
+#endif
+#ifdef __BIG_ENDIAN
+#define BYTE_ORDER BIG_ENDIAN
+#endif
+
+u_int8_t hmac_ipad_buffer[64] = {
+ 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+ 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+ 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+ 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+ 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+ 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+ 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+ 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
+};
+
+u_int8_t hmac_opad_buffer[64] = {
+ 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
+ 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
+ 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
+ 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
+ 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
+ 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
+ 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
+ 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
+};
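+
+/*
+ * For reference (RFC 2104): these are the standard HMAC pad constants,
+ * used as HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), with ipad
+ * being 0x36 and opad 0x5C repeated over one 64 byte hash block.
+ */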
+#endif /* HMAC_HACK */
+
diff --git a/target/linux/generic/files/crypto/ocf/safe/md5.c b/target/linux/generic/files/crypto/ocf/safe/md5.c
new file mode 100644
index 000000000..077c42e78
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/safe/md5.c
@@ -0,0 +1,308 @@
+/* $KAME: md5.c,v 1.5 2000/11/08 06:13:08 itojun Exp $ */
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if 0
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/crypto/md5.c,v 1.9 2004/01/27 19:49:19 des Exp $");
+
+#include <sys/types.h>
+#include <sys/cdefs.h>
+#include <sys/time.h>
+#include <sys/systm.h>
+#include <crypto/md5.h>
+#endif
+
+#define SHIFT(X, s) (((X) << (s)) | ((X) >> (32 - (s))))
+
+#define F(X, Y, Z) (((X) & (Y)) | ((~X) & (Z)))
+#define G(X, Y, Z) (((X) & (Z)) | ((Y) & (~Z)))
+#define H(X, Y, Z) ((X) ^ (Y) ^ (Z))
+#define I(X, Y, Z) ((Y) ^ ((X) | (~Z)))
+
+#define ROUND1(a, b, c, d, k, s, i) { \
+ (a) = (a) + F((b), (c), (d)) + X[(k)] + T[(i)]; \
+ (a) = SHIFT((a), (s)); \
+ (a) = (b) + (a); \
+}
+
+#define ROUND2(a, b, c, d, k, s, i) { \
+ (a) = (a) + G((b), (c), (d)) + X[(k)] + T[(i)]; \
+ (a) = SHIFT((a), (s)); \
+ (a) = (b) + (a); \
+}
+
+#define ROUND3(a, b, c, d, k, s, i) { \
+ (a) = (a) + H((b), (c), (d)) + X[(k)] + T[(i)]; \
+ (a) = SHIFT((a), (s)); \
+ (a) = (b) + (a); \
+}
+
+#define ROUND4(a, b, c, d, k, s, i) { \
+ (a) = (a) + I((b), (c), (d)) + X[(k)] + T[(i)]; \
+ (a) = SHIFT((a), (s)); \
+ (a) = (b) + (a); \
+}
+
+#define Sa 7
+#define Sb 12
+#define Sc 17
+#define Sd 22
+
+#define Se 5
+#define Sf 9
+#define Sg 14
+#define Sh 20
+
+#define Si 4
+#define Sj 11
+#define Sk 16
+#define Sl 23
+
+#define Sm 6
+#define Sn 10
+#define So 15
+#define Sp 21
+
+#define MD5_A0 0x67452301
+#define MD5_B0 0xefcdab89
+#define MD5_C0 0x98badcfe
+#define MD5_D0 0x10325476
+
+/* Integer part of 4294967296 times abs(sin(i)), where i is in radians. */
+static const u_int32_t T[65] = {
+ 0,
+ 0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee,
+ 0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501,
+ 0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be,
+ 0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821,
+
+ 0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa,
+ 0xd62f105d, 0x2441453, 0xd8a1e681, 0xe7d3fbc8,
+ 0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed,
+ 0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a,
+
+ 0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c,
+ 0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70,
+ 0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x4881d05,
+ 0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665,
+
+ 0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039,
+ 0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1,
+ 0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1,
+ 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391,
+};
+
+static const u_int8_t md5_paddat[MD5_BUFLEN] = {
+ 0x80, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static void md5_calc(u_int8_t *, md5_ctxt *);
+
+void md5_init(ctxt)
+ md5_ctxt *ctxt;
+{
+ ctxt->md5_n = 0;
+ ctxt->md5_i = 0;
+ ctxt->md5_sta = MD5_A0;
+ ctxt->md5_stb = MD5_B0;
+ ctxt->md5_stc = MD5_C0;
+ ctxt->md5_std = MD5_D0;
+ bzero(ctxt->md5_buf, sizeof(ctxt->md5_buf));
+}
+
+void md5_loop(ctxt, input, len)
+ md5_ctxt *ctxt;
+ u_int8_t *input;
+ u_int len; /* number of bytes */
+{
+ u_int gap, i;
+
+ ctxt->md5_n += len * 8; /* byte to bit */
+ gap = MD5_BUFLEN - ctxt->md5_i;
+
+ if (len >= gap) {
+ bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
+ gap);
+ md5_calc(ctxt->md5_buf, ctxt);
+
+ for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN) {
+ md5_calc((u_int8_t *)(input + i), ctxt);
+ }
+
+ ctxt->md5_i = len - i;
+ bcopy((void *)(input + i), (void *)ctxt->md5_buf, ctxt->md5_i);
+ } else {
+ bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
+ len);
+ ctxt->md5_i += len;
+ }
+}
+
+void md5_pad(ctxt)
+ md5_ctxt *ctxt;
+{
+ u_int gap;
+
+ /* Don't count up padding. Keep md5_n. */
+ gap = MD5_BUFLEN - ctxt->md5_i;
+ if (gap > 8) {
+ bcopy(md5_paddat,
+ (void *)(ctxt->md5_buf + ctxt->md5_i),
+ gap - sizeof(ctxt->md5_n));
+ } else {
+ /* including gap == 8 */
+ bcopy(md5_paddat, (void *)(ctxt->md5_buf + ctxt->md5_i),
+ gap);
+ md5_calc(ctxt->md5_buf, ctxt);
+ bcopy((md5_paddat + gap),
+ (void *)ctxt->md5_buf,
+ MD5_BUFLEN - sizeof(ctxt->md5_n));
+ }
+
+ /* 8 byte word */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ bcopy(&ctxt->md5_n8[0], &ctxt->md5_buf[56], 8);
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+ ctxt->md5_buf[56] = ctxt->md5_n8[7];
+ ctxt->md5_buf[57] = ctxt->md5_n8[6];
+ ctxt->md5_buf[58] = ctxt->md5_n8[5];
+ ctxt->md5_buf[59] = ctxt->md5_n8[4];
+ ctxt->md5_buf[60] = ctxt->md5_n8[3];
+ ctxt->md5_buf[61] = ctxt->md5_n8[2];
+ ctxt->md5_buf[62] = ctxt->md5_n8[1];
+ ctxt->md5_buf[63] = ctxt->md5_n8[0];
+#endif
+
+ md5_calc(ctxt->md5_buf, ctxt);
+}
+
+void md5_result(digest, ctxt)
+ u_int8_t *digest;
+ md5_ctxt *ctxt;
+{
+ /* 4 byte words */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ bcopy(&ctxt->md5_st8[0], digest, 16);
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+ digest[ 0] = ctxt->md5_st8[ 3]; digest[ 1] = ctxt->md5_st8[ 2];
+ digest[ 2] = ctxt->md5_st8[ 1]; digest[ 3] = ctxt->md5_st8[ 0];
+ digest[ 4] = ctxt->md5_st8[ 7]; digest[ 5] = ctxt->md5_st8[ 6];
+ digest[ 6] = ctxt->md5_st8[ 5]; digest[ 7] = ctxt->md5_st8[ 4];
+ digest[ 8] = ctxt->md5_st8[11]; digest[ 9] = ctxt->md5_st8[10];
+ digest[10] = ctxt->md5_st8[ 9]; digest[11] = ctxt->md5_st8[ 8];
+ digest[12] = ctxt->md5_st8[15]; digest[13] = ctxt->md5_st8[14];
+ digest[14] = ctxt->md5_st8[13]; digest[15] = ctxt->md5_st8[12];
+#endif
+}
+
+static void md5_calc(b64, ctxt)
+ u_int8_t *b64;
+ md5_ctxt *ctxt;
+{
+ u_int32_t A = ctxt->md5_sta;
+ u_int32_t B = ctxt->md5_stb;
+ u_int32_t C = ctxt->md5_stc;
+ u_int32_t D = ctxt->md5_std;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ u_int32_t *X = (u_int32_t *)b64;
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+ /* 4 byte words */
+ /* what a brute force but fast! */
+ u_int32_t X[16];
+ u_int8_t *y = (u_int8_t *)X;
+ y[ 0] = b64[ 3]; y[ 1] = b64[ 2]; y[ 2] = b64[ 1]; y[ 3] = b64[ 0];
+ y[ 4] = b64[ 7]; y[ 5] = b64[ 6]; y[ 6] = b64[ 5]; y[ 7] = b64[ 4];
+ y[ 8] = b64[11]; y[ 9] = b64[10]; y[10] = b64[ 9]; y[11] = b64[ 8];
+ y[12] = b64[15]; y[13] = b64[14]; y[14] = b64[13]; y[15] = b64[12];
+ y[16] = b64[19]; y[17] = b64[18]; y[18] = b64[17]; y[19] = b64[16];
+ y[20] = b64[23]; y[21] = b64[22]; y[22] = b64[21]; y[23] = b64[20];
+ y[24] = b64[27]; y[25] = b64[26]; y[26] = b64[25]; y[27] = b64[24];
+ y[28] = b64[31]; y[29] = b64[30]; y[30] = b64[29]; y[31] = b64[28];
+ y[32] = b64[35]; y[33] = b64[34]; y[34] = b64[33]; y[35] = b64[32];
+ y[36] = b64[39]; y[37] = b64[38]; y[38] = b64[37]; y[39] = b64[36];
+ y[40] = b64[43]; y[41] = b64[42]; y[42] = b64[41]; y[43] = b64[40];
+ y[44] = b64[47]; y[45] = b64[46]; y[46] = b64[45]; y[47] = b64[44];
+ y[48] = b64[51]; y[49] = b64[50]; y[50] = b64[49]; y[51] = b64[48];
+ y[52] = b64[55]; y[53] = b64[54]; y[54] = b64[53]; y[55] = b64[52];
+ y[56] = b64[59]; y[57] = b64[58]; y[58] = b64[57]; y[59] = b64[56];
+ y[60] = b64[63]; y[61] = b64[62]; y[62] = b64[61]; y[63] = b64[60];
+#endif
+
+ ROUND1(A, B, C, D, 0, Sa, 1); ROUND1(D, A, B, C, 1, Sb, 2);
+ ROUND1(C, D, A, B, 2, Sc, 3); ROUND1(B, C, D, A, 3, Sd, 4);
+ ROUND1(A, B, C, D, 4, Sa, 5); ROUND1(D, A, B, C, 5, Sb, 6);
+ ROUND1(C, D, A, B, 6, Sc, 7); ROUND1(B, C, D, A, 7, Sd, 8);
+ ROUND1(A, B, C, D, 8, Sa, 9); ROUND1(D, A, B, C, 9, Sb, 10);
+ ROUND1(C, D, A, B, 10, Sc, 11); ROUND1(B, C, D, A, 11, Sd, 12);
+ ROUND1(A, B, C, D, 12, Sa, 13); ROUND1(D, A, B, C, 13, Sb, 14);
+ ROUND1(C, D, A, B, 14, Sc, 15); ROUND1(B, C, D, A, 15, Sd, 16);
+
+ ROUND2(A, B, C, D, 1, Se, 17); ROUND2(D, A, B, C, 6, Sf, 18);
+ ROUND2(C, D, A, B, 11, Sg, 19); ROUND2(B, C, D, A, 0, Sh, 20);
+ ROUND2(A, B, C, D, 5, Se, 21); ROUND2(D, A, B, C, 10, Sf, 22);
+ ROUND2(C, D, A, B, 15, Sg, 23); ROUND2(B, C, D, A, 4, Sh, 24);
+ ROUND2(A, B, C, D, 9, Se, 25); ROUND2(D, A, B, C, 14, Sf, 26);
+ ROUND2(C, D, A, B, 3, Sg, 27); ROUND2(B, C, D, A, 8, Sh, 28);
+ ROUND2(A, B, C, D, 13, Se, 29); ROUND2(D, A, B, C, 2, Sf, 30);
+ ROUND2(C, D, A, B, 7, Sg, 31); ROUND2(B, C, D, A, 12, Sh, 32);
+
+ ROUND3(A, B, C, D, 5, Si, 33); ROUND3(D, A, B, C, 8, Sj, 34);
+ ROUND3(C, D, A, B, 11, Sk, 35); ROUND3(B, C, D, A, 14, Sl, 36);
+ ROUND3(A, B, C, D, 1, Si, 37); ROUND3(D, A, B, C, 4, Sj, 38);
+ ROUND3(C, D, A, B, 7, Sk, 39); ROUND3(B, C, D, A, 10, Sl, 40);
+ ROUND3(A, B, C, D, 13, Si, 41); ROUND3(D, A, B, C, 0, Sj, 42);
+ ROUND3(C, D, A, B, 3, Sk, 43); ROUND3(B, C, D, A, 6, Sl, 44);
+ ROUND3(A, B, C, D, 9, Si, 45); ROUND3(D, A, B, C, 12, Sj, 46);
+ ROUND3(C, D, A, B, 15, Sk, 47); ROUND3(B, C, D, A, 2, Sl, 48);
+
+ ROUND4(A, B, C, D, 0, Sm, 49); ROUND4(D, A, B, C, 7, Sn, 50);
+ ROUND4(C, D, A, B, 14, So, 51); ROUND4(B, C, D, A, 5, Sp, 52);
+ ROUND4(A, B, C, D, 12, Sm, 53); ROUND4(D, A, B, C, 3, Sn, 54);
+ ROUND4(C, D, A, B, 10, So, 55); ROUND4(B, C, D, A, 1, Sp, 56);
+ ROUND4(A, B, C, D, 8, Sm, 57); ROUND4(D, A, B, C, 15, Sn, 58);
+ ROUND4(C, D, A, B, 6, So, 59); ROUND4(B, C, D, A, 13, Sp, 60);
+ ROUND4(A, B, C, D, 4, Sm, 61); ROUND4(D, A, B, C, 11, Sn, 62);
+ ROUND4(C, D, A, B, 2, So, 63); ROUND4(B, C, D, A, 9, Sp, 64);
+
+ ctxt->md5_sta += A;
+ ctxt->md5_stb += B;
+ ctxt->md5_stc += C;
+ ctxt->md5_std += D;
+}
diff --git a/target/linux/generic/files/crypto/ocf/safe/md5.h b/target/linux/generic/files/crypto/ocf/safe/md5.h
new file mode 100644
index 000000000..690f5bfc1
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/safe/md5.h
@@ -0,0 +1,76 @@
+/* $FreeBSD: src/sys/crypto/md5.h,v 1.4 2002/03/20 05:13:50 alfred Exp $ */
+/* $KAME: md5.h,v 1.4 2000/03/27 04:36:22 sumikawa Exp $ */
+
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _NETINET6_MD5_H_
+#define _NETINET6_MD5_H_
+
+#define MD5_BUFLEN 64
+
+typedef struct {
+ union {
+ u_int32_t md5_state32[4];
+ u_int8_t md5_state8[16];
+ } md5_st;
+
+#define md5_sta md5_st.md5_state32[0]
+#define md5_stb md5_st.md5_state32[1]
+#define md5_stc md5_st.md5_state32[2]
+#define md5_std md5_st.md5_state32[3]
+#define md5_st8 md5_st.md5_state8
+
+ union {
+ u_int64_t md5_count64;
+ u_int8_t md5_count8[8];
+ } md5_count;
+#define md5_n md5_count.md5_count64
+#define md5_n8 md5_count.md5_count8
+
+ u_int md5_i;
+ u_int8_t md5_buf[MD5_BUFLEN];
+} md5_ctxt;
+
+extern void md5_init(md5_ctxt *);
+extern void md5_loop(md5_ctxt *, u_int8_t *, u_int);
+extern void md5_pad(md5_ctxt *);
+extern void md5_result(u_int8_t *, md5_ctxt *);
+
+/* compatibility */
+#define MD5_CTX md5_ctxt
+#define MD5Init(x) md5_init((x))
+#define MD5Update(x, y, z) md5_loop((x), (y), (z))
+#define MD5Final(x, y) \
+do { \
+ md5_pad((y)); \
+ md5_result((x), (y)); \
+} while (0)
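+
+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * hashing a buffer "data" of "datalen" bytes with the compatibility
+ * macros above.
+ *
+ *	MD5_CTX ctx;
+ *	u_int8_t digest[16];
+ *
+ *	MD5Init(&ctx);
+ *	MD5Update(&ctx, (u_int8_t *)data, datalen);
+ *	MD5Final(digest, &ctx);
+ */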
+
+#endif /* ! _NETINET6_MD5_H_*/
diff --git a/target/linux/generic/files/crypto/ocf/safe/safe.c b/target/linux/generic/files/crypto/ocf/safe/safe.c
new file mode 100644
index 000000000..141640e44
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/safe/safe.c
@@ -0,0 +1,2230 @@
+/*-
+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2004-2010 David McCullough
+ * The license and original author are listed below.
+ *
+ * Copyright (c) 2003 Sam Leffler, Errno Consulting
+ * Copyright (c) 2003 Global Technology Associates, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+__FBSDID("$FreeBSD: src/sys/dev/safe/safe.c,v 1.18 2007/03/21 03:42:50 sam Exp $");
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <asm/io.h>
+
+/*
+ * SafeNet SafeXcel-1141 hardware crypto accelerator
+ */
+
+#include <cryptodev.h>
+#include <uio.h>
+#include <safe/safereg.h>
+#include <safe/safevar.h>
+
+#if 1
+#define DPRINTF(a) do { \
+ if (debug) { \
+ printk("%s: ", sc ? \
+ device_get_nameunit(sc->sc_dev) : "safe"); \
+ printk a; \
+ } \
+ } while (0)
+#else
+#define DPRINTF(a)
+#endif
+
+/*
+ * until we find a cleaner way, include the BSD md5/sha1 code
+ * here
+ */
+#define HMAC_HACK 1
+#ifdef HMAC_HACK
+#include <safe/hmachack.h>
+#include <safe/md5.h>
+#include <safe/md5.c>
+#include <safe/sha1.h>
+#include <safe/sha1.c>
+#endif /* HMAC_HACK */
+
+/* add proc entry for this */
+struct safe_stats safestats;
+
+#define debug safe_debug
+int safe_debug = 0;
+module_param(safe_debug, int, 0644);
+MODULE_PARM_DESC(safe_debug, "Enable debug");
+
+static void safe_callback(struct safe_softc *, struct safe_ringentry *);
+static void safe_feed(struct safe_softc *, struct safe_ringentry *);
+#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
+static void safe_rng_init(struct safe_softc *);
+int safe_rngbufsize = 8; /* 32 bytes each read */
+module_param(safe_rngbufsize, int, 0644);
+MODULE_PARM_DESC(safe_rngbufsize, "RNG polling buffer size (32-bit words)");
+int safe_rngmaxalarm = 8; /* max alarms before reset */
+module_param(safe_rngmaxalarm, int, 0644);
+MODULE_PARM_DESC(safe_rngmaxalarm, "RNG max alarms before reset");
+#endif /* SAFE_NO_RNG */
+
+static void safe_totalreset(struct safe_softc *sc);
+static int safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op);
+static int safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op);
+static int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re);
+static int safe_kprocess(device_t dev, struct cryptkop *krp, int hint);
+static int safe_kstart(struct safe_softc *sc);
+static int safe_ksigbits(struct safe_softc *sc, struct crparam *cr);
+static void safe_kfeed(struct safe_softc *sc);
+static void safe_kpoll(unsigned long arg);
+static void safe_kload_reg(struct safe_softc *sc, u_int32_t off,
+ u_int32_t len, struct crparam *n);
+
+static int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
+static int safe_freesession(device_t, u_int64_t);
+static int safe_process(device_t, struct cryptop *, int);
+
+static device_method_t safe_methods = {
+ /* crypto device methods */
+ DEVMETHOD(cryptodev_newsession, safe_newsession),
+ DEVMETHOD(cryptodev_freesession,safe_freesession),
+ DEVMETHOD(cryptodev_process, safe_process),
+ DEVMETHOD(cryptodev_kprocess, safe_kprocess),
+};
+
+#define READ_REG(sc,r) readl((sc)->sc_base_addr + (r))
+#define WRITE_REG(sc,r,val) writel((val), (sc)->sc_base_addr + (r))
+
+#define SAFE_MAX_CHIPS 8
+static struct safe_softc *safe_chip_idx[SAFE_MAX_CHIPS];
+
+/*
+ * split our buffers up into safe DMAable byte fragments to avoid a lockup
+ * bug in the 1141 HW on rev 1.0.
+ */
+
+static int
+pci_map_linear(
+ struct safe_softc *sc,
+ struct safe_operand *buf,
+ void *addr,
+ int len)
+{
+ dma_addr_t tmp;
+ int chunk, tlen = len;
+
+ tmp = pci_map_single(sc->sc_pcidev, addr, len, PCI_DMA_BIDIRECTIONAL);
+
+ buf->mapsize += len;
+ while (len > 0) {
+ chunk = (len > sc->sc_max_dsize) ? sc->sc_max_dsize : len;
+ buf->segs[buf->nsegs].ds_addr = tmp;
+ buf->segs[buf->nsegs].ds_len = chunk;
+ buf->segs[buf->nsegs].ds_tlen = tlen;
+ buf->nsegs++;
+ tmp += chunk;
+ len -= chunk;
+ tlen = 0;
+ }
+ return 0;
+}
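+
+/*
+ * Illustrative sketch only (not used by the driver): how pci_map_linear()
+ * splits a mapping.  With sc_max_dsize == 256, a 600 byte buffer becomes
+ * three segments of 256, 256 and 88 bytes; only the first segment records
+ * the full transfer length (ds_tlen), so pci_unmap_operand() can undo the
+ * single pci_map_single() call that covers all of them.
+ */
+#if 0
+static void example_split(struct safe_softc *sc)
+{
+	struct safe_operand op;
+	void *buf = kmalloc(600, GFP_KERNEL);
+
+	memset(&op, 0, sizeof(op));
+	pci_map_linear(sc, &op, buf, 600);
+	/* op.nsegs == 3; op.segs[0].ds_len == 256, op.segs[0].ds_tlen == 600 */
+	/* op.segs[1].ds_len == 256 and op.segs[2].ds_len == 88, ds_tlen == 0 */
+	pci_unmap_operand(sc, &op);	/* unmaps via the first segment only */
+	kfree(buf);
+}
+#endif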
+
+/*
+ * map in a given uio buffer (great on some arches :-)
+ */
+
+static int
+pci_map_uio(struct safe_softc *sc, struct safe_operand *buf, struct uio *uio)
+{
+ struct iovec *iov = uio->uio_iov;
+ int n;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ buf->mapsize = 0;
+ buf->nsegs = 0;
+
+ for (n = 0; n < uio->uio_iovcnt; n++) {
+ pci_map_linear(sc, buf, iov->iov_base, iov->iov_len);
+ iov++;
+ }
+
+ /* identify this buffer by the first segment */
+ buf->map = (void *) buf->segs[0].ds_addr;
+ return(0);
+}
+
+/*
+ * map in a given sk_buff
+ */
+
+static int
+pci_map_skb(struct safe_softc *sc,struct safe_operand *buf,struct sk_buff *skb)
+{
+ int i;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ buf->mapsize = 0;
+ buf->nsegs = 0;
+
+ pci_map_linear(sc, buf, skb->data, skb_headlen(skb));
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ pci_map_linear(sc, buf,
+ page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) +
+ skb_shinfo(skb)->frags[i].page_offset,
+ skb_shinfo(skb)->frags[i].size);
+ }
+
+ /* identify this buffer by the first segment */
+ buf->map = (void *) buf->segs[0].ds_addr;
+ return(0);
+}
+
+
+#if 0 /* not needed at this time */
+static void
+pci_sync_operand(struct safe_softc *sc, struct safe_operand *buf)
+{
+ int i;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+ for (i = 0; i < buf->nsegs; i++)
+ pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
+ buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
+}
+#endif
+
+static void
+pci_unmap_operand(struct safe_softc *sc, struct safe_operand *buf)
+{
+ int i;
+ DPRINTF(("%s()\n", __FUNCTION__));
+ for (i = 0; i < buf->nsegs; i++) {
+ if (buf->segs[i].ds_tlen) {
+ DPRINTF(("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
+ pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
+ buf->segs[i].ds_tlen, PCI_DMA_BIDIRECTIONAL);
+ DPRINTF(("%s - unmap %d 0x%x %d done\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
+ }
+ buf->segs[i].ds_addr = 0;
+ buf->segs[i].ds_len = 0;
+ buf->segs[i].ds_tlen = 0;
+ }
+ buf->nsegs = 0;
+ buf->mapsize = 0;
+ buf->map = 0;
+}
+
+
+/*
+ * SafeXcel Interrupt routine
+ */
+static irqreturn_t
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+safe_intr(int irq, void *arg)
+#else
+safe_intr(int irq, void *arg, struct pt_regs *regs)
+#endif
+{
+ struct safe_softc *sc = arg;
+ int stat;
+ unsigned long flags;
+
+ stat = READ_REG(sc, SAFE_HM_STAT);
+
+ DPRINTF(("%s(stat=0x%x)\n", __FUNCTION__, stat));
+
+ if (stat == 0) /* shared irq, not for us */
+ return IRQ_NONE;
+
+ WRITE_REG(sc, SAFE_HI_CLR, stat); /* IACK */
+
+ if ((stat & SAFE_INT_PE_DDONE)) {
+ /*
+ * Descriptor(s) done; scan the ring and
+ * process completed operations.
+ */
+ spin_lock_irqsave(&sc->sc_ringmtx, flags);
+ while (sc->sc_back != sc->sc_front) {
+ struct safe_ringentry *re = sc->sc_back;
+
+#ifdef SAFE_DEBUG
+ if (debug) {
+ safe_dump_ringstate(sc, __func__);
+ safe_dump_request(sc, __func__, re);
+ }
+#endif
+ /*
+ * safe_process marks ring entries that were allocated
+			 * but not used with a csr of zero.  This ensures the
+ * ring front pointer never needs to be set backwards
+ * in the event that an entry is allocated but not used
+ * because of a setup error.
+ */
+ DPRINTF(("%s re->re_desc.d_csr=0x%x\n", __FUNCTION__, re->re_desc.d_csr));
+ if (re->re_desc.d_csr != 0) {
+ if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) {
+ DPRINTF(("%s !CSR_IS_DONE\n", __FUNCTION__));
+ break;
+ }
+ if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) {
+ DPRINTF(("%s !LEN_IS_DONE\n", __FUNCTION__));
+ break;
+ }
+ sc->sc_nqchip--;
+ safe_callback(sc, re);
+ }
+ if (++(sc->sc_back) == sc->sc_ringtop)
+ sc->sc_back = sc->sc_ring;
+ }
+ spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+ }
+
+ /*
+ * Check to see if we got any DMA Error
+ */
+ if (stat & SAFE_INT_PE_ERROR) {
+ printk("%s: dmaerr dmastat %08x\n", device_get_nameunit(sc->sc_dev),
+ (int)READ_REG(sc, SAFE_PE_DMASTAT));
+ safestats.st_dmaerr++;
+ safe_totalreset(sc);
+#if 0
+ safe_feed(sc);
+#endif
+ }
+
+ if (sc->sc_needwakeup) { /* XXX check high watermark */
+ int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
+ DPRINTF(("%s: wakeup crypto %x\n", __func__,
+ sc->sc_needwakeup));
+ sc->sc_needwakeup &= ~wakeup;
+ crypto_unblock(sc->sc_cid, wakeup);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * safe_feed() - post a request to chip
+ */
+static void
+safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
+{
+ DPRINTF(("%s()\n", __FUNCTION__));
+#ifdef SAFE_DEBUG
+ if (debug) {
+ safe_dump_ringstate(sc, __func__);
+ safe_dump_request(sc, __func__, re);
+ }
+#endif
+ sc->sc_nqchip++;
+ if (sc->sc_nqchip > safestats.st_maxqchip)
+ safestats.st_maxqchip = sc->sc_nqchip;
+ /* poke h/w to check descriptor ring, any value can be written */
+ WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
+}
+
+#define N(a) (sizeof(a) / sizeof (a[0]))
+static void
+safe_setup_enckey(struct safe_session *ses, caddr_t key)
+{
+ int i;
+
+ bcopy(key, ses->ses_key, ses->ses_klen / 8);
+
+	/* PE is little-endian, ensure proper byte order */
+ for (i = 0; i < N(ses->ses_key); i++)
+ ses->ses_key[i] = htole32(ses->ses_key[i]);
+}
+
+static void
+safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
+{
+#ifdef HMAC_HACK
+ MD5_CTX md5ctx;
+ SHA1_CTX sha1ctx;
+ int i;
+
+
+ for (i = 0; i < klen; i++)
+ key[i] ^= HMAC_IPAD_VAL;
+
+ if (algo == CRYPTO_MD5_HMAC) {
+ MD5Init(&md5ctx);
+ MD5Update(&md5ctx, key, klen);
+ MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
+ bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
+ } else {
+ SHA1Init(&sha1ctx);
+ SHA1Update(&sha1ctx, key, klen);
+ SHA1Update(&sha1ctx, hmac_ipad_buffer,
+ SHA1_HMAC_BLOCK_LEN - klen);
+ bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
+ }
+
+ for (i = 0; i < klen; i++)
+ key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
+
+ if (algo == CRYPTO_MD5_HMAC) {
+ MD5Init(&md5ctx);
+ MD5Update(&md5ctx, key, klen);
+ MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
+ bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
+ } else {
+ SHA1Init(&sha1ctx);
+ SHA1Update(&sha1ctx, key, klen);
+ SHA1Update(&sha1ctx, hmac_opad_buffer,
+ SHA1_HMAC_BLOCK_LEN - klen);
+ bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
+ }
+
+ for (i = 0; i < klen; i++)
+ key[i] ^= HMAC_OPAD_VAL;
+
+#if 0
+ /*
+ * this code prevents SHA working on a BE host,
+ * so it is obviously wrong. I think the byte
+ * swap setup we do with the chip fixes this for us
+ */
+
+	/* PE is little-endian, ensure proper byte order */
+ for (i = 0; i < N(ses->ses_hminner); i++) {
+ ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
+ ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
+ }
+#endif
+#else /* HMAC_HACK */
+ printk("safe: md5/sha not implemented\n");
+#endif /* HMAC_HACK */
+}
+#undef N
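+
+/*
+ * Note on the precomputation above: HMAC(K, m) is defined as
+ * H((K ^ opad) || H((K ^ ipad) || m)).  By hashing only the padded key
+ * blocks here and saving the two intermediate chaining states in
+ * ses_hminner/ses_hmouter, the chip can resume the inner hash over the
+ * packet data and then apply a single outer compression per request,
+ * instead of rehashing the key for every operation.
+ */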
+
+/*
+ * Allocate a new 'session' and return an encoded session id. 'sidp'
+ * contains our registration id, and should contain an encoded session
+ * id on successful allocation.
+ */
+static int
+safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
+{
+ struct safe_softc *sc = device_get_softc(dev);
+ struct cryptoini *c, *encini = NULL, *macini = NULL;
+ struct safe_session *ses = NULL;
+ int sesn;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ if (sidp == NULL || cri == NULL || sc == NULL)
+ return (EINVAL);
+
+ for (c = cri; c != NULL; c = c->cri_next) {
+ if (c->cri_alg == CRYPTO_MD5_HMAC ||
+ c->cri_alg == CRYPTO_SHA1_HMAC ||
+ c->cri_alg == CRYPTO_NULL_HMAC) {
+ if (macini)
+ return (EINVAL);
+ macini = c;
+ } else if (c->cri_alg == CRYPTO_DES_CBC ||
+ c->cri_alg == CRYPTO_3DES_CBC ||
+ c->cri_alg == CRYPTO_AES_CBC ||
+ c->cri_alg == CRYPTO_NULL_CBC) {
+ if (encini)
+ return (EINVAL);
+ encini = c;
+ } else
+ return (EINVAL);
+ }
+ if (encini == NULL && macini == NULL)
+ return (EINVAL);
+ if (encini) { /* validate key length */
+ switch (encini->cri_alg) {
+ case CRYPTO_DES_CBC:
+ if (encini->cri_klen != 64)
+ return (EINVAL);
+ break;
+ case CRYPTO_3DES_CBC:
+ if (encini->cri_klen != 192)
+ return (EINVAL);
+ break;
+ case CRYPTO_AES_CBC:
+ if (encini->cri_klen != 128 &&
+ encini->cri_klen != 192 &&
+ encini->cri_klen != 256)
+ return (EINVAL);
+ break;
+ }
+ }
+
+ if (sc->sc_sessions == NULL) {
+ ses = sc->sc_sessions = (struct safe_session *)
+ kmalloc(sizeof(struct safe_session), SLAB_ATOMIC);
+ if (ses == NULL)
+ return (ENOMEM);
+ memset(ses, 0, sizeof(struct safe_session));
+ sesn = 0;
+ sc->sc_nsessions = 1;
+ } else {
+ for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
+ if (sc->sc_sessions[sesn].ses_used == 0) {
+ ses = &sc->sc_sessions[sesn];
+ break;
+ }
+ }
+
+ if (ses == NULL) {
+ sesn = sc->sc_nsessions;
+ ses = (struct safe_session *)
+ kmalloc((sesn + 1) * sizeof(struct safe_session), SLAB_ATOMIC);
+ if (ses == NULL)
+ return (ENOMEM);
+ memset(ses, 0, (sesn + 1) * sizeof(struct safe_session));
+ bcopy(sc->sc_sessions, ses, sesn *
+ sizeof(struct safe_session));
+ bzero(sc->sc_sessions, sesn *
+ sizeof(struct safe_session));
+ kfree(sc->sc_sessions);
+ sc->sc_sessions = ses;
+ ses = &sc->sc_sessions[sesn];
+ sc->sc_nsessions++;
+ }
+ }
+
+ bzero(ses, sizeof(struct safe_session));
+ ses->ses_used = 1;
+
+ if (encini) {
+ ses->ses_klen = encini->cri_klen;
+ if (encini->cri_key != NULL)
+ safe_setup_enckey(ses, encini->cri_key);
+ }
+
+ if (macini) {
+ ses->ses_mlen = macini->cri_mlen;
+ if (ses->ses_mlen == 0) {
+ if (macini->cri_alg == CRYPTO_MD5_HMAC)
+ ses->ses_mlen = MD5_HASH_LEN;
+ else
+ ses->ses_mlen = SHA1_HASH_LEN;
+ }
+
+ if (macini->cri_key != NULL) {
+ safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
+ macini->cri_klen / 8);
+ }
+ }
+
+ *sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
+ return (0);
+}
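+
+/*
+ * Illustrative sketch (not part of the driver): roughly how an OCF
+ * consumer builds the cryptoini chain that ends up here, assuming the
+ * usual OCF crypto_newsession() entry point (its third argument differs
+ * slightly between OCF versions).  aes_key and hmac_key are hypothetical
+ * caller buffers.  Key lengths are in bits, which is why the driver
+ * checks 128/192/256 for AES above.
+ */
+#if 0
+	struct cryptoini cria, crie;
+	u_int64_t sid;
+
+	memset(&crie, 0, sizeof(crie));
+	crie.cri_alg = CRYPTO_AES_CBC;
+	crie.cri_klen = 128;			/* bits */
+	crie.cri_key = aes_key;			/* 16 bytes */
+
+	memset(&cria, 0, sizeof(cria));
+	cria.cri_alg = CRYPTO_SHA1_HMAC;
+	cria.cri_klen = 20 * 8;			/* bits */
+	cria.cri_key = hmac_key;
+	cria.cri_next = &crie;			/* MAC then cipher */
+
+	crypto_newsession(&sid, &cria, CRYPTOCAP_F_HARDWARE);
+#endif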
+
+/*
+ * Deallocate a session.
+ */
+static int
+safe_freesession(device_t dev, u_int64_t tid)
+{
+ struct safe_softc *sc = device_get_softc(dev);
+ int session, ret;
+ u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ if (sc == NULL)
+ return (EINVAL);
+
+ session = SAFE_SESSION(sid);
+ if (session < sc->sc_nsessions) {
+ bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
+ ret = 0;
+ } else
+ ret = EINVAL;
+ return (ret);
+}
+
+
+static int
+safe_process(device_t dev, struct cryptop *crp, int hint)
+{
+ struct safe_softc *sc = device_get_softc(dev);
+ int err = 0, i, nicealign, uniform;
+ struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
+ int bypass, oplen, ivsize;
+ caddr_t iv;
+ int16_t coffset;
+ struct safe_session *ses;
+ struct safe_ringentry *re;
+ struct safe_sarec *sa;
+ struct safe_pdesc *pd;
+ u_int32_t cmd0, cmd1, staterec, rand_iv[4];
+ unsigned long flags;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
+ safestats.st_invalid++;
+ return (EINVAL);
+ }
+ if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
+ safestats.st_badsession++;
+ return (EINVAL);
+ }
+
+ spin_lock_irqsave(&sc->sc_ringmtx, flags);
+ if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
+ safestats.st_ringfull++;
+ sc->sc_needwakeup |= CRYPTO_SYMQ;
+ spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+ return (ERESTART);
+ }
+ re = sc->sc_front;
+
+ staterec = re->re_sa.sa_staterec; /* save */
+ /* NB: zero everything but the PE descriptor */
+ bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
+ re->re_sa.sa_staterec = staterec; /* restore */
+
+ re->re_crp = crp;
+ re->re_sesn = SAFE_SESSION(crp->crp_sid);
+
+ re->re_src.nsegs = 0;
+ re->re_dst.nsegs = 0;
+
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ re->re_src_skb = (struct sk_buff *)crp->crp_buf;
+ re->re_dst_skb = (struct sk_buff *)crp->crp_buf;
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ re->re_src_io = (struct uio *)crp->crp_buf;
+ re->re_dst_io = (struct uio *)crp->crp_buf;
+ } else {
+ safestats.st_badflags++;
+ err = EINVAL;
+ goto errout; /* XXX we don't handle contiguous blocks! */
+ }
+
+ sa = &re->re_sa;
+ ses = &sc->sc_sessions[re->re_sesn];
+
+ crd1 = crp->crp_desc;
+ if (crd1 == NULL) {
+ safestats.st_nodesc++;
+ err = EINVAL;
+ goto errout;
+ }
+ crd2 = crd1->crd_next;
+
+ cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */
+ cmd1 = 0;
+ if (crd2 == NULL) {
+ if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
+ crd1->crd_alg == CRYPTO_SHA1_HMAC ||
+ crd1->crd_alg == CRYPTO_NULL_HMAC) {
+ maccrd = crd1;
+ enccrd = NULL;
+ cmd0 |= SAFE_SA_CMD0_OP_HASH;
+ } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
+ crd1->crd_alg == CRYPTO_3DES_CBC ||
+ crd1->crd_alg == CRYPTO_AES_CBC ||
+ crd1->crd_alg == CRYPTO_NULL_CBC) {
+ maccrd = NULL;
+ enccrd = crd1;
+ cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
+ } else {
+ safestats.st_badalg++;
+ err = EINVAL;
+ goto errout;
+ }
+ } else {
+ if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
+ crd1->crd_alg == CRYPTO_SHA1_HMAC ||
+ crd1->crd_alg == CRYPTO_NULL_HMAC) &&
+ (crd2->crd_alg == CRYPTO_DES_CBC ||
+ crd2->crd_alg == CRYPTO_3DES_CBC ||
+ crd2->crd_alg == CRYPTO_AES_CBC ||
+ crd2->crd_alg == CRYPTO_NULL_CBC) &&
+ ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
+ maccrd = crd1;
+ enccrd = crd2;
+ } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
+ crd1->crd_alg == CRYPTO_3DES_CBC ||
+ crd1->crd_alg == CRYPTO_AES_CBC ||
+ crd1->crd_alg == CRYPTO_NULL_CBC) &&
+ (crd2->crd_alg == CRYPTO_MD5_HMAC ||
+ crd2->crd_alg == CRYPTO_SHA1_HMAC ||
+ crd2->crd_alg == CRYPTO_NULL_HMAC) &&
+ (crd1->crd_flags & CRD_F_ENCRYPT)) {
+ enccrd = crd1;
+ maccrd = crd2;
+ } else {
+ safestats.st_badalg++;
+ err = EINVAL;
+ goto errout;
+ }
+ cmd0 |= SAFE_SA_CMD0_OP_BOTH;
+ }
+
+ if (enccrd) {
+ if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
+ safe_setup_enckey(ses, enccrd->crd_key);
+
+ if (enccrd->crd_alg == CRYPTO_DES_CBC) {
+ cmd0 |= SAFE_SA_CMD0_DES;
+ cmd1 |= SAFE_SA_CMD1_CBC;
+ ivsize = 2*sizeof(u_int32_t);
+ } else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
+ cmd0 |= SAFE_SA_CMD0_3DES;
+ cmd1 |= SAFE_SA_CMD1_CBC;
+ ivsize = 2*sizeof(u_int32_t);
+ } else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
+ cmd0 |= SAFE_SA_CMD0_AES;
+ cmd1 |= SAFE_SA_CMD1_CBC;
+ if (ses->ses_klen == 128)
+ cmd1 |= SAFE_SA_CMD1_AES128;
+ else if (ses->ses_klen == 192)
+ cmd1 |= SAFE_SA_CMD1_AES192;
+ else
+ cmd1 |= SAFE_SA_CMD1_AES256;
+ ivsize = 4*sizeof(u_int32_t);
+ } else {
+ cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
+ ivsize = 0;
+ }
+
+ /*
+ * Setup encrypt/decrypt state. When using basic ops
+ * we can't use an inline IV because hash/crypt offset
+ * must be from the end of the IV to the start of the
+ * crypt data and this leaves out the preceding header
+ * from the hash calculation. Instead we place the IV
+ * in the state record and set the hash/crypt offset to
+ * copy both the header+IV.
+ */
+ if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+ cmd0 |= SAFE_SA_CMD0_OUTBOUND;
+
+ if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+ iv = enccrd->crd_iv;
+ else
+ read_random((iv = (caddr_t) &rand_iv[0]), sizeof(rand_iv));
+ if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ enccrd->crd_inject, ivsize, iv);
+ }
+ bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
+ /* make iv LE */
+ for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
+ re->re_sastate.sa_saved_iv[i] =
+ cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
+ cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
+ re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
+ } else {
+ cmd0 |= SAFE_SA_CMD0_INBOUND;
+
+ if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
+ bcopy(enccrd->crd_iv,
+ re->re_sastate.sa_saved_iv, ivsize);
+ } else {
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ enccrd->crd_inject, ivsize,
+ (caddr_t)re->re_sastate.sa_saved_iv);
+ }
+ /* make iv LE */
+ for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
+ re->re_sastate.sa_saved_iv[i] =
+ cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
+ cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
+ }
+ /*
+ * For basic encryption use the zero pad algorithm.
+ * This pads results to an 8-byte boundary and
+ * suppresses padding verification for inbound (i.e.
+ * decrypt) operations.
+ *
+ * NB: Not sure if the 8-byte pad boundary is a problem.
+ */
+ cmd0 |= SAFE_SA_CMD0_PAD_ZERO;
+
+ /* XXX assert key bufs have the same size */
+ bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
+ }
+
+ if (maccrd) {
+ if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
+ safe_setup_mackey(ses, maccrd->crd_alg,
+ maccrd->crd_key, maccrd->crd_klen / 8);
+ }
+
+ if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
+ cmd0 |= SAFE_SA_CMD0_MD5;
+ cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
+ } else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
+ cmd0 |= SAFE_SA_CMD0_SHA1;
+ cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
+ } else {
+ cmd0 |= SAFE_SA_CMD0_HASH_NULL;
+ }
+ /*
+ * Digest data is loaded from the SA and the hash
+ * result is saved to the state block where we
+ * retrieve it for return to the caller.
+ */
+ /* XXX assert digest bufs have the same size */
+ bcopy(ses->ses_hminner, sa->sa_indigest,
+ sizeof(sa->sa_indigest));
+ bcopy(ses->ses_hmouter, sa->sa_outdigest,
+ sizeof(sa->sa_outdigest));
+
+ cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
+ re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
+ }
+
+ if (enccrd && maccrd) {
+ /*
+ * The offset from hash data to the start of
+ * crypt data is the difference in the skips.
+ */
+ bypass = maccrd->crd_skip;
+ coffset = enccrd->crd_skip - maccrd->crd_skip;
+ if (coffset < 0) {
+ DPRINTF(("%s: hash does not precede crypt; "
+ "mac skip %u enc skip %u\n",
+ __func__, maccrd->crd_skip, enccrd->crd_skip));
+ safestats.st_skipmismatch++;
+ err = EINVAL;
+ goto errout;
+ }
+ oplen = enccrd->crd_skip + enccrd->crd_len;
+ if (maccrd->crd_skip + maccrd->crd_len != oplen) {
+ DPRINTF(("%s: hash amount %u != crypt amount %u\n",
+ __func__, maccrd->crd_skip + maccrd->crd_len,
+ oplen));
+ safestats.st_lenmismatch++;
+ err = EINVAL;
+ goto errout;
+ }
+#ifdef SAFE_DEBUG
+ if (debug) {
+ printf("mac: skip %d, len %d, inject %d\n",
+ maccrd->crd_skip, maccrd->crd_len,
+ maccrd->crd_inject);
+ printf("enc: skip %d, len %d, inject %d\n",
+ enccrd->crd_skip, enccrd->crd_len,
+ enccrd->crd_inject);
+ printf("bypass %d coffset %d oplen %d\n",
+ bypass, coffset, oplen);
+ }
+#endif
+ if (coffset & 3) { /* offset must be 32-bit aligned */
+ DPRINTF(("%s: coffset %u misaligned\n",
+ __func__, coffset));
+ safestats.st_coffmisaligned++;
+ err = EINVAL;
+ goto errout;
+ }
+ coffset >>= 2;
+ if (coffset > 255) { /* offset must be <256 dwords */
+ DPRINTF(("%s: coffset %u too big\n",
+ __func__, coffset));
+ safestats.st_cofftoobig++;
+ err = EINVAL;
+ goto errout;
+ }
+ /*
+ * Tell the hardware to copy the header to the output.
+ * The header is defined as the data from the end of
+ * the bypass to the start of data to be encrypted.
+ * Typically this is the inline IV. Note that you need
+ * to do this even if src+dst are the same; it appears
+ * that w/o this bit the crypted data is written
+ * immediately after the bypass data.
+ */
+ cmd1 |= SAFE_SA_CMD1_HDRCOPY;
+ /*
+ * Disable IP header mutable bit handling. This is
+ * needed to get correct HMAC calculations.
+ */
+ cmd1 |= SAFE_SA_CMD1_MUTABLE;
+ } else {
+ if (enccrd) {
+ bypass = enccrd->crd_skip;
+ oplen = bypass + enccrd->crd_len;
+ } else {
+ bypass = maccrd->crd_skip;
+ oplen = bypass + maccrd->crd_len;
+ }
+ coffset = 0;
+ }
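+	/*
+	 * Worked example (illustrative): for an ESP-style request with
+	 * maccrd->crd_skip = 0, crd_len = 100 and enccrd->crd_skip = 16,
+	 * crd_len = 84, the code above yields bypass = 0, oplen = 100 and
+	 * coffset = 16 bytes, which passes the 32-bit alignment check and
+	 * becomes 4 dwords.
+	 */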
+ /* XXX verify multiple of 4 when using s/g */
+ if (bypass > 96) { /* bypass offset must be <= 96 bytes */
+ DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
+ safestats.st_bypasstoobig++;
+ err = EINVAL;
+ goto errout;
+ }
+
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ if (pci_map_skb(sc, &re->re_src, re->re_src_skb)) {
+ safestats.st_noload++;
+ err = ENOMEM;
+ goto errout;
+ }
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ if (pci_map_uio(sc, &re->re_src, re->re_src_io)) {
+ safestats.st_noload++;
+ err = ENOMEM;
+ goto errout;
+ }
+ }
+ nicealign = safe_dmamap_aligned(sc, &re->re_src);
+ uniform = safe_dmamap_uniform(sc, &re->re_src);
+
+ DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
+ nicealign, uniform, re->re_src.nsegs));
+ if (re->re_src.nsegs > 1) {
+ re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
+ ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
+ for (i = 0; i < re->re_src_nsegs; i++) {
+ /* NB: no need to check if there's space */
+ pd = sc->sc_spfree;
+ if (++(sc->sc_spfree) == sc->sc_springtop)
+ sc->sc_spfree = sc->sc_spring;
+
+ KASSERT((pd->pd_flags&3) == 0 ||
+ (pd->pd_flags&3) == SAFE_PD_DONE,
+ ("bogus source particle descriptor; flags %x",
+ pd->pd_flags));
+ pd->pd_addr = re->re_src_segs[i].ds_addr;
+ pd->pd_size = re->re_src_segs[i].ds_len;
+ pd->pd_flags = SAFE_PD_READY;
+ }
+ cmd0 |= SAFE_SA_CMD0_IGATHER;
+ } else {
+ /*
+ * No need for gather, reference the operand directly.
+ */
+ re->re_desc.d_src = re->re_src_segs[0].ds_addr;
+ }
+
+ if (enccrd == NULL && maccrd != NULL) {
+ /*
+ * Hash op; no destination needed.
+ */
+ } else {
+ if (crp->crp_flags & (CRYPTO_F_IOV|CRYPTO_F_SKBUF)) {
+ if (!nicealign) {
+ safestats.st_iovmisaligned++;
+ err = EINVAL;
+ goto errout;
+ }
+ if (uniform != 1) {
+ device_printf(sc->sc_dev, "!uniform source\n");
+ if (!uniform) {
+ /*
+ * There's no way to handle the DMA
+ * requirements with this uio. We
+ * could create a separate DMA area for
+ * the result and then copy it back,
+ * but for now we just bail and return
+ * an error. Note that uio requests
+ * > SAFE_MAX_DSIZE are handled because
+ * the DMA map and segment list for the
+					 * destination will result in a
+ * destination particle list that does
+ * the necessary scatter DMA.
+ */
+ safestats.st_iovnotuniform++;
+ err = EINVAL;
+ goto errout;
+ }
+ } else
+ re->re_dst = re->re_src;
+ } else {
+ safestats.st_badflags++;
+ err = EINVAL;
+ goto errout;
+ }
+
+ if (re->re_dst.nsegs > 1) {
+ re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
+ ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
+ for (i = 0; i < re->re_dst_nsegs; i++) {
+ pd = sc->sc_dpfree;
+ KASSERT((pd->pd_flags&3) == 0 ||
+ (pd->pd_flags&3) == SAFE_PD_DONE,
+ ("bogus dest particle descriptor; flags %x",
+ pd->pd_flags));
+ if (++(sc->sc_dpfree) == sc->sc_dpringtop)
+ sc->sc_dpfree = sc->sc_dpring;
+ pd->pd_addr = re->re_dst_segs[i].ds_addr;
+ pd->pd_flags = SAFE_PD_READY;
+ }
+ cmd0 |= SAFE_SA_CMD0_OSCATTER;
+ } else {
+ /*
+ * No need for scatter, reference the operand directly.
+ */
+ re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
+ }
+ }
+
+ /*
+ * All done with setup; fillin the SA command words
+ * and the packet engine descriptor. The operation
+ * is now ready for submission to the hardware.
+ */
+ sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
+ sa->sa_cmd1 = cmd1
+ | (coffset << SAFE_SA_CMD1_OFFSET_S)
+ | SAFE_SA_CMD1_SAREV1 /* Rev 1 SA data structure */
+ | SAFE_SA_CMD1_SRPCI
+ ;
+ /*
+ * NB: the order of writes is important here. In case the
+ * chip is scanning the ring because of an outstanding request
+ * it might nab this one too. In that case we need to make
+ * sure the setup is complete before we write the length
+ * field of the descriptor as it signals the descriptor is
+ * ready for processing.
+ */
+ re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
+ if (maccrd)
+ re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
+ wmb();
+ re->re_desc.d_len = oplen
+ | SAFE_PE_LEN_READY
+ | (bypass << SAFE_PE_LEN_BYPASS_S)
+ ;
+
+ safestats.st_ipackets++;
+ safestats.st_ibytes += oplen;
+
+ if (++(sc->sc_front) == sc->sc_ringtop)
+ sc->sc_front = sc->sc_ring;
+
+ /* XXX honor batching */
+ safe_feed(sc, re);
+ spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+ return (0);
+
+errout:
+ if (re->re_src.map != re->re_dst.map)
+ pci_unmap_operand(sc, &re->re_dst);
+ if (re->re_src.map)
+ pci_unmap_operand(sc, &re->re_src);
+ spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+ if (err != ERESTART) {
+ crp->crp_etype = err;
+ crypto_done(crp);
+ } else {
+ sc->sc_needwakeup |= CRYPTO_SYMQ;
+ }
+ return (err);
+}
+
+static void
+safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
+{
+ struct cryptop *crp = (struct cryptop *)re->re_crp;
+ struct cryptodesc *crd;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ safestats.st_opackets++;
+ safestats.st_obytes += re->re_dst.mapsize;
+
+ if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
+ device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
+ re->re_desc.d_csr,
+ re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
+ safestats.st_peoperr++;
+ crp->crp_etype = EIO; /* something more meaningful? */
+ }
+
+ if (re->re_dst.map != NULL && re->re_dst.map != re->re_src.map)
+ pci_unmap_operand(sc, &re->re_dst);
+ pci_unmap_operand(sc, &re->re_src);
+
+ /*
+	 * If the result was written to a different buffer chain, swap
+ * it in as the return value and reclaim the original.
+ */
+ if ((crp->crp_flags & CRYPTO_F_SKBUF) && re->re_src_skb != re->re_dst_skb) {
+ device_printf(sc->sc_dev, "no CRYPTO_F_SKBUF swapping support\n");
+ /* kfree_skb(skb) */
+ /* crp->crp_buf = (caddr_t)re->re_dst_skb */
+ return;
+ }
+
+ if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
+ /* copy out ICV result */
+ for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+ if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
+ crd->crd_alg == CRYPTO_SHA1_HMAC ||
+ crd->crd_alg == CRYPTO_NULL_HMAC))
+ continue;
+ if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
+ /*
+ * SHA-1 ICV's are byte-swapped; fix 'em up
+				 * before copying them to their destination.
+ */
+ re->re_sastate.sa_saved_indigest[0] =
+ cpu_to_be32(re->re_sastate.sa_saved_indigest[0]);
+ re->re_sastate.sa_saved_indigest[1] =
+ cpu_to_be32(re->re_sastate.sa_saved_indigest[1]);
+ re->re_sastate.sa_saved_indigest[2] =
+ cpu_to_be32(re->re_sastate.sa_saved_indigest[2]);
+ } else {
+ re->re_sastate.sa_saved_indigest[0] =
+ cpu_to_le32(re->re_sastate.sa_saved_indigest[0]);
+ re->re_sastate.sa_saved_indigest[1] =
+ cpu_to_le32(re->re_sastate.sa_saved_indigest[1]);
+ re->re_sastate.sa_saved_indigest[2] =
+ cpu_to_le32(re->re_sastate.sa_saved_indigest[2]);
+ }
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject,
+ sc->sc_sessions[re->re_sesn].ses_mlen,
+ (caddr_t)re->re_sastate.sa_saved_indigest);
+ break;
+ }
+ }
+ crypto_done(crp);
+}
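+
+/*
+ * NB on the ICV copy-out above: only the first three 32-bit words of the
+ * saved digest are byte-swapped, which matches the common 96-bit
+ * truncated HMAC used by IPsec (ses_mlen == 12).  Sessions that request
+ * the full MD5/SHA1 digest length appear to receive the remaining words
+ * in whatever byte order the chip left them in.
+ */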
+
+
+#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
+#define SAFE_RNG_MAXWAIT 1000
+
+static void
+safe_rng_init(struct safe_softc *sc)
+{
+ u_int32_t w, v;
+ int i;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ WRITE_REG(sc, SAFE_RNG_CTRL, 0);
+ /* use default value according to the manual */
+ WRITE_REG(sc, SAFE_RNG_CNFG, 0x834); /* magic from SafeNet */
+ WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
+
+ /*
+	 * There is a bug in rev 1.0 of the 1140: when the RNG is
+	 * brought out of reset, the ready status flag does not work
+	 * until the RNG has finished its internal initialization.
+	 *
+	 * So, to determine that the device has completed its
+	 * initialization, we must read the data register (honoring the
+	 * status register in case it is already initialized), then read
+	 * the data register until it changes from the first read.
+	 * Once it changes, read the data register until it changes
+	 * again.  At that point the RNG is considered initialized.
+	 * This can take between 750ms and 1000ms.
+ */
+ i = 0;
+ w = READ_REG(sc, SAFE_RNG_OUT);
+ do {
+ v = READ_REG(sc, SAFE_RNG_OUT);
+ if (v != w) {
+ w = v;
+ break;
+ }
+ DELAY(10);
+ } while (++i < SAFE_RNG_MAXWAIT);
+
+ /* Wait Until data changes again */
+ i = 0;
+ do {
+ v = READ_REG(sc, SAFE_RNG_OUT);
+ if (v != w)
+ break;
+ DELAY(10);
+ } while (++i < SAFE_RNG_MAXWAIT);
+}
+
+static __inline void
+safe_rng_disable_short_cycle(struct safe_softc *sc)
+{
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ WRITE_REG(sc, SAFE_RNG_CTRL,
+ READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
+}
+
+static __inline void
+safe_rng_enable_short_cycle(struct safe_softc *sc)
+{
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ WRITE_REG(sc, SAFE_RNG_CTRL,
+ READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
+}
+
+static __inline u_int32_t
+safe_rng_read(struct safe_softc *sc)
+{
+ int i;
+
+ i = 0;
+ while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
+ ;
+ return READ_REG(sc, SAFE_RNG_OUT);
+}
+
+static int
+safe_read_random(void *arg, u_int32_t *buf, int maxwords)
+{
+ struct safe_softc *sc = (struct safe_softc *) arg;
+ int i, rc;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ safestats.st_rng++;
+ /*
+ * Fetch the next block of data.
+ */
+ if (maxwords > safe_rngbufsize)
+ maxwords = safe_rngbufsize;
+ if (maxwords > SAFE_RNG_MAXBUFSIZ)
+ maxwords = SAFE_RNG_MAXBUFSIZ;
+retry:
+ /* read as much as we can */
+ for (rc = 0; rc < maxwords; rc++) {
+ if (READ_REG(sc, SAFE_RNG_STAT) != 0)
+ break;
+ buf[rc] = READ_REG(sc, SAFE_RNG_OUT);
+ }
+ if (rc == 0)
+ return 0;
+ /*
+ * Check the comparator alarm count and reset the h/w if
+ * it exceeds our threshold. This guards against the
+ * hardware oscillators resonating with external signals.
+ */
+ if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
+ u_int32_t freq_inc, w;
+
+ DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
+ (unsigned)READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
+ safestats.st_rngalarm++;
+ safe_rng_enable_short_cycle(sc);
+ freq_inc = 18;
+ for (i = 0; i < 64; i++) {
+ w = READ_REG(sc, SAFE_RNG_CNFG);
+ freq_inc = ((w + freq_inc) & 0x3fL);
+ w = ((w & ~0x3fL) | freq_inc);
+ WRITE_REG(sc, SAFE_RNG_CNFG, w);
+
+ WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
+
+ (void) safe_rng_read(sc);
+ DELAY(25);
+
+ if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
+ safe_rng_disable_short_cycle(sc);
+ goto retry;
+ }
+ freq_inc = 1;
+ }
+ safe_rng_disable_short_cycle(sc);
+ } else
+ WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
+
+ return(rc);
+}
+#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
+
+
+/*
+ * Resets the board.  Values in the registers are left as-is
+ * from the reset (i.e. initial values are assigned elsewhere).
+ */
+static void
+safe_reset_board(struct safe_softc *sc)
+{
+ u_int32_t v;
+ /*
+ * Reset the device. The manual says no delay
+ * is needed between marking and clearing reset.
+ */
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ v = READ_REG(sc, SAFE_PE_DMACFG) &~
+ (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
+ SAFE_PE_DMACFG_SGRESET);
+ WRITE_REG(sc, SAFE_PE_DMACFG, v
+ | SAFE_PE_DMACFG_PERESET
+ | SAFE_PE_DMACFG_PDRRESET
+ | SAFE_PE_DMACFG_SGRESET);
+ WRITE_REG(sc, SAFE_PE_DMACFG, v);
+}
+
+/*
+ * Initialize registers we need to touch only once.
+ */
+static void
+safe_init_board(struct safe_softc *sc)
+{
+ u_int32_t v, dwords;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ v = READ_REG(sc, SAFE_PE_DMACFG);
+ v &=~ ( SAFE_PE_DMACFG_PEMODE
+ | SAFE_PE_DMACFG_FSENA /* failsafe enable */
+ | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */
+ | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */
+ | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */
+ | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */
+ | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */
+ | SAFE_PE_DMACFG_ESPACKET /* swap the packet data */
+ );
+ v |= SAFE_PE_DMACFG_FSENA /* failsafe enable */
+ | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */
+ | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */
+ | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */
+ | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */
+ | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */
+#if 0
+ | SAFE_PE_DMACFG_ESPACKET /* swap the packet data */
+#endif
+ ;
+ WRITE_REG(sc, SAFE_PE_DMACFG, v);
+
+#ifdef __BIG_ENDIAN
+ /* tell the safenet that we are 4321 and not 1234 */
+ WRITE_REG(sc, SAFE_ENDIAN, 0xe4e41b1b);
+#endif
+
+ if (sc->sc_chiprev == SAFE_REV(1,0)) {
+ /*
+ * Avoid large PCI DMA transfers. Rev 1.0 has a bug where
+ * "target mode transfers" done while the chip is DMA'ing
+ * >1020 bytes cause the hardware to lockup. To avoid this
+ * we reduce the max PCI transfer size and use small source
+ * particle descriptors (<= 256 bytes).
+ */
+ WRITE_REG(sc, SAFE_DMA_CFG, 256);
+ device_printf(sc->sc_dev,
+ "Reduce max DMA size to %u words for rev %u.%u WAR\n",
+ (unsigned) ((READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff),
+ (unsigned) SAFE_REV_MAJ(sc->sc_chiprev),
+ (unsigned) SAFE_REV_MIN(sc->sc_chiprev));
+ sc->sc_max_dsize = 256;
+ } else {
+ sc->sc_max_dsize = SAFE_MAX_DSIZE;
+ }
+
+ /* NB: operands+results are overlaid */
+ WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
+ WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
+ /*
+ * Configure ring entry size and number of items in the ring.
+ */
+ KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
+ ("PE ring entry not 32-bit aligned!"));
+ dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
+ WRITE_REG(sc, SAFE_PE_RINGCFG,
+ (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
+ WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */
+
+ WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
+ WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
+ WRITE_REG(sc, SAFE_PE_PARTSIZE,
+ (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
+ /*
+	 * NB: destination particles are fixed size.  We cap them
+	 * at sc_max_dsize bytes and require every result to fit
+	 * in particles of that size or smaller.
+ */
+ WRITE_REG(sc, SAFE_PE_PARTCFG, sc->sc_max_dsize);
+
+ /* it's now safe to enable PE mode, do it */
+ WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
+
+ /*
+ * Configure hardware to use level-triggered interrupts and
+ * to interrupt after each descriptor is processed.
+ */
+ WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
+ WRITE_REG(sc, SAFE_HI_CLR, 0xffffffff);
+ WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
+ WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
+}
+
+
+/*
+ * Clean up after a chip crash.
+ * It is assumed that the caller is in splimp().
+ */
+static void
+safe_cleanchip(struct safe_softc *sc)
+{
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ if (sc->sc_nqchip != 0) {
+ struct safe_ringentry *re = sc->sc_back;
+
+ while (re != sc->sc_front) {
+ if (re->re_desc.d_csr != 0)
+ safe_free_entry(sc, re);
+ if (++re == sc->sc_ringtop)
+ re = sc->sc_ring;
+ }
+ sc->sc_back = re;
+ sc->sc_nqchip = 0;
+ }
+}
+
+/*
+ * Free a ring entry and complete its request with an error.
+ * It is assumed that the caller is within splimp().
+ */
+static int
+safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
+{
+ struct cryptop *crp;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ /*
+	 * Free any separate destination buffer, then complete
+	 * the request back to the crypto layer with an error.
+ */
+ if ((re->re_dst_skb != NULL) && (re->re_src_skb != re->re_dst_skb))
+#ifdef NOTYET
+ m_freem(re->re_dst_m);
+#else
+ printk("%s,%d: SKB not supported\n", __FILE__, __LINE__);
+#endif
+
+ crp = (struct cryptop *)re->re_crp;
+
+ re->re_desc.d_csr = 0;
+
+ crp->crp_etype = EFAULT;
+ crypto_done(crp);
+ return(0);
+}
+
+/*
+ * Routine to reset the chip and clean up.
+ * It is assumed that the caller is in splimp()
+ */
+static void
+safe_totalreset(struct safe_softc *sc)
+{
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ safe_reset_board(sc);
+ safe_init_board(sc);
+ safe_cleanchip(sc);
+}
+
+/*
+ * Is the operand suitably aligned for direct DMA?  Each
+ * segment must be aligned on a 32-bit boundary and all
+ * but the last segment must be a multiple of 4 bytes.
+ */
+static int
+safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op)
+{
+ int i;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ for (i = 0; i < op->nsegs; i++) {
+ if (op->segs[i].ds_addr & 3)
+ return (0);
+ if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
+ return (0);
+ }
+ return (1);
+}
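+
+/*
+ * Example (illustrative): segments of (addr 0x1000, len 64),
+ * (addr 0x2000, len 60), (addr 0x3003, len 10) fail the test because the
+ * last segment's address is not 32-bit aligned; make it 0x3000 and the
+ * operand is acceptable, since only the final segment may have a length
+ * that is not a multiple of 4.
+ */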
+
+/*
+ * Is the operand suitable for direct DMA as the destination
+ * of an operation?  The hardware requires that every ``particle''
+ * but the last in an operation result have the same size.  We
+ * fix that size at sc_max_dsize bytes.  This routine returns
+ * 0 if some segment is not a multiple of this size, 1 if all
+ * segments are exactly this size, or 2 if segments are at worst
+ * a multiple of this size.
+ */
+static int
+safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op)
+{
+ int result = 1;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ if (op->nsegs > 0) {
+ int i;
+
+ for (i = 0; i < op->nsegs-1; i++) {
+ if (op->segs[i].ds_len % sc->sc_max_dsize)
+ return (0);
+ if (op->segs[i].ds_len != sc->sc_max_dsize)
+ result = 2;
+ }
+ }
+ return (result);
+}
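+
+/*
+ * Example (illustrative), with sc_max_dsize == 256: segment lengths
+ * (256, 256, 90) return 1 (every non-final segment is exactly one
+ * particle); (512, 90) returns 2 (a whole multiple of the particle
+ * size); (100, 90) returns 0 and such a request is rejected for
+ * scatter/gather destinations in safe_process().
+ */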
+
+static int
+safe_kprocess(device_t dev, struct cryptkop *krp, int hint)
+{
+ struct safe_softc *sc = device_get_softc(dev);
+ struct safe_pkq *q;
+ unsigned long flags;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ if (sc == NULL) {
+ krp->krp_status = EINVAL;
+ goto err;
+ }
+
+ if (krp->krp_op != CRK_MOD_EXP) {
+ krp->krp_status = EOPNOTSUPP;
+ goto err;
+ }
+
+ q = (struct safe_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
+ if (q == NULL) {
+ krp->krp_status = ENOMEM;
+ goto err;
+ }
+ memset(q, 0, sizeof(*q));
+ q->pkq_krp = krp;
+ INIT_LIST_HEAD(&q->pkq_list);
+
+ spin_lock_irqsave(&sc->sc_pkmtx, flags);
+ list_add_tail(&q->pkq_list, &sc->sc_pkq);
+ safe_kfeed(sc);
+ spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
+ return (0);
+
+err:
+ crypto_kdone(krp);
+ return (0);
+}
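+
+/*
+ * Illustrative sketch (not part of the driver): the shape of a CRK_MOD_EXP
+ * request as it arrives here, assuming the usual OCF cryptkop layout and
+ * crypto_kdispatch() entry point.  base/exp/mod/result, their bit lengths
+ * and my_kop_done are hypothetical caller-side names.  Parameters are
+ * little-endian byte arrays with explicit bit lengths; the single output
+ * parameter receives base^exponent mod modulus.
+ */
+#if 0
+	struct cryptkop *krp = kmalloc(sizeof(*krp), GFP_KERNEL);
+
+	memset(krp, 0, sizeof(*krp));
+	krp->krp_op = CRK_MOD_EXP;
+	krp->krp_iparams = 3;			/* base, exponent, modulus */
+	krp->krp_oparams = 1;			/* one result */
+	krp->krp_param[0].crp_p = base;		/* SAFE_CRK_PARAM_BASE */
+	krp->krp_param[0].crp_nbits = base_bits;
+	krp->krp_param[1].crp_p = exp;		/* SAFE_CRK_PARAM_EXP */
+	krp->krp_param[1].crp_nbits = exp_bits;
+	krp->krp_param[2].crp_p = mod;		/* SAFE_CRK_PARAM_MOD */
+	krp->krp_param[2].crp_nbits = mod_bits;
+	krp->krp_param[3].crp_p = result;	/* output, >= mod_bits wide */
+	krp->krp_param[3].crp_nbits = mod_bits;
+	krp->krp_callback = my_kop_done;	/* hypothetical completion hook */
+	crypto_kdispatch(krp);
+#endif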
+
+#define SAFE_CRK_PARAM_BASE 0
+#define SAFE_CRK_PARAM_EXP 1
+#define SAFE_CRK_PARAM_MOD 2
+
+static int
+safe_kstart(struct safe_softc *sc)
+{
+ struct cryptkop *krp = sc->sc_pkq_cur->pkq_krp;
+ int exp_bits, mod_bits, base_bits;
+ u_int32_t op, a_off, b_off, c_off, d_off;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ if (krp->krp_iparams < 3 || krp->krp_oparams != 1) {
+ krp->krp_status = EINVAL;
+ return (1);
+ }
+
+ base_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_BASE]);
+ if (base_bits > 2048)
+ goto too_big;
+ if (base_bits <= 0) /* 5. base not zero */
+ goto too_small;
+
+ exp_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_EXP]);
+ if (exp_bits > 2048)
+ goto too_big;
+ if (exp_bits <= 0) /* 1. exponent word length > 0 */
+ goto too_small; /* 4. exponent not zero */
+
+ mod_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_MOD]);
+ if (mod_bits > 2048)
+ goto too_big;
+ if (mod_bits <= 32) /* 2. modulus word length > 1 */
+ goto too_small; /* 8. MSW of modulus != zero */
+ if (mod_bits < exp_bits) /* 3 modulus len >= exponent len */
+ goto too_small;
+ if ((krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p[0] & 1) == 0)
+ goto bad_domain; /* 6. modulus is odd */
+ if (mod_bits > krp->krp_param[krp->krp_iparams].crp_nbits)
+ goto too_small; /* make sure result will fit */
+
+ /* 7. modulus > base */
+ if (mod_bits < base_bits)
+ goto too_small;
+ if (mod_bits == base_bits) {
+ u_int8_t *basep, *modp;
+ int i;
+
+ basep = krp->krp_param[SAFE_CRK_PARAM_BASE].crp_p +
+ ((base_bits + 7) / 8) - 1;
+ modp = krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p +
+ ((mod_bits + 7) / 8) - 1;
+
+ for (i = 0; i < (mod_bits + 7) / 8; i++, basep--, modp--) {
+ if (*modp < *basep)
+ goto too_small;
+ if (*modp > *basep)
+ break;
+ }
+ }
+
+ /* And on the 9th step, he rested. */
+
+ WRITE_REG(sc, SAFE_PK_A_LEN, (exp_bits + 31) / 32);
+ WRITE_REG(sc, SAFE_PK_B_LEN, (mod_bits + 31) / 32);
+ if (mod_bits > 1024) {
+ op = SAFE_PK_FUNC_EXP4;
+ a_off = 0x000;
+ b_off = 0x100;
+ c_off = 0x200;
+ d_off = 0x300;
+ } else {
+ op = SAFE_PK_FUNC_EXP16;
+ a_off = 0x000;
+ b_off = 0x080;
+ c_off = 0x100;
+ d_off = 0x180;
+ }
+ sc->sc_pk_reslen = b_off - a_off;
+ sc->sc_pk_resoff = d_off;
+
+ /* A is exponent, B is modulus, C is base, D is result */
+ safe_kload_reg(sc, a_off, b_off - a_off,
+ &krp->krp_param[SAFE_CRK_PARAM_EXP]);
+ WRITE_REG(sc, SAFE_PK_A_ADDR, a_off >> 2);
+ safe_kload_reg(sc, b_off, b_off - a_off,
+ &krp->krp_param[SAFE_CRK_PARAM_MOD]);
+ WRITE_REG(sc, SAFE_PK_B_ADDR, b_off >> 2);
+ safe_kload_reg(sc, c_off, b_off - a_off,
+ &krp->krp_param[SAFE_CRK_PARAM_BASE]);
+ WRITE_REG(sc, SAFE_PK_C_ADDR, c_off >> 2);
+ WRITE_REG(sc, SAFE_PK_D_ADDR, d_off >> 2);
+
+ WRITE_REG(sc, SAFE_PK_FUNC, op | SAFE_PK_FUNC_RUN);
+
+ return (0);
+
+too_big:
+ krp->krp_status = E2BIG;
+ return (1);
+too_small:
+ krp->krp_status = ERANGE;
+ return (1);
+bad_domain:
+ krp->krp_status = EDOM;
+ return (1);
+}
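+
+/*
+ * Note on the layout chosen above: operands live in the public-key
+ * scratch RAM at SAFE_PK_RAM_START.  A holds the exponent, B the modulus,
+ * C the base and D the result.  For moduli of up to 1024 bits each
+ * operand gets a 0x80-byte (1024-bit) window at 0x000/0x080/0x100/0x180;
+ * larger moduli (up to 2048 bits) use 0x100-byte windows at
+ * 0x000/0x100/0x200/0x300.
+ */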
+
+static int
+safe_ksigbits(struct safe_softc *sc, struct crparam *cr)
+{
+ u_int plen = (cr->crp_nbits + 7) / 8;
+ int i, sig = plen * 8;
+ u_int8_t c, *p = cr->crp_p;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ for (i = plen - 1; i >= 0; i--) {
+ c = p[i];
+ if (c != 0) {
+ while ((c & 0x80) == 0) {
+ sig--;
+ c <<= 1;
+ }
+ break;
+ }
+ sig -= 8;
+ }
+ return (sig);
+}
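+
+/*
+ * Example (illustrative): a 4-byte, little-endian crparam holding the
+ * value 0x0305 (bytes 05 03 00 00) starts with sig = 32, drops to 16 for
+ * the two zero top bytes, then sheds six more leading zero bits of 0x03,
+ * giving 10 significant bits -- exactly the bit length of 0x0305.
+ */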
+
+static void
+safe_kfeed(struct safe_softc *sc)
+{
+ struct safe_pkq *q, *tmp;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ if (list_empty(&sc->sc_pkq) && sc->sc_pkq_cur == NULL)
+ return;
+ if (sc->sc_pkq_cur != NULL)
+ return;
+ list_for_each_entry_safe(q, tmp, &sc->sc_pkq, pkq_list) {
+ sc->sc_pkq_cur = q;
+ list_del(&q->pkq_list);
+ if (safe_kstart(sc) != 0) {
+ crypto_kdone(q->pkq_krp);
+ kfree(q);
+ sc->sc_pkq_cur = NULL;
+ } else {
+ /* op started, start polling */
+ mod_timer(&sc->sc_pkto, jiffies + 1);
+ break;
+ }
+ }
+}
+
+static void
+safe_kpoll(unsigned long arg)
+{
+ struct safe_softc *sc = NULL;
+ struct safe_pkq *q;
+ struct crparam *res;
+ int i;
+ u_int32_t buf[64];
+ unsigned long flags;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ if (arg >= SAFE_MAX_CHIPS)
+ return;
+ sc = safe_chip_idx[arg];
+ if (!sc) {
+ DPRINTF(("%s() - bad callback\n", __FUNCTION__));
+ return;
+ }
+
+ spin_lock_irqsave(&sc->sc_pkmtx, flags);
+ if (sc->sc_pkq_cur == NULL)
+ goto out;
+ if (READ_REG(sc, SAFE_PK_FUNC) & SAFE_PK_FUNC_RUN) {
+ /* still running, check back later */
+ mod_timer(&sc->sc_pkto, jiffies + 1);
+ goto out;
+ }
+
+ q = sc->sc_pkq_cur;
+ res = &q->pkq_krp->krp_param[q->pkq_krp->krp_iparams];
+ bzero(buf, sizeof(buf));
+ bzero(res->crp_p, (res->crp_nbits + 7) / 8);
+ for (i = 0; i < sc->sc_pk_reslen >> 2; i++)
+ buf[i] = le32_to_cpu(READ_REG(sc, SAFE_PK_RAM_START +
+ sc->sc_pk_resoff + (i << 2)));
+ bcopy(buf, res->crp_p, (res->crp_nbits + 7) / 8);
+ /*
+ * reduce the bits that need copying if possible
+ */
+ res->crp_nbits = min(res->crp_nbits,sc->sc_pk_reslen * 8);
+ res->crp_nbits = safe_ksigbits(sc, res);
+
+ for (i = SAFE_PK_RAM_START; i < SAFE_PK_RAM_END; i += 4)
+ WRITE_REG(sc, i, 0);
+
+ crypto_kdone(q->pkq_krp);
+ kfree(q);
+ sc->sc_pkq_cur = NULL;
+
+ safe_kfeed(sc);
+out:
+ spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
+}
+
+static void
+safe_kload_reg(struct safe_softc *sc, u_int32_t off, u_int32_t len,
+ struct crparam *n)
+{
+ u_int32_t buf[64], i;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ bzero(buf, sizeof(buf));
+ bcopy(n->crp_p, buf, (n->crp_nbits + 7) / 8);
+
+ for (i = 0; i < len >> 2; i++)
+ WRITE_REG(sc, SAFE_PK_RAM_START + off + (i << 2),
+ cpu_to_le32(buf[i]));
+}
+
+#ifdef SAFE_DEBUG
+static void
+safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
+{
+ printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
+ , tag
+ , READ_REG(sc, SAFE_DMA_ENDIAN)
+ , READ_REG(sc, SAFE_DMA_SRCADDR)
+ , READ_REG(sc, SAFE_DMA_DSTADDR)
+ , READ_REG(sc, SAFE_DMA_STAT)
+ );
+}
+
+static void
+safe_dump_intrstate(struct safe_softc *sc, const char *tag)
+{
+ printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
+ , tag
+ , READ_REG(sc, SAFE_HI_CFG)
+ , READ_REG(sc, SAFE_HI_MASK)
+ , READ_REG(sc, SAFE_HI_DESC_CNT)
+ , READ_REG(sc, SAFE_HU_STAT)
+ , READ_REG(sc, SAFE_HM_STAT)
+ );
+}
+
+static void
+safe_dump_ringstate(struct safe_softc *sc, const char *tag)
+{
+ u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
+
+ /* NB: assume caller has lock on ring */
+ printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
+ tag,
+ estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
+ (unsigned long)(sc->sc_back - sc->sc_ring),
+ (unsigned long)(sc->sc_front - sc->sc_ring));
+}
+
+static void
+safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
+{
+ int ix, nsegs;
+
+ ix = re - sc->sc_ring;
+ printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
+ , tag
+ , re, ix
+ , re->re_desc.d_csr
+ , re->re_desc.d_src
+ , re->re_desc.d_dst
+ , re->re_desc.d_sa
+ , re->re_desc.d_len
+ );
+ if (re->re_src.nsegs > 1) {
+ ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
+ sizeof(struct safe_pdesc);
+ for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
+ printf(" spd[%u] %p: %p size %u flags %x"
+ , ix, &sc->sc_spring[ix]
+ , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
+ , sc->sc_spring[ix].pd_size
+ , sc->sc_spring[ix].pd_flags
+ );
+ if (sc->sc_spring[ix].pd_size == 0)
+ printf(" (zero!)");
+ printf("\n");
+ if (++ix == SAFE_TOTAL_SPART)
+ ix = 0;
+ }
+ }
+ if (re->re_dst.nsegs > 1) {
+ ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
+ sizeof(struct safe_pdesc);
+ for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
+ printf(" dpd[%u] %p: %p flags %x\n"
+ , ix, &sc->sc_dpring[ix]
+ , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
+ , sc->sc_dpring[ix].pd_flags
+ );
+ if (++ix == SAFE_TOTAL_DPART)
+ ix = 0;
+ }
+ }
+ printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
+ re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
+ printf("sa: key %x %x %x %x %x %x %x %x\n"
+ , re->re_sa.sa_key[0]
+ , re->re_sa.sa_key[1]
+ , re->re_sa.sa_key[2]
+ , re->re_sa.sa_key[3]
+ , re->re_sa.sa_key[4]
+ , re->re_sa.sa_key[5]
+ , re->re_sa.sa_key[6]
+ , re->re_sa.sa_key[7]
+ );
+ printf("sa: indigest %x %x %x %x %x\n"
+ , re->re_sa.sa_indigest[0]
+ , re->re_sa.sa_indigest[1]
+ , re->re_sa.sa_indigest[2]
+ , re->re_sa.sa_indigest[3]
+ , re->re_sa.sa_indigest[4]
+ );
+ printf("sa: outdigest %x %x %x %x %x\n"
+ , re->re_sa.sa_outdigest[0]
+ , re->re_sa.sa_outdigest[1]
+ , re->re_sa.sa_outdigest[2]
+ , re->re_sa.sa_outdigest[3]
+ , re->re_sa.sa_outdigest[4]
+ );
+ printf("sr: iv %x %x %x %x\n"
+ , re->re_sastate.sa_saved_iv[0]
+ , re->re_sastate.sa_saved_iv[1]
+ , re->re_sastate.sa_saved_iv[2]
+ , re->re_sastate.sa_saved_iv[3]
+ );
+ printf("sr: hashbc %u indigest %x %x %x %x %x\n"
+ , re->re_sastate.sa_saved_hashbc
+ , re->re_sastate.sa_saved_indigest[0]
+ , re->re_sastate.sa_saved_indigest[1]
+ , re->re_sastate.sa_saved_indigest[2]
+ , re->re_sastate.sa_saved_indigest[3]
+ , re->re_sastate.sa_saved_indigest[4]
+ );
+}
+
+static void
+safe_dump_ring(struct safe_softc *sc, const char *tag)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sc->sc_ringmtx, flags);
+ printf("\nSafeNet Ring State:\n");
+ safe_dump_intrstate(sc, tag);
+ safe_dump_dmastatus(sc, tag);
+ safe_dump_ringstate(sc, tag);
+ if (sc->sc_nqchip) {
+ struct safe_ringentry *re = sc->sc_back;
+ do {
+ safe_dump_request(sc, tag, re);
+ if (++re == sc->sc_ringtop)
+ re = sc->sc_ring;
+ } while (re != sc->sc_front);
+ }
+ spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+}
+#endif /* SAFE_DEBUG */
+
+
+static int safe_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+ struct safe_softc *sc = NULL;
+ u32 mem_start, mem_len, cmd;
+ int i, rc, devinfo;
+ dma_addr_t raddr;
+ static int num_chips = 0;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ if (pci_enable_device(dev) < 0)
+ return(-ENODEV);
+
+ if (!dev->irq) {
+		printk("safe: found device with no IRQ assigned, check BIOS settings!\n");
+ pci_disable_device(dev);
+ return(-ENODEV);
+ }
+
+ if (pci_set_mwi(dev)) {
+		printk("safe: pci_set_mwi failed!\n");
+ return(-ENODEV);
+ }
+
+ sc = (struct safe_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
+ if (!sc)
+ return(-ENOMEM);
+ memset(sc, 0, sizeof(*sc));
+
+ softc_device_init(sc, "safe", num_chips, safe_methods);
+
+ sc->sc_irq = -1;
+ sc->sc_cid = -1;
+ sc->sc_pcidev = dev;
+ if (num_chips < SAFE_MAX_CHIPS) {
+ safe_chip_idx[device_get_unit(sc->sc_dev)] = sc;
+ num_chips++;
+ }
+
+ INIT_LIST_HEAD(&sc->sc_pkq);
+ spin_lock_init(&sc->sc_pkmtx);
+
+ pci_set_drvdata(sc->sc_pcidev, sc);
+
+ /* we read its hardware registers as memory */
+ mem_start = pci_resource_start(sc->sc_pcidev, 0);
+ mem_len = pci_resource_len(sc->sc_pcidev, 0);
+
+ sc->sc_base_addr = (ocf_iomem_t) ioremap(mem_start, mem_len);
+ if (!sc->sc_base_addr) {
+ device_printf(sc->sc_dev, "failed to ioremap 0x%x-0x%x\n",
+ mem_start, mem_start + mem_len - 1);
+ goto out;
+ }
+
+ /* fix up the bus size */
+ if (pci_set_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
+ device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
+ goto out;
+ }
+ if (pci_set_consistent_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
+ device_printf(sc->sc_dev, "No usable consistent DMA configuration, aborting.\n");
+ goto out;
+ }
+
+ pci_set_master(sc->sc_pcidev);
+
+ pci_read_config_dword(sc->sc_pcidev, PCI_COMMAND, &cmd);
+
+ if (!(cmd & PCI_COMMAND_MEMORY)) {
+ device_printf(sc->sc_dev, "failed to enable memory mapping\n");
+ goto out;
+ }
+
+ if (!(cmd & PCI_COMMAND_MASTER)) {
+ device_printf(sc->sc_dev, "failed to enable bus mastering\n");
+ goto out;
+ }
+
+ rc = request_irq(dev->irq, safe_intr, IRQF_SHARED, "safe", sc);
+ if (rc) {
+		device_printf(sc->sc_dev, "failed to hook irq %d\n", dev->irq);
+ goto out;
+ }
+ sc->sc_irq = dev->irq;
+
+ sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
+ (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);
+
+ /*
+ * Allocate packet engine descriptors.
+ */
+ sc->sc_ringalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
+ SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
+ &sc->sc_ringalloc.dma_paddr);
+ if (!sc->sc_ringalloc.dma_vaddr) {
+ device_printf(sc->sc_dev, "cannot allocate PE descriptor ring\n");
+ goto out;
+ }
+
+ /*
+ * Hookup the static portion of all our data structures.
+ */
+ sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
+ sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
+ sc->sc_front = sc->sc_ring;
+ sc->sc_back = sc->sc_ring;
+ raddr = sc->sc_ringalloc.dma_paddr;
+ bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
+ for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
+ struct safe_ringentry *re = &sc->sc_ring[i];
+
+ re->re_desc.d_sa = raddr +
+ offsetof(struct safe_ringentry, re_sa);
+ re->re_sa.sa_staterec = raddr +
+ offsetof(struct safe_ringentry, re_sastate);
+
+ raddr += sizeof (struct safe_ringentry);
+ }
+ spin_lock_init(&sc->sc_ringmtx);
+
+ /*
+ * Allocate scatter and gather particle descriptors.
+ */
+ sc->sc_spalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
+ SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
+ &sc->sc_spalloc.dma_paddr);
+ if (!sc->sc_spalloc.dma_vaddr) {
+ device_printf(sc->sc_dev, "cannot allocate source particle descriptor ring\n");
+ goto out;
+ }
+ sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
+ sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
+ sc->sc_spfree = sc->sc_spring;
+ bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));
+
+ sc->sc_dpalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
+ SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
+ &sc->sc_dpalloc.dma_paddr);
+ if (!sc->sc_dpalloc.dma_vaddr) {
+ device_printf(sc->sc_dev, "cannot allocate destination particle descriptor ring\n");
+ goto out;
+ }
+ sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
+ sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
+ sc->sc_dpfree = sc->sc_dpring;
+ bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
+
+ sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
+ if (sc->sc_cid < 0) {
+ device_printf(sc->sc_dev, "could not get crypto driver id\n");
+ goto out;
+ }
+
+ printf("%s:", device_get_nameunit(sc->sc_dev));
+
+ devinfo = READ_REG(sc, SAFE_DEVINFO);
+ if (devinfo & SAFE_DEVINFO_RNG) {
+ sc->sc_flags |= SAFE_FLAGS_RNG;
+ printf(" rng");
+ }
+ if (devinfo & SAFE_DEVINFO_PKEY) {
+ printf(" key");
+ sc->sc_flags |= SAFE_FLAGS_KEY;
+ crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
+#if 0
+ crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
+#endif
+ init_timer(&sc->sc_pkto);
+ sc->sc_pkto.function = safe_kpoll;
+ sc->sc_pkto.data = (unsigned long) device_get_unit(sc->sc_dev);
+ }
+ if (devinfo & SAFE_DEVINFO_DES) {
+ printf(" des/3des");
+ crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
+ }
+ if (devinfo & SAFE_DEVINFO_AES) {
+ printf(" aes");
+ crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
+ }
+ if (devinfo & SAFE_DEVINFO_MD5) {
+ printf(" md5");
+ crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
+ }
+ if (devinfo & SAFE_DEVINFO_SHA1) {
+ printf(" sha1");
+ crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
+ }
+ printf(" null");
+ crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
+ /* XXX other supported algorithms */
+ printf("\n");
+
+ safe_reset_board(sc); /* reset h/w */
+ safe_init_board(sc); /* init h/w */
+
+#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
+ if (sc->sc_flags & SAFE_FLAGS_RNG) {
+ safe_rng_init(sc);
+ crypto_rregister(sc->sc_cid, safe_read_random, sc);
+ }
+#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
+
+ return (0);
+
+out:
+ if (sc->sc_cid >= 0)
+ crypto_unregister_all(sc->sc_cid);
+ if (sc->sc_irq != -1)
+ free_irq(sc->sc_irq, sc);
+ if (sc->sc_ringalloc.dma_vaddr)
+ pci_free_consistent(sc->sc_pcidev,
+ SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
+ sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
+ if (sc->sc_spalloc.dma_vaddr)
+ pci_free_consistent(sc->sc_pcidev,
+				SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
+ sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
+ if (sc->sc_dpalloc.dma_vaddr)
+ pci_free_consistent(sc->sc_pcidev,
+ SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
+ sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
+ kfree(sc);
+ return(-ENODEV);
+}
+
+static void safe_remove(struct pci_dev *dev)
+{
+ struct safe_softc *sc = pci_get_drvdata(dev);
+
+ DPRINTF(("%s()\n", __FUNCTION__));
+
+ /* XXX wait/abort active ops */
+
+ WRITE_REG(sc, SAFE_HI_MASK, 0); /* disable interrupts */
+
+ del_timer_sync(&sc->sc_pkto);
+
+ crypto_unregister_all(sc->sc_cid);
+
+ safe_cleanchip(sc);
+
+ if (sc->sc_irq != -1)
+ free_irq(sc->sc_irq, sc);
+ if (sc->sc_ringalloc.dma_vaddr)
+ pci_free_consistent(sc->sc_pcidev,
+ SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
+ sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
+ if (sc->sc_spalloc.dma_vaddr)
+ pci_free_consistent(sc->sc_pcidev,
+				SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
+ sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
+ if (sc->sc_dpalloc.dma_vaddr)
+ pci_free_consistent(sc->sc_pcidev,
+ SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
+ sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
+ sc->sc_irq = -1;
+ sc->sc_ringalloc.dma_vaddr = NULL;
+ sc->sc_spalloc.dma_vaddr = NULL;
+ sc->sc_dpalloc.dma_vaddr = NULL;
+}
+
+static struct pci_device_id safe_pci_tbl[] = {
+ { PCI_VENDOR_SAFENET, PCI_PRODUCT_SAFEXCEL,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+ { },
+};
+MODULE_DEVICE_TABLE(pci, safe_pci_tbl);
+
+static struct pci_driver safe_driver = {
+ .name = "safe",
+ .id_table = safe_pci_tbl,
+ .probe = safe_probe,
+ .remove = safe_remove,
+ /* add PM stuff here one day */
+};
+
+static int __init safe_init (void)
+{
+ struct safe_softc *sc = NULL;
+ int rc;
+
+ DPRINTF(("%s(%p)\n", __FUNCTION__, safe_init));
+
+ rc = pci_register_driver(&safe_driver);
+ pci_register_driver_compat(&safe_driver, rc);
+
+ return rc;
+}
+
+static void __exit safe_exit (void)
+{
+ pci_unregister_driver(&safe_driver);
+}
+
+module_init(safe_init);
+module_exit(safe_exit);
+
+MODULE_LICENSE("BSD");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("OCF driver for safenet PCI crypto devices");
diff --git a/target/linux/generic/files/crypto/ocf/safe/safereg.h b/target/linux/generic/files/crypto/ocf/safe/safereg.h
new file mode 100644
index 000000000..dbaf98fe7
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/safe/safereg.h
@@ -0,0 +1,421 @@
+/*-
+ * Copyright (c) 2003 Sam Leffler, Errno Consulting
+ * Copyright (c) 2003 Global Technology Associates, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/dev/safe/safereg.h,v 1.1 2003/07/21 21:46:07 sam Exp $
+ */
+#ifndef _SAFE_SAFEREG_H_
+#define _SAFE_SAFEREG_H_
+
+/*
+ * Register definitions for SafeNet SafeXcel-1141 crypto device.
+ * Definitions from revision 1.3 (Nov 6 2002) of the User's Manual.
+ */
+
+#define BS_BAR 0x10 /* DMA base address register */
+#define BS_TRDY_TIMEOUT 0x40 /* TRDY timeout */
+#define BS_RETRY_TIMEOUT 0x41 /* DMA retry timeout */
+
+#define PCI_VENDOR_SAFENET 0x16ae /* SafeNet, Inc. */
+
+/* SafeNet */
+#define PCI_PRODUCT_SAFEXCEL 0x1141 /* 1141 */
+
+#define SAFE_PE_CSR 0x0000 /* Packet Engine Ctrl/Status */
+#define SAFE_PE_SRC 0x0004 /* Packet Engine Source */
+#define SAFE_PE_DST 0x0008 /* Packet Engine Destination */
+#define SAFE_PE_SA 0x000c /* Packet Engine SA */
+#define SAFE_PE_LEN 0x0010 /* Packet Engine Length */
+#define SAFE_PE_DMACFG 0x0040 /* Packet Engine DMA Configuration */
+#define SAFE_PE_DMASTAT 0x0044 /* Packet Engine DMA Status */
+#define SAFE_PE_PDRBASE 0x0048 /* Packet Engine Descriptor Ring Base */
+#define SAFE_PE_RDRBASE 0x004c /* Packet Engine Result Ring Base */
+#define SAFE_PE_RINGCFG 0x0050 /* Packet Engine Ring Configuration */
+#define SAFE_PE_RINGPOLL 0x0054 /* Packet Engine Ring Poll */
+#define SAFE_PE_IRNGSTAT 0x0058 /* Packet Engine Internal Ring Status */
+#define SAFE_PE_ERNGSTAT 0x005c /* Packet Engine External Ring Status */
+#define SAFE_PE_IOTHRESH 0x0060 /* Packet Engine I/O Threshold */
+#define SAFE_PE_GRNGBASE 0x0064 /* Packet Engine Gather Ring Base */
+#define SAFE_PE_SRNGBASE 0x0068 /* Packet Engine Scatter Ring Base */
+#define SAFE_PE_PARTSIZE 0x006c /* Packet Engine Particle Ring Size */
+#define SAFE_PE_PARTCFG 0x0070 /* Packet Engine Particle Ring Config */
+#define SAFE_CRYPTO_CTRL 0x0080 /* Crypto Control */
+#define SAFE_DEVID 0x0084 /* Device ID */
+#define SAFE_DEVINFO 0x0088 /* Device Info */
+#define SAFE_HU_STAT 0x00a0 /* Host Unmasked Status */
+#define SAFE_HM_STAT 0x00a4 /* Host Masked Status (read-only) */
+#define SAFE_HI_CLR 0x00a4 /* Host Clear Interrupt (write-only) */
+#define SAFE_HI_MASK 0x00a8 /* Host Mask Control */
+#define SAFE_HI_CFG 0x00ac /* Interrupt Configuration */
+#define SAFE_HI_RD_DESCR 0x00b4 /* Force Descriptor Read */
+#define SAFE_HI_DESC_CNT 0x00b8 /* Host Descriptor Done Count */
+#define SAFE_DMA_ENDIAN 0x00c0 /* Master Endian Status */
+#define SAFE_DMA_SRCADDR 0x00c4 /* DMA Source Address Status */
+#define SAFE_DMA_DSTADDR 0x00c8 /* DMA Destination Address Status */
+#define SAFE_DMA_STAT 0x00cc /* DMA Current Status */
+#define SAFE_DMA_CFG 0x00d4 /* DMA Configuration/Status */
+#define SAFE_ENDIAN 0x00e0 /* Endian Configuration */
+#define SAFE_PK_A_ADDR 0x0800 /* Public Key A Address */
+#define SAFE_PK_B_ADDR 0x0804 /* Public Key B Address */
+#define SAFE_PK_C_ADDR 0x0808 /* Public Key C Address */
+#define SAFE_PK_D_ADDR 0x080c /* Public Key D Address */
+#define SAFE_PK_A_LEN 0x0810 /* Public Key A Length */
+#define SAFE_PK_B_LEN 0x0814 /* Public Key B Length */
+#define SAFE_PK_SHIFT 0x0818 /* Public Key Shift */
+#define SAFE_PK_FUNC 0x081c /* Public Key Function */
+#define SAFE_PK_RAM_START 0x1000 /* Public Key RAM start address */
+#define SAFE_PK_RAM_END 0x1fff /* Public Key RAM end address */
+
+#define SAFE_RNG_OUT 0x0100 /* RNG Output */
+#define SAFE_RNG_STAT 0x0104 /* RNG Status */
+#define SAFE_RNG_CTRL 0x0108 /* RNG Control */
+#define SAFE_RNG_A 0x010c /* RNG A */
+#define SAFE_RNG_B 0x0110 /* RNG B */
+#define SAFE_RNG_X_LO 0x0114 /* RNG X [31:0] */
+#define SAFE_RNG_X_MID 0x0118 /* RNG X [63:32] */
+#define SAFE_RNG_X_HI 0x011c /* RNG X [80:64] */
+#define SAFE_RNG_X_CNTR 0x0120 /* RNG Counter */
+#define SAFE_RNG_ALM_CNT 0x0124 /* RNG Alarm Count */
+#define SAFE_RNG_CNFG 0x0128 /* RNG Configuration */
+#define SAFE_RNG_LFSR1_LO 0x012c /* RNG LFSR1 [31:0] */
+#define SAFE_RNG_LFSR1_HI 0x0130 /* RNG LFSR1 [47:32] */
+#define SAFE_RNG_LFSR2_LO 0x0134 /* RNG LFSR2 [31:0] */
+#define SAFE_RNG_LFSR2_HI 0x0138 /* RNG LFSR2 [47:32] */
+
+#define SAFE_PE_CSR_READY 0x00000001 /* ready for processing */
+#define SAFE_PE_CSR_DONE 0x00000002 /* h/w completed processing */
+#define SAFE_PE_CSR_LOADSA 0x00000004 /* load SA digests */
+#define SAFE_PE_CSR_HASHFINAL 0x00000010 /* do hash pad & write result */
+#define SAFE_PE_CSR_SABUSID 0x000000c0 /* bus id for SA */
+#define SAFE_PE_CSR_SAPCI 0x00000040 /* PCI bus id for SA */
+#define SAFE_PE_CSR_NXTHDR 0x0000ff00 /* next hdr value for IPsec */
+#define SAFE_PE_CSR_FPAD 0x0000ff00 /* fixed pad for basic ops */
+#define SAFE_PE_CSR_STATUS 0x00ff0000 /* operation result status */
+#define SAFE_PE_CSR_AUTH_FAIL 0x00010000 /* ICV mismatch (inbound) */
+#define SAFE_PE_CSR_PAD_FAIL 0x00020000 /* pad verify fail (inbound) */
+#define SAFE_PE_CSR_SEQ_FAIL 0x00040000 /* sequence number (inbound) */
+#define SAFE_PE_CSR_XERROR 0x00080000 /* extended error follows */
+#define SAFE_PE_CSR_XECODE 0x00f00000 /* extended error code */
+#define SAFE_PE_CSR_XECODE_S 20
+#define SAFE_PE_CSR_XECODE_BADCMD 0 /* invalid command */
+#define SAFE_PE_CSR_XECODE_BADALG 1 /* invalid algorithm */
+#define SAFE_PE_CSR_XECODE_ALGDIS 2 /* algorithm disabled */
+#define SAFE_PE_CSR_XECODE_ZEROLEN 3 /* zero packet length */
+#define SAFE_PE_CSR_XECODE_DMAERR 4 /* bus DMA error */
+#define SAFE_PE_CSR_XECODE_PIPEABORT 5 /* secondary bus DMA error */
+#define SAFE_PE_CSR_XECODE_BADSPI 6 /* IPsec SPI mismatch */
+#define SAFE_PE_CSR_XECODE_TIMEOUT 10 /* failsafe timeout */
+#define SAFE_PE_CSR_PAD 0xff000000 /* ESP padding control/status */
+#define SAFE_PE_CSR_PAD_MIN 0x00000000 /* minimum IPsec padding */
+#define SAFE_PE_CSR_PAD_16 0x08000000 /* pad to 16-byte boundary */
+#define SAFE_PE_CSR_PAD_32 0x10000000 /* pad to 32-byte boundary */
+#define SAFE_PE_CSR_PAD_64 0x20000000 /* pad to 64-byte boundary */
+#define SAFE_PE_CSR_PAD_128 0x40000000 /* pad to 128-byte boundary */
+#define SAFE_PE_CSR_PAD_256 0x80000000 /* pad to 256-byte boundary */
+
+/*
+ * Check the CSR to see if the PE has returned ownership to
+ * the host. Note that before processing a descriptor this
+ * must be done followed by a check of the SAFE_PE_LEN register
+ * status bits to avoid premature processing of a descriptor
+ * on its way back to the host.
+ */
+#define SAFE_PE_CSR_IS_DONE(_csr) \
+ (((_csr) & (SAFE_PE_CSR_READY | SAFE_PE_CSR_DONE)) == SAFE_PE_CSR_DONE)
+
+#define SAFE_PE_LEN_LENGTH 0x000fffff /* total length (bytes) */
+#define SAFE_PE_LEN_READY 0x00400000 /* ready for processing */
+#define SAFE_PE_LEN_DONE 0x00800000 /* h/w completed processing */
+#define SAFE_PE_LEN_BYPASS 0xff000000 /* bypass offset (bytes) */
+#define SAFE_PE_LEN_BYPASS_S 24
+
+#define SAFE_PE_LEN_IS_DONE(_len) \
+ (((_len) & (SAFE_PE_LEN_READY | SAFE_PE_LEN_DONE)) == SAFE_PE_LEN_DONE)
+
+/* NB: these apply to HU_STAT, HM_STAT, HI_CLR, and HI_MASK */
+#define SAFE_INT_PE_CDONE 0x00000002 /* PE context done */
+#define SAFE_INT_PE_DDONE 0x00000008 /* PE descriptor done */
+#define SAFE_INT_PE_ERROR 0x00000010 /* PE error */
+#define SAFE_INT_PE_ODONE 0x00000020 /* PE operation done */
+
+#define SAFE_HI_CFG_PULSE 0x00000001 /* use pulse interrupt */
+#define SAFE_HI_CFG_LEVEL 0x00000000 /* use level interrupt */
+#define SAFE_HI_CFG_AUTOCLR 0x00000002 /* auto-clear pulse interrupt */
+
+#define SAFE_ENDIAN_PASS 0x000000e4 /* straight pass-thru */
+#define SAFE_ENDIAN_SWAB 0x0000001b /* swap bytes in 32-bit word */
+
+#define SAFE_PE_DMACFG_PERESET 0x00000001 /* reset packet engine */
+#define SAFE_PE_DMACFG_PDRRESET 0x00000002 /* reset PDR counters/ptrs */
+#define SAFE_PE_DMACFG_SGRESET 0x00000004 /* reset scatter/gather cache */
+#define SAFE_PE_DMACFG_FSENA 0x00000008 /* enable failsafe reset */
+#define SAFE_PE_DMACFG_PEMODE 0x00000100 /* packet engine mode */
+#define SAFE_PE_DMACFG_SAPREC 0x00000200 /* SA precedes packet */
+#define SAFE_PE_DMACFG_PKFOLL 0x00000400 /* packet follows descriptor */
+#define SAFE_PE_DMACFG_GPRBID 0x00003000 /* gather particle ring busid */
+#define SAFE_PE_DMACFG_GPRPCI 0x00001000 /* PCI gather particle ring */
+#define SAFE_PE_DMACFG_SPRBID 0x0000c000 /* scatter part. ring busid */
+#define SAFE_PE_DMACFG_SPRPCI 0x00004000 /* PCI scatter part. ring */
+#define SAFE_PE_DMACFG_ESDESC 0x00010000 /* endian swap descriptors */
+#define SAFE_PE_DMACFG_ESSA 0x00020000 /* endian swap SA data */
+#define SAFE_PE_DMACFG_ESPACKET 0x00040000 /* endian swap packet data */
+#define SAFE_PE_DMACFG_ESPDESC 0x00080000 /* endian swap particle desc. */
+#define SAFE_PE_DMACFG_NOPDRUP 0x00100000 /* supp. PDR ownership update */
+#define SAFE_PD_EDMACFG_PCIMODE 0x01000000 /* PCI target mode */
+
+#define SAFE_PE_DMASTAT_PEIDONE 0x00000001 /* PE core input done */
+#define SAFE_PE_DMASTAT_PEODONE 0x00000002 /* PE core output done */
+#define SAFE_PE_DMASTAT_ENCDONE 0x00000004 /* encryption done */
+#define SAFE_PE_DMASTAT_IHDONE 0x00000008 /* inner hash done */
+#define SAFE_PE_DMASTAT_OHDONE 0x00000010 /* outer hash (HMAC) done */
+#define SAFE_PE_DMASTAT_PADFLT 0x00000020 /* crypto pad fault */
+#define SAFE_PE_DMASTAT_ICVFLT 0x00000040 /* ICV fault */
+#define SAFE_PE_DMASTAT_SPIMIS 0x00000080 /* SPI mismatch */
+#define SAFE_PE_DMASTAT_CRYPTO 0x00000100 /* crypto engine timeout */
+#define SAFE_PE_DMASTAT_CQACT 0x00000200 /* command queue active */
+#define SAFE_PE_DMASTAT_IRACT 0x00000400 /* input request active */
+#define SAFE_PE_DMASTAT_ORACT 0x00000800 /* output request active */
+#define SAFE_PE_DMASTAT_PEISIZE 0x003ff000 /* PE input size:32-bit words */
+#define SAFE_PE_DMASTAT_PEOSIZE 0xffc00000 /* PE out. size:32-bit words */
+
+#define SAFE_PE_RINGCFG_SIZE 0x000003ff /* ring size (descriptors) */
+#define SAFE_PE_RINGCFG_OFFSET 0xffff0000 /* offset btw desc's (dwords) */
+#define SAFE_PE_RINGCFG_OFFSET_S 16
+
+#define SAFE_PE_RINGPOLL_POLL 0x00000fff /* polling frequency/divisor */
+#define SAFE_PE_RINGPOLL_RETRY 0x03ff0000 /* polling frequency/divisor */
+#define SAFE_PE_RINGPOLL_CONT 0x80000000 /* continuously poll */
+
+#define SAFE_PE_IRNGSTAT_CQAVAIL 0x00000001 /* command queue available */
+
+#define SAFE_PE_ERNGSTAT_NEXT 0x03ff0000 /* index of next packet desc. */
+#define SAFE_PE_ERNGSTAT_NEXT_S 16
+
+#define SAFE_PE_IOTHRESH_INPUT 0x000003ff /* input threshold (dwords) */
+#define SAFE_PE_IOTHRESH_OUTPUT 0x03ff0000 /* output threshold (dwords) */
+
+#define SAFE_PE_PARTCFG_SIZE 0x0000ffff /* scatter particle size */
+#define SAFE_PE_PARTCFG_GBURST 0x00030000 /* gather particle burst */
+#define SAFE_PE_PARTCFG_GBURST_2 0x00000000
+#define SAFE_PE_PARTCFG_GBURST_4 0x00010000
+#define SAFE_PE_PARTCFG_GBURST_8 0x00020000
+#define SAFE_PE_PARTCFG_GBURST_16 0x00030000
+#define SAFE_PE_PARTCFG_SBURST 0x000c0000 /* scatter particle burst */
+#define SAFE_PE_PARTCFG_SBURST_2 0x00000000
+#define SAFE_PE_PARTCFG_SBURST_4 0x00040000
+#define SAFE_PE_PARTCFG_SBURST_8 0x00080000
+#define SAFE_PE_PARTCFG_SBURST_16 0x000c0000
+
+#define SAFE_PE_PARTSIZE_SCAT 0xffff0000 /* scatter particle ring size */
+#define SAFE_PE_PARTSIZE_GATH 0x0000ffff /* gather particle ring size */
+
+#define SAFE_CRYPTO_CTRL_3DES 0x00000001 /* enable 3DES support */
+#define SAFE_CRYPTO_CTRL_PKEY 0x00010000 /* enable public key support */
+#define SAFE_CRYPTO_CTRL_RNG 0x00020000 /* enable RNG support */
+
+#define SAFE_DEVINFO_REV_MIN 0x0000000f /* minor rev for chip */
+#define SAFE_DEVINFO_REV_MAJ 0x000000f0 /* major rev for chip */
+#define SAFE_DEVINFO_REV_MAJ_S 4
+#define SAFE_DEVINFO_DES 0x00000100 /* DES/3DES support present */
+#define SAFE_DEVINFO_ARC4 0x00000200 /* ARC4 support present */
+#define SAFE_DEVINFO_AES 0x00000400 /* AES support present */
+#define SAFE_DEVINFO_MD5 0x00001000 /* MD5 support present */
+#define SAFE_DEVINFO_SHA1 0x00002000 /* SHA-1 support present */
+#define SAFE_DEVINFO_RIPEMD 0x00004000 /* RIPEMD support present */
+#define SAFE_DEVINFO_DEFLATE 0x00010000 /* Deflate support present */
+#define SAFE_DEVINFO_SARAM 0x00100000 /* on-chip SA RAM present */
+#define SAFE_DEVINFO_EMIBUS 0x00200000 /* EMI bus present */
+#define SAFE_DEVINFO_PKEY 0x00400000 /* public key support present */
+#define SAFE_DEVINFO_RNG 0x00800000 /* RNG present */
+
+#define SAFE_REV(_maj, _min) (((_maj) << SAFE_DEVINFO_REV_MAJ_S) | (_min))
+#define SAFE_REV_MAJ(_chiprev) \
+ (((_chiprev) & SAFE_DEVINFO_REV_MAJ) >> SAFE_DEVINFO_REV_MAJ_S)
+#define SAFE_REV_MIN(_chiprev) ((_chiprev) & SAFE_DEVINFO_REV_MIN)
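(Editorial example, not part of the patch.) The revision macros above are typically applied to the SAFE_DEVINFO register; a minimal sketch, assuming a READ_REG accessor analogous to the WRITE_REG used elsewhere in this driver:

static void safe_report_rev(struct safe_softc *sc)
{
	u_int32_t devinfo = READ_REG(sc, SAFE_DEVINFO);

	/* major/minor revision plus a couple of the feature bits */
	printk(KERN_INFO "safe: SafeXcel rev %u.%u%s%s\n",
	    SAFE_REV_MAJ(devinfo), SAFE_REV_MIN(devinfo),
	    (devinfo & SAFE_DEVINFO_RNG) ? ", rng" : "",
	    (devinfo & SAFE_DEVINFO_PKEY) ? ", pkey" : "");
}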
+
+#define SAFE_PK_FUNC_MULT 0x00000001 /* Multiply function */
+#define SAFE_PK_FUNC_SQUARE 0x00000004 /* Square function */
+#define SAFE_PK_FUNC_ADD 0x00000010 /* Add function */
+#define SAFE_PK_FUNC_SUB 0x00000020 /* Subtract function */
+#define SAFE_PK_FUNC_LSHIFT 0x00000040 /* Left-shift function */
+#define SAFE_PK_FUNC_RSHIFT 0x00000080 /* Right-shift function */
+#define SAFE_PK_FUNC_DIV 0x00000100 /* Divide function */
+#define SAFE_PK_FUNC_CMP 0x00000400 /* Compare function */
+#define SAFE_PK_FUNC_COPY 0x00000800 /* Copy function */
+#define SAFE_PK_FUNC_EXP16 0x00002000 /* Exponentiate (4-bit ACT) */
+#define SAFE_PK_FUNC_EXP4 0x00004000 /* Exponentiate (2-bit ACT) */
+#define SAFE_PK_FUNC_RUN 0x00008000 /* start/status */
+
+#define SAFE_RNG_STAT_BUSY 0x00000001 /* busy, data not valid */
+
+#define SAFE_RNG_CTRL_PRE_LFSR 0x00000001 /* enable output pre-LFSR */
+#define SAFE_RNG_CTRL_TST_MODE 0x00000002 /* enable test mode */
+#define SAFE_RNG_CTRL_TST_RUN 0x00000004 /* start test state machine */
+#define SAFE_RNG_CTRL_ENA_RING1 0x00000008 /* test entropy oscillator #1 */
+#define SAFE_RNG_CTRL_ENA_RING2 0x00000010 /* test entropy oscillator #2 */
+#define SAFE_RNG_CTRL_DIS_ALARM 0x00000020 /* disable RNG alarm reports */
+#define SAFE_RNG_CTRL_TST_CLOCK 0x00000040 /* enable test clock */
+#define SAFE_RNG_CTRL_SHORTEN 0x00000080 /* shorten state timers */
+#define SAFE_RNG_CTRL_TST_ALARM 0x00000100 /* simulate alarm state */
+#define SAFE_RNG_CTRL_RST_LFSR 0x00000200 /* reset LFSR */
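(Editorial example, not part of the patch.) The RNG interface reduces to "wait until not busy, then read a word"; a sketch assuming the same READ_REG accessor convention as above:

static u_int32_t safe_rng_read_word(struct safe_softc *sc)
{
	/* SAFE_RNG_STAT_BUSY means the output word is not yet valid */
	while (READ_REG(sc, SAFE_RNG_STAT) & SAFE_RNG_STAT_BUSY)
		cpu_relax();
	return READ_REG(sc, SAFE_RNG_OUT);
}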
+
+/*
+ * Packet engine descriptor. Note that d_csr is a copy of the
+ * SAFE_PE_CSR register and all definitions apply, and d_len
+ * is a copy of the SAFE_PE_LEN register and all definitions apply.
+ * d_src and d_dst may point directly to contiguous data or to a
+ * list of ``particle descriptors'' when using scatter/gather i/o.
+ */
+struct safe_desc {
+ u_int32_t d_csr; /* per-packet control/status */
+ u_int32_t d_src; /* source address */
+ u_int32_t d_dst; /* destination address */
+ u_int32_t d_sa; /* SA address */
+ u_int32_t d_len; /* length, bypass, status */
+};
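(Editorial example, not part of the patch.) The two-stage ownership test described above SAFE_PE_CSR_IS_DONE applies to these in-memory descriptor copies of the CSR and LEN registers; ignoring any endian conversion the driver may configure, the check looks like:

static int safe_desc_done(const struct safe_desc *d)
{
	/* the CSR copy must show DONE with READY cleared ... */
	if (!SAFE_PE_CSR_IS_DONE(d->d_csr))
		return 0;
	/* ... and the LEN word must agree, so a descriptor still being
	 * written back by the packet engine is not processed early */
	return SAFE_PE_LEN_IS_DONE(d->d_len);
}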
+
+/*
+ * Scatter/Gather particle descriptor.
+ *
+ * NB: scatter descriptors do not specify a size; this is fixed
+ * by the setting of the SAFE_PE_PARTCFG register.
+ */
+struct safe_pdesc {
+ u_int32_t pd_addr; /* particle address */
+#ifdef __BIG_ENDIAN
+ u_int16_t pd_flags; /* control word */
+ u_int16_t pd_size; /* particle size (bytes) */
+#else
+ u_int16_t pd_flags; /* control word */
+ u_int16_t pd_size; /* particle size (bytes) */
+#endif
+};
+
+#define SAFE_PD_READY 0x0001 /* ready for processing */
+#define SAFE_PD_DONE 0x0002 /* h/w completed processing */
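(Editorial example, not part of the patch.) A hedged sketch of filling a gather ("particle") entry; pd_size is meaningful for gather particles, while scatter particle size is fixed via SAFE_PE_PARTCFG as noted above. The 32-bit cast reflects the device's 32-bit bus addressing:

static void safe_fill_gather(struct safe_pdesc *pd,
    dma_addr_t addr, u_int16_t len)
{
	pd->pd_addr = (u_int32_t) addr;	/* bus address of the data */
	pd->pd_size = len;		/* particle length in bytes */
	pd->pd_flags = SAFE_PD_READY;	/* hand ownership to the engine */
}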
+
+/*
+ * Security Association (SA) Record (Rev 1). One of these is
+ * required for each operation processed by the packet engine.
+ */
+struct safe_sarec {
+ u_int32_t sa_cmd0;
+ u_int32_t sa_cmd1;
+ u_int32_t sa_resv0;
+ u_int32_t sa_resv1;
+ u_int32_t sa_key[8]; /* DES/3DES/AES key */
+ u_int32_t sa_indigest[5]; /* inner digest */
+ u_int32_t sa_outdigest[5]; /* outer digest */
+ u_int32_t sa_spi; /* SPI */
+ u_int32_t sa_seqnum; /* sequence number */
+ u_int32_t sa_seqmask[2]; /* sequence number mask */
+ u_int32_t sa_resv2;
+ u_int32_t sa_staterec; /* address of state record */
+ u_int32_t sa_resv3[2];
+ u_int32_t sa_samgmt0; /* SA management field 0 */
+ u_int32_t sa_samgmt1; /* SA management field 1 */
+};
+
+#define SAFE_SA_CMD0_OP 0x00000007 /* operation code */
+#define SAFE_SA_CMD0_OP_CRYPT 0x00000000 /* encrypt/decrypt (basic) */
+#define SAFE_SA_CMD0_OP_BOTH 0x00000001 /* encrypt-hash/hash-decrypt */
+#define SAFE_SA_CMD0_OP_HASH 0x00000003 /* hash (outbound-only) */
+#define SAFE_SA_CMD0_OP_ESP 0x00000000 /* ESP in/out (proto) */
+#define SAFE_SA_CMD0_OP_AH 0x00000001 /* AH in/out (proto) */
+#define SAFE_SA_CMD0_INBOUND 0x00000008 /* inbound operation */
+#define SAFE_SA_CMD0_OUTBOUND 0x00000000 /* outbound operation */
+#define SAFE_SA_CMD0_GROUP 0x00000030 /* operation group */
+#define SAFE_SA_CMD0_BASIC 0x00000000 /* basic operation */
+#define SAFE_SA_CMD0_PROTO 0x00000010 /* protocol/packet operation */
+#define SAFE_SA_CMD0_BUNDLE 0x00000020 /* bundled operation (resvd) */
+#define SAFE_SA_CMD0_PAD 0x000000c0 /* crypto pad method */
+#define SAFE_SA_CMD0_PAD_IPSEC 0x00000000 /* IPsec padding */
+#define SAFE_SA_CMD0_PAD_PKCS7 0x00000040 /* PKCS#7 padding */
+#define SAFE_SA_CMD0_PAD_CONS 0x00000080 /* constant padding */
+#define SAFE_SA_CMD0_PAD_ZERO 0x000000c0 /* zero padding */
+#define SAFE_SA_CMD0_CRYPT_ALG 0x00000f00 /* symmetric crypto algorithm */
+#define SAFE_SA_CMD0_DES 0x00000000 /* DES crypto algorithm */
+#define SAFE_SA_CMD0_3DES 0x00000100 /* 3DES crypto algorithm */
+#define SAFE_SA_CMD0_AES 0x00000300 /* AES crypto algorithm */
+#define SAFE_SA_CMD0_CRYPT_NULL 0x00000f00 /* null crypto algorithm */
+#define SAFE_SA_CMD0_HASH_ALG 0x0000f000 /* hash algorithm */
+#define SAFE_SA_CMD0_MD5 0x00000000 /* MD5 hash algorithm */
+#define SAFE_SA_CMD0_SHA1 0x00001000 /* SHA-1 hash algorithm */
+#define SAFE_SA_CMD0_HASH_NULL 0x0000f000 /* null hash algorithm */
+#define SAFE_SA_CMD0_HDR_PROC 0x00080000 /* header processing */
+#define SAFE_SA_CMD0_IBUSID 0x00300000 /* input bus id */
+#define SAFE_SA_CMD0_IPCI 0x00100000 /* PCI input bus id */
+#define SAFE_SA_CMD0_OBUSID 0x00c00000 /* output bus id */
+#define SAFE_SA_CMD0_OPCI 0x00400000 /* PCI output bus id */
+#define SAFE_SA_CMD0_IVLD 0x03000000 /* IV loading */
+#define SAFE_SA_CMD0_IVLD_NONE 0x00000000 /* IV no load (reuse) */
+#define SAFE_SA_CMD0_IVLD_IBUF 0x01000000 /* IV load from input buffer */
+#define SAFE_SA_CMD0_IVLD_STATE 0x02000000 /* IV load from state */
+#define SAFE_SA_CMD0_HSLD 0x0c000000 /* hash state loading */
+#define SAFE_SA_CMD0_HSLD_SA 0x00000000 /* hash state load from SA */
+#define SAFE_SA_CMD0_HSLD_STATE 0x08000000 /* hash state load from state */
+#define SAFE_SA_CMD0_HSLD_NONE 0x0c000000 /* hash state no load */
+#define SAFE_SA_CMD0_SAVEIV 0x10000000 /* save IV */
+#define SAFE_SA_CMD0_SAVEHASH 0x20000000 /* save hash state */
+#define SAFE_SA_CMD0_IGATHER 0x40000000 /* input gather */
+#define SAFE_SA_CMD0_OSCATTER 0x80000000 /* output scatter */
+
+#define SAFE_SA_CMD1_HDRCOPY 0x00000002 /* copy header to output */
+#define SAFE_SA_CMD1_PAYCOPY 0x00000004 /* copy payload to output */
+#define SAFE_SA_CMD1_PADCOPY 0x00000008 /* copy pad to output */
+#define SAFE_SA_CMD1_IPV4 0x00000000 /* IPv4 protocol */
+#define SAFE_SA_CMD1_IPV6 0x00000010 /* IPv6 protocol */
+#define SAFE_SA_CMD1_MUTABLE 0x00000020 /* mutable bit processing */
+#define SAFE_SA_CMD1_SRBUSID 0x000000c0 /* state record bus id */
+#define SAFE_SA_CMD1_SRPCI 0x00000040 /* state record from PCI */
+#define SAFE_SA_CMD1_CRMODE 0x00000300 /* crypto mode */
+#define SAFE_SA_CMD1_ECB 0x00000000 /* ECB crypto mode */
+#define SAFE_SA_CMD1_CBC 0x00000100 /* CBC crypto mode */
+#define SAFE_SA_CMD1_OFB 0x00000200 /* OFB crypto mode */
+#define SAFE_SA_CMD1_CFB 0x00000300 /* CFB crypto mode */
+#define SAFE_SA_CMD1_CRFEEDBACK 0x00000c00 /* crypto feedback mode */
+#define SAFE_SA_CMD1_64BIT 0x00000000 /* 64-bit crypto feedback */
+#define SAFE_SA_CMD1_8BIT 0x00000400 /* 8-bit crypto feedback */
+#define SAFE_SA_CMD1_1BIT 0x00000800 /* 1-bit crypto feedback */
+#define SAFE_SA_CMD1_128BIT 0x00000c00 /* 128-bit crypto feedback */
+#define SAFE_SA_CMD1_OPTIONS 0x00001000 /* HMAC/options mutable bit */
+#define SAFE_SA_CMD1_HMAC SAFE_SA_CMD1_OPTIONS
+#define SAFE_SA_CMD1_SAREV1 0x00008000 /* SA Revision 1 */
+#define SAFE_SA_CMD1_OFFSET 0x00ff0000 /* hash/crypto offset(dwords) */
+#define SAFE_SA_CMD1_OFFSET_S 16
+#define SAFE_SA_CMD1_AESKEYLEN 0x0f000000 /* AES key length */
+#define SAFE_SA_CMD1_AES128 0x02000000 /* 128-bit AES key */
+#define SAFE_SA_CMD1_AES192 0x03000000 /* 192-bit AES key */
+#define SAFE_SA_CMD1_AES256 0x04000000 /* 256-bit AES key */
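(Editorial example, not part of the patch.) To make the command-word bit fields concrete, an outbound AES-128-CBC with HMAC-SHA1 operation might be described roughly as follows; the real driver derives these values from the crypto session rather than hard-coding them:

/* sa_cmd0: outbound, basic group, encrypt+hash, AES, SHA-1,
 * IV loaded from the input buffer, hash state saved */
u_int32_t cmd0 = SAFE_SA_CMD0_OUTBOUND | SAFE_SA_CMD0_BASIC |
    SAFE_SA_CMD0_OP_BOTH | SAFE_SA_CMD0_AES | SAFE_SA_CMD0_SHA1 |
    SAFE_SA_CMD0_IVLD_IBUF | SAFE_SA_CMD0_SAVEHASH;

/* sa_cmd1: CBC mode, HMAC, SA revision 1, 128-bit AES key */
u_int32_t cmd1 = SAFE_SA_CMD1_CBC | SAFE_SA_CMD1_HMAC |
    SAFE_SA_CMD1_SAREV1 | SAFE_SA_CMD1_AES128;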
+
+/*
+ * Security Associate State Record (Rev 1).
+ */
+struct safe_sastate {
+ u_int32_t sa_saved_iv[4]; /* saved IV (DES/3DES/AES) */
+ u_int32_t sa_saved_hashbc; /* saved hash byte count */
+ u_int32_t sa_saved_indigest[5]; /* saved inner digest */
+};
+#endif /* _SAFE_SAFEREG_H_ */
diff --git a/target/linux/generic/files/crypto/ocf/safe/safevar.h b/target/linux/generic/files/crypto/ocf/safe/safevar.h
new file mode 100644
index 000000000..11d8304aa
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/safe/safevar.h
@@ -0,0 +1,229 @@
+/*-
+ * The linux port of this code done by David McCullough
+ * Copyright (C) 2004-2010 David McCullough <david_mccullough@mcafee.com>
+ * The license and original author are listed below.
+ *
+ * Copyright (c) 2003 Sam Leffler, Errno Consulting
+ * Copyright (c) 2003 Global Technology Associates, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/dev/safe/safevar.h,v 1.2 2006/05/17 18:34:26 pjd Exp $
+ */
+#ifndef _SAFE_SAFEVAR_H_
+#define _SAFE_SAFEVAR_H_
+
+/* Maximum queue length */
+#ifndef SAFE_MAX_NQUEUE
+#define SAFE_MAX_NQUEUE 60
+#endif
+
+#define SAFE_MAX_PART 64 /* Maximum scatter/gather depth */
+#define SAFE_DMA_BOUNDARY 0 /* No boundary for source DMA ops */
+#define SAFE_MAX_DSIZE 2048 /* MCLBYTES Fixed scatter particle size */
+#define SAFE_MAX_SSIZE 0x0ffff /* Maximum gather particle size */
+#define SAFE_MAX_DMA 0xfffff /* Maximum PE operand size (20 bits) */
+/* total src+dst particle descriptors */
+#define SAFE_TOTAL_DPART (SAFE_MAX_NQUEUE * SAFE_MAX_PART)
+#define SAFE_TOTAL_SPART (SAFE_MAX_NQUEUE * SAFE_MAX_PART)
+
+#define SAFE_RNG_MAXBUFSIZ 128 /* 32-bit words */
+
+#define SAFE_CARD(sid) (((sid) & 0xf0000000) >> 28)
+#define SAFE_SESSION(sid) ( (sid) & 0x0fffffff)
+#define SAFE_SID(crd, sesn) (((crd) << 28) | ((sesn) & 0x0fffffff))
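(Editorial example.) The packing macros round-trip as expected; with hypothetical card/session numbers:

u_int32_t sid = SAFE_SID(2, 5);		/* 0x20000005 */
/* SAFE_CARD(sid) == 2, SAFE_SESSION(sid) == 5 */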
+
+#define SAFE_DEF_RTY 0xff /* PCI Retry Timeout */
+#define SAFE_DEF_TOUT 0xff /* PCI TRDY Timeout */
+#define SAFE_DEF_CACHELINE 0x01 /* Cache Line setting */
+
+#ifdef __KERNEL__
+/*
+ * State associated with the allocation of each chunk
+ * of memory setup for DMA.
+ */
+struct safe_dma_alloc {
+ dma_addr_t dma_paddr;
+ void *dma_vaddr;
+};
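(Editorial example, not part of the patch.) This structure pairs with the pci_free_consistent() calls seen in the probe/remove paths earlier in this patch; a hypothetical allocation helper (the driver's own helper may be shaped differently) keeps both addresses together:

static int safe_dma_malloc_sketch(struct safe_softc *sc,
    struct safe_dma_alloc *dma, size_t size)
{
	dma->dma_vaddr = pci_alloc_consistent(sc->sc_pcidev, size,
	    &dma->dma_paddr);
	return dma->dma_vaddr ? 0 : -ENOMEM;
}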
+
+/*
+ * Cryptographic operand state. One of these exists for each
+ * source and destination operand passed in from the crypto
+ * subsystem. When possible source and destination operands
+ * refer to the same memory. More often they are distinct.
+ * We track the virtual address of each operand as well as
+ * where each is mapped for DMA.
+ */
+struct safe_operand {
+ union {
+ struct sk_buff *skb;
+ struct uio *io;
+ } u;
+ void *map;
+ int mapsize; /* total number of bytes in segs */
+ struct {
+ dma_addr_t ds_addr;
+ int ds_len;
+ int ds_tlen;
+ } segs[SAFE_MAX_PART];
+ int nsegs;
+};
+
+/*
+ * Packet engine ring entry and cryptographic operation state.
+ * The packet engine requires a ring of descriptors that contain
+ * pointers to various cryptographic state. However the ring
+ * configuration register allows you to specify an arbitrary size
+ * for ring entries. We use this feature to collect most of the
+ * state for each cryptographic request into one spot. Other than
+ * ring entries only the ``particle descriptors'' (scatter/gather
+ * lists) and the actual operand data are kept separate. The
+ * particle descriptors must also be organized in rings. The
+ * operand data can be located arbitrarily (modulo alignment constraints).
+ *
+ * Note that the descriptor ring is mapped onto the PCI bus so
+ * the hardware can DMA data. This means the entire ring must be
+ * contiguous.
+ */
+struct safe_ringentry {
+ struct safe_desc re_desc; /* command descriptor */
+ struct safe_sarec re_sa; /* SA record */
+ struct safe_sastate re_sastate; /* SA state record */
+
+ struct cryptop *re_crp; /* crypto operation */
+
+ struct safe_operand re_src; /* source operand */
+ struct safe_operand re_dst; /* destination operand */
+
+ int re_sesn; /* crypto session ID */
+ int re_flags;
+#define SAFE_QFLAGS_COPYOUTIV 0x1 /* copy back on completion */
+#define SAFE_QFLAGS_COPYOUTICV 0x2 /* copy back on completion */
+};
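(Editorial example, not part of the patch.) Because the hardware walks the ring with a fixed per-entry stride, the whole ring is one contiguous coherent allocation of SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry) bytes, and software pointers into it wrap at the top (sc_ring/sc_ringtop as declared in struct safe_softc below):

static struct safe_ringentry *
safe_ring_next(struct safe_softc *sc, struct safe_ringentry *re)
{
	if (++re == sc->sc_ringtop)	/* wrap back to the start */
		re = sc->sc_ring;
	return re;
}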
+
+#define re_src_skb re_src.u.skb
+#define re_src_io re_src.u.io
+#define re_src_map re_src.map
+#define re_src_nsegs re_src.nsegs
+#define re_src_segs re_src.segs
+#define re_src_mapsize re_src.mapsize
+
+#define re_dst_skb re_dst.u.skb
+#define re_dst_io re_dst.u.io
+#define re_dst_map re_dst.map
+#define re_dst_nsegs re_dst.nsegs
+#define re_dst_segs re_dst.segs
+#define re_dst_mapsize re_dst.mapsize
+
+struct rndstate_test;
+
+struct safe_session {
+ u_int32_t ses_used;
+ u_int32_t ses_klen; /* key length in bits */
+ u_int32_t ses_key[8]; /* DES/3DES/AES key */
+ u_int32_t ses_mlen; /* hmac length in bytes */
+ u_int32_t ses_hminner[5]; /* hmac inner state */
+ u_int32_t ses_hmouter[5]; /* hmac outer state */
+};
+
+struct safe_pkq {
+ struct list_head pkq_list;
+ struct cryptkop *pkq_krp;
+};
+
+struct safe_softc {
+ softc_device_decl sc_dev;
+ u32 sc_irq;
+
+ struct pci_dev *sc_pcidev;
+ ocf_iomem_t sc_base_addr;
+
+ u_int sc_chiprev; /* major/minor chip revision */
+ int sc_flags; /* device specific flags */
+#define SAFE_FLAGS_KEY 0x01 /* has key accelerator */
+#define SAFE_FLAGS_RNG 0x02 /* hardware rng */
+ int sc_suspended;
+ int sc_needwakeup; /* notify crypto layer */
+ int32_t sc_cid; /* crypto tag */
+
+ struct safe_dma_alloc sc_ringalloc; /* PE ring allocation state */
+ struct safe_ringentry *sc_ring; /* PE ring */
+ struct safe_ringentry *sc_ringtop; /* PE ring top */
+ struct safe_ringentry *sc_front; /* next free entry */
+ struct safe_ringentry *sc_back; /* next pending entry */
+ int sc_nqchip; /* # passed to chip */
+ spinlock_t sc_ringmtx; /* PE ring lock */
+ struct safe_pdesc *sc_spring; /* src particle ring */
+ struct safe_pdesc *sc_springtop; /* src particle ring top */
+ struct safe_pdesc *sc_spfree; /* next free src particle */
+ struct safe_dma_alloc sc_spalloc; /* src particle ring state */
+ struct safe_pdesc *sc_dpring; /* dest particle ring */
+ struct safe_pdesc *sc_dpringtop; /* dest particle ring top */
+ struct safe_pdesc *sc_dpfree; /* next free dest particle */
+ struct safe_dma_alloc sc_dpalloc; /* dst particle ring state */
+ int sc_nsessions; /* # of sessions */
+ struct safe_session *sc_sessions; /* sessions */
+
+ struct timer_list sc_pkto; /* PK polling */
+ spinlock_t sc_pkmtx; /* PK lock */
+ struct list_head sc_pkq; /* queue of PK requests */
+ struct safe_pkq *sc_pkq_cur; /* current processing request */
+ u_int32_t sc_pk_reslen, sc_pk_resoff;
+
+ int sc_max_dsize; /* maximum safe DMA size */
+};
+#endif /* __KERNEL__ */
+
+struct safe_stats {
+ u_int64_t st_ibytes;
+ u_int64_t st_obytes;
+ u_int32_t st_ipackets;
+ u_int32_t st_opackets;
+ u_int32_t st_invalid; /* invalid argument */
+ u_int32_t st_badsession; /* invalid session id */
+ u_int32_t st_badflags; /* flags indicate !(mbuf | uio) */
+ u_int32_t st_nodesc; /* op submitted w/o descriptors */
+ u_int32_t st_badalg; /* unsupported algorithm */
+ u_int32_t st_ringfull; /* PE descriptor ring full */
+ u_int32_t st_peoperr; /* PE marked error */
+ u_int32_t st_dmaerr; /* PE DMA error */
+ u_int32_t st_bypasstoobig; /* bypass > 96 bytes */
+ u_int32_t st_skipmismatch; /* enc part begins before auth part */
+ u_int32_t st_lenmismatch; /* enc length differs from auth length */
+ u_int32_t st_coffmisaligned; /* crypto offset not 32-bit aligned */
+ u_int32_t st_cofftoobig; /* crypto offset > 255 words */
+ u_int32_t st_iovmisaligned; /* iov op not aligned */
+ u_int32_t st_iovnotuniform; /* iov op not suitable */
+ u_int32_t st_unaligned; /* unaligned src caused copy */
+ u_int32_t st_notuniform; /* non-uniform src caused copy */
+ u_int32_t st_nomap; /* bus_dmamap_create failed */
+ u_int32_t st_noload; /* bus_dmamap_load_* failed */
+ u_int32_t st_nombuf; /* MGET* failed */
+ u_int32_t st_nomcl; /* MCLGET* failed */
+ u_int32_t st_maxqchip; /* max mcr1 ops out for processing */
+ u_int32_t st_rng; /* RNG requests */
+ u_int32_t st_rngalarm; /* RNG alarm requests */
+ u_int32_t st_noicvcopy; /* ICV data copies suppressed */
+};
+#endif /* _SAFE_SAFEVAR_H_ */
diff --git a/target/linux/generic/files/crypto/ocf/safe/sha1.c b/target/linux/generic/files/crypto/ocf/safe/sha1.c
new file mode 100644
index 000000000..4e360e20d
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/safe/sha1.c
@@ -0,0 +1,279 @@
+/* $KAME: sha1.c,v 1.5 2000/11/08 06:13:08 itojun Exp $ */
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
+ * based on: http://csrc.nist.gov/fips/fip180-1.txt
+ * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
+ */
+
+#if 0
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/crypto/sha1.c,v 1.9 2003/06/10 21:36:57 obrien Exp $");
+
+#include <sys/types.h>
+#include <sys/cdefs.h>
+#include <sys/time.h>
+#include <sys/systm.h>
+
+#include <crypto/sha1.h>
+#endif
+
+/* sanity check */
+#if BYTE_ORDER != BIG_ENDIAN
+# if BYTE_ORDER != LITTLE_ENDIAN
+# define unsupported 1
+# endif
+#endif
+
+#ifndef unsupported
+
+/* constant table */
+static u_int32_t _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 };
+#define K(t) _K[(t) / 20]
+
+#define F0(b, c, d) (((b) & (c)) | ((~(b)) & (d)))
+#define F1(b, c, d) (((b) ^ (c)) ^ (d))
+#define F2(b, c, d) (((b) & (c)) | ((b) & (d)) | ((c) & (d)))
+#define F3(b, c, d) (((b) ^ (c)) ^ (d))
+
+#define S(n, x) (((x) << (n)) | ((x) >> (32 - n)))
+
+#undef H
+#define H(n) (ctxt->h.b32[(n)])
+#define COUNT (ctxt->count)
+#define BCOUNT (ctxt->c.b64[0] / 8)
+#define W(n) (ctxt->m.b32[(n)])
+
+#define PUTBYTE(x) { \
+ ctxt->m.b8[(COUNT % 64)] = (x); \
+ COUNT++; \
+ COUNT %= 64; \
+ ctxt->c.b64[0] += 8; \
+ if (COUNT % 64 == 0) \
+ sha1_step(ctxt); \
+ }
+
+#define PUTPAD(x) { \
+ ctxt->m.b8[(COUNT % 64)] = (x); \
+ COUNT++; \
+ COUNT %= 64; \
+ if (COUNT % 64 == 0) \
+ sha1_step(ctxt); \
+ }
+
+static void sha1_step(struct sha1_ctxt *);
+
+static void
+sha1_step(ctxt)
+ struct sha1_ctxt *ctxt;
+{
+ u_int32_t a, b, c, d, e;
+ size_t t, s;
+ u_int32_t tmp;
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+ struct sha1_ctxt tctxt;
+ bcopy(&ctxt->m.b8[0], &tctxt.m.b8[0], 64);
+ ctxt->m.b8[0] = tctxt.m.b8[3]; ctxt->m.b8[1] = tctxt.m.b8[2];
+ ctxt->m.b8[2] = tctxt.m.b8[1]; ctxt->m.b8[3] = tctxt.m.b8[0];
+ ctxt->m.b8[4] = tctxt.m.b8[7]; ctxt->m.b8[5] = tctxt.m.b8[6];
+ ctxt->m.b8[6] = tctxt.m.b8[5]; ctxt->m.b8[7] = tctxt.m.b8[4];
+ ctxt->m.b8[8] = tctxt.m.b8[11]; ctxt->m.b8[9] = tctxt.m.b8[10];
+ ctxt->m.b8[10] = tctxt.m.b8[9]; ctxt->m.b8[11] = tctxt.m.b8[8];
+ ctxt->m.b8[12] = tctxt.m.b8[15]; ctxt->m.b8[13] = tctxt.m.b8[14];
+ ctxt->m.b8[14] = tctxt.m.b8[13]; ctxt->m.b8[15] = tctxt.m.b8[12];
+ ctxt->m.b8[16] = tctxt.m.b8[19]; ctxt->m.b8[17] = tctxt.m.b8[18];
+ ctxt->m.b8[18] = tctxt.m.b8[17]; ctxt->m.b8[19] = tctxt.m.b8[16];
+ ctxt->m.b8[20] = tctxt.m.b8[23]; ctxt->m.b8[21] = tctxt.m.b8[22];
+ ctxt->m.b8[22] = tctxt.m.b8[21]; ctxt->m.b8[23] = tctxt.m.b8[20];
+ ctxt->m.b8[24] = tctxt.m.b8[27]; ctxt->m.b8[25] = tctxt.m.b8[26];
+ ctxt->m.b8[26] = tctxt.m.b8[25]; ctxt->m.b8[27] = tctxt.m.b8[24];
+ ctxt->m.b8[28] = tctxt.m.b8[31]; ctxt->m.b8[29] = tctxt.m.b8[30];
+ ctxt->m.b8[30] = tctxt.m.b8[29]; ctxt->m.b8[31] = tctxt.m.b8[28];
+ ctxt->m.b8[32] = tctxt.m.b8[35]; ctxt->m.b8[33] = tctxt.m.b8[34];
+ ctxt->m.b8[34] = tctxt.m.b8[33]; ctxt->m.b8[35] = tctxt.m.b8[32];
+ ctxt->m.b8[36] = tctxt.m.b8[39]; ctxt->m.b8[37] = tctxt.m.b8[38];
+ ctxt->m.b8[38] = tctxt.m.b8[37]; ctxt->m.b8[39] = tctxt.m.b8[36];
+ ctxt->m.b8[40] = tctxt.m.b8[43]; ctxt->m.b8[41] = tctxt.m.b8[42];
+ ctxt->m.b8[42] = tctxt.m.b8[41]; ctxt->m.b8[43] = tctxt.m.b8[40];
+ ctxt->m.b8[44] = tctxt.m.b8[47]; ctxt->m.b8[45] = tctxt.m.b8[46];
+ ctxt->m.b8[46] = tctxt.m.b8[45]; ctxt->m.b8[47] = tctxt.m.b8[44];
+ ctxt->m.b8[48] = tctxt.m.b8[51]; ctxt->m.b8[49] = tctxt.m.b8[50];
+ ctxt->m.b8[50] = tctxt.m.b8[49]; ctxt->m.b8[51] = tctxt.m.b8[48];
+ ctxt->m.b8[52] = tctxt.m.b8[55]; ctxt->m.b8[53] = tctxt.m.b8[54];
+ ctxt->m.b8[54] = tctxt.m.b8[53]; ctxt->m.b8[55] = tctxt.m.b8[52];
+ ctxt->m.b8[56] = tctxt.m.b8[59]; ctxt->m.b8[57] = tctxt.m.b8[58];
+ ctxt->m.b8[58] = tctxt.m.b8[57]; ctxt->m.b8[59] = tctxt.m.b8[56];
+ ctxt->m.b8[60] = tctxt.m.b8[63]; ctxt->m.b8[61] = tctxt.m.b8[62];
+ ctxt->m.b8[62] = tctxt.m.b8[61]; ctxt->m.b8[63] = tctxt.m.b8[60];
+#endif
+
+ a = H(0); b = H(1); c = H(2); d = H(3); e = H(4);
+
+ for (t = 0; t < 20; t++) {
+ s = t & 0x0f;
+ if (t >= 16) {
+ W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
+ }
+ tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t);
+ e = d; d = c; c = S(30, b); b = a; a = tmp;
+ }
+ for (t = 20; t < 40; t++) {
+ s = t & 0x0f;
+ W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
+ tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t);
+ e = d; d = c; c = S(30, b); b = a; a = tmp;
+ }
+ for (t = 40; t < 60; t++) {
+ s = t & 0x0f;
+ W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
+ tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t);
+ e = d; d = c; c = S(30, b); b = a; a = tmp;
+ }
+ for (t = 60; t < 80; t++) {
+ s = t & 0x0f;
+ W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
+ tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t);
+ e = d; d = c; c = S(30, b); b = a; a = tmp;
+ }
+
+ H(0) = H(0) + a;
+ H(1) = H(1) + b;
+ H(2) = H(2) + c;
+ H(3) = H(3) + d;
+ H(4) = H(4) + e;
+
+ bzero(&ctxt->m.b8[0], 64);
+}
+
+/*------------------------------------------------------------*/
+
+void
+sha1_init(ctxt)
+ struct sha1_ctxt *ctxt;
+{
+ bzero(ctxt, sizeof(struct sha1_ctxt));
+ H(0) = 0x67452301;
+ H(1) = 0xefcdab89;
+ H(2) = 0x98badcfe;
+ H(3) = 0x10325476;
+ H(4) = 0xc3d2e1f0;
+}
+
+void
+sha1_pad(ctxt)
+ struct sha1_ctxt *ctxt;
+{
+ size_t padlen; /*pad length in bytes*/
+ size_t padstart;
+
+ PUTPAD(0x80);
+
+ padstart = COUNT % 64;
+ padlen = 64 - padstart;
+ if (padlen < 8) {
+ bzero(&ctxt->m.b8[padstart], padlen);
+ COUNT += padlen;
+ COUNT %= 64;
+ sha1_step(ctxt);
+ padstart = COUNT % 64; /* should be 0 */
+ padlen = 64 - padstart; /* should be 64 */
+ }
+ bzero(&ctxt->m.b8[padstart], padlen - 8);
+ COUNT += (padlen - 8);
+ COUNT %= 64;
+#if BYTE_ORDER == BIG_ENDIAN
+ PUTPAD(ctxt->c.b8[0]); PUTPAD(ctxt->c.b8[1]);
+ PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[3]);
+ PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[5]);
+ PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[7]);
+#else
+ PUTPAD(ctxt->c.b8[7]); PUTPAD(ctxt->c.b8[6]);
+ PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[4]);
+ PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[2]);
+ PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[0]);
+#endif
+}
+
+void
+sha1_loop(ctxt, input, len)
+ struct sha1_ctxt *ctxt;
+ const u_int8_t *input;
+ size_t len;
+{
+ size_t gaplen;
+ size_t gapstart;
+ size_t off;
+ size_t copysiz;
+
+ off = 0;
+
+ while (off < len) {
+ gapstart = COUNT % 64;
+ gaplen = 64 - gapstart;
+
+ copysiz = (gaplen < len - off) ? gaplen : len - off;
+ bcopy(&input[off], &ctxt->m.b8[gapstart], copysiz);
+ COUNT += copysiz;
+ COUNT %= 64;
+ ctxt->c.b64[0] += copysiz * 8;
+ if (COUNT % 64 == 0)
+ sha1_step(ctxt);
+ off += copysiz;
+ }
+}
+
+void
+sha1_result(ctxt, digest0)
+ struct sha1_ctxt *ctxt;
+ caddr_t digest0;
+{
+ u_int8_t *digest;
+
+ digest = (u_int8_t *)digest0;
+ sha1_pad(ctxt);
+#if BYTE_ORDER == BIG_ENDIAN
+ bcopy(&ctxt->h.b8[0], digest, 20);
+#else
+ digest[0] = ctxt->h.b8[3]; digest[1] = ctxt->h.b8[2];
+ digest[2] = ctxt->h.b8[1]; digest[3] = ctxt->h.b8[0];
+ digest[4] = ctxt->h.b8[7]; digest[5] = ctxt->h.b8[6];
+ digest[6] = ctxt->h.b8[5]; digest[7] = ctxt->h.b8[4];
+ digest[8] = ctxt->h.b8[11]; digest[9] = ctxt->h.b8[10];
+ digest[10] = ctxt->h.b8[9]; digest[11] = ctxt->h.b8[8];
+ digest[12] = ctxt->h.b8[15]; digest[13] = ctxt->h.b8[14];
+ digest[14] = ctxt->h.b8[13]; digest[15] = ctxt->h.b8[12];
+ digest[16] = ctxt->h.b8[19]; digest[17] = ctxt->h.b8[18];
+ digest[18] = ctxt->h.b8[17]; digest[19] = ctxt->h.b8[16];
+#endif
+}
+
+#endif /*unsupported*/
diff --git a/target/linux/generic/files/crypto/ocf/safe/sha1.h b/target/linux/generic/files/crypto/ocf/safe/sha1.h
new file mode 100644
index 000000000..0e19d9071
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/safe/sha1.h
@@ -0,0 +1,72 @@
+/* $FreeBSD: src/sys/crypto/sha1.h,v 1.8 2002/03/20 05:13:50 alfred Exp $ */
+/* $KAME: sha1.h,v 1.5 2000/03/27 04:36:23 sumikawa Exp $ */
+
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
+ * based on: http://csrc.nist.gov/fips/fip180-1.txt
+ * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
+ */
+
+#ifndef _NETINET6_SHA1_H_
+#define _NETINET6_SHA1_H_
+
+struct sha1_ctxt {
+ union {
+ u_int8_t b8[20];
+ u_int32_t b32[5];
+ } h;
+ union {
+ u_int8_t b8[8];
+ u_int64_t b64[1];
+ } c;
+ union {
+ u_int8_t b8[64];
+ u_int32_t b32[16];
+ } m;
+ u_int8_t count;
+};
+
+#ifdef __KERNEL__
+extern void sha1_init(struct sha1_ctxt *);
+extern void sha1_pad(struct sha1_ctxt *);
+extern void sha1_loop(struct sha1_ctxt *, const u_int8_t *, size_t);
+extern void sha1_result(struct sha1_ctxt *, caddr_t);
+
+/* compatibility with other SHA1 implementations */
+typedef struct sha1_ctxt SHA1_CTX;
+#define SHA1Init(x) sha1_init((x))
+#define SHA1Update(x, y, z) sha1_loop((x), (y), (z))
+#define SHA1Final(x, y) sha1_result((y), (x))
+#endif /* __KERNEL__ */
+
+#define SHA1_RESULTLEN (160/8)
+
+#endif /*_NETINET6_SHA1_H_*/
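(Editorial example, not part of the patch.) The compatibility wrappers above follow the usual init/update/final pattern; note that SHA1Final takes the result buffer first and the context second, and the digest is SHA1_RESULTLEN (20) bytes:

#include "sha1.h"

static void sha1_digest(const u_int8_t *data, size_t len,
    u_int8_t digest[SHA1_RESULTLEN])
{
	SHA1_CTX ctx;

	SHA1Init(&ctx);
	SHA1Update(&ctx, data, len);
	SHA1Final((caddr_t) digest, &ctx);
}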
diff --git a/target/linux/generic/files/crypto/ocf/talitos/Makefile b/target/linux/generic/files/crypto/ocf/talitos/Makefile
new file mode 100644
index 000000000..2591b8aef
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/talitos/Makefile
@@ -0,0 +1,12 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_TALITOS) += talitos.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
+
diff --git a/target/linux/generic/files/crypto/ocf/talitos/talitos.c b/target/linux/generic/files/crypto/ocf/talitos/talitos.c
new file mode 100644
index 000000000..c4bc8c0fd
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/talitos/talitos.c
@@ -0,0 +1,1355 @@
+/*
+ * crypto/ocf/talitos/talitos.c
+ *
+ * An OCF-Linux module that uses Freescale's SEC to do the crypto.
+ * Based on crypto/ocf/hifn and crypto/ocf/safe OCF drivers
+ *
+ * Copyright (c) 2006 Freescale Semiconductor, Inc.
+ *
+ * This code written by Kim A. B. Phillips <kim.phillips@freescale.com>
+ * some code copied from files with the following:
+ * Copyright (C) 2004-2007 David McCullough <david_mccullough@mcafee.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * NOTES:
+ *
+ * The Freescale SEC (also known as 'talitos') resides on the
+ * internal bus, and runs asynchronous to the processor core. It has
+ * a wide gamut of cryptographic acceleration features, including single-
+ * pass IPsec (also known as algorithm chaining). To properly utilize
+ * all of the SEC's performance enhancing features, further reworking
+ * of higher level code (framework, applications) will be necessary.
+ *
+ * The following table shows which SEC version is present in which devices:
+ *
+ * Devices SEC version
+ *
+ * 8272, 8248 SEC 1.0
+ * 885, 875 SEC 1.2
+ * 8555E, 8541E SEC 2.0
+ * 8349E SEC 2.01
+ * 8548E SEC 2.1
+ *
+ * The following table shows the features offered by each SEC version:
+ *
+ * Max. chan-
+ * version Bus I/F Clock nels DEU AESU AFEU MDEU PKEU RNG KEU
+ *
+ * SEC 1.0 internal 64b 100MHz 4 1 1 1 1 1 1 0
+ * SEC 1.2 internal 32b 66MHz 1 1 1 0 1 0 0 0
+ * SEC 2.0 internal 64b 166MHz 4 1 1 1 1 1 1 0
+ * SEC 2.01 internal 64b 166MHz 4 1 1 1 1 1 1 0
+ * SEC 2.1 internal 64b 333MHz 4 1 1 1 1 1 1 1
+ *
+ * Each execution unit in the SEC has two modes of execution; channel and
+ * slave/debug. This driver employs the channel infrastructure in the
+ * device for convenience. Only the RNG is directly accessed due to the
+ * convenience of its random fifo pool. The relationship between the
+ * channels and execution units is depicted in the following diagram:
+ *
+ * ------- ------------
+ * ---| ch0 |---| |
+ * ------- | |
+ * | |------+-------+-------+-------+------------
+ * ------- | | | | | | |
+ * ---| ch1 |---| | | | | | |
+ * ------- | | ------ ------ ------ ------ ------
+ * |controller| |DEU | |AESU| |MDEU| |PKEU| ... |RNG |
+ * ------- | | ------ ------ ------ ------ ------
+ * ---| ch2 |---| | | | | | |
+ * ------- | | | | | | |
+ * | |------+-------+-------+-------+------------
+ * ------- | |
+ * ---| ch3 |---| |
+ * ------- ------------
+ *
+ * Channel ch0 may drive an aes operation to the aes unit (AESU),
+ * and, at the same time, ch1 may drive a message digest operation
+ * to the mdeu. Each channel has an input descriptor FIFO, and the
+ * FIFO can contain, e.g. on the 8541E, up to 24 entries before a
+ * buffer overrun error is triggered. The controller is responsible
+ * for fetching the data from descriptor pointers, and passing the
+ * data to the appropriate EUs. The controller also writes the
+ * cryptographic operation's result to memory. The SEC notifies
+ * completion by triggering an interrupt and/or setting the 1st byte
+ * of the hdr field to 0xff.
+ *
+ * TODO:
+ * o support more algorithms
+ * o support more versions of the SEC
+ * o add support for linux 2.4
+ * o scatter-gather (sg) support
+ * o add support for public key ops (PKEU)
+ * o add statistics
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <asm/scatterlist.h>
+#include <linux/dma-mapping.h> /* dma_map_single() */
+#include <linux/moduleparam.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
+#include <linux/platform_device.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+#include <linux/of_platform.h>
+#endif
+
+#include <cryptodev.h>
+#include <uio.h>
+
+#define DRV_NAME "talitos"
+
+#include "talitos_dev.h"
+#include "talitos_soft.h"
+
+#define read_random(p,l) get_random_bytes(p,l)
+
+const char talitos_driver_name[] = "Talitos OCF";
+const char talitos_driver_version[] = "0.2";
+
+static int talitos_newsession(device_t dev, u_int32_t *sidp,
+ struct cryptoini *cri);
+static int talitos_freesession(device_t dev, u_int64_t tid);
+static int talitos_process(device_t dev, struct cryptop *crp, int hint);
+static void dump_talitos_status(struct talitos_softc *sc);
+static int talitos_submit(struct talitos_softc *sc, struct talitos_desc *td,
+ int chsel);
+static void talitos_doneprocessing(struct talitos_softc *sc);
+static void talitos_init_device(struct talitos_softc *sc);
+static void talitos_reset_device_master(struct talitos_softc *sc);
+static void talitos_reset_device(struct talitos_softc *sc);
+static void talitos_errorprocessing(struct talitos_softc *sc);
+#ifdef CONFIG_PPC_MERGE
+static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match);
+static int talitos_remove(struct of_device *ofdev);
+#else
+static int talitos_probe(struct platform_device *pdev);
+static int talitos_remove(struct platform_device *pdev);
+#endif
+#ifdef CONFIG_OCF_RANDOMHARVEST
+static int talitos_read_random(void *arg, u_int32_t *buf, int maxwords);
+static void talitos_rng_init(struct talitos_softc *sc);
+#endif
+
+static device_method_t talitos_methods = {
+ /* crypto device methods */
+ DEVMETHOD(cryptodev_newsession, talitos_newsession),
+ DEVMETHOD(cryptodev_freesession,talitos_freesession),
+ DEVMETHOD(cryptodev_process, talitos_process),
+};
+
+#define debug talitos_debug
+int talitos_debug = 0;
+module_param(talitos_debug, int, 0644);
+MODULE_PARM_DESC(talitos_debug, "Enable debug");
+
+static inline void talitos_write(volatile unsigned *addr, u32 val)
+{
+ out_be32(addr, val);
+}
+
+static inline u32 talitos_read(volatile unsigned *addr)
+{
+ u32 val;
+ val = in_be32(addr);
+ return val;
+}
+
+static void dump_talitos_status(struct talitos_softc *sc)
+{
+ unsigned int v, v_hi, i, *ptr;
+ v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
+ v_hi = talitos_read(sc->sc_base_addr + TALITOS_MCR_HI);
+ printk(KERN_INFO "%s: MCR 0x%08x_%08x\n",
+ device_get_nameunit(sc->sc_cdev), v, v_hi);
+ v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
+ v_hi = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
+ printk(KERN_INFO "%s: IMR 0x%08x_%08x\n",
+ device_get_nameunit(sc->sc_cdev), v, v_hi);
+ v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
+ v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
+ printk(KERN_INFO "%s: ISR 0x%08x_%08x\n",
+ device_get_nameunit(sc->sc_cdev), v, v_hi);
+ for (i = 0; i < sc->sc_num_channels; i++) {
+ v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+ TALITOS_CH_CDPR);
+ v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+ TALITOS_CH_CDPR_HI);
+ printk(KERN_INFO "%s: CDPR ch%d 0x%08x_%08x\n",
+ device_get_nameunit(sc->sc_cdev), i, v, v_hi);
+ }
+ for (i = 0; i < sc->sc_num_channels; i++) {
+ v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+ TALITOS_CH_CCPSR);
+ v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+ TALITOS_CH_CCPSR_HI);
+ printk(KERN_INFO "%s: CCPSR ch%d 0x%08x_%08x\n",
+ device_get_nameunit(sc->sc_cdev), i, v, v_hi);
+ }
+ ptr = sc->sc_base_addr + TALITOS_CH_DESCBUF;
+ for (i = 0; i < 16; i++) {
+ v = talitos_read(ptr++); v_hi = talitos_read(ptr++);
+ printk(KERN_INFO "%s: DESCBUF ch0 0x%08x_%08x (tdp%02d)\n",
+ device_get_nameunit(sc->sc_cdev), v, v_hi, i);
+ }
+ return;
+}
+
+
+#ifdef CONFIG_OCF_RANDOMHARVEST
+/*
+ * pull random numbers off the RNG FIFO, not exceeding amount available
+ */
+static int
+talitos_read_random(void *arg, u_int32_t *buf, int maxwords)
+{
+ struct talitos_softc *sc = (struct talitos_softc *) arg;
+ int rc;
+ u_int32_t v;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ /* check for things like FIFO underflow */
+ v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
+ if (unlikely(v)) {
+ printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
+ device_get_nameunit(sc->sc_cdev), v);
+ return 0;
+ }
+ /*
+ * OFL is number of available 64-bit words,
+ * shift and convert to a 32-bit word count
+ */
+ v = talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI);
+ v = (v & TALITOS_RNGSR_HI_OFL) >> (16 - 1);
+ if (maxwords > v)
+ maxwords = v;
+ for (rc = 0; rc < maxwords; rc++) {
+ buf[rc] = talitos_read(sc->sc_base_addr +
+ TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
+ }
+ if (maxwords & 1) {
+ /*
+ * RNG will complain with an AE in the RNGISR
+ * if we don't complete the pairs of 32-bit reads
+ * to its 64-bit register based FIFO
+ */
+ v = talitos_read(sc->sc_base_addr +
+ TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
+ }
+
+ return rc;
+}
+
+static void
+talitos_rng_init(struct talitos_softc *sc)
+{
+ u_int32_t v;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+ /* reset RNG EU */
+ v = talitos_read(sc->sc_base_addr + TALITOS_RNGRCR_HI);
+ v |= TALITOS_RNGRCR_HI_SR;
+ talitos_write(sc->sc_base_addr + TALITOS_RNGRCR_HI, v);
+ while ((talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI)
+ & TALITOS_RNGSR_HI_RD) == 0)
+ cpu_relax();
+ /*
+ * we tell the RNG to start filling the RNG FIFO
+ * by writing the RNGDSR
+ */
+ v = talitos_read(sc->sc_base_addr + TALITOS_RNGDSR_HI);
+ talitos_write(sc->sc_base_addr + TALITOS_RNGDSR_HI, v);
+ /*
+ * 64 bits of data will be pushed onto the FIFO every
+ * 256 SEC cycles until the FIFO is full. The RNG then
+ * attempts to keep the FIFO full.
+ */
+ v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
+ if (v) {
+ printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
+ device_get_nameunit(sc->sc_cdev), v);
+ return;
+ }
+ /*
+ * n.b. we need to add a FIPS test here - if the RNG is going
+ * to fail, it's going to fail at reset time
+ */
+ return;
+}
+#endif /* CONFIG_OCF_RANDOMHARVEST */
+
+/*
+ * Generate a new software session.
+ */
+static int
+talitos_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
+{
+ struct cryptoini *c, *encini = NULL, *macini = NULL;
+ struct talitos_softc *sc = device_get_softc(dev);
+ struct talitos_session *ses = NULL;
+ int sesn;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+ if (sidp == NULL || cri == NULL || sc == NULL) {
+ DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+ for (c = cri; c != NULL; c = c->cri_next) {
+ if (c->cri_alg == CRYPTO_MD5 ||
+ c->cri_alg == CRYPTO_MD5_HMAC ||
+ c->cri_alg == CRYPTO_SHA1 ||
+ c->cri_alg == CRYPTO_SHA1_HMAC ||
+ c->cri_alg == CRYPTO_NULL_HMAC) {
+ if (macini)
+ return EINVAL;
+ macini = c;
+ } else if (c->cri_alg == CRYPTO_DES_CBC ||
+ c->cri_alg == CRYPTO_3DES_CBC ||
+ c->cri_alg == CRYPTO_AES_CBC ||
+ c->cri_alg == CRYPTO_NULL_CBC) {
+ if (encini)
+ return EINVAL;
+ encini = c;
+ } else {
+ DPRINTF("UNKNOWN c->cri_alg %d\n", encini->cri_alg);
+ return EINVAL;
+ }
+ }
+ if (encini == NULL && macini == NULL)
+ return EINVAL;
+ if (encini) {
+ /* validate key length */
+ switch (encini->cri_alg) {
+ case CRYPTO_DES_CBC:
+ if (encini->cri_klen != 64)
+ return EINVAL;
+ break;
+ case CRYPTO_3DES_CBC:
+ if (encini->cri_klen != 192) {
+ return EINVAL;
+ }
+ break;
+ case CRYPTO_AES_CBC:
+ if (encini->cri_klen != 128 &&
+ encini->cri_klen != 192 &&
+ encini->cri_klen != 256)
+ return EINVAL;
+ break;
+ default:
+ DPRINTF("UNKNOWN encini->cri_alg %d\n",
+ encini->cri_alg);
+ return EINVAL;
+ }
+ }
+
+ if (sc->sc_sessions == NULL) {
+ ses = sc->sc_sessions = (struct talitos_session *)
+ kmalloc(sizeof(struct talitos_session), SLAB_ATOMIC);
+ if (ses == NULL)
+ return ENOMEM;
+ memset(ses, 0, sizeof(struct talitos_session));
+ sesn = 0;
+ sc->sc_nsessions = 1;
+ } else {
+ for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
+ if (sc->sc_sessions[sesn].ses_used == 0) {
+ ses = &sc->sc_sessions[sesn];
+ break;
+ }
+ }
+
+ if (ses == NULL) {
+ /* allocating session */
+ sesn = sc->sc_nsessions;
+ ses = (struct talitos_session *) kmalloc(
+ (sesn + 1) * sizeof(struct talitos_session),
+ SLAB_ATOMIC);
+ if (ses == NULL)
+ return ENOMEM;
+ memset(ses, 0,
+ (sesn + 1) * sizeof(struct talitos_session));
+ memcpy(ses, sc->sc_sessions,
+ sesn * sizeof(struct talitos_session));
+ memset(sc->sc_sessions, 0,
+ sesn * sizeof(struct talitos_session));
+ kfree(sc->sc_sessions);
+ sc->sc_sessions = ses;
+ ses = &sc->sc_sessions[sesn];
+ sc->sc_nsessions++;
+ }
+ }
+
+ ses->ses_used = 1;
+
+ if (encini) {
+ ses->ses_klen = (encini->cri_klen + 7) / 8;
+ memcpy(ses->ses_key, encini->cri_key, ses->ses_klen);
+ if (macini) {
+ /* doing hash on top of cipher */
+ ses->ses_hmac_len = (macini->cri_klen + 7) / 8;
+ memcpy(ses->ses_hmac, macini->cri_key,
+ ses->ses_hmac_len);
+ }
+ } else if (macini) {
+ /* doing hash */
+ ses->ses_klen = (macini->cri_klen + 7) / 8;
+ memcpy(ses->ses_key, macini->cri_key, ses->ses_klen);
+ }
+
+ /* back-compat way of determining MAC result len */
+ if (macini) {
+ ses->ses_mlen = macini->cri_mlen;
+ if (ses->ses_mlen == 0) {
+ if (macini->cri_alg == CRYPTO_MD5_HMAC)
+ ses->ses_mlen = MD5_HASH_LEN;
+ else
+ ses->ses_mlen = SHA1_HASH_LEN;
+ }
+ }
+
+ /* really should make up a template td here,
+ * and only fill things like i/o and direction in process() */
+
+ /* assign session ID */
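+ /*
+ * the sid packs the chip number (sc_num) into the top nibble and
+ * the session index into the low 28 bits; talitos_process() recovers
+ * the index again via TALITOS_SESSION(crp->crp_sid)
+ */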
+ *sidp = TALITOS_SID(sc->sc_num, sesn);
+ return 0;
+}
+
+/*
+ * Deallocate a session.
+ */
+static int
+talitos_freesession(device_t dev, u_int64_t tid)
+{
+ struct talitos_softc *sc = device_get_softc(dev);
+ int session, ret;
+ u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
+
+ if (sc == NULL)
+ return EINVAL;
+ session = TALITOS_SESSION(sid);
+ if (session < sc->sc_nsessions) {
+ memset(&sc->sc_sessions[session], 0,
+ sizeof(sc->sc_sessions[session]));
+ ret = 0;
+ } else
+ ret = EINVAL;
+ return ret;
+}
+
+/*
+ * launch device processing - it will come back with done notification
+ * in the form of an interrupt and/or HDR_DONE_BITS in header
+ */
+static int
+talitos_submit(
+ struct talitos_softc *sc,
+ struct talitos_desc *td,
+ int chsel)
+{
+ u_int32_t v;
+
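+ /*
+ * map the descriptor for the device and push its bus address into
+ * the channel's fetch FIFO (TALITOS_CH_FF/FF_HI); the SEC fetches
+ * and executes the descriptor from there
+ */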
+ v = dma_map_single(NULL, td, sizeof(*td), DMA_TO_DEVICE);
+ talitos_write(sc->sc_base_addr +
+ chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF, 0);
+ talitos_write(sc->sc_base_addr +
+ chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF_HI, v);
+ return 0;
+}
+
+static int
+talitos_process(device_t dev, struct cryptop *crp, int hint)
+{
+ int i, err = 0, ivsize;
+ struct talitos_softc *sc = device_get_softc(dev);
+ struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
+ caddr_t iv;
+ struct talitos_session *ses;
+ struct talitos_desc *td;
+ unsigned long flags;
+ /* descriptor mappings */
+ int hmac_key, hmac_data, cipher_iv, cipher_key,
+ in_fifo, out_fifo, cipher_iv_out;
+ static int chsel = -1;
+ u_int32_t rand_iv[4];
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
+ return EINVAL;
+ }
+ crp->crp_etype = 0;
+ if (TALITOS_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
+ return EINVAL;
+ }
+
+ ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)];
+
+ /* enter the channel scheduler */
+ spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
+
+ /* reuse channel that already had/has requests for the required EU */
+ for (i = 0; i < sc->sc_num_channels; i++) {
+ if (sc->sc_chnlastalg[i] == crp->crp_desc->crd_alg)
+ break;
+ }
+ if (i == sc->sc_num_channels) {
+ /*
+ * this algorithm wasn't the last one used on any channel;
+ * fall back to round robin
+ * (n.b. the mask below requires sc->sc_num_channels to be a power of 2)
+ */
+ chsel = (chsel + 1) & (sc->sc_num_channels - 1);
+ } else {
+ /*
+ * channel i's last request targeted the same execution unit;
+ * reuse that channel
+ */
+ chsel = i;
+ }
+ sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg;
+
+ /* release the channel scheduler lock */
+ spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
+
+ /* acquire the selected channel fifo lock */
+ spin_lock_irqsave(&sc->sc_chnfifolock[chsel], flags);
+
+ /* find and reserve next available descriptor-cryptop pair */
+ for (i = 0; i < sc->sc_chfifo_len; i++) {
+ if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) {
+ /*
+ * ensure correct descriptor formation by
+ * avoiding inadvertently setting "optional" entries
+ * e.g. not using "optional" dptr2 for MD/HMAC descs
+ */
+ memset(&sc->sc_chnfifo[chsel][i].cf_desc,
+ 0, sizeof(*td));
+ /* reserve it with done notification request bit */
+ sc->sc_chnfifo[chsel][i].cf_desc.hdr |=
+ TALITOS_DONE_NOTIFY;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&sc->sc_chnfifolock[chsel], flags);
+
+ if (i == sc->sc_chfifo_len) {
+ /* fifo full */
+ err = ERESTART;
+ goto errout;
+ }
+
+ td = &sc->sc_chnfifo[chsel][i].cf_desc;
+ sc->sc_chnfifo[chsel][i].cf_crp = crp;
+
+ crd1 = crp->crp_desc;
+ if (crd1 == NULL) {
+ err = EINVAL;
+ goto errout;
+ }
+ crd2 = crd1->crd_next;
+ /* prevent compiler warning */
+ hmac_key = 0;
+ hmac_data = 0;
+ if (crd2 == NULL) {
+ td->hdr |= TD_TYPE_COMMON_NONSNOOP_NO_AFEU;
+ /* assign descriptor dword ptr mappings for this desc. type */
+ cipher_iv = 1;
+ cipher_key = 2;
+ in_fifo = 3;
+ cipher_iv_out = 5;
+ if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
+ crd1->crd_alg == CRYPTO_SHA1_HMAC ||
+ crd1->crd_alg == CRYPTO_SHA1 ||
+ crd1->crd_alg == CRYPTO_MD5) {
+ out_fifo = 5;
+ maccrd = crd1;
+ enccrd = NULL;
+ } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
+ crd1->crd_alg == CRYPTO_3DES_CBC ||
+ crd1->crd_alg == CRYPTO_AES_CBC ||
+ crd1->crd_alg == CRYPTO_ARC4) {
+ out_fifo = 4;
+ maccrd = NULL;
+ enccrd = crd1;
+ } else {
+ DPRINTF("UNKNOWN crd1->crd_alg %d\n", crd1->crd_alg);
+ err = EINVAL;
+ goto errout;
+ }
+ } else {
+ if (sc->sc_desc_types & TALITOS_HAS_DT_IPSEC_ESP) {
+ td->hdr |= TD_TYPE_IPSEC_ESP;
+ } else {
+ DPRINTF("unimplemented: multiple descriptor ipsec\n");
+ err = EINVAL;
+ goto errout;
+ }
+ /* assign descriptor dword ptr mappings for this desc. type */
+ hmac_key = 0;
+ hmac_data = 1;
+ cipher_iv = 2;
+ cipher_key = 3;
+ in_fifo = 4;
+ out_fifo = 5;
+ cipher_iv_out = 6;
+ if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
+ crd1->crd_alg == CRYPTO_SHA1_HMAC ||
+ crd1->crd_alg == CRYPTO_MD5 ||
+ crd1->crd_alg == CRYPTO_SHA1) &&
+ (crd2->crd_alg == CRYPTO_DES_CBC ||
+ crd2->crd_alg == CRYPTO_3DES_CBC ||
+ crd2->crd_alg == CRYPTO_AES_CBC ||
+ crd2->crd_alg == CRYPTO_ARC4) &&
+ ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
+ maccrd = crd1;
+ enccrd = crd2;
+ } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
+ crd1->crd_alg == CRYPTO_ARC4 ||
+ crd1->crd_alg == CRYPTO_3DES_CBC ||
+ crd1->crd_alg == CRYPTO_AES_CBC) &&
+ (crd2->crd_alg == CRYPTO_MD5_HMAC ||
+ crd2->crd_alg == CRYPTO_SHA1_HMAC ||
+ crd2->crd_alg == CRYPTO_MD5 ||
+ crd2->crd_alg == CRYPTO_SHA1) &&
+ (crd1->crd_flags & CRD_F_ENCRYPT)) {
+ enccrd = crd1;
+ maccrd = crd2;
+ } else {
+ /* We cannot order the SEC as requested */
+ printk("%s: cannot do the order\n",
+ device_get_nameunit(sc->sc_cdev));
+ err = EINVAL;
+ goto errout;
+ }
+ }
+ /* assign in_fifo and out_fifo based on input/output struct type */
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ /* using SKB buffers */
+ struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;
+ if (skb_shinfo(skb)->nr_frags) {
+ printk("%s: skb frags unimplemented\n",
+ device_get_nameunit(sc->sc_cdev));
+ err = EINVAL;
+ goto errout;
+ }
+ td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ td->ptr[in_fifo].len = skb->len;
+ td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ td->ptr[out_fifo].len = skb->len;
+ td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ /* using IOV buffers */
+ struct uio *uiop = (struct uio *)crp->crp_buf;
+ if (uiop->uio_iovcnt > 1) {
+ printk("%s: iov frags unimplemented\n",
+ device_get_nameunit(sc->sc_cdev));
+ err = EINVAL;
+ goto errout;
+ }
+ td->ptr[in_fifo].ptr = dma_map_single(NULL,
+ uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
+ td->ptr[in_fifo].len = crp->crp_ilen;
+ /* crp_olen is never set; always use crp_ilen */
+ td->ptr[out_fifo].ptr = dma_map_single(NULL,
+ uiop->uio_iov->iov_base,
+ crp->crp_ilen, DMA_TO_DEVICE);
+ td->ptr[out_fifo].len = crp->crp_ilen;
+ } else {
+ /* using contig buffers */
+ td->ptr[in_fifo].ptr = dma_map_single(NULL,
+ crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
+ td->ptr[in_fifo].len = crp->crp_ilen;
+ td->ptr[out_fifo].ptr = dma_map_single(NULL,
+ crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
+ td->ptr[out_fifo].len = crp->crp_ilen;
+ }
+ if (enccrd) {
+ switch (enccrd->crd_alg) {
+ case CRYPTO_3DES_CBC:
+ td->hdr |= TALITOS_MODE0_DEU_3DES;
+ /* FALLTHROUGH */
+ case CRYPTO_DES_CBC:
+ td->hdr |= TALITOS_SEL0_DEU
+ | TALITOS_MODE0_DEU_CBC;
+ if (enccrd->crd_flags & CRD_F_ENCRYPT)
+ td->hdr |= TALITOS_MODE0_DEU_ENC;
+ ivsize = 2*sizeof(u_int32_t);
+ DPRINTF("%cDES ses %d ch %d len %d\n",
+ (td->hdr & TALITOS_MODE0_DEU_3DES)?'3':'1',
+ (u32)TALITOS_SESSION(crp->crp_sid),
+ chsel, td->ptr[in_fifo].len);
+ break;
+ case CRYPTO_AES_CBC:
+ td->hdr |= TALITOS_SEL0_AESU
+ | TALITOS_MODE0_AESU_CBC;
+ if (enccrd->crd_flags & CRD_F_ENCRYPT)
+ td->hdr |= TALITOS_MODE0_AESU_ENC;
+ ivsize = 4*sizeof(u_int32_t);
+ DPRINTF("AES ses %d ch %d len %d\n",
+ (u32)TALITOS_SESSION(crp->crp_sid),
+ chsel, td->ptr[in_fifo].len);
+ break;
+ default:
+ printk("%s: unimplemented enccrd->crd_alg %d\n",
+ device_get_nameunit(sc->sc_cdev), enccrd->crd_alg);
+ err = EINVAL;
+ goto errout;
+ }
+ /*
+ * Setup encrypt/decrypt state. When using basic ops
+ * we can't use an inline IV because hash/crypt offset
+ * must be from the end of the IV to the start of the
+ * crypt data and this leaves out the preceding header
+ * from the hash calculation. Instead we place the IV
+ * in the state record and set the hash/crypt offset to
+ * copy both the header+IV.
+ */
+ if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+ td->hdr |= TALITOS_DIR_OUTBOUND;
+ if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+ iv = enccrd->crd_iv;
+ else
+ read_random((iv = (caddr_t) rand_iv), sizeof(rand_iv));
+ if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ enccrd->crd_inject, ivsize, iv);
+ }
+ } else {
+ td->hdr |= TALITOS_DIR_INBOUND;
+ if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
+ iv = enccrd->crd_iv;
+ } else {
+ iv = (caddr_t) rand_iv;
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ enccrd->crd_inject, ivsize, iv);
+ }
+ }
+ td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize,
+ DMA_TO_DEVICE);
+ td->ptr[cipher_iv].len = ivsize;
+ /*
+ * we don't need the cipher iv out length/pointer field for ESP
+ * IPsec, so we set the len field to 0, which tells the SEC to
+ * ignore this len/ptr field. Previously, when the length/pointer
+ * pointed at the iv, we saw packet corruption.
+ */
+ td->ptr[cipher_iv_out].len = 0;
+ }
+ if (enccrd && maccrd) {
+ /* this is ipsec only for now */
+ td->hdr |= TALITOS_SEL1_MDEU
+ | TALITOS_MODE1_MDEU_INIT
+ | TALITOS_MODE1_MDEU_PAD;
+ switch (maccrd->crd_alg) {
+ case CRYPTO_MD5:
+ td->hdr |= TALITOS_MODE1_MDEU_MD5;
+ break;
+ case CRYPTO_MD5_HMAC:
+ td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC;
+ break;
+ case CRYPTO_SHA1:
+ td->hdr |= TALITOS_MODE1_MDEU_SHA1;
+ break;
+ case CRYPTO_SHA1_HMAC:
+ td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC;
+ break;
+ default:
+ /* We cannot order the SEC as requested */
+ printk("%s: cannot do the order\n",
+ device_get_nameunit(sc->sc_cdev));
+ err = EINVAL;
+ goto errout;
+ }
+ if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
+ (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
+ /*
+ * The offset from hash data to the start of
+ * crypt data is the difference in the skips.
+ */
+ /* ipsec only for now */
+ td->ptr[hmac_key].ptr = dma_map_single(NULL,
+ ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE);
+ td->ptr[hmac_key].len = ses->ses_hmac_len;
+ td->ptr[in_fifo].ptr += enccrd->crd_skip;
+ td->ptr[in_fifo].len = enccrd->crd_len;
+ td->ptr[out_fifo].ptr += enccrd->crd_skip;
+ td->ptr[out_fifo].len = enccrd->crd_len;
+ /* bytes of HMAC to append to the ciphertext */
+ td->ptr[out_fifo].extent = ses->ses_mlen;
+ td->ptr[hmac_data].ptr += maccrd->crd_skip;
+ td->ptr[hmac_data].len = enccrd->crd_skip - maccrd->crd_skip;
+ }
+ if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
+ printk("%s: CRD_F_KEY_EXPLICIT unimplemented\n",
+ device_get_nameunit(sc->sc_cdev));
+ }
+ }
+ if (!enccrd && maccrd) {
+ /* single MD5 or SHA */
+ td->hdr |= TALITOS_SEL0_MDEU
+ | TALITOS_MODE0_MDEU_INIT
+ | TALITOS_MODE0_MDEU_PAD;
+ switch (maccrd->crd_alg) {
+ case CRYPTO_MD5:
+ td->hdr |= TALITOS_MODE0_MDEU_MD5;
+ DPRINTF("MD5 ses %d ch %d len %d\n",
+ (u32)TALITOS_SESSION(crp->crp_sid),
+ chsel, td->ptr[in_fifo].len);
+ break;
+ case CRYPTO_MD5_HMAC:
+ td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC;
+ break;
+ case CRYPTO_SHA1:
+ td->hdr |= TALITOS_MODE0_MDEU_SHA1;
+ DPRINTF("SHA1 ses %d ch %d len %d\n",
+ (u32)TALITOS_SESSION(crp->crp_sid),
+ chsel, td->ptr[in_fifo].len);
+ break;
+ case CRYPTO_SHA1_HMAC:
+ td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC;
+ break;
+ default:
+ /* We cannot order the SEC as requested */
+ DPRINTF("cannot do the order\n");
+ err = EINVAL;
+ goto errout;
+ }
+
+ if (crp->crp_flags & CRYPTO_F_IOV)
+ td->ptr[out_fifo].ptr += maccrd->crd_inject;
+
+ if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
+ (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
+ td->ptr[hmac_key].ptr = dma_map_single(NULL,
+ ses->ses_hmac, ses->ses_hmac_len,
+ DMA_TO_DEVICE);
+ td->ptr[hmac_key].len = ses->ses_hmac_len;
+ }
+ } else {
+ /* using process key (session data has duplicate) */
+ td->ptr[cipher_key].ptr = dma_map_single(NULL,
+ enccrd->crd_key, (enccrd->crd_klen + 7) / 8,
+ DMA_TO_DEVICE);
+ td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8;
+ }
+ /* descriptor complete - GO! */
+ return talitos_submit(sc, td, chsel);
+
+errout:
+ if (err != ERESTART) {
+ crp->crp_etype = err;
+ crypto_done(crp);
+ }
+ return err;
+}
+
+/* go through all channels' descriptors, notify OCF of what has
+ * _and_hasn't_ successfully completed, and reset the device
+ * (otherwise recovery is left to decoding desc hdrs by hand!)
+ */
+static void talitos_errorprocessing(struct talitos_softc *sc)
+{
+ unsigned long flags;
+ int i, j;
+
+ /* disable further scheduling until under control */
+ spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
+
+ if (debug) dump_talitos_status(sc);
+ /* go through descriptors, try and salvage those successfully done,
+ * and EIO those that weren't
+ */
+ for (i = 0; i < sc->sc_num_channels; i++) {
+ spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
+ for (j = 0; j < sc->sc_chfifo_len; j++) {
+ if (sc->sc_chnfifo[i][j].cf_desc.hdr) {
+ if ((sc->sc_chnfifo[i][j].cf_desc.hdr
+ & TALITOS_HDR_DONE_BITS)
+ != TALITOS_HDR_DONE_BITS) {
+ /* this one didn't finish */
+ /* signify in crp->etype */
+ sc->sc_chnfifo[i][j].cf_crp->crp_etype
+ = EIO;
+ }
+ } else
+ continue; /* free entry */
+ /* either way, notify ocf */
+ crypto_done(sc->sc_chnfifo[i][j].cf_crp);
+ /* and tag it available again
+ *
+ * memset to ensure correct descriptor formation by
+ * avoiding inadvertently setting "optional" entries
+ * e.g. not using "optional" dptr2 MD/HMAC processing
+ */
+ memset(&sc->sc_chnfifo[i][j].cf_desc,
+ 0, sizeof(struct talitos_desc));
+ }
+ spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
+ }
+ /* reset and initialize the SEC h/w device */
+ talitos_reset_device(sc);
+ talitos_init_device(sc);
+#ifdef CONFIG_OCF_RANDOMHARVEST
+ if (sc->sc_exec_units & TALITOS_HAS_EU_RNG)
+ talitos_rng_init(sc);
+#endif
+
+ /* Okay. Stand by. */
+ spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
+
+ return;
+}
+
+/* go through all channels' descriptors, notifying OCF of what's been done */
+static void talitos_doneprocessing(struct talitos_softc *sc)
+{
+ unsigned long flags;
+ int i, j;
+
+ /* go through descriptors looking for done bits */
+ for (i = 0; i < sc->sc_num_channels; i++) {
+ spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
+ for (j = 0; j < sc->sc_chfifo_len; j++) {
+ /* descriptor has done bits set? */
+ if ((sc->sc_chnfifo[i][j].cf_desc.hdr
+ & TALITOS_HDR_DONE_BITS)
+ == TALITOS_HDR_DONE_BITS) {
+ /* notify ocf */
+ crypto_done(sc->sc_chnfifo[i][j].cf_crp);
+ /* and tag it available again
+ *
+ * memset to ensure correct descriptor formation by
+ * avoiding inadvertently setting "optional" entries
+ * e.g. not using "optional" dptr2 for MD/HMAC processing
+ */
+ memset(&sc->sc_chnfifo[i][j].cf_desc,
+ 0, sizeof(struct talitos_desc));
+ }
+ }
+ spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
+ }
+ return;
+}
+
+static irqreturn_t
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+talitos_intr(int irq, void *arg)
+#else
+talitos_intr(int irq, void *arg, struct pt_regs *regs)
+#endif
+{
+ struct talitos_softc *sc = arg;
+ u_int32_t v, v_hi;
+
+ /* ack */
+ v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
+ v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
+ talitos_write(sc->sc_base_addr + TALITOS_ICR, v);
+ talitos_write(sc->sc_base_addr + TALITOS_ICR_HI, v_hi);
+
+ if (unlikely(v & TALITOS_ISR_ERROR)) {
+ /* Okay, Houston, we've had a problem here. */
+ printk(KERN_DEBUG "%s: got error interrupt - ISR 0x%08x_%08x\n",
+ device_get_nameunit(sc->sc_cdev), v, v_hi);
+ talitos_errorprocessing(sc);
+ } else if (likely(v & TALITOS_ISR_DONE)) {
+ talitos_doneprocessing(sc);
+ }
+ return IRQ_HANDLED;
+}
+
+/*
+ * Initialize registers we need to touch only once.
+ */
+static void
+talitos_init_device(struct talitos_softc *sc)
+{
+ u_int32_t v;
+ int i;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ /* init all channels */
+ for (i = 0; i < sc->sc_num_channels; i++) {
+ v = talitos_read(sc->sc_base_addr +
+ i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI);
+ v |= TALITOS_CH_CCCR_HI_CDWE
+ | TALITOS_CH_CCCR_HI_CDIE; /* invoke interrupt if done */
+ talitos_write(sc->sc_base_addr +
+ i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI, v);
+ }
+ /* enable all interrupts */
+ v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
+ v |= TALITOS_IMR_ALL;
+ talitos_write(sc->sc_base_addr + TALITOS_IMR, v);
+ v = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
+ v |= TALITOS_IMR_HI_ERRONLY;
+ talitos_write(sc->sc_base_addr + TALITOS_IMR_HI, v);
+ return;
+}
+
+/*
+ * set the master reset bit on the device.
+ */
+static void
+talitos_reset_device_master(struct talitos_softc *sc)
+{
+ u_int32_t v;
+
+ /* Reset the device by writing 1 to MCR:SWR and waiting 'til cleared */
+ v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
+ talitos_write(sc->sc_base_addr + TALITOS_MCR, v | TALITOS_MCR_SWR);
+
+ while (talitos_read(sc->sc_base_addr + TALITOS_MCR) & TALITOS_MCR_SWR)
+ cpu_relax();
+
+ return;
+}
+
+/*
+ * Resets the device. Register values are left at their reset
+ * defaults (initial values are assigned elsewhere, by talitos_init_device()).
+ */
+static void
+talitos_reset_device(struct talitos_softc *sc)
+{
+ u_int32_t v;
+ int i;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ /*
+ * Master reset
+ * per the errata documentation, certain SEC interrupts are not
+ * fully cleared by writing the MCR:SWR bit once; set the bit
+ * twice to completely reset the device
+ */
+ talitos_reset_device_master(sc); /* once */
+ talitos_reset_device_master(sc); /* and once again */
+
+ /* reset all channels */
+ for (i = 0; i < sc->sc_num_channels; i++) {
+ v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+ TALITOS_CH_CCCR);
+ talitos_write(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+ TALITOS_CH_CCCR, v | TALITOS_CH_CCCR_RESET);
+ }
+}
+
+/* Set up the crypto device structure, private data,
+ * and anything else we need before we start */
+#ifdef CONFIG_PPC_MERGE
+static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match)
+#else
+static int talitos_probe(struct platform_device *pdev)
+#endif
+{
+ struct talitos_softc *sc = NULL;
+ struct resource *r;
+#ifdef CONFIG_PPC_MERGE
+ struct device *device = &ofdev->dev;
+ struct device_node *np = ofdev->node;
+ const unsigned int *prop;
+ int err;
+ struct resource res;
+#endif
+ static int num_chips = 0;
+ int rc;
+ int i;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+
+ sc = (struct talitos_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
+ if (!sc)
+ return -ENOMEM;
+ memset(sc, 0, sizeof(*sc));
+
+ softc_device_init(sc, DRV_NAME, num_chips, talitos_methods);
+
+ sc->sc_irq = -1;
+ sc->sc_cid = -1;
+#ifndef CONFIG_PPC_MERGE
+ sc->sc_dev = pdev;
+#endif
+ sc->sc_num = num_chips++;
+
+#ifdef CONFIG_PPC_MERGE
+ dev_set_drvdata(device, sc);
+#else
+ platform_set_drvdata(sc->sc_dev, sc);
+#endif
+
+ /* get the irq line */
+#ifdef CONFIG_PPC_MERGE
+ err = of_address_to_resource(np, 0, &res);
+ if (err)
+ return -EINVAL;
+ r = &res;
+
+ sc->sc_irq = irq_of_parse_and_map(np, 0);
+#else
+ /* get a pointer to the register memory */
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ sc->sc_irq = platform_get_irq(pdev, 0);
+#endif
+ rc = request_irq(sc->sc_irq, talitos_intr, 0,
+ device_get_nameunit(sc->sc_cdev), sc);
+ if (rc) {
+ printk(KERN_ERR "%s: failed to hook irq %d\n",
+ device_get_nameunit(sc->sc_cdev), sc->sc_irq);
+ sc->sc_irq = -1;
+ goto out;
+ }
+
+ sc->sc_base_addr = (ocf_iomem_t) ioremap(r->start, (r->end - r->start));
+ if (!sc->sc_base_addr) {
+ printk(KERN_ERR "%s: failed to ioremap\n",
+ device_get_nameunit(sc->sc_cdev));
+ goto out;
+ }
+
+ /* figure out our SEC's properties and capabilities */
+ sc->sc_chiprev = (u64)talitos_read(sc->sc_base_addr + TALITOS_ID) << 32
+ | talitos_read(sc->sc_base_addr + TALITOS_ID_HI);
+ DPRINTF("sec id 0x%llx\n", sc->sc_chiprev);
+
+#ifdef CONFIG_PPC_MERGE
+ /* get SEC properties from device tree, defaulting to SEC 2.0 */
+
+ prop = of_get_property(np, "num-channels", NULL);
+ sc->sc_num_channels = prop ? *prop : TALITOS_NCHANNELS_SEC_2_0;
+
+ prop = of_get_property(np, "channel-fifo-len", NULL);
+ sc->sc_chfifo_len = prop ? *prop : TALITOS_CHFIFOLEN_SEC_2_0;
+
+ prop = of_get_property(np, "exec-units-mask", NULL);
+ sc->sc_exec_units = prop ? *prop : TALITOS_HAS_EUS_SEC_2_0;
+
+ prop = of_get_property(np, "descriptor-types-mask", NULL);
+ sc->sc_desc_types = prop ? *prop : TALITOS_HAS_DESCTYPES_SEC_2_0;
+#else
+ /* the bulk of this should go away with openfirmware flat device tree support */
+ if (sc->sc_chiprev & TALITOS_ID_SEC_2_0) {
+ sc->sc_num_channels = TALITOS_NCHANNELS_SEC_2_0;
+ sc->sc_chfifo_len = TALITOS_CHFIFOLEN_SEC_2_0;
+ sc->sc_exec_units = TALITOS_HAS_EUS_SEC_2_0;
+ sc->sc_desc_types = TALITOS_HAS_DESCTYPES_SEC_2_0;
+ } else {
+ printk(KERN_ERR "%s: failed to id device\n",
+ device_get_nameunit(sc->sc_cdev));
+ goto out;
+ }
+#endif
+
+ /* + 1 is for the meta-channel lock used by the channel scheduler */
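+ /* (talitos_process() takes sc_chnfifolock[sc_num_channels] while
+ * choosing a channel, and the per-channel locks while touching fifos) */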
+ sc->sc_chnfifolock = (spinlock_t *) kmalloc(
+ (sc->sc_num_channels + 1) * sizeof(spinlock_t), GFP_KERNEL);
+ if (!sc->sc_chnfifolock)
+ goto out;
+ for (i = 0; i < sc->sc_num_channels + 1; i++) {
+ spin_lock_init(&sc->sc_chnfifolock[i]);
+ }
+
+ sc->sc_chnlastalg = (int *) kmalloc(
+ sc->sc_num_channels * sizeof(int), GFP_KERNEL);
+ if (!sc->sc_chnlastalg)
+ goto out;
+ memset(sc->sc_chnlastalg, 0, sc->sc_num_channels * sizeof(int));
+
+ sc->sc_chnfifo = (struct desc_cryptop_pair **) kmalloc(
+ sc->sc_num_channels * sizeof(struct desc_cryptop_pair *),
+ GFP_KERNEL);
+ if (!sc->sc_chnfifo)
+ goto out;
+ for (i = 0; i < sc->sc_num_channels; i++) {
+ sc->sc_chnfifo[i] = (struct desc_cryptop_pair *) kmalloc(
+ sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair),
+ GFP_KERNEL);
+ if (!sc->sc_chnfifo[i])
+ goto out;
+ memset(sc->sc_chnfifo[i], 0,
+ sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair));
+ }
+
+ /* reset and initialize the SEC h/w device */
+ talitos_reset_device(sc);
+ talitos_init_device(sc);
+
+ sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
+ if (sc->sc_cid < 0) {
+ printk(KERN_ERR "%s: could not get crypto driver id\n",
+ device_get_nameunit(sc->sc_cdev));
+ goto out;
+ }
+
+ /* register algorithms with the framework */
+ printk("%s:", device_get_nameunit(sc->sc_cdev));
+
+ if (sc->sc_exec_units & TALITOS_HAS_EU_RNG) {
+ printk(" rng");
+#ifdef CONFIG_OCF_RANDOMHARVEST
+ talitos_rng_init(sc);
+ crypto_rregister(sc->sc_cid, talitos_read_random, sc);
+#endif
+ }
+ if (sc->sc_exec_units & TALITOS_HAS_EU_DEU) {
+ printk(" des/3des");
+ crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
+ }
+ if (sc->sc_exec_units & TALITOS_HAS_EU_AESU) {
+ printk(" aes");
+ crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
+ }
+ if (sc->sc_exec_units & TALITOS_HAS_EU_MDEU) {
+ printk(" md5");
+ crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
+ /* HMAC support only with IPsec for now */
+ crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
+ printk(" sha1");
+ crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
+ /* HMAC support only with IPsec for now */
+ crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
+ }
+ printk("\n");
+ return 0;
+
+out:
+#ifndef CONFIG_PPC_MERGE
+ talitos_remove(pdev);
+#endif
+ return -ENOMEM;
+}
+
+#ifdef CONFIG_PPC_MERGE
+static int talitos_remove(struct of_device *ofdev)
+#else
+static int talitos_remove(struct platform_device *pdev)
+#endif
+{
+#ifdef CONFIG_PPC_MERGE
+ struct talitos_softc *sc = dev_get_drvdata(&ofdev->dev);
+#else
+ struct talitos_softc *sc = platform_get_drvdata(pdev);
+#endif
+ int i;
+
+ DPRINTF("%s()\n", __FUNCTION__);
+ if (sc->sc_cid >= 0)
+ crypto_unregister_all(sc->sc_cid);
+ if (sc->sc_chnfifo) {
+ for (i = 0; i < sc->sc_num_channels; i++)
+ if (sc->sc_chnfifo[i])
+ kfree(sc->sc_chnfifo[i]);
+ kfree(sc->sc_chnfifo);
+ }
+ if (sc->sc_chnlastalg)
+ kfree(sc->sc_chnlastalg);
+ if (sc->sc_chnfifolock)
+ kfree(sc->sc_chnfifolock);
+ if (sc->sc_irq != -1)
+ free_irq(sc->sc_irq, sc);
+ if (sc->sc_base_addr)
+ iounmap((void *) sc->sc_base_addr);
+ kfree(sc);
+ return 0;
+}
+
+#ifdef CONFIG_PPC_MERGE
+static struct of_device_id talitos_match[] = {
+ {
+ .type = "crypto",
+ .compatible = "talitos",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, talitos_match);
+
+static struct of_platform_driver talitos_driver = {
+ .name = DRV_NAME,
+ .match_table = talitos_match,
+ .probe = talitos_probe,
+ .remove = talitos_remove,
+};
+
+static int __init talitos_init(void)
+{
+ return of_register_platform_driver(&talitos_driver);
+}
+
+static void __exit talitos_exit(void)
+{
+ of_unregister_platform_driver(&talitos_driver);
+}
+#else
+/* Structure for a platform device driver */
+static struct platform_driver talitos_driver = {
+ .probe = talitos_probe,
+ .remove = talitos_remove,
+ .driver = {
+ .name = "fsl-sec2",
+ }
+};
+
+static int __init talitos_init(void)
+{
+ return platform_driver_register(&talitos_driver);
+}
+
+static void __exit talitos_exit(void)
+{
+ platform_driver_unregister(&talitos_driver);
+}
+#endif
+
+module_init(talitos_init);
+module_exit(talitos_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("kim.phillips@freescale.com");
+MODULE_DESCRIPTION("OCF driver for Freescale SEC (talitos)");
diff --git a/target/linux/generic/files/crypto/ocf/talitos/talitos_dev.h b/target/linux/generic/files/crypto/ocf/talitos/talitos_dev.h
new file mode 100644
index 000000000..a8b04799f
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/talitos/talitos_dev.h
@@ -0,0 +1,277 @@
+/*
+ * Freescale SEC (talitos) device dependent data structures
+ *
+ * Copyright (c) 2006 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* device ID register values */
+#define TALITOS_ID_SEC_2_0 0x40
+#define TALITOS_ID_SEC_2_1 0x40 /* cross ref with IP block revision reg */
+
+/*
+ * following num_channels, channel-fifo-depth, exec-unit-mask, and
+ * descriptor-types-mask are for forward-compatibility with openfirmware
+ * flat device trees
+ */
+
+/*
+ * num_channels : the number of channels available in each SEC version.
+ */
+
+/* n.b. this driver requires these values be a power of 2 */
+#define TALITOS_NCHANNELS_SEC_1_0 4
+#define TALITOS_NCHANNELS_SEC_1_2 1
+#define TALITOS_NCHANNELS_SEC_2_0 4
+#define TALITOS_NCHANNELS_SEC_2_01 4
+#define TALITOS_NCHANNELS_SEC_2_1 4
+#define TALITOS_NCHANNELS_SEC_2_4 4
+
+/*
+ * channel-fifo-depth : The number of descriptor
+ * pointers a channel fetch fifo can hold.
+ */
+#define TALITOS_CHFIFOLEN_SEC_1_0 1
+#define TALITOS_CHFIFOLEN_SEC_1_2 1
+#define TALITOS_CHFIFOLEN_SEC_2_0 24
+#define TALITOS_CHFIFOLEN_SEC_2_01 24
+#define TALITOS_CHFIFOLEN_SEC_2_1 24
+#define TALITOS_CHFIFOLEN_SEC_2_4 24
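+/*
+ * n.b. talitos_probe() allocates sc_chfifo_len desc_cryptop_pair entries
+ * per channel from these depths (or the device tree property)
+ */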
+
+/*
+ * exec-unit-mask : The bitmask representing what Execution Units (EUs)
+ * are available. EU information should be encoded following the SEC's
+ * EU_SEL0 bitfield documentation, i.e. as follows:
+ *
+ * bit 31 = set if SEC permits no-EU selection (should be always set)
+ * bit 30 = set if SEC has the ARC4 EU (AFEU)
+ * bit 29 = set if SEC has the des/3des EU (DEU)
+ * bit 28 = set if SEC has the message digest EU (MDEU)
+ * bit 27 = set if SEC has the random number generator EU (RNG)
+ * bit 26 = set if SEC has the public key EU (PKEU)
+ * bit 25 = set if SEC has the aes EU (AESU)
+ * bit 24 = set if SEC has the Kasumi EU (KEU)
+ *
+ */
+#define TALITOS_HAS_EU_NONE (1<<0)
+#define TALITOS_HAS_EU_AFEU (1<<1)
+#define TALITOS_HAS_EU_DEU (1<<2)
+#define TALITOS_HAS_EU_MDEU (1<<3)
+#define TALITOS_HAS_EU_RNG (1<<4)
+#define TALITOS_HAS_EU_PKEU (1<<5)
+#define TALITOS_HAS_EU_AESU (1<<6)
+#define TALITOS_HAS_EU_KEU (1<<7)
+
+/* the corresponding masks for each SEC version */
+#define TALITOS_HAS_EUS_SEC_1_0 0x7f
+#define TALITOS_HAS_EUS_SEC_1_2 0x4d
+#define TALITOS_HAS_EUS_SEC_2_0 0x7f
+#define TALITOS_HAS_EUS_SEC_2_01 0x7f
+#define TALITOS_HAS_EUS_SEC_2_1 0xff
+#define TALITOS_HAS_EUS_SEC_2_4 0x7f
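+/*
+ * e.g. per the TALITOS_HAS_EU_* bits above, 0x7f covers every execution
+ * unit except the Kasumi EU; SEC 2.1's 0xff adds the KEU
+ */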
+
+/*
+ * descriptor-types-mask : The bitmask representing what descriptors
+ * are available. Descriptor type information should be encoded
+ * following the SEC's Descriptor Header Dword DESC_TYPE field
+ * documentation, i.e. as follows:
+ *
+ * bit 0 = set if SEC supports the aesu_ctr_nonsnoop desc. type
+ * bit 1 = set if SEC supports the ipsec_esp descriptor type
+ * bit 2 = set if SEC supports the common_nonsnoop desc. type
+ * bit 3 = set if SEC supports the 802.11i AES ccmp desc. type
+ * bit 4 = set if SEC supports the hmac_snoop_no_afeu desc. type
+ * bit 5 = set if SEC supports the srtp descriptor type
+ * bit 6 = set if SEC supports the non_hmac_snoop_no_afeu desc.type
+ * bit 7 = set if SEC supports the pkeu_assemble descriptor type
+ * bit 8 = set if SEC supports the aesu_key_expand_output desc.type
+ * bit 9 = set if SEC supports the pkeu_ptmul descriptor type
+ * bit 10 = set if SEC supports the common_nonsnoop_afeu desc. type
+ * bit 11 = set if SEC supports the pkeu_ptadd_dbl descriptor type
+ *
+ * ..and so on and so forth.
+ */
+#define TALITOS_HAS_DT_AESU_CTR_NONSNOOP (1<<0)
+#define TALITOS_HAS_DT_IPSEC_ESP (1<<1)
+#define TALITOS_HAS_DT_COMMON_NONSNOOP (1<<2)
+
+/* the corresponding masks for each SEC version */
+#define TALITOS_HAS_DESCTYPES_SEC_2_0 0x01010ebf
+#define TALITOS_HAS_DESCTYPES_SEC_2_1 0x012b0ebf
+
+/*
+ * a TALITOS_xxx_HI address points to the low data bits (32-63) of the register
+ */
+
+/* global register offset addresses */
+#define TALITOS_ID 0x1020
+#define TALITOS_ID_HI 0x1024
+#define TALITOS_MCR 0x1030 /* master control register */
+#define TALITOS_MCR_HI 0x1038 /* master control register */
+#define TALITOS_MCR_SWR 0x1
+#define TALITOS_IMR 0x1008 /* interrupt mask register */
+#define TALITOS_IMR_ALL 0x00010fff /* enable all interrupts mask */
+#define TALITOS_IMR_ERRONLY 0x00010aaa /* enable error interrupts */
+#define TALITOS_IMR_HI 0x100C /* interrupt mask register */
+#define TALITOS_IMR_HI_ALL 0x00323333 /* enable all interrupts mask */
+#define TALITOS_IMR_HI_ERRONLY 0x00222222 /* enable error interrupts */
+#define TALITOS_ISR 0x1010 /* interrupt status register */
+#define TALITOS_ISR_ERROR 0x00010faa /* errors mask */
+#define TALITOS_ISR_DONE 0x00000055 /* channel(s) done mask */
+#define TALITOS_ISR_HI 0x1014 /* interrupt status register */
+#define TALITOS_ICR 0x1018 /* interrupt clear register */
+#define TALITOS_ICR_HI 0x101C /* interrupt clear register */
+
+/* channel register address stride */
+#define TALITOS_CH_OFFSET 0x100
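+/*
+ * channel n's registers sit at sc_base_addr + n*TALITOS_CH_OFFSET + reg,
+ * e.g. channel 1's CCCR is at 0x1108 + 0x100 = 0x1208
+ */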
+
+/* channel register offset addresses and bits */
+#define TALITOS_CH_CCCR 0x1108 /* Crypto-Channel Config Register */
+#define TALITOS_CH_CCCR_RESET 0x1 /* Channel Reset bit */
+#define TALITOS_CH_CCCR_HI 0x110c /* Crypto-Channel Config Register */
+#define TALITOS_CH_CCCR_HI_CDWE 0x10 /* Channel done writeback enable bit */
+#define TALITOS_CH_CCCR_HI_NT 0x4 /* Notification type bit */
+#define TALITOS_CH_CCCR_HI_CDIE 0x2 /* Channel Done Interrupt Enable bit */
+#define TALITOS_CH_CCPSR 0x1110 /* Crypto-Channel Pointer Status Reg */
+#define TALITOS_CH_CCPSR_HI 0x1114 /* Crypto-Channel Pointer Status Reg */
+#define TALITOS_CH_FF 0x1148 /* Fetch FIFO */
+#define TALITOS_CH_FF_HI 0x114c /* Fetch FIFO's FETCH_ADRS */
+#define TALITOS_CH_CDPR 0x1140 /* Crypto-Channel Current Descriptor Ptr Reg */
+#define TALITOS_CH_CDPR_HI 0x1144 /* Crypto-Channel Current Descriptor Ptr Reg */
+#define TALITOS_CH_DESCBUF 0x1180 /* (thru 11bf) Crypto-Channel
+ * Descriptor Buffer (debug) */
+
+/* execution unit register offset addresses and bits */
+#define TALITOS_DEUSR 0x2028 /* DEU status register */
+#define TALITOS_DEUSR_HI 0x202c /* DEU status register */
+#define TALITOS_DEUISR 0x2030 /* DEU interrupt status register */
+#define TALITOS_DEUISR_HI 0x2034 /* DEU interrupt status register */
+#define TALITOS_DEUICR 0x2038 /* DEU interrupt control register */
+#define TALITOS_DEUICR_HI 0x203c /* DEU interrupt control register */
+#define TALITOS_AESUISR 0x4030 /* AESU interrupt status register */
+#define TALITOS_AESUISR_HI 0x4034 /* AESU interrupt status register */
+#define TALITOS_AESUICR 0x4038 /* AESU interrupt control register */
+#define TALITOS_AESUICR_HI 0x403c /* AESU interrupt control register */
+#define TALITOS_MDEUISR 0x6030 /* MDEU interrupt status register */
+#define TALITOS_MDEUISR_HI 0x6034 /* MDEU interrupt status register */
+#define TALITOS_RNGSR 0xa028 /* RNG status register */
+#define TALITOS_RNGSR_HI 0xa02c /* RNG status register */
+#define TALITOS_RNGSR_HI_RD 0x1 /* RNG Reset done */
+#define TALITOS_RNGSR_HI_OFL 0xff0000/* number of dwords in RNG output FIFO*/
+#define TALITOS_RNGDSR 0xa010 /* RNG data size register */
+#define TALITOS_RNGDSR_HI 0xa014 /* RNG data size register */
+#define TALITOS_RNG_FIFO 0xa800 /* RNG FIFO - pool of random numbers */
+#define TALITOS_RNGISR 0xa030 /* RNG Interrupt status register */
+#define TALITOS_RNGISR_HI 0xa034 /* RNG Interrupt status register */
+#define TALITOS_RNGRCR 0xa018 /* RNG Reset control register */
+#define TALITOS_RNGRCR_HI 0xa01c /* RNG Reset control register */
+#define TALITOS_RNGRCR_HI_SR 0x1 /* RNG RNGRCR:Software Reset */
+
+/* descriptor pointer entry */
+struct talitos_desc_ptr {
+ u16 len; /* length */
+ u8 extent; /* jump (to s/g link table) and extent */
+ u8 res; /* reserved */
+ u32 ptr; /* pointer */
+};
+
+/* descriptor */
+struct talitos_desc {
+ u32 hdr; /* header */
+ u32 res; /* reserved */
+ struct talitos_desc_ptr ptr[7]; /* ptr/len pair array */
+};
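+/*
+ * the seven ptr/len entries are addressed by index from talitos_process()
+ * (hmac_key, hmac_data, cipher_iv, cipher_key, in_fifo, out_fifo,
+ * cipher_iv_out), with the meaning of each slot depending on the
+ * descriptor type selected in hdr
+ */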
+
+/* talitos descriptor header (hdr) bits */
+
+/* primary execution unit select */
+#define TALITOS_SEL0_AFEU 0x10000000
+#define TALITOS_SEL0_DEU 0x20000000
+#define TALITOS_SEL0_MDEU 0x30000000
+#define TALITOS_SEL0_RNG 0x40000000
+#define TALITOS_SEL0_PKEU 0x50000000
+#define TALITOS_SEL0_AESU 0x60000000
+
+/* primary execution unit mode (MODE0) and derivatives */
+#define TALITOS_MODE0_AESU_CBC 0x00200000
+#define TALITOS_MODE0_AESU_ENC 0x00100000
+#define TALITOS_MODE0_DEU_CBC 0x00400000
+#define TALITOS_MODE0_DEU_3DES 0x00200000
+#define TALITOS_MODE0_DEU_ENC 0x00100000
+#define TALITOS_MODE0_MDEU_INIT 0x01000000 /* init starting regs */
+#define TALITOS_MODE0_MDEU_HMAC 0x00800000
+#define TALITOS_MODE0_MDEU_PAD 0x00400000 /* PD */
+#define TALITOS_MODE0_MDEU_MD5 0x00200000
+#define TALITOS_MODE0_MDEU_SHA256 0x00100000
+#define TALITOS_MODE0_MDEU_SHA1 0x00000000 /* SHA-160 */
+#define TALITOS_MODE0_MDEU_MD5_HMAC \
+ (TALITOS_MODE0_MDEU_MD5 | TALITOS_MODE0_MDEU_HMAC)
+#define TALITOS_MODE0_MDEU_SHA256_HMAC \
+ (TALITOS_MODE0_MDEU_SHA256 | TALITOS_MODE0_MDEU_HMAC)
+#define TALITOS_MODE0_MDEU_SHA1_HMAC \
+ (TALITOS_MODE0_MDEU_SHA1 | TALITOS_MODE0_MDEU_HMAC)
+
+/* secondary execution unit select (SEL1) */
+/* it's MDEU or nothing */
+#define TALITOS_SEL1_MDEU 0x00030000
+
+/* secondary execution unit mode (MODE1) and derivatives */
+#define TALITOS_MODE1_MDEU_INIT 0x00001000 /* init starting regs */
+#define TALITOS_MODE1_MDEU_HMAC 0x00000800
+#define TALITOS_MODE1_MDEU_PAD 0x00000400 /* PD */
+#define TALITOS_MODE1_MDEU_MD5 0x00000200
+#define TALITOS_MODE1_MDEU_SHA256 0x00000100
+#define TALITOS_MODE1_MDEU_SHA1 0x00000000 /* SHA-160 */
+#define TALITOS_MODE1_MDEU_MD5_HMAC \
+ (TALITOS_MODE1_MDEU_MD5 | TALITOS_MODE1_MDEU_HMAC)
+#define TALITOS_MODE1_MDEU_SHA256_HMAC \
+ (TALITOS_MODE1_MDEU_SHA256 | TALITOS_MODE1_MDEU_HMAC)
+#define TALITOS_MODE1_MDEU_SHA1_HMAC \
+ (TALITOS_MODE1_MDEU_SHA1 | TALITOS_MODE1_MDEU_HMAC)
+
+/* direction of overall data flow (DIR) */
+#define TALITOS_DIR_OUTBOUND 0x00000000
+#define TALITOS_DIR_INBOUND 0x00000002
+
+/* done notification (DN) */
+#define TALITOS_DONE_NOTIFY 0x00000001
+
+/* descriptor types */
+/* odd numbers here are valid on SEC2 and greater only (e.g. ipsec_esp) */
+#define TD_TYPE_AESU_CTR_NONSNOOP (0 << 3)
+#define TD_TYPE_IPSEC_ESP (1 << 3)
+#define TD_TYPE_COMMON_NONSNOOP_NO_AFEU (2 << 3)
+#define TD_TYPE_HMAC_SNOOP_NO_AFEU (4 << 3)
+
+#define TALITOS_HDR_DONE_BITS 0xff000000
+
+#define DPRINTF(a...) do { \
+ if (debug) { \
+ printk("%s: ", sc ? \
+ device_get_nameunit(sc->sc_cdev) : "talitos"); \
+ printk(a); \
+ } \
+ } while (0)
diff --git a/target/linux/generic/files/crypto/ocf/talitos/talitos_soft.h b/target/linux/generic/files/crypto/ocf/talitos/talitos_soft.h
new file mode 100644
index 000000000..eda9c2efe
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/talitos/talitos_soft.h
@@ -0,0 +1,76 @@
+/*
+ * Freescale SEC data structures for integration with ocf-linux
+ *
+ * Copyright (c) 2006 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * paired descriptor and associated crypto operation
+ */
+struct desc_cryptop_pair {
+ struct talitos_desc cf_desc; /* descriptor */
+ struct cryptop *cf_crp; /* cryptop ptr */
+};
+
+/*
+ * Holds data specific to a single talitos device.
+ */
+struct talitos_softc {
+ softc_device_decl sc_cdev;
+ struct platform_device *sc_dev; /* device backpointer */
+ ocf_iomem_t sc_base_addr;
+ int sc_irq;
+ int sc_num; /* if we have multiple chips */
+ int32_t sc_cid; /* crypto tag */
+ u64 sc_chiprev; /* major/minor chip revision */
+ int sc_nsessions;
+ struct talitos_session *sc_sessions;
+ int sc_num_channels;/* number of crypto channels */
+ int sc_chfifo_len; /* channel fetch fifo len */
+ int sc_exec_units; /* execution units mask */
+ int sc_desc_types; /* descriptor types mask */
+ /*
+ * mutual exclusion for intra-channel resources, e.g. fetch fifos;
+ * the last entry is a meta-channel lock used by the channel scheduler
+ */
+ spinlock_t *sc_chnfifolock;
+ /* sc_chnlastalg holds the last algorithm used on each channel */
+ int *sc_chnlastalg;
+ /* sc_chnfifo holds pending descriptor/cryptop pairs per channel */
+ struct desc_cryptop_pair **sc_chnfifo;
+};
+
+struct talitos_session {
+ u_int32_t ses_used;
+ u_int32_t ses_klen; /* key length in bytes */
+ u_int32_t ses_key[8]; /* DES/3DES/AES key */
+ u_int32_t ses_hmac[5]; /* hmac key */
+ u_int32_t ses_hmac_len; /* hmac key length in bytes */
+ u_int32_t ses_mlen; /* desired hash result len (12=ipsec or 16) */
+};
+
+#define TALITOS_SESSION(sid) ((sid) & 0x0fffffff)
+#define TALITOS_SID(crd, sesn) (((crd) << 28) | ((sesn) & 0x0fffffff))
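+/* e.g. chip 1, session 3 gives sid 0x10000003; TALITOS_SESSION() recovers 3 */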
diff --git a/target/linux/generic/files/crypto/ocf/ubsec_ssb/Makefile b/target/linux/generic/files/crypto/ocf/ubsec_ssb/Makefile
new file mode 100644
index 000000000..f973efd7b
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ubsec_ssb/Makefile
@@ -0,0 +1,12 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_UBSEC_SSB) += ubsec_ssb.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
+
diff --git a/target/linux/generic/files/crypto/ocf/ubsec_ssb/bsdqueue.h b/target/linux/generic/files/crypto/ocf/ubsec_ssb/bsdqueue.h
new file mode 100644
index 000000000..601055267
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ubsec_ssb/bsdqueue.h
@@ -0,0 +1,527 @@
+/* $OpenBSD: queue.h,v 1.32 2007/04/30 18:42:34 pedro Exp $ */
+/* $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $ */
+
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef _BSD_SYS_QUEUE_H_
+#define _BSD_SYS_QUEUE_H_
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction. Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
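+/*
+ * Minimal usage sketch (illustrative only; "struct foo", "foohead" and
+ * "do_something" are hypothetical names, not part of this header):
+ *
+ *	struct foo { int val; BSD_SLIST_ENTRY(foo) link; };
+ *	BSD_SLIST_HEAD(foohead, foo) head = BSD_SLIST_HEAD_INITIALIZER(head);
+ *	struct foo a, b, *p;
+ *
+ *	BSD_SLIST_INIT(&head);
+ *	BSD_SLIST_INSERT_HEAD(&head, &a, link);
+ *	BSD_SLIST_INSERT_AFTER(&a, &b, link);
+ *	BSD_SLIST_FOREACH(p, &head, link)
+ *		do_something(p->val);
+ */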
+
+#if defined(QUEUE_MACRO_DEBUG) || (defined(_KERNEL) && defined(DIAGNOSTIC))
+#define _Q_INVALIDATE(a) (a) = ((void *)-1)
+#else
+#define _Q_INVALIDATE(a)
+#endif
+
+/*
+ * Singly-linked List definitions.
+ */
+#define BSD_SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define BSD_SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define BSD_SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List access methods.
+ */
+#define BSD_SLIST_FIRST(head) ((head)->slh_first)
+#define BSD_SLIST_END(head) NULL
+#define BSD_SLIST_EMPTY(head) (BSD_SLIST_FIRST(head) == BSD_SLIST_END(head))
+#define BSD_SLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+#define BSD_SLIST_FOREACH(var, head, field) \
+ for((var) = BSD_SLIST_FIRST(head); \
+ (var) != BSD_SLIST_END(head); \
+ (var) = BSD_SLIST_NEXT(var, field))
+
+#define BSD_SLIST_FOREACH_PREVPTR(var, varp, head, field) \
+ for ((varp) = &BSD_SLIST_FIRST((head)); \
+ ((var) = *(varp)) != BSD_SLIST_END(head); \
+ (varp) = &BSD_SLIST_NEXT((var), field))
+
+/*
+ * Singly-linked List functions.
+ */
+#define BSD_SLIST_INIT(head) { \
+ BSD_SLIST_FIRST(head) = BSD_SLIST_END(head); \
+}
+
+#define BSD_SLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ (elm)->field.sle_next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = (elm); \
+} while (0)
+
+#define BSD_SLIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.sle_next = (head)->slh_first; \
+ (head)->slh_first = (elm); \
+} while (0)
+
+#define BSD_SLIST_REMOVE_NEXT(head, elm, field) do { \
+ (elm)->field.sle_next = (elm)->field.sle_next->field.sle_next; \
+} while (0)
+
+#define BSD_SLIST_REMOVE_HEAD(head, field) do { \
+ (head)->slh_first = (head)->slh_first->field.sle_next; \
+} while (0)
+
+#define BSD_SLIST_REMOVE(head, elm, type, field) do { \
+ if ((head)->slh_first == (elm)) { \
+ BSD_SLIST_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->slh_first; \
+ \
+ while (curelm->field.sle_next != (elm)) \
+ curelm = curelm->field.sle_next; \
+ curelm->field.sle_next = \
+ curelm->field.sle_next->field.sle_next; \
+ _Q_INVALIDATE((elm)->field.sle_next); \
+ } \
+} while (0)
+
+/*
+ * List definitions.
+ */
+#define BSD_LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define BSD_LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define BSD_LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List access methods
+ */
+#define BSD_LIST_FIRST(head) ((head)->lh_first)
+#define BSD_LIST_END(head) NULL
+#define BSD_LIST_EMPTY(head) (BSD_LIST_FIRST(head) == BSD_LIST_END(head))
+#define BSD_LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+#define BSD_LIST_FOREACH(var, head, field) \
+ for((var) = BSD_LIST_FIRST(head); \
+ (var)!= BSD_LIST_END(head); \
+ (var) = BSD_LIST_NEXT(var, field))
+
+/*
+ * List functions.
+ */
+#define BSD_LIST_INIT(head) do { \
+ BSD_LIST_FIRST(head) = BSD_LIST_END(head); \
+} while (0)
+
+#define BSD_LIST_INSERT_AFTER(listelm, elm, field) do { \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (0)
+
+#define BSD_LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (0)
+
+#define BSD_LIST_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.le_next = (head)->lh_first) != NULL) \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (0)
+
+#define BSD_LIST_REMOVE(elm, field) do { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+ _Q_INVALIDATE((elm)->field.le_prev); \
+ _Q_INVALIDATE((elm)->field.le_next); \
+} while (0)
+
+#define BSD_LIST_REPLACE(elm, elm2, field) do { \
+ if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
+ (elm2)->field.le_next->field.le_prev = \
+ &(elm2)->field.le_next; \
+ (elm2)->field.le_prev = (elm)->field.le_prev; \
+ *(elm2)->field.le_prev = (elm2); \
+ _Q_INVALIDATE((elm)->field.le_prev); \
+ _Q_INVALIDATE((elm)->field.le_next); \
+} while (0)
+
+/*
+ * Simple queue definitions.
+ */
+#define BSD_SIMPLEQ_HEAD(name, type) \
+struct name { \
+ struct type *sqh_first; /* first element */ \
+ struct type **sqh_last; /* addr of last next element */ \
+}
+
+#define BSD_SIMPLEQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).sqh_first }
+
+#define BSD_SIMPLEQ_ENTRY(type) \
+struct { \
+ struct type *sqe_next; /* next element */ \
+}
+
+/*
+ * Simple queue access methods.
+ */
+#define BSD_SIMPLEQ_FIRST(head) ((head)->sqh_first)
+#define BSD_SIMPLEQ_END(head) NULL
+#define BSD_SIMPLEQ_EMPTY(head) (BSD_SIMPLEQ_FIRST(head) == BSD_SIMPLEQ_END(head))
+#define BSD_SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
+
+#define BSD_SIMPLEQ_FOREACH(var, head, field) \
+ for((var) = BSD_SIMPLEQ_FIRST(head); \
+ (var) != BSD_SIMPLEQ_END(head); \
+ (var) = BSD_SIMPLEQ_NEXT(var, field))
+
+/*
+ * Simple queue functions.
+ */
+#define BSD_SIMPLEQ_INIT(head) do { \
+ (head)->sqh_first = NULL; \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (0)
+
+#define BSD_SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (head)->sqh_first = (elm); \
+} while (0)
+
+#define BSD_SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.sqe_next = NULL; \
+ *(head)->sqh_last = (elm); \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+} while (0)
+
+#define BSD_SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (listelm)->field.sqe_next = (elm); \
+} while (0)
+
+#define BSD_SIMPLEQ_REMOVE_HEAD(head, field) do { \
+ if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (0)
+
+/*
+ * Tail queue definitions.
+ */
+#define BSD_TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+}
+
+#define BSD_TAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).tqh_first }
+
+#define BSD_TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+
+/*
+ * tail queue access methods
+ */
+#define BSD_TAILQ_FIRST(head) ((head)->tqh_first)
+#define BSD_TAILQ_END(head) NULL
+#define BSD_TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#define BSD_TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+/* XXX */
+#define BSD_TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define BSD_TAILQ_EMPTY(head) \
+ (BSD_TAILQ_FIRST(head) == BSD_TAILQ_END(head))
+
+#define BSD_TAILQ_FOREACH(var, head, field) \
+ for((var) = BSD_TAILQ_FIRST(head); \
+ (var) != BSD_TAILQ_END(head); \
+ (var) = BSD_TAILQ_NEXT(var, field))
+
+#define BSD_TAILQ_FOREACH_REVERSE(var, head, headname, field) \
+ for((var) = BSD_TAILQ_LAST(head, headname); \
+ (var) != BSD_TAILQ_END(head); \
+ (var) = BSD_TAILQ_PREV(var, headname, field))
+
+/*
+ * Tail queue functions.
+ */
+#define BSD_TAILQ_INIT(head) do { \
+ (head)->tqh_first = NULL; \
+ (head)->tqh_last = &(head)->tqh_first; \
+} while (0)
+
+#define BSD_TAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
+ (head)->tqh_first->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (head)->tqh_first = (elm); \
+ (elm)->field.tqe_prev = &(head)->tqh_first; \
+} while (0)
+
+#define BSD_TAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.tqe_next = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+} while (0)
+
+#define BSD_TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+ (elm)->field.tqe_next->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (listelm)->field.tqe_next = (elm); \
+ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
+} while (0)
+
+#define BSD_TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+} while (0)
+
+#define BSD_TAILQ_REMOVE(head, elm, field) do { \
+ if (((elm)->field.tqe_next) != NULL) \
+ (elm)->field.tqe_next->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
+ _Q_INVALIDATE((elm)->field.tqe_prev); \
+ _Q_INVALIDATE((elm)->field.tqe_next); \
+} while (0)
+
+#define BSD_TAILQ_REPLACE(head, elm, elm2, field) do { \
+ if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL) \
+ (elm2)->field.tqe_next->field.tqe_prev = \
+ &(elm2)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm2)->field.tqe_next; \
+ (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
+ *(elm2)->field.tqe_prev = (elm2); \
+ _Q_INVALIDATE((elm)->field.tqe_prev); \
+ _Q_INVALIDATE((elm)->field.tqe_next); \
+} while (0)
+
+/*
+ * Circular queue definitions.
+ */
+#define BSD_CIRCLEQ_HEAD(name, type) \
+struct name { \
+ struct type *cqh_first; /* first element */ \
+ struct type *cqh_last; /* last element */ \
+}
+
+#define BSD_CIRCLEQ_HEAD_INITIALIZER(head) \
+ { BSD_CIRCLEQ_END(&head), BSD_CIRCLEQ_END(&head) }
+
+#define BSD_CIRCLEQ_ENTRY(type) \
+struct { \
+ struct type *cqe_next; /* next element */ \
+ struct type *cqe_prev; /* previous element */ \
+}
+
+/*
+ * Circular queue access methods
+ */
+#define BSD_CIRCLEQ_FIRST(head) ((head)->cqh_first)
+#define BSD_CIRCLEQ_LAST(head) ((head)->cqh_last)
+#define BSD_CIRCLEQ_END(head) ((void *)(head))
+#define BSD_CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
+#define BSD_CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
+#define BSD_CIRCLEQ_EMPTY(head) \
+ (BSD_CIRCLEQ_FIRST(head) == BSD_CIRCLEQ_END(head))
+
+#define BSD_CIRCLEQ_FOREACH(var, head, field) \
+ for((var) = BSD_CIRCLEQ_FIRST(head); \
+ (var) != BSD_CIRCLEQ_END(head); \
+ (var) = BSD_CIRCLEQ_NEXT(var, field))
+
+#define BSD_CIRCLEQ_FOREACH_REVERSE(var, head, field) \
+ for((var) = BSD_CIRCLEQ_LAST(head); \
+ (var) != BSD_CIRCLEQ_END(head); \
+ (var) = BSD_CIRCLEQ_PREV(var, field))
+
+/*
+ * Circular queue functions.
+ */
+#define BSD_CIRCLEQ_INIT(head) do { \
+ (head)->cqh_first = BSD_CIRCLEQ_END(head); \
+ (head)->cqh_last = BSD_CIRCLEQ_END(head); \
+} while (0)
+
+#define BSD_CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm)->field.cqe_next; \
+ (elm)->field.cqe_prev = (listelm); \
+ if ((listelm)->field.cqe_next == BSD_CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (listelm)->field.cqe_next->field.cqe_prev = (elm); \
+ (listelm)->field.cqe_next = (elm); \
+} while (0)
+
+#define BSD_CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm); \
+ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
+ if ((listelm)->field.cqe_prev == BSD_CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (listelm)->field.cqe_prev->field.cqe_next = (elm); \
+ (listelm)->field.cqe_prev = (elm); \
+} while (0)
+
+#define BSD_CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.cqe_next = (head)->cqh_first; \
+ (elm)->field.cqe_prev = BSD_CIRCLEQ_END(head); \
+ if ((head)->cqh_last == BSD_CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (head)->cqh_first->field.cqe_prev = (elm); \
+ (head)->cqh_first = (elm); \
+} while (0)
+
+#define BSD_CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.cqe_next = BSD_CIRCLEQ_END(head); \
+ (elm)->field.cqe_prev = (head)->cqh_last; \
+ if ((head)->cqh_first == BSD_CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (head)->cqh_last->field.cqe_next = (elm); \
+ (head)->cqh_last = (elm); \
+} while (0)
+
+#define BSD_CIRCLEQ_REMOVE(head, elm, field) do { \
+ if ((elm)->field.cqe_next == BSD_CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm)->field.cqe_prev; \
+ else \
+ (elm)->field.cqe_next->field.cqe_prev = \
+ (elm)->field.cqe_prev; \
+ if ((elm)->field.cqe_prev == BSD_CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm)->field.cqe_next; \
+ else \
+ (elm)->field.cqe_prev->field.cqe_next = \
+ (elm)->field.cqe_next; \
+ _Q_INVALIDATE((elm)->field.cqe_prev); \
+ _Q_INVALIDATE((elm)->field.cqe_next); \
+} while (0)
+
+#define BSD_CIRCLEQ_REPLACE(head, elm, elm2, field) do { \
+ if (((elm2)->field.cqe_next = (elm)->field.cqe_next) == \
+ BSD_CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm2); \
+ else \
+ (elm2)->field.cqe_next->field.cqe_prev = (elm2); \
+ if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) == \
+ BSD_CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm2); \
+ else \
+ (elm2)->field.cqe_prev->field.cqe_next = (elm2); \
+ _Q_INVALIDATE((elm)->field.cqe_prev); \
+ _Q_INVALIDATE((elm)->field.cqe_next); \
+} while (0)
+
+#endif /* !_BSD_SYS_QUEUE_H_ */
diff --git a/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsec_ssb.c b/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsec_ssb.c
new file mode 100644
index 000000000..f5d776236
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsec_ssb.c
@@ -0,0 +1,2220 @@
+
+/*
+ * Copyright (c) 2008 Daniel Mueller (daniel@danm.de)
+ * Copyright (c) 2007 David McCullough (david_mccullough@securecomputing.com)
+ * Copyright (c) 2000 Jason L. Wright (jason@thought.net)
+ * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org)
+ * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+#undef UBSEC_DEBUG
+#undef UBSEC_VERBOSE_DEBUG
+
+#ifdef UBSEC_VERBOSE_DEBUG
+#define UBSEC_DEBUG
+#endif
+
+/*
+ * uBsec BCM5365 hardware crypto accelerator
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/proc_fs.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/stat.h>
+#include <asm/io.h>
+
+#include <linux/ssb/ssb.h>
+
+/*
+ * BSD queue
+ */
+//#include "bsdqueue.h"
+
+/*
+ * OCF
+ */
+#include <cryptodev.h>
+#include <uio.h>
+
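+/*
+ * HMAC_HACK pulls in the software MD5/SHA1 implementations so the
+ * inner/outer HMAC states can be precomputed on the host CPU (see
+ * ubsec_setup_mackey()).
+ */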
+#define HMAC_HACK 1
+
+#ifdef HMAC_HACK
+#include <safe/hmachack.h>
+#include <safe/md5.h>
+#include <safe/md5.c>
+#include <safe/sha1.h>
+#include <safe/sha1.c>
+#endif
+
+#include "bsdqueue.h"
+#include "ubsecreg.h"
+#include "ubsecvar.h"
+
+#define DRV_MODULE_NAME "ubsec_ssb"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_MODULE_VERSION "0.02"
+#define DRV_MODULE_RELDATE "Feb 21, 2009"
+
+#if 1
+#define DPRINTF(a...) \
+ if (debug) \
+ { \
+ printk(DRV_MODULE_NAME ": " a); \
+ }
+#else
+#define DPRINTF(a...)
+#endif
+
+/*
+ * Prototypes
+ */
+static irqreturn_t ubsec_ssb_isr(int, void *, struct pt_regs *);
+static int __devinit ubsec_ssb_probe(struct ssb_device *sdev,
+ const struct ssb_device_id *ent);
+static void __devexit ubsec_ssb_remove(struct ssb_device *sdev);
+int ubsec_attach(struct ssb_device *sdev, const struct ssb_device_id *ent,
+ struct device *self);
+static void ubsec_setup_mackey(struct ubsec_session *ses, int algo,
+ caddr_t key, int klen);
+static int dma_map_skb(struct ubsec_softc *sc,
+ struct ubsec_dma_alloc* q_map, struct sk_buff *skb, int *mlen);
+static int dma_map_uio(struct ubsec_softc *sc,
+ struct ubsec_dma_alloc *q_map, struct uio *uio, int *mlen);
+static void dma_unmap(struct ubsec_softc *sc,
+ struct ubsec_dma_alloc *q_map, int mlen);
+static int ubsec_dmamap_aligned(struct ubsec_softc *sc,
+ const struct ubsec_dma_alloc *q_map, int mlen);
+
+#ifdef UBSEC_DEBUG
+static int proc_read(char *buf, char **start, off_t offset,
+ int size, int *peof, void *data);
+#endif
+
+void ubsec_reset_board(struct ubsec_softc *);
+void ubsec_init_board(struct ubsec_softc *);
+void ubsec_cleanchip(struct ubsec_softc *);
+void ubsec_totalreset(struct ubsec_softc *);
+int ubsec_free_q(struct ubsec_softc*, struct ubsec_q *);
+
+static int ubsec_newsession(device_t, u_int32_t *, struct cryptoini *);
+static int ubsec_freesession(device_t, u_int64_t);
+static int ubsec_process(device_t, struct cryptop *, int);
+
+void ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
+void ubsec_feed(struct ubsec_softc *);
+void ubsec_mcopy(struct sk_buff *, struct sk_buff *, int, int);
+void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
+int ubsec_dma_malloc(struct ubsec_softc *, struct ubsec_dma_alloc *,
+ size_t, int);
+
+/* DEBUG crap... */
+void ubsec_dump_pb(struct ubsec_pktbuf *);
+void ubsec_dump_mcr(struct ubsec_mcr *);
+
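+/* Register access goes through the SSB core's 32-bit MMIO accessors. */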
+#define READ_REG(sc,r) \
+ ssb_read32((sc)->sdev, (r))
+#define WRITE_REG(sc,r,val) \
+ ssb_write32((sc)->sdev, (r), (val))
+#define READ_REG_SDEV(sdev,r) \
+ ssb_read32((sdev), (r))
+#define WRITE_REG_SDEV(sdev,r,val) \
+ ssb_write32((sdev), (r), (val))
+
+#define SWAP32(x) (x) = htole32(ntohl((x)))
+#define HTOLE32(x) (x) = htole32(x)
+
+#ifdef __LITTLE_ENDIAN
+#define letoh16(x) (x)
+#define letoh32(x) (x)
+#endif
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Enable debug output");
+
+#define UBSEC_SSB_MAX_CHIPS 1
+static struct ubsec_softc *ubsec_chip_idx[UBSEC_SSB_MAX_CHIPS];
+static struct ubsec_stats ubsecstats;
+
+#ifdef UBSEC_DEBUG
+static struct proc_dir_entry *procdebug;
+#endif
+
+static struct ssb_device_id ubsec_ssb_tbl[] = {
+ /* Broadcom BCM5365P IPSec Core */
+ SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_IPSEC, SSB_ANY_REV),
+ SSB_DEVTABLE_END
+};
+
+static struct ssb_driver ubsec_ssb_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = ubsec_ssb_tbl,
+ .probe = ubsec_ssb_probe,
+ .remove = __devexit_p(ubsec_ssb_remove),
+ /*
+ .suspend = ubsec_ssb_suspend,
+ .resume = ubsec_ssb_resume
+ */
+};
+
+static device_method_t ubsec_ssb_methods = {
+ /* crypto device methods */
+ DEVMETHOD(cryptodev_newsession, ubsec_newsession),
+ DEVMETHOD(cryptodev_freesession,ubsec_freesession),
+ DEVMETHOD(cryptodev_process, ubsec_process),
+};
+
+#ifdef UBSEC_DEBUG
+static int
+proc_read(char *buf, char **start, off_t offset,
+ int size, int *peof, void *data)
+{
+ int i = 0, byteswritten = 0, ret;
+ unsigned int stat, ctrl;
+#ifdef UBSEC_VERBOSE_DEBUG
+ struct ubsec_q *q;
+ struct ubsec_dma *dmap;
+#endif
+
+ while ((i < UBSEC_SSB_MAX_CHIPS) && (ubsec_chip_idx[i] != NULL))
+ {
+ struct ubsec_softc *sc = ubsec_chip_idx[i];
+
+ stat = READ_REG(sc, BS_STAT);
+ ctrl = READ_REG(sc, BS_CTRL);
+ ret = snprintf((buf + byteswritten),
+ (size - byteswritten) ,
+ "DEV %d, DMASTAT %08x, DMACTRL %08x\n", i, stat, ctrl);
+
+ byteswritten += ret;
+
+#ifdef UBSEC_VERBOSE_DEBUG
+ printf("DEV %d, DMASTAT %08x, DMACTRL %08x\n", i, stat, ctrl);
+
+ /* Dump all queues MCRs */
+ if (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
+ BSD_SIMPLEQ_FOREACH(q, &sc->sc_qchip, q_next)
+ {
+ dmap = q->q_dma;
+ ubsec_dump_mcr(&dmap->d_dma->d_mcr);
+ }
+ }
+#endif
+
+ i++;
+ }
+
+ *peof = 1;
+
+ return byteswritten;
+}
+#endif
+
+/*
+ * map in a given sk_buff
+ */
+static int
+dma_map_skb(struct ubsec_softc *sc, struct ubsec_dma_alloc* q_map, struct sk_buff *skb, int *mlen)
+{
+ int i = 0;
+ dma_addr_t tmp;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+ /*
+ * We support only a limited number of fragments.
+ */
+ if (unlikely((skb_shinfo(skb)->nr_frags + 1) >= UBS_MAX_SCATTER))
+ {
+ printk(KERN_ERR "Only %d scatter fragments are supported.\n", UBS_MAX_SCATTER);
+ return (-ENOMEM);
+ }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, 0, (unsigned int)skb->data, skb_headlen(skb));
+#endif
+
+ /* first data package */
+ tmp = dma_map_single(sc->sc_dv,
+ skb->data,
+ skb_headlen(skb),
+ DMA_BIDIRECTIONAL);
+
+ q_map[i].dma_paddr = tmp;
+ q_map[i].dma_vaddr = skb->data;
+ q_map[i].dma_size = skb_headlen(skb);
+
+ if (unlikely(tmp == 0))
+ {
+ printk(KERN_ERR "Could not map memory region for dma.\n");
+ return (-EINVAL);
+ }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, 0, (unsigned int)tmp);
+#endif
+
+
+ /* all other data packages */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, i + 1,
+ (unsigned int)page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) +
+ skb_shinfo(skb)->frags[i].page_offset, skb_shinfo(skb)->frags[i].size);
+#endif
+
+ tmp = dma_map_single(sc->sc_dv,
+ page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) +
+ skb_shinfo(skb)->frags[i].page_offset,
+ skb_shinfo(skb)->frags[i].size,
+ DMA_BIDIRECTIONAL);
+
+ q_map[i + 1].dma_paddr = tmp;
+ q_map[i + 1].dma_vaddr = (void*)(page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) +
+ skb_shinfo(skb)->frags[i].page_offset);
+ q_map[i + 1].dma_size = skb_shinfo(skb)->frags[i].size;
+
+ if (unlikely(tmp == 0))
+ {
+ printk(KERN_ERR "Could not map memory region for dma.\n");
+ return (-EINVAL);
+ }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, i + 1, (unsigned int)tmp);
+#endif
+
+ }
+ *mlen = i + 1;
+
+ return(0);
+}
+
+/*
+ * map in a given uio buffer
+ */
+
+static int
+dma_map_uio(struct ubsec_softc *sc, struct ubsec_dma_alloc *q_map, struct uio *uio, int *mlen)
+{
+ struct iovec *iov = uio->uio_iov;
+ int n;
+ dma_addr_t tmp;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+ /*
+ * We support only a limited number of fragments.
+ */
+ if (unlikely(uio->uio_iovcnt >= UBS_MAX_SCATTER))
+ {
+ printk(KERN_ERR "Only %d scatter fragments are supported.\n", UBS_MAX_SCATTER);
+ return (-ENOMEM);
+ }
+
+ for (n = 0; n < uio->uio_iovcnt; n++) {
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, n, (unsigned int)iov->iov_base, iov->iov_len);
+#endif
+ tmp = dma_map_single(sc->sc_dv,
+ iov->iov_base,
+ iov->iov_len,
+ DMA_BIDIRECTIONAL);
+
+ q_map[n].dma_paddr = tmp;
+ q_map[n].dma_vaddr = iov->iov_base;
+ q_map[n].dma_size = iov->iov_len;
+
+ if (unlikely(tmp == 0))
+ {
+ printk(KERN_ERR "Could not map memory region for dma.\n");
+ return (-EINVAL);
+ }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, n, (unsigned int)tmp);
+#endif
+
+ iov++;
+ }
+ *mlen = n;
+
+ return(0);
+}
+
+static void
+dma_unmap(struct ubsec_softc *sc, struct ubsec_dma_alloc *q_map, int mlen)
+{
+ int i;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+ for(i = 0; i < mlen; i++)
+ {
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, (unsigned int)q_map[i].dma_paddr, q_map[i].dma_size);
+#endif
+ dma_unmap_single(sc->sc_dv,
+ q_map[i].dma_paddr,
+ q_map[i].dma_size,
+ DMA_BIDIRECTIONAL);
+ }
+ return;
+}
+
+/*
+ * Is the operand suitably aligned for direct DMA? Each
+ * segment must be aligned on a 32-bit boundary and all
+ * but the last segment must be a multiple of 4 bytes.
+ */
+static int
+ubsec_dmamap_aligned(struct ubsec_softc *sc, const struct ubsec_dma_alloc *q_map, int mlen)
+{
+ int i;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+ for (i = 0; i < mlen; i++) {
+ if (q_map[i].dma_paddr & 3)
+ return (0);
+ if (i != (mlen - 1) && (q_map[i].dma_size & 3))
+ return (0);
+ }
+ return (1);
+}
+
+
+#define N(a) (sizeof(a) / sizeof (a[0]))
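+/*
+ * Precompute the HMAC inner (key ^ ipad) and outer (key ^ opad) hash
+ * states so the hardware only has to continue the hash over the packet
+ * data; the key is XORed back to its original value before returning.
+ */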
+static void
+ubsec_setup_mackey(struct ubsec_session *ses, int algo, caddr_t key, int klen)
+{
+#ifdef HMAC_HACK
+ MD5_CTX md5ctx;
+ SHA1_CTX sha1ctx;
+ int i;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+ for (i = 0; i < klen; i++)
+ key[i] ^= HMAC_IPAD_VAL;
+
+ if (algo == CRYPTO_MD5_HMAC) {
+ MD5Init(&md5ctx);
+ MD5Update(&md5ctx, key, klen);
+ MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
+ bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
+ } else {
+ SHA1Init(&sha1ctx);
+ SHA1Update(&sha1ctx, key, klen);
+ SHA1Update(&sha1ctx, hmac_ipad_buffer,
+ SHA1_HMAC_BLOCK_LEN - klen);
+ bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
+ }
+
+ for (i = 0; i < klen; i++)
+ key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
+
+ if (algo == CRYPTO_MD5_HMAC) {
+ MD5Init(&md5ctx);
+ MD5Update(&md5ctx, key, klen);
+ MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
+ bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
+ } else {
+ SHA1Init(&sha1ctx);
+ SHA1Update(&sha1ctx, key, klen);
+ SHA1Update(&sha1ctx, hmac_opad_buffer,
+ SHA1_HMAC_BLOCK_LEN - klen);
+ bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
+ }
+
+ for (i = 0; i < klen; i++)
+ key[i] ^= HMAC_OPAD_VAL;
+
+#else /* HMAC_HACK */
+ DPRINTF("md5/sha not implemented\n");
+#endif /* HMAC_HACK */
+}
+#undef N
+
+static int
+__devinit ubsec_ssb_probe(struct ssb_device *sdev,
+ const struct ssb_device_id *ent)
+{
+ int err;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+ err = ssb_bus_powerup(sdev->bus, 0);
+ if (err) {
+ dev_err(sdev->dev, "Failed to powerup the bus\n");
+ goto err_out;
+ }
+
+ err = request_irq(sdev->irq, (irq_handler_t)ubsec_ssb_isr,
+ IRQF_DISABLED | IRQF_SHARED, DRV_MODULE_NAME, sdev);
+ if (err) {
+ dev_err(sdev->dev, "Could not request irq\n");
+ goto err_out_powerdown;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+ err = dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(32)) ||
+ dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(32));
+#else
+ err = ssb_dma_set_mask(sdev, DMA_32BIT_MASK);
+#endif
+ if (err) {
+ dev_err(sdev->dev,
+ "Required 32BIT DMA mask unsupported by the system.\n");
+ goto err_out_free_irq;
+ }
+
+ printk(KERN_INFO "Sentry5(tm) ROBOGateway(tm) IPSec Core at IRQ %u\n",
+ sdev->irq);
+
+ DPRINTF("Vendor: %x, core id: %x, revision: %x\n",
+ sdev->id.vendor, sdev->id.coreid, sdev->id.revision);
+
+ ssb_device_enable(sdev, 0);
+
+ if (ubsec_attach(sdev, ent, sdev->dev) != 0)
+ goto err_out_disable;
+
+#ifdef UBSEC_DEBUG
+ procdebug = create_proc_entry(DRV_MODULE_NAME, S_IRUSR, NULL);
+ if (procdebug)
+ {
+ procdebug->read_proc = proc_read;
+ procdebug->data = NULL;
+ } else
+ DPRINTF("Unable to create proc file.\n");
+#endif
+
+ return 0;
+
+err_out_disable:
+ ssb_device_disable(sdev, 0);
+
+err_out_free_irq:
+ free_irq(sdev->irq, sdev);
+
+err_out_powerdown:
+ ssb_bus_may_powerdown(sdev->bus);
+
+err_out:
+ return err;
+}
+
+static void __devexit ubsec_ssb_remove(struct ssb_device *sdev)
+{
+ struct ubsec_softc *sc;
+ unsigned int ctrlflgs;
+ struct ubsec_dma *dmap;
+ u_int32_t i;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+ ctrlflgs = READ_REG_SDEV(sdev, BS_CTRL);
+ /* disable all IPSec Core interrupts globally */
+ ctrlflgs &= ~(BS_CTRL_MCR1INT | BS_CTRL_MCR2INT |
+ BS_CTRL_DMAERR);
+ WRITE_REG_SDEV(sdev, BS_CTRL, ctrlflgs);
+
+ free_irq(sdev->irq, sdev);
+
+ sc = (struct ubsec_softc *)ssb_get_drvdata(sdev);
+
+ /* unregister all crypto algorithms */
+ crypto_unregister_all(sc->sc_cid);
+
+ /* Free queue / dma memory */
+ for (i = 0; i < UBS_MAX_NQUEUE; i++) {
+ struct ubsec_q *q;
+
+ q = sc->sc_queuea[i];
+ if (q != NULL)
+ {
+ dmap = q->q_dma;
+ if (dmap != NULL)
+ {
+ ubsec_dma_free(sc, &dmap->d_alloc);
+ q->q_dma = NULL;
+ }
+ kfree(q);
+ }
+ sc->sc_queuea[i] = NULL;
+ }
+
+ ssb_device_disable(sdev, 0);
+ ssb_bus_may_powerdown(sdev->bus);
+ ssb_set_drvdata(sdev, NULL);
+
+#ifdef UBSEC_DEBUG
+ if (procdebug)
+ remove_proc_entry(DRV_MODULE_NAME, NULL);
+#endif
+
+}
+
+
+int
+ubsec_attach(struct ssb_device *sdev, const struct ssb_device_id *ent,
+ struct device *self)
+{
+ struct ubsec_softc *sc = NULL;
+ struct ubsec_dma *dmap;
+ u_int32_t i;
+ static int num_chips = 0;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+ sc = (struct ubsec_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
+ if (!sc)
+ return(-ENOMEM);
+ memset(sc, 0, sizeof(*sc));
+
+ sc->sc_dv = sdev->dev;
+ sc->sdev = sdev;
+
+ spin_lock_init(&sc->sc_ringmtx);
+
+ softc_device_init(sc, "ubsec_ssb", num_chips, ubsec_ssb_methods);
+
+ /* Maybe someday there will be boards with more than one chip available */
+ if (num_chips < UBSEC_SSB_MAX_CHIPS) {
+ ubsec_chip_idx[device_get_unit(sc->sc_dev)] = sc;
+ num_chips++;
+ }
+
+ ssb_set_drvdata(sdev, sc);
+
+ BSD_SIMPLEQ_INIT(&sc->sc_queue);
+ BSD_SIMPLEQ_INIT(&sc->sc_qchip);
+ BSD_SIMPLEQ_INIT(&sc->sc_queue2);
+ BSD_SIMPLEQ_INIT(&sc->sc_qchip2);
+ BSD_SIMPLEQ_INIT(&sc->sc_q2free);
+
+ sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR;
+
+ sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
+ if (sc->sc_cid < 0) {
+ device_printf(sc->sc_dev, "could not get crypto driver id\n");
+ return -1;
+ }
+
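+ /*
+ * Preallocate every request descriptor and its coherent DMA chunk up
+ * front and park them on sc_freequeue; ubsec_process() only ever
+ * dequeues from this pool.
+ */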
+ BSD_SIMPLEQ_INIT(&sc->sc_freequeue);
+ dmap = sc->sc_dmaa;
+ for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
+ struct ubsec_q *q;
+
+ q = (struct ubsec_q *)kmalloc(sizeof(struct ubsec_q), GFP_KERNEL);
+ if (q == NULL) {
+ printf(": can't allocate queue buffers\n");
+ break;
+ }
+
+ if (ubsec_dma_malloc(sc, &dmap->d_alloc, sizeof(struct ubsec_dmachunk),0)) {
+ printf(": can't allocate dma buffers\n");
+ kfree(q);
+ break;
+ }
+ dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;
+
+ q->q_dma = dmap;
+ sc->sc_queuea[i] = q;
+
+ BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
+ }
+
+ /*
+ * Reset Broadcom chip
+ */
+ ubsec_reset_board(sc);
+
+ /*
+ * Init Broadcom chip
+ */
+ ubsec_init_board(sc);
+
+ /* supported crypto algorithms */
+ crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
+
+ if (sc->sc_flags & UBS_FLAGS_AES) {
+ crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
+ printf(KERN_INFO DRV_MODULE_NAME ": DES 3DES AES128 AES192 AES256 MD5_HMAC SHA1_HMAC\n");
+ }
+ else
+ printf(KERN_INFO DRV_MODULE_NAME ": DES 3DES MD5_HMAC SHA1_HMAC\n");
+
+ crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
+ crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
+
+ return 0;
+}
+
+/*
+ * UBSEC Interrupt routine
+ */
+static irqreturn_t
+ubsec_ssb_isr(int irq, void *arg, struct pt_regs *regs)
+{
+ struct ubsec_softc *sc = NULL;
+ volatile u_int32_t stat;
+ struct ubsec_q *q;
+ struct ubsec_dma *dmap;
+ int npkts = 0, i;
+
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+ sc = (struct ubsec_softc *)ssb_get_drvdata(arg);
+
+ stat = READ_REG(sc, BS_STAT);
+
+ stat &= sc->sc_statmask;
+ if (stat == 0)
+ return IRQ_NONE;
+
+ WRITE_REG(sc, BS_STAT, stat); /* IACK */
+
+ /*
+ * Check to see if we have any packets waiting for us
+ */
+ if ((stat & BS_STAT_MCR1_DONE)) {
+ while (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
+ q = BSD_SIMPLEQ_FIRST(&sc->sc_qchip);
+ dmap = q->q_dma;
+
+ if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0)
+ {
+ DPRINTF("error while processing MCR. Flags = %x\n", dmap->d_dma->d_mcr.mcr_flags);
+ break;
+ }
+
+ BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
+
+ npkts = q->q_nstacked_mcrs;
+ /*
+ * Search for further sc_qchip ubsec_q's that share
+ * the same MCR and complete them too; they must be
+ * at the top.
+ */
+ for (i = 0; i < npkts; i++) {
+ if(q->q_stacked_mcr[i])
+ ubsec_callback(sc, q->q_stacked_mcr[i]);
+ else
+ break;
+ }
+ ubsec_callback(sc, q);
+ }
+
+ /*
+ * Don't send any more packets to the chip if there has been
+ * a DMAERR.
+ */
+ if (likely(!(stat & BS_STAT_DMAERR)))
+ ubsec_feed(sc);
+ else
+ DPRINTF("DMA error occurred. Stop feeding crypto chip.\n");
+ }
+
+ /*
+ * Check to see if we got any DMA Error
+ */
+ if (stat & BS_STAT_DMAERR) {
+ volatile u_int32_t a = READ_REG(sc, BS_ERR);
+
+ printf(KERN_ERR "%s: dmaerr %s@%08x\n", DRV_MODULE_NAME,
+ (a & BS_ERR_READ) ? "read" : "write", a & BS_ERR_ADDR);
+
+ ubsecstats.hst_dmaerr++;
+ ubsec_totalreset(sc);
+ ubsec_feed(sc);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * ubsec_feed() - aggregate and post requests to the chip.
+ * It is assumed that the caller serializes access (the BSD original
+ * ran at splnet()).
+ */
+void
+ubsec_feed(struct ubsec_softc *sc)
+{
+#ifdef UBSEC_VERBOSE_DEBUG
+ static int max;
+#endif
+ struct ubsec_q *q, *q2;
+ int npkts, i;
+ void *v;
+ u_int32_t stat;
+
+ npkts = sc->sc_nqueue;
+ if (npkts > UBS_MAX_AGGR)
+ npkts = UBS_MAX_AGGR;
+ if (npkts < 2)
+ goto feed1;
+
+ stat = READ_REG(sc, BS_STAT);
+
+ if (stat & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
+ if(stat & BS_STAT_DMAERR) {
+ ubsec_totalreset(sc);
+ ubsecstats.hst_dmaerr++;
+ }
+ return;
+ }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("merging %d records\n", npkts);
+
+ /* XXX temporary aggregation statistics reporting code */
+ if (max < npkts) {
+ max = npkts;
+ DPRINTF("%s: new max aggregate %d\n", DRV_MODULE_NAME, max);
+ }
+#endif /* UBSEC_VERBOSE_DEBUG */
+
+ q = BSD_SIMPLEQ_FIRST(&sc->sc_queue);
+ BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
+ --sc->sc_nqueue;
+
+#if 0
+ /*
+ * XXX
+ * We use dma_map_single() - no sync required!
+ */
+
+ bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
+ 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
+ if (q->q_dst_map != NULL)
+ bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
+ 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
+#endif
+
+ q->q_nstacked_mcrs = npkts - 1; /* Number of packets stacked */
+
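+ /*
+ * Copy the per-packet MCR trailer of every stacked request into this
+ * request's d_mcradd array, so the chip processes the whole batch from
+ * the single MCR1 write below.
+ */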
+ for (i = 0; i < q->q_nstacked_mcrs; i++) {
+ q2 = BSD_SIMPLEQ_FIRST(&sc->sc_queue);
+
+#if 0
+ bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
+ 0, q2->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
+ if (q2->q_dst_map != NULL)
+ bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
+ 0, q2->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
+#endif
+ BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
+ --sc->sc_nqueue;
+
+ v = ((char *)&q2->q_dma->d_dma->d_mcr) + sizeof(struct ubsec_mcr) -
+ sizeof(struct ubsec_mcr_add);
+ bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add));
+ q->q_stacked_mcr[i] = q2;
+ }
+ q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
+ BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
+#if 0
+ bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
+ 0, q->q_dma->d_alloc.dma_map->dm_mapsize,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+#endif
+ WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
+ offsetof(struct ubsec_dmachunk, d_mcr));
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("feed (1): q->chip %p %08x %08x\n", q,
+ (u_int32_t)q->q_dma->d_alloc.dma_paddr,
+ (u_int32_t)(q->q_dma->d_alloc.dma_paddr +
+ offsetof(struct ubsec_dmachunk, d_mcr)));
+#endif /* UBSEC_DEBUG */
+ return;
+
+feed1:
+ while (!BSD_SIMPLEQ_EMPTY(&sc->sc_queue)) {
+ stat = READ_REG(sc, BS_STAT);
+
+ if (stat & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
+ if(stat & BS_STAT_DMAERR) {
+ ubsec_totalreset(sc);
+ ubsecstats.hst_dmaerr++;
+ }
+ break;
+ }
+
+ q = BSD_SIMPLEQ_FIRST(&sc->sc_queue);
+
+#if 0
+ bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
+ 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
+ if (q->q_dst_map != NULL)
+ bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
+ 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
+ bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
+ 0, q->q_dma->d_alloc.dma_map->dm_mapsize,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+#endif
+
+ WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
+ offsetof(struct ubsec_dmachunk, d_mcr));
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("feed (2): q->chip %p %08x %08x\n", q,
+ (u_int32_t)q->q_dma->d_alloc.dma_paddr,
+ (u_int32_t)(q->q_dma->d_alloc.dma_paddr +
+ offsetof(struct ubsec_dmachunk, d_mcr)));
+#endif /* UBSEC_DEBUG */
+ BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
+ --sc->sc_nqueue;
+ BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
+ }
+}
+
+/*
+ * Allocate a new 'session' and return an encoded session id. 'sidp'
+ * contains our registration id, and should contain an encoded session
+ * id on successful allocation.
+ */
+static int
+ubsec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
+{
+ struct cryptoini *c, *encini = NULL, *macini = NULL;
+ struct ubsec_softc *sc = NULL;
+ struct ubsec_session *ses = NULL;
+ int sesn, i;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+ if (sidp == NULL || cri == NULL)
+ return (EINVAL);
+
+ sc = device_get_softc(dev);
+
+ if (sc == NULL)
+ return (EINVAL);
+
+ for (c = cri; c != NULL; c = c->cri_next) {
+ if (c->cri_alg == CRYPTO_MD5_HMAC ||
+ c->cri_alg == CRYPTO_SHA1_HMAC) {
+ if (macini)
+ return (EINVAL);
+ macini = c;
+ } else if (c->cri_alg == CRYPTO_DES_CBC ||
+ c->cri_alg == CRYPTO_3DES_CBC ||
+ c->cri_alg == CRYPTO_AES_CBC) {
+ if (encini)
+ return (EINVAL);
+ encini = c;
+ } else
+ return (EINVAL);
+ }
+ if (encini == NULL && macini == NULL)
+ return (EINVAL);
+
+ if (sc->sc_sessions == NULL) {
+ ses = sc->sc_sessions = (struct ubsec_session *)kmalloc(
+ sizeof(struct ubsec_session), SLAB_ATOMIC);
+ if (ses == NULL)
+ return (ENOMEM);
+ memset(ses, 0, sizeof(struct ubsec_session));
+ sesn = 0;
+ sc->sc_nsessions = 1;
+ } else {
+ for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
+ if (sc->sc_sessions[sesn].ses_used == 0) {
+ ses = &sc->sc_sessions[sesn];
+ break;
+ }
+ }
+
+ if (ses == NULL) {
+ sesn = sc->sc_nsessions;
+ ses = (struct ubsec_session *)kmalloc((sesn + 1) *
+ sizeof(struct ubsec_session), SLAB_ATOMIC);
+ if (ses == NULL)
+ return (ENOMEM);
+ memset(ses, 0, (sesn + 1) * sizeof(struct ubsec_session));
+ bcopy(sc->sc_sessions, ses, sesn *
+ sizeof(struct ubsec_session));
+ bzero(sc->sc_sessions, sesn *
+ sizeof(struct ubsec_session));
+ kfree(sc->sc_sessions);
+ sc->sc_sessions = ses;
+ ses = &sc->sc_sessions[sesn];
+ sc->sc_nsessions++;
+ }
+ }
+
+ bzero(ses, sizeof(struct ubsec_session));
+ ses->ses_used = 1;
+ if (encini) {
+ /* get an IV */
+ /* XXX may read fewer than requested */
+ read_random(ses->ses_iv, sizeof(ses->ses_iv));
+
+ /* Go ahead and compute key in ubsec's byte order */
+ if (encini->cri_alg == CRYPTO_DES_CBC) {
+ /* Single DES is run as 3DES with the same key three times:
+ * 1st encrypt -> 2nd decrypt -> 3rd encrypt */
+ bcopy(encini->cri_key, &ses->ses_key[0], 8);
+ bcopy(encini->cri_key, &ses->ses_key[2], 8);
+ bcopy(encini->cri_key, &ses->ses_key[4], 8);
+ ses->ses_keysize = 192; /* Fake! The real key is only 64 bits,
+ of which just 56 are effective. */
+ } else if(encini->cri_alg == CRYPTO_3DES_CBC) {
+ bcopy(encini->cri_key, ses->ses_key, 24);
+ ses->ses_keysize = 192;
+ } else if(encini->cri_alg == CRYPTO_AES_CBC) {
+ ses->ses_keysize = encini->cri_klen;
+
+ if (ses->ses_keysize != 128 &&
+ ses->ses_keysize != 192 &&
+ ses->ses_keysize != 256)
+ {
+ DPRINTF("unsupported AES key size: %d\n", ses->ses_keysize);
+ return (EINVAL);
+ }
+ bcopy(encini->cri_key, ses->ses_key, (ses->ses_keysize / 8));
+ }
+
+ /* Hardware requires the keys in little endian byte order */
+ for (i=0; i < (ses->ses_keysize / 32); i++)
+ SWAP32(ses->ses_key[i]);
+ }
+
+ if (macini) {
+ ses->ses_mlen = macini->cri_mlen;
+
+ if (ses->ses_mlen == 0 ||
+ ses->ses_mlen > SHA1_HASH_LEN) {
+
+ if (macini->cri_alg == CRYPTO_MD5_HMAC ||
+ macini->cri_alg == CRYPTO_SHA1_HMAC)
+ {
+ ses->ses_mlen = DEFAULT_HMAC_LEN;
+ } else
+ {
+ /*
+ * Reserved for future usage. MD5/SHA1 calculations have
+ * different hash sizes.
+ */
+ printk(KERN_ERR DRV_MODULE_NAME ": unsupported hash operation with mac/hash len: %d\n", ses->ses_mlen);
+ return (EINVAL);
+ }
+
+ }
+
+ if (macini->cri_key != NULL) {
+ ubsec_setup_mackey(ses, macini->cri_alg, macini->cri_key,
+ macini->cri_klen / 8);
+ }
+ }
+
+ *sidp = UBSEC_SID(device_get_unit(sc->sc_dev), sesn);
+ return (0);
+}
+
+/*
+ * Deallocate a session.
+ */
+static int
+ubsec_freesession(device_t dev, u_int64_t tid)
+{
+ struct ubsec_softc *sc = device_get_softc(dev);
+ int session;
+ u_int32_t sid = ((u_int32_t)tid) & 0xffffffff;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+ if (sc == NULL)
+ return (EINVAL);
+
+ session = UBSEC_SESSION(sid);
+ if (session < sc->sc_nsessions) {
+ bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
+ return (0);
+ } else
+ return (EINVAL);
+}
+
+static int
+ubsec_process(device_t dev, struct cryptop *crp, int hint)
+{
+ struct ubsec_q *q = NULL;
+ int err = 0, i, j, nicealign;
+ struct ubsec_softc *sc = device_get_softc(dev);
+ struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
+ int encoffset = 0, macoffset = 0, cpskip, cpoffset;
+ int sskip, dskip, stheend, dtheend, ivsize = 8;
+ int16_t coffset;
+ struct ubsec_session *ses;
+ struct ubsec_generic_ctx ctx;
+ struct ubsec_dma *dmap = NULL;
+ unsigned long flags;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+ if (unlikely(crp == NULL || crp->crp_callback == NULL)) {
+ ubsecstats.hst_invalid++;
+ return (EINVAL);
+ }
+
+ if (unlikely(sc == NULL))
+ return (EINVAL);
+
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("spin_lock_irqsave\n");
+#endif
+ spin_lock_irqsave(&sc->sc_ringmtx, flags);
+ //spin_lock_irq(&sc->sc_ringmtx);
+
+ if (BSD_SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
+ ubsecstats.hst_queuefull++;
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("spin_unlock_irqrestore\n");
+#endif
+ spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+ //spin_unlock_irq(&sc->sc_ringmtx);
+ err = ENOMEM;
+ goto errout2;
+ }
+
+ q = BSD_SIMPLEQ_FIRST(&sc->sc_freequeue);
+ BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("spin_unlock_irqrestore\n");
+#endif
+ spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+ //spin_unlock_irq(&sc->sc_ringmtx);
+
+ dmap = q->q_dma; /* Save dma pointer */
+ bzero(q, sizeof(struct ubsec_q));
+ bzero(&ctx, sizeof(ctx));
+
+ q->q_sesn = UBSEC_SESSION(crp->crp_sid);
+ q->q_dma = dmap;
+ ses = &sc->sc_sessions[q->q_sesn];
+
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ q->q_src_m = (struct sk_buff *)crp->crp_buf;
+ q->q_dst_m = (struct sk_buff *)crp->crp_buf;
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ q->q_src_io = (struct uio *)crp->crp_buf;
+ q->q_dst_io = (struct uio *)crp->crp_buf;
+ } else {
+ err = EINVAL;
+ goto errout; /* XXX we don't handle contiguous blocks! */
+ }
+
+ bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr));
+
+ dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
+ dmap->d_dma->d_mcr.mcr_flags = 0;
+ q->q_crp = crp;
+
+ crd1 = crp->crp_desc;
+ if (crd1 == NULL) {
+ err = EINVAL;
+ goto errout;
+ }
+ crd2 = crd1->crd_next;
+
+ if (crd2 == NULL) {
+ if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
+ crd1->crd_alg == CRYPTO_SHA1_HMAC) {
+ maccrd = crd1;
+ enccrd = NULL;
+ } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
+ crd1->crd_alg == CRYPTO_3DES_CBC ||
+ crd1->crd_alg == CRYPTO_AES_CBC) {
+ maccrd = NULL;
+ enccrd = crd1;
+ } else {
+ err = EINVAL;
+ goto errout;
+ }
+ } else {
+ if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
+ crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
+ (crd2->crd_alg == CRYPTO_DES_CBC ||
+ crd2->crd_alg == CRYPTO_3DES_CBC ||
+ crd2->crd_alg == CRYPTO_AES_CBC) &&
+ ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
+ maccrd = crd1;
+ enccrd = crd2;
+ } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
+ crd1->crd_alg == CRYPTO_3DES_CBC ||
+ crd1->crd_alg == CRYPTO_AES_CBC) &&
+ (crd2->crd_alg == CRYPTO_MD5_HMAC ||
+ crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
+ (crd1->crd_flags & CRD_F_ENCRYPT)) {
+ enccrd = crd1;
+ maccrd = crd2;
+ } else {
+ /*
+ * We cannot order the ubsec as requested
+ */
+ printk(KERN_ERR DRV_MODULE_NAME ": got wrong algorithm/signature order.\n");
+ err = EINVAL;
+ goto errout;
+ }
+ }
+
+ /* Encryption/Decryption requested */
+ if (enccrd) {
+ encoffset = enccrd->crd_skip;
+
+ if (enccrd->crd_alg == CRYPTO_DES_CBC ||
+ enccrd->crd_alg == CRYPTO_3DES_CBC)
+ {
+ ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES);
+ ctx.pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC_DES);
+ ivsize = 8; /* [3]DES uses 64bit IVs */
+ } else {
+ ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_AES);
+ ctx.pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC_AES);
+ ivsize = 16; /* AES uses 128bit IVs / [3]DES 64bit IVs */
+
+ switch(ses->ses_keysize)
+ {
+ case 128:
+ ctx.pc_flags |= htole16(UBS_PKTCTX_AES128);
+ break;
+ case 192:
+ ctx.pc_flags |= htole16(UBS_PKTCTX_AES192);
+ break;
+ case 256:
+ ctx.pc_flags |= htole16(UBS_PKTCTX_AES256);
+ break;
+ default:
+ DPRINTF("invalid AES key size: %d\n", ses->ses_keysize);
+ err = EINVAL;
+ goto errout;
+ }
+ }
+
+ if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+ /* Direction: Outbound */
+
+ q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;
+
+ if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
+ bcopy(enccrd->crd_iv, ctx.pc_iv, ivsize);
+ } else {
+ for(i=0; i < (ivsize / 4); i++)
+ ctx.pc_iv[i] = ses->ses_iv[i];
+ }
+
+ /* If there is no IV in the buffer -> copy it here */
+ if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+ if (crp->crp_flags & CRYPTO_F_SKBUF)
+ /*
+ m_copyback(q->q_src_m,
+ enccrd->crd_inject,
+ 8, ctx.pc_iv);
+ */
+ crypto_copyback(crp->crp_flags, (caddr_t)q->q_src_m,
+ enccrd->crd_inject, ivsize, (caddr_t)ctx.pc_iv);
+ else if (crp->crp_flags & CRYPTO_F_IOV)
+ /*
+ cuio_copyback(q->q_src_io,
+ enccrd->crd_inject,
+ 8, ctx.pc_iv);
+ */
+ crypto_copyback(crp->crp_flags, (caddr_t)q->q_src_io,
+ enccrd->crd_inject, ivsize, (caddr_t)ctx.pc_iv);
+ }
+ } else {
+ /* Direction: Inbound */
+
+ ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND);
+
+ if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+ bcopy(enccrd->crd_iv, ctx.pc_iv, ivsize);
+ else if (crp->crp_flags & CRYPTO_F_SKBUF)
+ /*
+ m_copydata(q->q_src_m, enccrd->crd_inject,
+ 8, (caddr_t)ctx.pc_iv);
+ */
+ crypto_copydata(crp->crp_flags, (caddr_t)q->q_src_m,
+ enccrd->crd_inject, ivsize,
+ (caddr_t)ctx.pc_iv);
+ else if (crp->crp_flags & CRYPTO_F_IOV)
+ /*
+ cuio_copydata(q->q_src_io,
+ enccrd->crd_inject, 8,
+ (caddr_t)ctx.pc_iv);
+ */
+ crypto_copydata(crp->crp_flags, (caddr_t)q->q_src_io,
+ enccrd->crd_inject, ivsize,
+ (caddr_t)ctx.pc_iv);
+
+ }
+
+ /* Key and IV sizes differ from cipher to cipher, but we copy / swap
+ * the full array lengths; these fixed-length loops are trivial for
+ * the compiler to unroll. */
+ for(i=0; i < 8; i++)
+ ctx.pc_key[i] = ses->ses_key[i];
+ for(i=0; i < 4; i++)
+ SWAP32(ctx.pc_iv[i]);
+ }
+
+ /* Authentication requested */
+ if (maccrd) {
+ macoffset = maccrd->crd_skip;
+
+ if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
+ ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5);
+ else
+ ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1);
+
+ for (i = 0; i < 5; i++) {
+ ctx.pc_hminner[i] = ses->ses_hminner[i];
+ ctx.pc_hmouter[i] = ses->ses_hmouter[i];
+
+ HTOLE32(ctx.pc_hminner[i]);
+ HTOLE32(ctx.pc_hmouter[i]);
+ }
+ }
+
+ if (enccrd && maccrd) {
+ /*
+ * ubsec cannot handle packets where the end of encryption
+ * and authentication are not the same, or where the
+ * encrypted part begins before the authenticated part.
+ */
+ if (((encoffset + enccrd->crd_len) !=
+ (macoffset + maccrd->crd_len)) ||
+ (enccrd->crd_skip < maccrd->crd_skip)) {
+ err = EINVAL;
+ goto errout;
+ }
+ sskip = maccrd->crd_skip;
+ cpskip = dskip = enccrd->crd_skip;
+ stheend = maccrd->crd_len;
+ dtheend = enccrd->crd_len;
+ coffset = enccrd->crd_skip - maccrd->crd_skip;
+ cpoffset = cpskip + dtheend;
+#ifdef UBSEC_DEBUG
+ DPRINTF("mac: skip %d, len %d, inject %d\n",
+ maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject);
+ DPRINTF("enc: skip %d, len %d, inject %d\n",
+ enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject);
+ DPRINTF("src: skip %d, len %d\n", sskip, stheend);
+ DPRINTF("dst: skip %d, len %d\n", dskip, dtheend);
+ DPRINTF("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
+ coffset, stheend, cpskip, cpoffset);
+#endif
+ } else {
+ cpskip = dskip = sskip = macoffset + encoffset;
+ dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len;
+ cpoffset = cpskip + dtheend;
+ coffset = 0;
+ }
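+ /* The chip takes the cipher/auth offset in 32-bit words, hence the >> 2. */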
+ ctx.pc_offset = htole16(coffset >> 2);
+
+#if 0
+ if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER,
+ 0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) {
+ err = ENOMEM;
+ goto errout;
+ }
+#endif
+
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+#if 0
+ if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
+ q->q_src_m, BUS_DMA_NOWAIT) != 0) {
+ bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
+ q->q_src_map = NULL;
+ err = ENOMEM;
+ goto errout;
+ }
+#endif
+ err = dma_map_skb(sc, q->q_src_map, q->q_src_m, &q->q_src_len);
+ if (unlikely(err != 0))
+ goto errout;
+
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+#if 0
+ if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
+ q->q_src_io, BUS_DMA_NOWAIT) != 0) {
+ bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
+ q->q_src_map = NULL;
+ err = ENOMEM;
+ goto errout;
+ }
+#endif
+ err = dma_map_uio(sc, q->q_src_map, q->q_src_io, &q->q_src_len);
+ if (unlikely(err != 0))
+ goto errout;
+ }
+
+ /*
+ * Check alignment
+ */
+ nicealign = ubsec_dmamap_aligned(sc, q->q_src_map, q->q_src_len);
+
+ dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("src skip: %d\n", sskip);
+#endif
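+ /*
+ * Build the input particle chain: skip the first 'sskip' bytes, clamp
+ * the chain to 'stheend' bytes and link the fragments through pb_next.
+ */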
+ for (i = j = 0; i < q->q_src_len; i++) {
+ struct ubsec_pktbuf *pb;
+ size_t packl = q->q_src_map[i].dma_size;
+ dma_addr_t packp = q->q_src_map[i].dma_paddr;
+
+ if (sskip >= packl) {
+ sskip -= packl;
+ continue;
+ }
+
+ packl -= sskip;
+ packp += sskip;
+ sskip = 0;
+
+ /* maximum fragment size is 0xfffc */
+ if (packl > 0xfffc) {
+ DPRINTF("Error: fragment size is bigger than 0xfffc.\n");
+ err = EIO;
+ goto errout;
+ }
+
+ if (j == 0)
+ pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
+ else
+ pb = &dmap->d_dma->d_sbuf[j - 1];
+
+ pb->pb_addr = htole32(packp);
+
+ if (stheend) {
+ if (packl > stheend) {
+ pb->pb_len = htole32(stheend);
+ stheend = 0;
+ } else {
+ pb->pb_len = htole32(packl);
+ stheend -= packl;
+ }
+ } else
+ pb->pb_len = htole32(packl);
+
+ if ((i + 1) == q->q_src_len)
+ pb->pb_next = 0;
+ else
+ pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
+ offsetof(struct ubsec_dmachunk, d_sbuf[j]));
+ j++;
+ }
+
+ if (enccrd == NULL && maccrd != NULL) {
+ /* Authentication only */
+ dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
+ dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
+ dmap->d_dma->d_mcr.mcr_opktbuf.pb_next =
+ htole32(dmap->d_alloc.dma_paddr +
+ offsetof(struct ubsec_dmachunk, d_macbuf[0]));
+#ifdef UBSEC_DEBUG
+ DPRINTF("opkt: %x %x %x\n",
+ dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
+ dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
+ dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
+#endif
+ } else {
+ if (crp->crp_flags & CRYPTO_F_IOV) {
+ if (!nicealign) {
+ err = EINVAL;
+ goto errout;
+ }
+#if 0
+ if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
+ UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
+ &q->q_dst_map) != 0) {
+ err = ENOMEM;
+ goto errout;
+ }
+ if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
+ q->q_dst_io, BUS_DMA_NOWAIT) != 0) {
+ bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
+ q->q_dst_map = NULL;
+ goto errout;
+ }
+#endif
+
+ /* The HW writes the result back into the source memory (in place) */
+ for(i = 0; i < q->q_src_len; i++)
+ q->q_dst_map[i] = q->q_src_map[i];
+
+ q->q_dst_len = q->q_src_len;
+ q->q_has_dst = 0;
+
+ } else if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ if (nicealign) {
+
+ /* The HW writes the result back into the source memory (in place) */
+ q->q_dst_m = q->q_src_m;
+ for(i = 0; i < q->q_src_len; i++)
+ q->q_dst_map[i] = q->q_src_map[i];
+
+ q->q_dst_len = q->q_src_len;
+ q->q_has_dst = 0;
+
+ } else {
+#ifdef NOTYET
+ int totlen, len;
+ struct sk_buff *m, *top, **mp;
+
+ totlen = q->q_src_map->dm_mapsize;
+ if (q->q_src_m->m_flags & M_PKTHDR) {
+ len = MHLEN;
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ } else {
+ len = MLEN;
+ MGET(m, M_DONTWAIT, MT_DATA);
+ }
+ if (m == NULL) {
+ err = ENOMEM;
+ goto errout;
+ }
+ if (len == MHLEN)
+ M_DUP_PKTHDR(m, q->q_src_m);
+ if (totlen >= MINCLSIZE) {
+ MCLGET(m, M_DONTWAIT);
+ if (m->m_flags & M_EXT)
+ len = MCLBYTES;
+ }
+ m->m_len = len;
+ top = NULL;
+ mp = &top;
+
+ while (totlen > 0) {
+ if (top) {
+ MGET(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ m_freem(top);
+ err = ENOMEM;
+ goto errout;
+ }
+ len = MLEN;
+ }
+ if (top && totlen >= MINCLSIZE) {
+ MCLGET(m, M_DONTWAIT);
+ if (m->m_flags & M_EXT)
+ len = MCLBYTES;
+ }
+ m->m_len = len = min(totlen, len);
+ totlen -= len;
+ *mp = m;
+ mp = &m->m_next;
+ }
+ q->q_dst_m = top;
+ ubsec_mcopy(q->q_src_m, q->q_dst_m,
+ cpskip, cpoffset);
+ if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
+ UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
+ &q->q_dst_map) != 0) {
+ err = ENOMEM;
+ goto errout;
+ }
+ if (bus_dmamap_load_mbuf(sc->sc_dmat,
+ q->q_dst_map, q->q_dst_m,
+ BUS_DMA_NOWAIT) != 0) {
+ bus_dmamap_destroy(sc->sc_dmat,
+ q->q_dst_map);
+ q->q_dst_map = NULL;
+ err = ENOMEM;
+ goto errout;
+ }
+#else
+ device_printf(sc->sc_dev,
+ "%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
+ __FILE__, __LINE__);
+ err = EINVAL;
+ goto errout;
+#endif
+ }
+ } else {
+ err = EINVAL;
+ goto errout;
+ }
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("dst skip: %d\n", dskip);
+#endif
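+ /*
+ * Build the output particle chain the same way, skipping 'dskip'
+ * bytes; when an HMAC is requested the final fragment chains to
+ * d_macbuf so the chip can deposit the digest there.
+ */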
+ for (i = j = 0; i < q->q_dst_len; i++) {
+ struct ubsec_pktbuf *pb;
+ size_t packl = q->q_dst_map[i].dma_size;
+ dma_addr_t packp = q->q_dst_map[i].dma_paddr;
+
+ if (dskip >= packl) {
+ dskip -= packl;
+ continue;
+ }
+
+ packl -= dskip;
+ packp += dskip;
+ dskip = 0;
+
+ if (packl > 0xfffc) {
+ DPRINTF("Error: fragment size is bigger than 0xfffc.\n");
+ err = EIO;
+ goto errout;
+ }
+
+ if (j == 0)
+ pb = &dmap->d_dma->d_mcr.mcr_opktbuf;
+ else
+ pb = &dmap->d_dma->d_dbuf[j - 1];
+
+ pb->pb_addr = htole32(packp);
+
+ if (dtheend) {
+ if (packl > dtheend) {
+ pb->pb_len = htole32(dtheend);
+ dtheend = 0;
+ } else {
+ pb->pb_len = htole32(packl);
+ dtheend -= packl;
+ }
+ } else
+ pb->pb_len = htole32(packl);
+
+ if ((i + 1) == q->q_dst_len) {
+ if (maccrd)
+ /* Authentication:
+ * The last fragment of the output buffer
+ * contains the HMAC. */
+ pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
+ offsetof(struct ubsec_dmachunk, d_macbuf[0]));
+ else
+ pb->pb_next = 0;
+ } else
+ pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
+ offsetof(struct ubsec_dmachunk, d_dbuf[j]));
+ j++;
+ }
+ }
+
+ dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr +
+ offsetof(struct ubsec_dmachunk, d_ctx));
+
+ if (sc->sc_flags & UBS_FLAGS_LONGCTX) {
+ /* new Broadcom cards with dynamic long command context structure */
+
+ if (enccrd != NULL &&
+ enccrd->crd_alg == CRYPTO_AES_CBC)
+ {
+ struct ubsec_pktctx_aes128 *ctxaes128;
+ struct ubsec_pktctx_aes192 *ctxaes192;
+ struct ubsec_pktctx_aes256 *ctxaes256;
+
+ switch(ses->ses_keysize)
+ {
+ /* AES 128bit */
+ case 128:
+ ctxaes128 = (struct ubsec_pktctx_aes128 *)
+ (dmap->d_alloc.dma_vaddr +
+ offsetof(struct ubsec_dmachunk, d_ctx));
+
+ ctxaes128->pc_len = htole16(sizeof(struct ubsec_pktctx_aes128));
+ ctxaes128->pc_type = ctx.pc_type;
+ ctxaes128->pc_flags = ctx.pc_flags;
+ ctxaes128->pc_offset = ctx.pc_offset;
+ for (i = 0; i < 4; i++)
+ ctxaes128->pc_aeskey[i] = ctx.pc_key[i];
+ for (i = 0; i < 5; i++)
+ ctxaes128->pc_hminner[i] = ctx.pc_hminner[i];
+ for (i = 0; i < 5; i++)
+ ctxaes128->pc_hmouter[i] = ctx.pc_hmouter[i];
+ for (i = 0; i < 4; i++)
+ ctxaes128->pc_iv[i] = ctx.pc_iv[i];
+ break;
+
+ /* AES 192bit */
+ case 192:
+ ctxaes192 = (struct ubsec_pktctx_aes192 *)
+ (dmap->d_alloc.dma_vaddr +
+ offsetof(struct ubsec_dmachunk, d_ctx));
+
+ ctxaes192->pc_len = htole16(sizeof(struct ubsec_pktctx_aes192));
+ ctxaes192->pc_type = ctx.pc_type;
+ ctxaes192->pc_flags = ctx.pc_flags;
+ ctxaes192->pc_offset = ctx.pc_offset;
+ for (i = 0; i < 6; i++)
+ ctxaes192->pc_aeskey[i] = ctx.pc_key[i];
+ for (i = 0; i < 5; i++)
+ ctxaes192->pc_hminner[i] = ctx.pc_hminner[i];
+ for (i = 0; i < 5; i++)
+ ctxaes192->pc_hmouter[i] = ctx.pc_hmouter[i];
+ for (i = 0; i < 4; i++)
+ ctxaes192->pc_iv[i] = ctx.pc_iv[i];
+ break;
+
+ /* AES 256bit */
+ case 256:
+ ctxaes256 = (struct ubsec_pktctx_aes256 *)
+ (dmap->d_alloc.dma_vaddr +
+ offsetof(struct ubsec_dmachunk, d_ctx));
+
+ ctxaes256->pc_len = htole16(sizeof(struct ubsec_pktctx_aes256));
+ ctxaes256->pc_type = ctx.pc_type;
+ ctxaes256->pc_flags = ctx.pc_flags;
+ ctxaes256->pc_offset = ctx.pc_offset;
+ for (i = 0; i < 8; i++)
+ ctxaes256->pc_aeskey[i] = ctx.pc_key[i];
+ for (i = 0; i < 5; i++)
+ ctxaes256->pc_hminner[i] = ctx.pc_hminner[i];
+ for (i = 0; i < 5; i++)
+ ctxaes256->pc_hmouter[i] = ctx.pc_hmouter[i];
+ for (i = 0; i < 4; i++)
+ ctxaes256->pc_iv[i] = ctx.pc_iv[i];
+ break;
+
+ }
+ } else {
+ /*
+ * [3]DES / MD5_HMAC / SHA1_HMAC
+ *
+ * MD5_HMAC / SHA1_HMAC can use the IPSEC 3DES operation without
+ * encryption.
+ */
+ struct ubsec_pktctx_des *ctxdes;
+
+ ctxdes = (struct ubsec_pktctx_des *)(dmap->d_alloc.dma_vaddr +
+ offsetof(struct ubsec_dmachunk, d_ctx));
+
+ ctxdes->pc_len = htole16(sizeof(struct ubsec_pktctx_des));
+ ctxdes->pc_type = ctx.pc_type;
+ ctxdes->pc_flags = ctx.pc_flags;
+ ctxdes->pc_offset = ctx.pc_offset;
+ for (i = 0; i < 6; i++)
+ ctxdes->pc_deskey[i] = ctx.pc_key[i];
+ for (i = 0; i < 5; i++)
+ ctxdes->pc_hminner[i] = ctx.pc_hminner[i];
+ for (i = 0; i < 5; i++)
+ ctxdes->pc_hmouter[i] = ctx.pc_hmouter[i];
+ ctxdes->pc_iv[0] = ctx.pc_iv[0];
+ ctxdes->pc_iv[1] = ctx.pc_iv[1];
+ }
+ } else
+ {
+ /* old Broadcom card with fixed small command context structure */
+
+ /*
+ * [3]DES / MD5_HMAC / SHA1_HMAC
+ */
+ struct ubsec_pktctx *ctxs;
+
+ ctxs = (struct ubsec_pktctx *)(dmap->d_alloc.dma_vaddr +
+ offsetof(struct ubsec_dmachunk, d_ctx));
+
+ /* transform generic context into small context */
+ for (i = 0; i < 6; i++)
+ ctxs->pc_deskey[i] = ctx.pc_key[i];
+ for (i = 0; i < 5; i++)
+ ctxs->pc_hminner[i] = ctx.pc_hminner[i];
+ for (i = 0; i < 5; i++)
+ ctxs->pc_hmouter[i] = ctx.pc_hmouter[i];
+ ctxs->pc_iv[0] = ctx.pc_iv[0];
+ ctxs->pc_iv[1] = ctx.pc_iv[1];
+ ctxs->pc_flags = ctx.pc_flags;
+ ctxs->pc_offset = ctx.pc_offset;
+ }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("spin_lock_irqsave\n");
+#endif
+ spin_lock_irqsave(&sc->sc_ringmtx, flags);
+ //spin_lock_irq(&sc->sc_ringmtx);
+
+ BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
+ sc->sc_nqueue++;
+ ubsecstats.hst_ipackets++;
+ ubsecstats.hst_ibytes += stheend;
+ ubsec_feed(sc);
+
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("spin_unlock_irqrestore\n");
+#endif
+ spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+ //spin_unlock_irq(&sc->sc_ringmtx);
+
+ return (0);
+
+errout:
+ if (q != NULL) {
+#ifdef NOTYET
+ if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
+ m_freem(q->q_dst_m);
+#endif
+
+ if ((q->q_has_dst == 1) && q->q_dst_len > 0) {
+#if 0
+ bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
+ bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
+#endif
+ dma_unmap(sc, q->q_dst_map, q->q_dst_len);
+ }
+ if (q->q_src_len > 0) {
+#if 0
+ bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
+ bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
+#endif
+ dma_unmap(sc, q->q_src_map, q->q_src_len);
+ }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("spin_lock_irqsave\n");
+#endif
+ spin_lock_irqsave(&sc->sc_ringmtx, flags);
+ //spin_lock_irq(&sc->sc_ringmtx);
+
+ BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
+
+#ifdef UBSEC_VERBOSE_DEBUG
+ DPRINTF("spin_unlock_irqrestore\n");
+#endif
+ spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+ //spin_unlock_irq(&sc->sc_ringmtx);
+
+ }
+ if (err == EINVAL)
+ ubsecstats.hst_invalid++;
+ else
+ ubsecstats.hst_nomem++;
+errout2:
+ crp->crp_etype = err;
+ crypto_done(crp);
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s() err = %x\n", __FUNCTION__, err);
+#endif
+
+ return (0);
+}
+
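+/*
+ * Per-request completion: unmap the DMA buffers, save the last cipher
+ * block as the next session IV, copy the HMAC into the request buffer
+ * and hand the request back to OCF via crypto_done().
+ */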
+void
+ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q)
+{
+ struct cryptop *crp = (struct cryptop *)q->q_crp;
+ struct cryptodesc *crd;
+ struct ubsec_dma *dmap = q->q_dma;
+ int ivsize = 8;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+ ubsecstats.hst_opackets++;
+ ubsecstats.hst_obytes += dmap->d_alloc.dma_size;
+
+#if 0
+ bus_dmamap_sync(sc->sc_dmat, dmap->d_alloc.dma_map, 0,
+ dmap->d_alloc.dma_map->dm_mapsize,
+ BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
+ bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
+ 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
+ bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
+ }
+ bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
+ 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
+ bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
+#endif
+
+ if ((q->q_has_dst == 1) && q->q_dst_len > 0)
+ dma_unmap(sc, q->q_dst_map, q->q_dst_len);
+
+ dma_unmap(sc, q->q_src_map, q->q_src_len);
+
+#ifdef NOTYET
+ if ((crp->crp_flags & CRYPTO_F_SKBUF) && (q->q_src_m != q->q_dst_m)) {
+ m_freem(q->q_src_m);
+ crp->crp_buf = (caddr_t)q->q_dst_m;
+ }
+#endif
+
+ /* copy out IV for future use */
+ if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) {
+ for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+ if (crd->crd_alg != CRYPTO_DES_CBC &&
+ crd->crd_alg != CRYPTO_3DES_CBC &&
+ crd->crd_alg != CRYPTO_AES_CBC)
+ continue;
+
+ if (crd->crd_alg == CRYPTO_AES_CBC)
+ ivsize = 16;
+ else
+ ivsize = 8;
+
+ if (crp->crp_flags & CRYPTO_F_SKBUF)
+#if 0
+ m_copydata((struct sk_buff *)crp->crp_buf,
+ crd->crd_skip + crd->crd_len - 8, 8,
+ (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
+#endif
+ crypto_copydata(crp->crp_flags, (caddr_t)crp->crp_buf,
+ crd->crd_skip + crd->crd_len - ivsize, ivsize,
+ (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
+
+ else if (crp->crp_flags & CRYPTO_F_IOV) {
+#if 0
+ cuio_copydata((struct uio *)crp->crp_buf,
+ crd->crd_skip + crd->crd_len - 8, 8,
+ (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
+#endif
+ crypto_copydata(crp->crp_flags, (caddr_t)crp->crp_buf,
+ crd->crd_skip + crd->crd_len - ivsize, ivsize,
+ (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
+
+ }
+ break;
+ }
+ }
+
+ for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+ if (crd->crd_alg != CRYPTO_MD5_HMAC &&
+ crd->crd_alg != CRYPTO_SHA1_HMAC)
+ continue;
+#if 0
+ if (crp->crp_flags & CRYPTO_F_SKBUF)
+ m_copyback((struct sk_buff *)crp->crp_buf,
+ crd->crd_inject, 12,
+ dmap->d_dma->d_macbuf);
+#endif
+#if 0
+ /* BUG? it does not honor the mac len.. */
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject, 12,
+ (caddr_t)dmap->d_dma->d_macbuf);
+#endif
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject,
+ sc->sc_sessions[q->q_sesn].ses_mlen,
+ (caddr_t)dmap->d_dma->d_macbuf);
+#if 0
+ else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac)
+ bcopy((caddr_t)dmap->d_dma->d_macbuf,
+ crp->crp_mac, 12);
+#endif
+ break;
+ }
+ BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
+ crypto_done(crp);
+}
+
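+/*
+ * Copy the bytes outside the [hoffset, toffset) window from the source
+ * skb chain into the destination chain (the header and trailer the chip
+ * does not produce itself).
+ */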
+void
+ubsec_mcopy(struct sk_buff *srcm, struct sk_buff *dstm, int hoffset, int toffset)
+{
+ int i, j, dlen, slen;
+ caddr_t dptr, sptr;
+
+ j = 0;
+ sptr = srcm->data;
+ slen = srcm->len;
+ dptr = dstm->data;
+ dlen = dstm->len;
+
+ while (1) {
+ for (i = 0; i < min(slen, dlen); i++) {
+ if (j < hoffset || j >= toffset)
+ *dptr++ = *sptr++;
+ slen--;
+ dlen--;
+ j++;
+ }
+ if (slen == 0) {
+ srcm = srcm->next;
+ if (srcm == NULL)
+ return;
+ sptr = srcm->data;
+ slen = srcm->len;
+ }
+ if (dlen == 0) {
+ dstm = dstm->next;
+ if (dstm == NULL)
+ return;
+ dptr = dstm->data;
+ dlen = dstm->len;
+ }
+ }
+}
+
+int
+ubsec_dma_malloc(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma,
+ size_t size, int mapflags)
+{
+ dma->dma_vaddr = dma_alloc_coherent(sc->sc_dv,
+ size, &dma->dma_paddr, GFP_KERNEL);
+
+ if (likely(dma->dma_vaddr))
+ {
+ dma->dma_size = size;
+ return (0);
+ }
+
+ DPRINTF("could not allocate %d bytes of coherent memory.\n", size);
+
+ return (1);
+}
+
+void
+ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma)
+{
+ dma_free_coherent(sc->sc_dv, dma->dma_size, dma->dma_vaddr,
+ dma->dma_paddr);
+}
+
+/*
+ * Resets the board. Register values are left as they are
+ * after the reset (i.e. initial values are assigned elsewhere).
+ */
+void
+ubsec_reset_board(struct ubsec_softc *sc)
+{
+ volatile u_int32_t ctrl;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+ DPRINTF("Send reset signal to chip.\n");
+
+ ctrl = READ_REG(sc, BS_CTRL);
+ ctrl |= BS_CTRL_RESET;
+ WRITE_REG(sc, BS_CTRL, ctrl);
+
+ /*
+ * Wait approx. 30 PCI clocks = 900 ns = 0.9 us
+ */
+ DELAY(10);
+}
+
+/*
+ * Init Broadcom registers
+ */
+void
+ubsec_init_board(struct ubsec_softc *sc)
+{
+ u_int32_t ctrl;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+ DPRINTF("Initialize chip.\n");
+
+ ctrl = READ_REG(sc, BS_CTRL);
+ ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
+ ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT | BS_CTRL_DMAERR;
+
+ WRITE_REG(sc, BS_CTRL, ctrl);
+
+ /* Set chip capabilities (BCM5365P) */
+ sc->sc_flags |= UBS_FLAGS_LONGCTX | UBS_FLAGS_AES;
+}
+
+/*
+ * Clean up after a chip crash.
+ * It is assumed that the caller has spin_lock_irq(sc_ringmtx).
+ */
+void
+ubsec_cleanchip(struct ubsec_softc *sc)
+{
+ struct ubsec_q *q;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+ DPRINTF("Clean up queues after chip crash.\n");
+
+ while (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
+ q = BSD_SIMPLEQ_FIRST(&sc->sc_qchip);
+ BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
+ ubsec_free_q(sc, q);
+ }
+}
+
+/*
+ * free a ubsec_q
+ * It is assumed that the caller has spin_lock_irq(sc_ringmtx).
+ */
+int
+ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
+{
+ struct ubsec_q *q2;
+ struct cryptop *crp;
+ int npkts;
+ int i;
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+ npkts = q->q_nstacked_mcrs;
+
+ for (i = 0; i < npkts; i++) {
+ if(q->q_stacked_mcr[i]) {
+ q2 = q->q_stacked_mcr[i];
+
+ if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
+#ifdef NOTYET
+ m_freem(q2->q_dst_m);
+#else
+ printk(KERN_ERR "%s,%d: SKB not supported\n", __FILE__, __LINE__);
+#endif
+
+ crp = (struct cryptop *)q2->q_crp;
+
+ BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);
+
+ crp->crp_etype = EFAULT;
+ crypto_done(crp);
+ } else {
+ break;
+ }
+ }
+
+ /*
+ * Free header MCR
+ */
+ if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
+#ifdef NOTYET
+ m_freem(q->q_dst_m);
+#else
+ printk(KERN_ERR "%s,%d: SKB not supported\n", __FILE__, __LINE__);
+#endif
+
+ crp = (struct cryptop *)q->q_crp;
+
+ BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
+
+ crp->crp_etype = EFAULT;
+ crypto_done(crp);
+ return(0);
+}
+
+/*
+ * Routine to reset the chip and clean up.
+ * It is assumed that the caller has spin_lock_irq(sc_ringmtx).
+ */
+void
+ubsec_totalreset(struct ubsec_softc *sc)
+{
+
+#ifdef UBSEC_DEBUG
+ DPRINTF("%s()\n", __FUNCTION__);
+#endif
+	DPRINTF("Initiate total chip reset.\n");
+ ubsec_reset_board(sc);
+ ubsec_init_board(sc);
+ ubsec_cleanchip(sc);
+}
+
+void
+ubsec_dump_pb(struct ubsec_pktbuf *pb)
+{
+ printf("addr 0x%x (0x%x) next 0x%x\n",
+ pb->pb_addr, pb->pb_len, pb->pb_next);
+}
+
+void
+ubsec_dump_mcr(struct ubsec_mcr *mcr)
+{
+ struct ubsec_mcr_add *ma;
+ int i;
+
+ printf("MCR:\n");
+ printf(" pkts: %u, flags 0x%x\n",
+ letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags));
+ ma = (struct ubsec_mcr_add *)&mcr->mcr_cmdctxp;
+ for (i = 0; i < letoh16(mcr->mcr_pkts); i++) {
+ printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i,
+ letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen),
+ letoh16(ma->mcr_reserved));
+ printf(" %d: ipkt ", i);
+ ubsec_dump_pb(&ma->mcr_ipktbuf);
+ printf(" %d: opkt ", i);
+ ubsec_dump_pb(&ma->mcr_opktbuf);
+ ma++;
+ }
+ printf("END MCR\n");
+}
+
+static int __init mod_init(void) {
+ return ssb_driver_register(&ubsec_ssb_driver);
+}
+
+static void __exit mod_exit(void) {
+ ssb_driver_unregister(&ubsec_ssb_driver);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+// Meta information
+MODULE_AUTHOR("Daniel Mueller <daniel@danm.de>");
+MODULE_LICENSE("BSD");
+MODULE_DESCRIPTION("OCF driver for BCM5365P IPSec Core");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
diff --git a/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecreg.h b/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecreg.h
new file mode 100644
index 000000000..dafac5b41
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecreg.h
@@ -0,0 +1,233 @@
+
+/*
+ * Copyright (c) 2008 Daniel Mueller (daniel@danm.de)
+ * Copyright (c) 2000 Theo de Raadt
+ * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+/*
+ * Register definitions for 5601 BlueSteel Networks Ubiquitous Broadband
+ * Security "uBSec" chip. Definitions from revision 2.8 of the product
+ * datasheet.
+ */
+
+#define BS_BAR 0x10 /* DMA base address register */
+#define BS_TRDY_TIMEOUT 0x40 /* TRDY timeout */
+#define BS_RETRY_TIMEOUT 0x41 /* DMA retry timeout */
+
+#define UBS_PCI_RTY_SHIFT 8
+#define UBS_PCI_RTY_MASK 0xff
+#define UBS_PCI_RTY(misc) \
+ (((misc) >> UBS_PCI_RTY_SHIFT) & UBS_PCI_RTY_MASK)
+
+#define UBS_PCI_TOUT_SHIFT 0
+#define UBS_PCI_TOUT_MASK 0xff
+#define UBS_PCI_TOUT(misc) \
+    (((misc) >> UBS_PCI_TOUT_SHIFT) & UBS_PCI_TOUT_MASK)
+
+/*
+ * DMA Control & Status Registers (offset from BS_BAR)
+ */
+#define BS_MCR1 0x20 /* DMA Master Command Record 1 */
+#define BS_CTRL 0x24 /* DMA Control */
+#define BS_STAT 0x28 /* DMA Status */
+#define BS_ERR 0x2c /* DMA Error Address */
+#define BS_DEV_ID 0x34 /* IPSec Device ID */
+
+/* BS_CTRL - DMA Control */
+#define BS_CTRL_RESET 0x80000000 /* hardware reset, 5805/5820 */
+#define BS_CTRL_MCR2INT 0x40000000 /* enable intr MCR for MCR2 */
+#define BS_CTRL_MCR1INT 0x20000000 /* enable intr MCR for MCR1 */
+#define BS_CTRL_OFM 0x10000000 /* Output fragment mode */
+#define BS_CTRL_BE32 0x08000000 /* big-endian, 32bit bytes */
+#define BS_CTRL_BE64 0x04000000 /* big-endian, 64bit bytes */
+#define BS_CTRL_DMAERR 0x02000000 /* enable intr DMA error */
+#define BS_CTRL_RNG_M 0x01800000 /* RNG mode */
+#define BS_CTRL_RNG_1 0x00000000 /* 1bit rn/one slow clock */
+#define BS_CTRL_RNG_4 0x00800000 /* 1bit rn/four slow clocks */
+#define BS_CTRL_RNG_8 0x01000000 /* 1bit rn/eight slow clocks */
+#define BS_CTRL_RNG_16 0x01800000 /* 1bit rn/16 slow clocks */
+#define BS_CTRL_SWNORM 0x00400000 /* 582[01], sw normalization */
+#define BS_CTRL_FRAG_M 0x0000ffff /* output fragment size mask */
+#define BS_CTRL_LITTLE_ENDIAN (BS_CTRL_BE32 | BS_CTRL_BE64)
+
+/* BS_STAT - DMA Status */
+#define BS_STAT_MCR1_BUSY 0x80000000 /* MCR1 is busy */
+#define BS_STAT_MCR1_FULL 0x40000000 /* MCR1 is full */
+#define BS_STAT_MCR1_DONE 0x20000000 /* MCR1 is done */
+#define BS_STAT_DMAERR 0x10000000 /* DMA error */
+#define BS_STAT_MCR2_FULL 0x08000000 /* MCR2 is full */
+#define BS_STAT_MCR2_DONE 0x04000000 /* MCR2 is done */
+#define BS_STAT_MCR1_ALLEMPTY 0x02000000 /* 5821, MCR1 is empty */
+#define BS_STAT_MCR2_ALLEMPTY 0x01000000 /* 5821, MCR2 is empty */
+
+/* BS_ERR - DMA Error Address */
+#define BS_ERR_ADDR 0xfffffffc /* error address mask */
+#define BS_ERR_READ 0x00000002 /* fault was on read */
+
+struct ubsec_pktctx {
+ u_int32_t pc_deskey[6]; /* 3DES key */
+ u_int32_t pc_hminner[5]; /* hmac inner state */
+ u_int32_t pc_hmouter[5]; /* hmac outer state */
+ u_int32_t pc_iv[2]; /* [3]DES iv */
+ u_int16_t pc_flags; /* flags, below */
+ u_int16_t pc_offset; /* crypto offset */
+} __attribute__ ((packed));
+
+#define UBS_PKTCTX_ENC_3DES 0x8000 /* use 3des */
+#define UBS_PKTCTX_ENC_AES 0x8000 /* use aes */
+#define UBS_PKTCTX_ENC_NONE 0x0000 /* no encryption */
+#define UBS_PKTCTX_INBOUND 0x4000 /* inbound packet */
+#define UBS_PKTCTX_AUTH 0x3000 /* authentication mask */
+#define UBS_PKTCTX_AUTH_NONE 0x0000 /* no authentication */
+#define UBS_PKTCTX_AUTH_MD5 0x1000 /* use hmac-md5 */
+#define UBS_PKTCTX_AUTH_SHA1 0x2000 /* use hmac-sha1 */
+#define UBS_PKTCTX_AES128 0x0 /* AES 128bit keys */
+#define UBS_PKTCTX_AES192 0x100 /* AES 192bit keys */
+#define UBS_PKTCTX_AES256 0x200 /* AES 256bit keys */
+
+struct ubsec_pktctx_des {
+ volatile u_int16_t pc_len; /* length of ctx struct */
+ volatile u_int16_t pc_type; /* context type */
+ volatile u_int16_t pc_flags; /* flags, same as above */
+ volatile u_int16_t pc_offset; /* crypto/auth offset */
+ volatile u_int32_t pc_deskey[6]; /* 3DES key */
+ volatile u_int32_t pc_iv[2]; /* [3]DES iv */
+ volatile u_int32_t pc_hminner[5]; /* hmac inner state */
+ volatile u_int32_t pc_hmouter[5]; /* hmac outer state */
+} __attribute__ ((packed));
+
+struct ubsec_pktctx_aes128 {
+ volatile u_int16_t pc_len; /* length of ctx struct */
+ volatile u_int16_t pc_type; /* context type */
+ volatile u_int16_t pc_flags; /* flags, same as above */
+ volatile u_int16_t pc_offset; /* crypto/auth offset */
+ volatile u_int32_t pc_aeskey[4]; /* AES 128bit key */
+ volatile u_int32_t pc_iv[4]; /* AES iv */
+ volatile u_int32_t pc_hminner[5]; /* hmac inner state */
+ volatile u_int32_t pc_hmouter[5]; /* hmac outer state */
+} __attribute__ ((packed));
+
+struct ubsec_pktctx_aes192 {
+ volatile u_int16_t pc_len; /* length of ctx struct */
+ volatile u_int16_t pc_type; /* context type */
+ volatile u_int16_t pc_flags; /* flags, same as above */
+ volatile u_int16_t pc_offset; /* crypto/auth offset */
+ volatile u_int32_t pc_aeskey[6]; /* AES 192bit key */
+ volatile u_int32_t pc_iv[4]; /* AES iv */
+ volatile u_int32_t pc_hminner[5]; /* hmac inner state */
+ volatile u_int32_t pc_hmouter[5]; /* hmac outer state */
+} __attribute__ ((packed));
+
+struct ubsec_pktctx_aes256 {
+ volatile u_int16_t pc_len; /* length of ctx struct */
+ volatile u_int16_t pc_type; /* context type */
+ volatile u_int16_t pc_flags; /* flags, same as above */
+ volatile u_int16_t pc_offset; /* crypto/auth offset */
+ volatile u_int32_t pc_aeskey[8]; /* AES 256bit key */
+ volatile u_int32_t pc_iv[4]; /* AES iv */
+ volatile u_int32_t pc_hminner[5]; /* hmac inner state */
+ volatile u_int32_t pc_hmouter[5]; /* hmac outer state */
+} __attribute__ ((packed));
+
+#define UBS_PKTCTX_TYPE_IPSEC_DES 0x0000
+#define UBS_PKTCTX_TYPE_IPSEC_AES 0x0040
+
+struct ubsec_pktbuf {
+ volatile u_int32_t pb_addr; /* address of buffer start */
+ volatile u_int32_t pb_next; /* pointer to next pktbuf */
+ volatile u_int32_t pb_len; /* packet length */
+} __attribute__ ((packed));
+#define UBS_PKTBUF_LEN 0x0000ffff /* length mask */
+
+struct ubsec_mcr {
+ volatile u_int16_t mcr_pkts; /* #pkts in this mcr */
+ volatile u_int16_t mcr_flags; /* mcr flags (below) */
+ volatile u_int32_t mcr_cmdctxp; /* command ctx pointer */
+ struct ubsec_pktbuf mcr_ipktbuf; /* input chain header */
+ volatile u_int16_t mcr_reserved;
+ volatile u_int16_t mcr_pktlen;
+ struct ubsec_pktbuf mcr_opktbuf; /* output chain header */
+} __attribute__ ((packed));
+
+struct ubsec_mcr_add {
+ volatile u_int32_t mcr_cmdctxp; /* command ctx pointer */
+ struct ubsec_pktbuf mcr_ipktbuf; /* input chain header */
+ volatile u_int16_t mcr_reserved;
+ volatile u_int16_t mcr_pktlen;
+ struct ubsec_pktbuf mcr_opktbuf; /* output chain header */
+} __attribute__ ((packed));
+
+#define UBS_MCR_DONE 0x0001 /* mcr has been processed */
+#define UBS_MCR_ERROR 0x0002 /* error in processing */
+#define UBS_MCR_ERRORCODE 0xff00 /* error type */
+
+struct ubsec_ctx_keyop {
+ volatile u_int16_t ctx_len; /* command length */
+ volatile u_int16_t ctx_op; /* operation code */
+ volatile u_int8_t ctx_pad[60]; /* padding */
+} __attribute__ ((packed));
+#define UBS_CTXOP_DHPKGEN 0x01 /* dh public key generation */
+#define UBS_CTXOP_DHSSGEN 0x02 /* dh shared secret gen. */
+#define UBS_CTXOP_RSAPUB 0x03 /* rsa public key op */
+#define UBS_CTXOP_RSAPRIV 0x04 /* rsa private key op */
+#define UBS_CTXOP_DSASIGN 0x05 /* dsa signing op */
+#define UBS_CTXOP_DSAVRFY 0x06 /* dsa verification */
+#define UBS_CTXOP_RNGBYPASS 0x41 /* rng direct test mode */
+#define UBS_CTXOP_RNGSHA1 0x42 /* rng sha1 test mode */
+#define UBS_CTXOP_MODADD 0x43 /* modular addition */
+#define UBS_CTXOP_MODSUB 0x44 /* modular subtraction */
+#define UBS_CTXOP_MODMUL 0x45 /* modular multiplication */
+#define UBS_CTXOP_MODRED 0x46 /* modular reduction */
+#define UBS_CTXOP_MODEXP 0x47 /* modular exponentiation */
+#define UBS_CTXOP_MODINV 0x48 /* modular inverse */
+
+struct ubsec_ctx_rngbypass {
+ volatile u_int16_t rbp_len; /* command length, 64 */
+ volatile u_int16_t rbp_op; /* rng bypass, 0x41 */
+ volatile u_int8_t rbp_pad[60]; /* padding */
+} __attribute__ ((packed));
+
+/* modexp: C = (M ^ E) mod N */
+struct ubsec_ctx_modexp {
+ volatile u_int16_t me_len; /* command length */
+ volatile u_int16_t me_op; /* modexp, 0x47 */
+ volatile u_int16_t me_E_len; /* E (bits) */
+ volatile u_int16_t me_N_len; /* N (bits) */
+ u_int8_t me_N[2048/8]; /* N */
+} __attribute__ ((packed));
+
+struct ubsec_ctx_rsapriv {
+ volatile u_int16_t rpr_len; /* command length */
+ volatile u_int16_t rpr_op; /* rsaprivate, 0x04 */
+ volatile u_int16_t rpr_q_len; /* q (bits) */
+ volatile u_int16_t rpr_p_len; /* p (bits) */
+ u_int8_t rpr_buf[5 * 1024 / 8]; /* parameters: */
+ /* p, q, dp, dq, pinv */
+} __attribute__ ((packed));
diff --git a/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecvar.h b/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecvar.h
new file mode 100644
index 000000000..c808f955b
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/ubsec_ssb/ubsecvar.h
@@ -0,0 +1,228 @@
+
+/*
+ * Copyright (c) 2008 Daniel Mueller (daniel@danm.de)
+ * Copyright (c) 2000 Theo de Raadt
+ * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+/* Maximum queue length */
+#ifndef UBS_MAX_NQUEUE
+#define UBS_MAX_NQUEUE 60
+#endif
+
+#define UBS_MAX_SCATTER 64 /* Maximum scatter/gather depth */
+
+#ifndef UBS_MAX_AGGR
+#define UBS_MAX_AGGR 5 /* Maximum aggregation count */
+#endif
+
+#define UBSEC_CARD(sid) (((sid) & 0xf0000000) >> 28)
+#define UBSEC_SESSION(sid) ( (sid) & 0x0fffffff)
+#define UBSEC_SID(crd, sesn) (((crd) << 28) | ((sesn) & 0x0fffffff))
+
+#define UBS_DEF_RTY 0xff /* PCI Retry Timeout */
+#define UBS_DEF_TOUT 0xff /* PCI TRDY Timeout */
+#define UBS_DEF_CACHELINE 0x01 /* Cache Line setting */
+
+#define DEFAULT_HMAC_LEN 12
+
+struct ubsec_dma_alloc {
+ dma_addr_t dma_paddr;
+ void *dma_vaddr;
+ /*
+ bus_dmamap_t dma_map;
+ bus_dma_segment_t dma_seg;
+ */
+ size_t dma_size;
+ /*
+ int dma_nseg;
+ */
+};
+
+struct ubsec_q2 {
+ BSD_SIMPLEQ_ENTRY(ubsec_q2) q_next;
+ struct ubsec_dma_alloc q_mcr;
+ struct ubsec_dma_alloc q_ctx;
+ u_int q_type;
+};
+
+struct ubsec_q2_rng {
+ struct ubsec_q2 rng_q;
+ struct ubsec_dma_alloc rng_buf;
+ int rng_used;
+};
+
+/* C = (M ^ E) mod N */
+#define UBS_MODEXP_PAR_M 0
+#define UBS_MODEXP_PAR_E 1
+#define UBS_MODEXP_PAR_N 2
+struct ubsec_q2_modexp {
+ struct ubsec_q2 me_q;
+ struct cryptkop * me_krp;
+ struct ubsec_dma_alloc me_M;
+ struct ubsec_dma_alloc me_E;
+ struct ubsec_dma_alloc me_C;
+ struct ubsec_dma_alloc me_epb;
+ int me_modbits;
+ int me_shiftbits;
+ int me_normbits;
+};
+
+#define UBS_RSAPRIV_PAR_P 0
+#define UBS_RSAPRIV_PAR_Q 1
+#define UBS_RSAPRIV_PAR_DP 2
+#define UBS_RSAPRIV_PAR_DQ 3
+#define UBS_RSAPRIV_PAR_PINV 4
+#define UBS_RSAPRIV_PAR_MSGIN 5
+#define UBS_RSAPRIV_PAR_MSGOUT 6
+struct ubsec_q2_rsapriv {
+ struct ubsec_q2 rpr_q;
+ struct cryptkop * rpr_krp;
+ struct ubsec_dma_alloc rpr_msgin;
+ struct ubsec_dma_alloc rpr_msgout;
+};
+
+#define UBSEC_RNG_BUFSIZ 16 /* measured in 32bit words */
+
+struct ubsec_dmachunk {
+ struct ubsec_mcr d_mcr;
+ struct ubsec_mcr_add d_mcradd[UBS_MAX_AGGR-1];
+ struct ubsec_pktbuf d_sbuf[UBS_MAX_SCATTER-1];
+ struct ubsec_pktbuf d_dbuf[UBS_MAX_SCATTER-1];
+ u_int32_t d_macbuf[5];
+ union {
+ struct ubsec_pktctx_aes256 ctxaes256;
+ struct ubsec_pktctx_aes192 ctxaes192;
+ struct ubsec_pktctx_des ctxdes;
+ struct ubsec_pktctx_aes128 ctxaes128;
+ struct ubsec_pktctx ctx;
+ } d_ctx;
+};
+
+struct ubsec_dma {
+ BSD_SIMPLEQ_ENTRY(ubsec_dma) d_next;
+ struct ubsec_dmachunk *d_dma;
+ struct ubsec_dma_alloc d_alloc;
+};
+
+#define UBS_FLAGS_KEY 0x01 /* has key accelerator */
+#define UBS_FLAGS_LONGCTX 0x02 /* uses long ipsec ctx */
+#define UBS_FLAGS_BIGKEY 0x04 /* 2048bit keys */
+#define UBS_FLAGS_HWNORM 0x08 /* hardware normalization */
+#define UBS_FLAGS_RNG 0x10 /* hardware rng */
+#define UBS_FLAGS_AES 0x20 /* hardware AES support */
+
+struct ubsec_q {
+ BSD_SIMPLEQ_ENTRY(ubsec_q) q_next;
+ int q_nstacked_mcrs;
+ struct ubsec_q *q_stacked_mcr[UBS_MAX_AGGR-1];
+ struct cryptop *q_crp;
+ struct ubsec_dma *q_dma;
+
+ //struct mbuf *q_src_m, *q_dst_m;
+ struct sk_buff *q_src_m, *q_dst_m;
+ struct uio *q_src_io, *q_dst_io;
+
+ /*
+ bus_dmamap_t q_src_map;
+ bus_dmamap_t q_dst_map;
+ */
+
+ /* DMA addresses for In-/Out packages */
+ int q_src_len;
+ int q_dst_len;
+ struct ubsec_dma_alloc q_src_map[UBS_MAX_SCATTER];
+ struct ubsec_dma_alloc q_dst_map[UBS_MAX_SCATTER];
+ int q_has_dst;
+
+ int q_sesn;
+ int q_flags;
+};
+
+struct ubsec_softc {
+ softc_device_decl sc_dev;
+ struct ssb_device *sdev; /* device backpointer */
+
+ struct device *sc_dv; /* generic device */
+ void *sc_ih; /* interrupt handler cookie */
+ int sc_flags; /* device specific flags */
+ u_int32_t sc_statmask; /* interrupt status mask */
+ int32_t sc_cid; /* crypto tag */
+ BSD_SIMPLEQ_HEAD(,ubsec_q) sc_queue; /* packet queue, mcr1 */
+ int sc_nqueue; /* count enqueued, mcr1 */
+ BSD_SIMPLEQ_HEAD(,ubsec_q) sc_qchip; /* on chip, mcr1 */
+ BSD_SIMPLEQ_HEAD(,ubsec_q) sc_freequeue; /* list of free queue elements */
+ BSD_SIMPLEQ_HEAD(,ubsec_q2) sc_queue2; /* packet queue, mcr2 */
+ int sc_nqueue2; /* count enqueued, mcr2 */
+ BSD_SIMPLEQ_HEAD(,ubsec_q2) sc_qchip2; /* on chip, mcr2 */
+ int sc_nsessions; /* # of sessions */
+ struct ubsec_session *sc_sessions; /* sessions */
+ int sc_rnghz; /* rng poll time */
+ struct ubsec_q2_rng sc_rng;
+ struct ubsec_dma sc_dmaa[UBS_MAX_NQUEUE];
+ struct ubsec_q *sc_queuea[UBS_MAX_NQUEUE];
+ BSD_SIMPLEQ_HEAD(,ubsec_q2) sc_q2free; /* free list */
+ spinlock_t sc_ringmtx; /* PE ring lock */
+};
+
+#define UBSEC_QFLAGS_COPYOUTIV 0x1
+
+struct ubsec_session {
+ u_int32_t ses_used;
+ u_int32_t ses_key[8]; /* 3DES/AES key */
+ u_int32_t ses_hminner[5]; /* hmac inner state */
+ u_int32_t ses_hmouter[5]; /* hmac outer state */
+ u_int32_t ses_iv[4]; /* [3]DES/AES iv */
+ u_int32_t ses_keysize; /* AES key size */
+ u_int32_t ses_mlen; /* hmac/hash length */
+};
+
+struct ubsec_stats {
+ u_int64_t hst_ibytes;
+ u_int64_t hst_obytes;
+ u_int32_t hst_ipackets;
+ u_int32_t hst_opackets;
+ u_int32_t hst_invalid;
+ u_int32_t hst_nomem;
+ u_int32_t hst_queuefull;
+ u_int32_t hst_dmaerr;
+ u_int32_t hst_mcrerr;
+ u_int32_t hst_nodmafree;
+};
+
+struct ubsec_generic_ctx {
+ u_int32_t pc_key[8]; /* [3]DES/AES key */
+ u_int32_t pc_hminner[5]; /* hmac inner state */
+ u_int32_t pc_hmouter[5]; /* hmac outer state */
+ u_int32_t pc_iv[4]; /* [3]DES/AES iv */
+ u_int16_t pc_flags; /* flags, below */
+ u_int16_t pc_offset; /* crypto offset */
+ u_int16_t pc_type; /* Cryptographic operation */
+};
+
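The UBSEC_SID/UBSEC_CARD/UBSEC_SESSION macros defined in this header pack a card index and a session number into the single 32-bit session id that the OCF framework passes around. The following standalone illustration is not part of the driver; the macro bodies are copied verbatim from ubsecvar.h above so the snippet compiles and runs on its own.

/*
 * Standalone illustration of the session-id packing; macro bodies copied
 * from ubsecvar.h above.
 */
#include <assert.h>
#include <stdio.h>

#define UBSEC_CARD(sid)		(((sid) & 0xf0000000) >> 28)
#define UBSEC_SESSION(sid)	( (sid) & 0x0fffffff)
#define UBSEC_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))

int main(void)
{
	unsigned int sid = UBSEC_SID(2, 17);	/* card 2, session 17 */

	assert(UBSEC_CARD(sid) == 2);
	assert(UBSEC_SESSION(sid) == 17);
	printf("sid=0x%08x card=%u session=%u\n",
	       sid, UBSEC_CARD(sid), UBSEC_SESSION(sid));
	return 0;
}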
diff --git a/target/linux/generic/files/crypto/ocf/uio.h b/target/linux/generic/files/crypto/ocf/uio.h
new file mode 100644
index 000000000..03a62491f
--- /dev/null
+++ b/target/linux/generic/files/crypto/ocf/uio.h
@@ -0,0 +1,54 @@
+#ifndef _OCF_UIO_H_
+#define _OCF_UIO_H_
+
+#include <linux/uio.h>
+
+/*
+ * The Linux uio.h doesn't have everything we need. To stay fully API-compatible
+ * with the BSD cryptodev, we need to keep this around. Perhaps this can
+ * be merged back into linux/uio.h at some point.
+ *
+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ * 1. distributions of this source code include the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ *
+ * 2. distributions in binary form include the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other associated materials;
+ *
+ * 3. the copyright holder's name is not used to endorse products
+ * built using this software without specific written permission.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this product
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ * ---------------------------------------------------------------------------
+ */
+
+struct uio {
+ struct iovec *uio_iov;
+ int uio_iovcnt;
+ off_t uio_offset;
+ int uio_resid;
+#if 0
+ enum uio_seg uio_segflg;
+ enum uio_rw uio_rw;
+ struct thread *uio_td;
+#endif
+};
+
+#endif
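For CRYPTO_F_IOV requests, OCF consumers describe scattered buffers with this struct uio plus an iovec array, which helpers such as crypto_copydata() then walk. Below is a minimal userspace sketch of how such a descriptor is filled; the local struct uio simply mirrors the definition above so the example is self-contained, and the buffer names are made up for illustration.

/*
 * Userspace sketch: describe a two-fragment message the way an OCF consumer
 * would for a CRYPTO_F_IOV request.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>		/* struct iovec: iov_base, iov_len */

struct uio {			/* mirrors the OCF definition above */
	struct iovec *uio_iov;
	int uio_iovcnt;
	off_t uio_offset;
	int uio_resid;
};

int main(void)
{
	char hdr[8], payload[64];
	struct iovec iov[2] = {
		{ .iov_base = hdr,	.iov_len = sizeof(hdr) },
		{ .iov_base = payload,	.iov_len = sizeof(payload) },
	};
	struct uio u = {
		.uio_iov	= iov,
		.uio_iovcnt	= 2,
		.uio_offset	= 0,
		.uio_resid	= sizeof(hdr) + sizeof(payload),
	};

	printf("uio describes %d fragments, %d bytes in total\n",
	       u.uio_iovcnt, u.uio_resid);
	return 0;
}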
diff --git a/target/linux/generic/files/drivers/char/gpio_dev.c b/target/linux/generic/files/drivers/char/gpio_dev.c
new file mode 100644
index 000000000..c74157302
--- /dev/null
+++ b/target/linux/generic/files/drivers/char/gpio_dev.c
@@ -0,0 +1,181 @@
+/*
+ * character device wrapper for generic gpio layer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Feedback, Bugs... blogic@openwrt.org
+ *
+ * dpg 20100106
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/gpio.h>
+#include <asm/atomic.h>
+#include <linux/init.h>
+#include <linux/genhd.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/gpio_dev.h>
+#include <linux/fs.h>
+
+#define DRVNAME "gpiodev"
+#define DEVNAME "gpio"
+
+static int dev_major;
+static struct class *gpiodev_class;
+
+
+/* third argument of user space ioctl ('arg' here) contains the <pin> */
+static int
+gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+
+ switch (cmd)
+ {
+ case GPIO_GET:
+ retval = gpio_get_value(arg);
+ break;
+ case GPIO_SET:
+ gpio_set_value(arg, 1);
+ break;
+ case GPIO_CLEAR:
+ gpio_set_value(arg, 0);
+ break;
+ case GPIO_DIR_IN:
+ retval = gpio_direction_input(arg);
+ break;
+ case GPIO_DIR_OUT:
+ retval = gpio_direction_output(arg, 0);
+ break;
+ case GPIO_DIR_HIGH:
+ retval = gpio_direction_output(arg, 1);
+ break;
+ case GPIO_REQUEST:
+ /* should be first ioctl operation on <pin> */
+ retval = gpio_request(arg, DRVNAME);
+ break;
+ case GPIO_FREE:
+ /* should be last ioctl operation on <pin> */
+ /* may be needed first if previous user missed this ioctl */
+ gpio_free(arg);
+ break;
+ case GPIO_CAN_SLEEP:
+ retval = gpio_cansleep(arg);
+ break;
+ default:
+ retval = -EINVAL;
+ /* = -ENOTTY; // correct return but ... */
+ break;
+ }
+ return retval;
+}
+
+/* Allow concurrent opens */
+static int
+gpio_open(struct inode *inode, struct file *file)
+{
+ int result = 0;
+ unsigned int dev_minor = MINOR(inode->i_rdev);
+
+ if (dev_minor != 0)
+ {
+ printk(KERN_ERR DRVNAME ": trying to access unknown minor device -> %d\n", dev_minor);
+ result = -ENODEV;
+ goto out;
+ }
+out:
+ return result;
+}
+
+static int
+gpio_close(struct inode * inode, struct file * file)
+{
+ /* could track all <pin>s requested by this fd and gpio_free()
+ * them here
+ */
+ return 0;
+}
+
+struct file_operations gpio_fops = {
+	.unlocked_ioctl	= gpio_ioctl,
+	.open		= gpio_open,
+	.release	= gpio_close,
+};
+
+static int
+gpio_probe(struct platform_device *dev)
+{
+ int result = 0;
+
+ dev_major = register_chrdev(0, DEVNAME, &gpio_fops);
+	if (dev_major < 0)
+	{
+		printk(KERN_ERR DRVNAME ": Error whilst registering %s\n", DEVNAME);
+ result = -ENODEV;
+ goto out;
+ }
+ gpiodev_class = class_create(THIS_MODULE, DRVNAME);
+ device_create(gpiodev_class, NULL, MKDEV(dev_major, 0), dev, DEVNAME);
+ printk(KERN_INFO DRVNAME ": gpio device registered with major %d\n", dev_major);
+out:
+ return result;
+}
+
+static int
+gpio_remove(struct platform_device *dev)
+{
+ device_destroy(gpiodev_class, MKDEV(dev_major, 0));
+ class_destroy(gpiodev_class);
+ unregister_chrdev(dev_major, DEVNAME);
+ return 0;
+}
+
+static struct platform_driver gpio_driver = {
+ .probe = gpio_probe,
+ .remove = gpio_remove,
+ .driver = {
+ .name = "GPIODEV",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init
+gpio_mod_init(void)
+{
+ int ret = platform_driver_register(&gpio_driver);
+ if (ret)
+		printk(KERN_ERR DRVNAME ": Error registering platform driver!\n");
+
+ return ret;
+}
+
+static void __exit
+gpio_mod_exit(void)
+{
+ platform_driver_unregister(&gpio_driver);
+}
+
+module_init (gpio_mod_init);
+module_exit (gpio_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Crispin / OpenWrt +");
+MODULE_DESCRIPTION("Character device for the generic GPIO API");
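The ioctl switch above is the whole userspace contract of this driver: the third ioctl argument is the GPIO pin number, and GPIO_REQUEST/GPIO_FREE should bracket all other operations on a pin. A minimal userspace sketch follows; it assumes the GPIO_* ioctl codes from <linux/gpio_dev.h> are visible to userspace, and pin 4 is a hypothetical, board-specific line.

/*
 * Userspace sketch for the /dev/gpio ioctl interface above.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/gpio_dev.h>	/* assumption: GPIO_* ioctl codes exported here */

int main(void)
{
	int pin = 4;			/* hypothetical pin number */
	int fd = open("/dev/gpio", O_RDWR);

	if (fd < 0) {
		perror("open /dev/gpio");
		return 1;
	}

	ioctl(fd, GPIO_REQUEST, pin);	/* should be the first operation on a pin */
	ioctl(fd, GPIO_DIR_OUT, pin);	/* configure as output, driven low */
	ioctl(fd, GPIO_SET, pin);	/* drive the pin high */
	ioctl(fd, GPIO_CLEAR, pin);	/* drive the pin low again */
	ioctl(fd, GPIO_FREE, pin);	/* release the pin */

	close(fd);
	return 0;
}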
diff --git a/target/linux/generic/files/drivers/input/misc/gpio_buttons.c b/target/linux/generic/files/drivers/input/misc/gpio_buttons.c
new file mode 100644
index 000000000..51288a3f9
--- /dev/null
+++ b/target/linux/generic/files/drivers/input/misc/gpio_buttons.c
@@ -0,0 +1,232 @@
+/*
+ * Driver for buttons on GPIO lines not capable of generating interrupts
+ *
+ * Copyright (C) 2007-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Nuno Goncalves <nunojpg@gmail.com>
+ *
+ * This file was based on: /drivers/input/misc/cobalt_btns.c
+ * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *
+ * also was based on: /drivers/input/keyboard/gpio_keys.c
+ * Copyright 2005 Phil Blundell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/input-polldev.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/gpio_buttons.h>
+
+#define DRV_NAME "gpio-buttons"
+
+struct gpio_button_data {
+ int last_state;
+ int count;
+ int can_sleep;
+};
+
+struct gpio_buttons_dev {
+ struct input_polled_dev *poll_dev;
+ struct gpio_buttons_platform_data *pdata;
+ struct gpio_button_data *data;
+};
+
+static void gpio_buttons_check_state(struct input_dev *input,
+ struct gpio_button *button,
+ struct gpio_button_data *bdata)
+{
+ int state;
+
+ if (bdata->can_sleep)
+ state = !!gpio_get_value_cansleep(button->gpio);
+ else
+ state = !!gpio_get_value(button->gpio);
+
+ if (state != bdata->last_state) {
+ unsigned int type = button->type ?: EV_KEY;
+
+ input_event(input, type, button->code,
+ !!(state ^ button->active_low));
+ input_sync(input);
+ bdata->count = 0;
+ bdata->last_state = state;
+ }
+}
+
+static void gpio_buttons_poll(struct input_polled_dev *dev)
+{
+ struct gpio_buttons_dev *bdev = dev->private;
+ struct gpio_buttons_platform_data *pdata = bdev->pdata;
+ struct input_dev *input = dev->input;
+ int i;
+
+ for (i = 0; i < bdev->pdata->nbuttons; i++) {
+ struct gpio_button *button = &pdata->buttons[i];
+ struct gpio_button_data *bdata = &bdev->data[i];
+
+ if (bdata->count < button->threshold)
+ bdata->count++;
+ else
+ gpio_buttons_check_state(input, button, bdata);
+
+ }
+}
+
+static int __devinit gpio_buttons_probe(struct platform_device *pdev)
+{
+ struct gpio_buttons_platform_data *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct gpio_buttons_dev *bdev;
+ struct input_polled_dev *poll_dev;
+ struct input_dev *input;
+ int error;
+ int i;
+
+ if (!pdata)
+ return -ENXIO;
+
+ bdev = kzalloc(sizeof(struct gpio_buttons_dev) +
+ pdata->nbuttons * sizeof(struct gpio_button_data),
+ GFP_KERNEL);
+ if (!bdev) {
+ dev_err(dev, "no memory for private data\n");
+ return -ENOMEM;
+ }
+
+ bdev->data = (struct gpio_button_data *) &bdev[1];
+
+ poll_dev = input_allocate_polled_device();
+ if (!poll_dev) {
+ dev_err(dev, "no memory for polled device\n");
+ error = -ENOMEM;
+ goto err_free_bdev;
+ }
+
+ poll_dev->private = bdev;
+ poll_dev->poll = gpio_buttons_poll;
+ poll_dev->poll_interval = pdata->poll_interval;
+
+ input = poll_dev->input;
+
+ input->evbit[0] = BIT(EV_KEY);
+ input->name = pdev->name;
+ input->phys = "gpio-buttons/input0";
+ input->dev.parent = &pdev->dev;
+
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+
+ for (i = 0; i < pdata->nbuttons; i++) {
+ struct gpio_button *button = &pdata->buttons[i];
+ unsigned int gpio = button->gpio;
+ unsigned int type = button->type ?: EV_KEY;
+
+ error = gpio_request(gpio,
+ button->desc ? button->desc : DRV_NAME);
+ if (error) {
+ dev_err(dev, "unable to claim gpio %u, err=%d\n",
+ gpio, error);
+ goto err_free_gpio;
+ }
+
+ error = gpio_direction_input(gpio);
+ if (error) {
+ dev_err(dev,
+ "unable to set direction on gpio %u, err=%d\n",
+ gpio, error);
+ goto err_free_gpio;
+ }
+
+ bdev->data[i].can_sleep = gpio_cansleep(gpio);
+ bdev->data[i].last_state = -1;
+
+ input_set_capability(input, type, button->code);
+ }
+
+ bdev->poll_dev = poll_dev;
+ bdev->pdata = pdata;
+ platform_set_drvdata(pdev, bdev);
+
+ error = input_register_polled_device(poll_dev);
+ if (error) {
+ dev_err(dev, "unable to register polled device, err=%d\n",
+ error);
+ goto err_free_gpio;
+ }
+
+ /* report initial state of the buttons */
+ for (i = 0; i < pdata->nbuttons; i++)
+ gpio_buttons_check_state(input, &pdata->buttons[i],
+ &bdev->data[i]);
+
+ return 0;
+
+err_free_gpio:
+ for (i = i - 1; i >= 0; i--)
+ gpio_free(pdata->buttons[i].gpio);
+
+ input_free_polled_device(poll_dev);
+
+err_free_bdev:
+ kfree(bdev);
+
+ platform_set_drvdata(pdev, NULL);
+ return error;
+}
+
+static int __devexit gpio_buttons_remove(struct platform_device *pdev)
+{
+ struct gpio_buttons_dev *bdev = platform_get_drvdata(pdev);
+ struct gpio_buttons_platform_data *pdata = bdev->pdata;
+ int i;
+
+ input_unregister_polled_device(bdev->poll_dev);
+
+ for (i = 0; i < pdata->nbuttons; i++)
+ gpio_free(pdata->buttons[i].gpio);
+
+ input_free_polled_device(bdev->poll_dev);
+
+ kfree(bdev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver gpio_buttons_driver = {
+ .probe = gpio_buttons_probe,
+ .remove = __devexit_p(gpio_buttons_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init gpio_buttons_init(void)
+{
+ return platform_driver_register(&gpio_buttons_driver);
+}
+
+static void __exit gpio_buttons_exit(void)
+{
+ platform_driver_unregister(&gpio_buttons_driver);
+}
+
+module_init(gpio_buttons_init);
+module_exit(gpio_buttons_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_DESCRIPTION("Polled GPIO Buttons driver");
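The probe routine above expects its configuration through platform data: an array of button descriptors plus a polling interval. A hedged board-code sketch is shown below; the struct and field names are assumed to match <linux/gpio_buttons.h> (not part of this patch), and GPIO 12 is a hypothetical line.

/*
 * Board-code sketch: one polled button wired into the driver above.
 * Assumptions: struct gpio_button / gpio_buttons_platform_data and their
 * fields come from <linux/gpio_buttons.h>; the GPIO number is hypothetical.
 */
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/gpio_buttons.h>

static struct gpio_button my_board_buttons[] = {
	{
		.desc		= "reset",
		.type		= EV_KEY,
		.code		= KEY_RESTART,
		.gpio		= 12,
		.active_low	= 1,
		.threshold	= 3,	/* debounce: state sampled after three polls */
	},
};

static struct gpio_buttons_platform_data my_board_button_data = {
	.buttons	= my_board_buttons,
	.nbuttons	= ARRAY_SIZE(my_board_buttons),
	.poll_interval	= 20,	/* poll every 20 ms */
};

static struct platform_device my_board_button_device = {
	.name	= "gpio-buttons",	/* must match DRV_NAME above */
	.id	= -1,
	.dev	= {
		.platform_data = &my_board_button_data,
	},
};

Registering my_board_button_device with platform_device_register() during board setup is what causes gpio_buttons_probe() to run.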
diff --git a/target/linux/generic/files/drivers/leds/ledtrig-morse.c b/target/linux/generic/files/drivers/leds/ledtrig-morse.c
new file mode 100644
index 000000000..bc58afe4c
--- /dev/null
+++ b/target/linux/generic/files/drivers/leds/ledtrig-morse.c
@@ -0,0 +1,366 @@
+/*
+ * LED Morse Trigger
+ *
+ * Copyright (C) 2007 Gabor Juhos <juhosg at openwrt.org>
+ *
+ * This file was based on: drivers/led/ledtrig-timer.c
+ * Copyright 2005-2006 Openedhand Ltd.
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * also based on the patch '[PATCH] 2.5.59 morse code panics' posted
+ * in the LKML by Tomas Szepe at Thu, 30 Jan 2003
+ * Copyright (C) 2002 Andrew Rodland <arodland@noln.com>
+ * Copyright (C) 2003 Tomas Szepe <szepe@pinerecords.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/sysdev.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+
+#include "leds.h"
+
+#define MORSE_DELAY_BASE (HZ/2)
+
+#define MORSE_STATE_BLINK_START 0
+#define MORSE_STATE_BLINK_STOP 1
+
+#define MORSE_DIT_LEN 1
+#define MORSE_DAH_LEN 3
+#define MORSE_SPACE_LEN 7
+
+struct morse_trig_data {
+ unsigned long delay;
+ char *msg;
+
+ unsigned char morse;
+ unsigned char state;
+ char *msgpos;
+ struct timer_list timer;
+};
+
+const unsigned char morsetable[] = {
+ 0122, 0, 0310, 0, 0, 0163, /* "#$%&' */
+ 055, 0155, 0, 0, 0163, 0141, 0152, 0051, /* ()*+,-./ */
+ 077, 076, 074, 070, 060, 040, 041, 043, 047, 057, /* 0-9 */
+ 0107, 0125, 0, 0061, 0, 0114, 0, /* :;<=>?@ */
+ 006, 021, 025, 011, 002, 024, 013, 020, 004, /* A-I */
+ 036, 015, 022, 007, 005, 017, 026, 033, 012, /* J-R */
+ 010, 003, 014, 030, 016, 031, 035, 023, /* S-Z */
+ 0, 0, 0, 0, 0154 /* [\]^_ */
+};
+
+static inline unsigned char tomorse(char c) {
+ if (c >= 'a' && c <= 'z')
+ c = c - 'a' + 'A';
+ if (c >= '"' && c <= '_') {
+ return morsetable[c - '"'];
+ } else
+ return 0;
+}
+
+static inline unsigned long dit_len(struct morse_trig_data *morse_data)
+{
+ return MORSE_DIT_LEN*morse_data->delay;
+}
+
+static inline unsigned long dah_len(struct morse_trig_data *morse_data)
+{
+ return MORSE_DAH_LEN*morse_data->delay;
+}
+
+static inline unsigned long space_len(struct morse_trig_data *morse_data)
+{
+ return MORSE_SPACE_LEN*morse_data->delay;
+}
+
+static void morse_timer_function(unsigned long data)
+{
+ struct led_classdev *led_cdev = (struct led_classdev *)data;
+ struct morse_trig_data *morse_data = led_cdev->trigger_data;
+ unsigned long brightness = LED_OFF;
+ unsigned long delay = 0;
+
+ if (!morse_data->msg)
+ goto set_led;
+
+ switch (morse_data->state) {
+ case MORSE_STATE_BLINK_START:
+ /* Starting a new blink. We have a valid code in morse. */
+ delay = (morse_data->morse & 001) ? dah_len(morse_data):
+ dit_len(morse_data);
+ brightness = LED_FULL;
+ morse_data->state = MORSE_STATE_BLINK_STOP;
+ morse_data->morse >>= 1;
+ break;
+ case MORSE_STATE_BLINK_STOP:
+ /* Coming off of a blink. */
+ morse_data->state = MORSE_STATE_BLINK_START;
+
+ if (morse_data->morse > 1) {
+ /* Not done yet, just a one-dit pause. */
+ delay = dit_len(morse_data);
+ break;
+ }
+
+ /* Get a new char, figure out how much space. */
+ /* First time through */
+ if (!morse_data->msgpos)
+ morse_data->msgpos = (char *)morse_data->msg;
+
+ if (!*morse_data->msgpos) {
+ /* Repeating */
+ morse_data->msgpos = (char *)morse_data->msg;
+ delay = space_len(morse_data);
+ } else {
+ /* Inter-letter space */
+ delay = dah_len(morse_data);
+ }
+
+ if (!(morse_data->morse = tomorse(*morse_data->msgpos))) {
+ delay = space_len(morse_data);
+ /* And get us back here */
+ morse_data->state = MORSE_STATE_BLINK_STOP;
+ }
+ morse_data->msgpos++;
+ break;
+ }
+
+ mod_timer(&morse_data->timer, jiffies + msecs_to_jiffies(delay));
+
+set_led:
+ led_set_brightness(led_cdev, brightness);
+}
+
+static ssize_t _morse_delay_show(struct led_classdev *led_cdev, char *buf)
+{
+ struct morse_trig_data *morse_data = led_cdev->trigger_data;
+
+ sprintf(buf, "%lu\n", morse_data->delay);
+
+ return strlen(buf) + 1;
+}
+
+static ssize_t _morse_delay_store(struct led_classdev *led_cdev,
+ const char *buf, size_t size)
+{
+ struct morse_trig_data *morse_data = led_cdev->trigger_data;
+ char *after;
+ unsigned long state = simple_strtoul(buf, &after, 10);
+ size_t count = after - buf;
+ int ret = -EINVAL;
+
+ if (*after && isspace(*after))
+ count++;
+
+ if (count == size) {
+ morse_data->delay = state;
+ mod_timer(&morse_data->timer, jiffies + 1);
+ ret = count;
+ }
+
+ return ret;
+}
+
+static ssize_t _morse_msg_show(struct led_classdev *led_cdev, char *buf)
+{
+ struct morse_trig_data *morse_data = led_cdev->trigger_data;
+
+ if (!morse_data->msg)
+ sprintf(buf, "<none>\n");
+ else
+ sprintf(buf, "%s\n", morse_data->msg);
+
+ return strlen(buf) + 1;
+}
+
+static ssize_t _morse_msg_store(struct led_classdev *led_cdev,
+ const char *buf, size_t size)
+{
+ struct morse_trig_data *morse_data = led_cdev->trigger_data;
+ char *m;
+
+	m = kmalloc(size + 1, GFP_KERNEL);
+	if (!m)
+		return -ENOMEM;
+
+	memcpy(m, buf, size);
+	m[size] = '\0';
+
+ if (morse_data->msg)
+ kfree(morse_data->msg);
+
+ morse_data->msg = m;
+ morse_data->msgpos = NULL;
+ morse_data->state = MORSE_STATE_BLINK_STOP;
+
+ mod_timer(&morse_data->timer, jiffies + 1);
+
+ return size;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
+static ssize_t morse_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ return _morse_delay_show(led_cdev, buf);
+}
+
+static ssize_t morse_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ return _morse_delay_store(led_cdev, buf, size);
+}
+
+static ssize_t morse_msg_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ return _morse_msg_show(led_cdev, buf);
+}
+
+static ssize_t morse_msg_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ return _morse_msg_store(led_cdev, buf, size);
+}
+
+static DEVICE_ATTR(delay, 0644, morse_delay_show, morse_delay_store);
+static DEVICE_ATTR(message, 0644, morse_msg_show, morse_msg_store);
+
+#define led_device_create_file(leddev, attr) \
+ device_create_file(leddev->dev, &dev_attr_ ## attr)
+#define led_device_remove_file(leddev, attr) \
+ device_remove_file(leddev->dev, &dev_attr_ ## attr)
+
+#else
+static ssize_t morse_delay_show(struct class_device *dev, char *buf)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+
+ return _morse_delay_show(led_cdev, buf);
+}
+
+static ssize_t morse_delay_store(struct class_device *dev, const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+
+ return _morse_delay_store(led_cdev, buf, size);
+}
+
+static ssize_t morse_msg_show(struct class_device *dev, char *buf)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+
+ return _morse_msg_show(led_cdev, buf);
+}
+
+static ssize_t morse_msg_store(struct class_device *dev, const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+
+ return _morse_msg_store(led_cdev, buf, size);
+}
+
+static CLASS_DEVICE_ATTR(delay, 0644, morse_delay_show, morse_delay_store);
+static CLASS_DEVICE_ATTR(message, 0644, morse_msg_show, morse_msg_store);
+
+#define led_device_create_file(leddev, attr) \
+ class_device_create_file(leddev->class_dev, &class_device_attr_ ## attr)
+#define led_device_remove_file(leddev, attr) \
+ class_device_remove_file(leddev->class_dev, &class_device_attr_ ## attr)
+
+#endif
+
+static void morse_trig_activate(struct led_classdev *led_cdev)
+{
+ struct morse_trig_data *morse_data;
+ int rc;
+
+ morse_data = kzalloc(sizeof(*morse_data), GFP_KERNEL);
+ if (!morse_data)
+ return;
+
+ morse_data->delay = MORSE_DELAY_BASE;
+ init_timer(&morse_data->timer);
+ morse_data->timer.function = morse_timer_function;
+ morse_data->timer.data = (unsigned long)led_cdev;
+
+ rc = led_device_create_file(led_cdev, delay);
+ if (rc) goto err;
+
+ rc = led_device_create_file(led_cdev, message);
+ if (rc) goto err_delay;
+
+ led_cdev->trigger_data = morse_data;
+
+ return;
+
+err_delay:
+ led_device_remove_file(led_cdev, delay);
+err:
+ kfree(morse_data);
+}
+
+static void morse_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct morse_trig_data *morse_data = led_cdev->trigger_data;
+
+ if (!morse_data)
+ return;
+
+ led_device_remove_file(led_cdev, message);
+ led_device_remove_file(led_cdev, delay);
+
+ del_timer_sync(&morse_data->timer);
+ if (morse_data->msg)
+ kfree(morse_data->msg);
+
+ kfree(morse_data);
+}
+
+static struct led_trigger morse_led_trigger = {
+ .name = "morse",
+ .activate = morse_trig_activate,
+ .deactivate = morse_trig_deactivate,
+};
+
+static int __init morse_trig_init(void)
+{
+ return led_trigger_register(&morse_led_trigger);
+}
+
+static void __exit morse_trig_exit(void)
+{
+ led_trigger_unregister(&morse_led_trigger);
+}
+
+module_init(morse_trig_init);
+module_exit(morse_trig_exit);
+
+MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
+MODULE_DESCRIPTION("Morse LED trigger");
+MODULE_LICENSE("GPL");
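The morsetable[] encoding used by the timer function above is compact but not obvious: each entry is consumed LSB-first, a 0 bit is a dit, a 1 bit is a dah, and the most significant set bit acts as a sentinel that ends the character. A small standalone sketch follows; the value 006 is the table entry for 'A' copied from above.

/*
 * Standalone sketch of the morsetable[] bit encoding: bits are consumed
 * LSB-first, 0 = dit, 1 = dah, most significant set bit = end sentinel.
 */
#include <stdio.h>

int main(void)
{
	unsigned char code = 006;	/* 'A' in the table above */

	while (code > 1) {		/* stop when only the sentinel is left */
		putchar((code & 1) ? '-' : '.');
		code >>= 1;
	}
	putchar('\n');			/* prints ".-", Morse for 'A' */
	return 0;
}

This mirrors morse_timer_function(), which tests the low bit to choose dah_len() or dit_len(), shifts right, and treats a remaining value of 1 as the end of the character.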
diff --git a/target/linux/generic/files/drivers/leds/ledtrig-netdev.c b/target/linux/generic/files/drivers/leds/ledtrig-netdev.c
new file mode 100644
index 000000000..6c56acb27
--- /dev/null
+++ b/target/linux/generic/files/drivers/leds/ledtrig-netdev.c
@@ -0,0 +1,451 @@
+/*
+ * LED Kernel Netdev Trigger
+ *
+ * Toggles the LED to reflect the link and traffic state of a named net device
+ *
+ * Copyright 2007 Oliver Jowett <oliver@opencloud.com>
+ *
+ * Derived from ledtrig-timer.c which is:
+ * Copyright 2005-2006 Openedhand Ltd.
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/sysdev.h>
+#include <linux/netdevice.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/leds.h>
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <net/net_namespace.h>
+#endif
+
+#include "leds.h"
+
+/*
+ * Configurable sysfs attributes:
+ *
+ * device_name - network device name to monitor
+ *
+ * interval - duration of LED blink, in milliseconds
+ *
+ * mode - either "none" (LED is off) or a space-separated list of one or more of:
+ *     link: LED's normal state reflects whether the link is up (has carrier) or not
+ *     tx:   LED blinks on transmitted data
+ *     rx:   LED blinks on received data
+ *
+ * Some suggestions:
+ *
+ * Simple link status LED:
+ * $ echo netdev >someled/trigger
+ * $ echo eth0 >someled/device_name
+ * $ echo link >someled/mode
+ *
+ * Ethernet-style link/activity LED:
+ * $ echo netdev >someled/trigger
+ * $ echo eth0 >someled/device_name
+ * $ echo "link tx rx" >someled/mode
+ *
+ * Modem-style tx/rx LEDs:
+ * $ echo netdev >led1/trigger
+ * $ echo ppp0 >led1/device_name
+ * $ echo tx >led1/mode
+ * $ echo netdev >led2/trigger
+ * $ echo ppp0 >led2/device_name
+ * $ echo rx >led2/mode
+ *
+ */
+
+#define MODE_LINK 1
+#define MODE_TX 2
+#define MODE_RX 4
+
+struct led_netdev_data {
+ rwlock_t lock;
+
+ struct timer_list timer;
+ struct notifier_block notifier;
+
+ struct led_classdev *led_cdev;
+ struct net_device *net_dev;
+
+ char device_name[IFNAMSIZ];
+ unsigned interval;
+ unsigned mode;
+ unsigned link_up;
+ unsigned last_activity;
+};
+
+static void set_baseline_state(struct led_netdev_data *trigger_data)
+{
+ if ((trigger_data->mode & MODE_LINK) != 0 && trigger_data->link_up)
+ led_set_brightness(trigger_data->led_cdev, LED_FULL);
+ else
+ led_set_brightness(trigger_data->led_cdev, LED_OFF);
+
+ if ((trigger_data->mode & (MODE_TX | MODE_RX)) != 0 && trigger_data->link_up)
+ mod_timer(&trigger_data->timer, jiffies + trigger_data->interval);
+ else
+ del_timer(&trigger_data->timer);
+}
+
+static ssize_t led_device_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ read_lock(&trigger_data->lock);
+ sprintf(buf, "%s\n", trigger_data->device_name);
+ read_unlock(&trigger_data->lock);
+
+ return strlen(buf) + 1;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+extern struct net init_net;
+#endif
+
+static ssize_t led_device_name_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+	if (size >= IFNAMSIZ)
+ return -EINVAL;
+
+ write_lock(&trigger_data->lock);
+
+ strcpy(trigger_data->device_name, buf);
+ if (size > 0 && trigger_data->device_name[size-1] == '\n')
+ trigger_data->device_name[size-1] = 0;
+
+ if (trigger_data->device_name[0] != 0) {
+ /* check for existing device to update from */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ trigger_data->net_dev = dev_get_by_name(&init_net, trigger_data->device_name);
+#else
+ trigger_data->net_dev = dev_get_by_name(trigger_data->device_name);
+#endif
+ if (trigger_data->net_dev != NULL)
+ trigger_data->link_up = (dev_get_flags(trigger_data->net_dev) & IFF_LOWER_UP) != 0;
+ set_baseline_state(trigger_data); /* updates LEDs, may start timers */
+ }
+
+ write_unlock(&trigger_data->lock);
+ return size;
+}
+
+static DEVICE_ATTR(device_name, 0644, led_device_name_show, led_device_name_store);
+
+static ssize_t led_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ read_lock(&trigger_data->lock);
+
+ if (trigger_data->mode == 0) {
+ strcpy(buf, "none\n");
+ } else {
+ if (trigger_data->mode & MODE_LINK)
+ strcat(buf, "link ");
+ if (trigger_data->mode & MODE_TX)
+ strcat(buf, "tx ");
+ if (trigger_data->mode & MODE_RX)
+ strcat(buf, "rx ");
+ strcat(buf, "\n");
+ }
+
+ read_unlock(&trigger_data->lock);
+
+ return strlen(buf)+1;
+}
+
+static ssize_t led_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ char copybuf[128];
+ int new_mode = -1;
+ char *p, *token;
+
+ /* take a copy since we don't want to trash the inbound buffer when using strsep */
+ strncpy(copybuf, buf, sizeof(copybuf));
+ copybuf[sizeof(copybuf) - 1] = 0;
+ p = copybuf;
+
+ while ((token = strsep(&p, " \t\n")) != NULL) {
+ if (!*token)
+ continue;
+
+ if (new_mode == -1)
+ new_mode = 0;
+
+ if (!strcmp(token, "none"))
+ new_mode = 0;
+ else if (!strcmp(token, "tx"))
+ new_mode |= MODE_TX;
+ else if (!strcmp(token, "rx"))
+ new_mode |= MODE_RX;
+ else if (!strcmp(token, "link"))
+ new_mode |= MODE_LINK;
+ else
+ return -EINVAL;
+ }
+
+ if (new_mode == -1)
+ return -EINVAL;
+
+ write_lock(&trigger_data->lock);
+ trigger_data->mode = new_mode;
+ set_baseline_state(trigger_data);
+ write_unlock(&trigger_data->lock);
+
+ return size;
+}
+
+static DEVICE_ATTR(mode, 0644, led_mode_show, led_mode_store);
+
+static ssize_t led_interval_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ read_lock(&trigger_data->lock);
+ sprintf(buf, "%u\n", jiffies_to_msecs(trigger_data->interval));
+ read_unlock(&trigger_data->lock);
+
+ return strlen(buf) + 1;
+}
+
+static ssize_t led_interval_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ int ret = -EINVAL;
+ char *after;
+ unsigned long value = simple_strtoul(buf, &after, 10);
+ size_t count = after - buf;
+
+ if (*after && isspace(*after))
+ count++;
+
+ /* impose some basic bounds on the timer interval */
+ if (count == size && value >= 5 && value <= 10000) {
+ write_lock(&trigger_data->lock);
+ trigger_data->interval = msecs_to_jiffies(value);
+ set_baseline_state(trigger_data); // resets timer
+ write_unlock(&trigger_data->lock);
+ ret = count;
+ }
+
+ return ret;
+}
+
+static DEVICE_ATTR(interval, 0644, led_interval_show, led_interval_store);
+
+static int netdev_trig_notify(struct notifier_block *nb,
+ unsigned long evt,
+ void *dv)
+{
+ struct net_device *dev = dv;
+ struct led_netdev_data *trigger_data = container_of(nb, struct led_netdev_data, notifier);
+
+ if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
+ return NOTIFY_DONE;
+
+ write_lock(&trigger_data->lock);
+
+ if (strcmp(dev->name, trigger_data->device_name))
+ goto done;
+
+ if (evt == NETDEV_REGISTER) {
+ if (trigger_data->net_dev != NULL)
+ dev_put(trigger_data->net_dev);
+ dev_hold(dev);
+ trigger_data->net_dev = dev;
+ trigger_data->link_up = 0;
+ goto done;
+ }
+
+ if (evt == NETDEV_UNREGISTER && trigger_data->net_dev != NULL) {
+ dev_put(trigger_data->net_dev);
+ trigger_data->net_dev = NULL;
+ goto done;
+ }
+
+ /* UP / DOWN / CHANGE */
+
+ trigger_data->link_up = (evt != NETDEV_DOWN && netif_carrier_ok(dev));
+ set_baseline_state(trigger_data);
+
+done:
+ write_unlock(&trigger_data->lock);
+ return NOTIFY_DONE;
+}
+
+/* here's the real work! */
+static void netdev_trig_timer(unsigned long arg)
+{
+ struct led_netdev_data *trigger_data = (struct led_netdev_data *)arg;
+ const struct net_device_stats *dev_stats;
+ unsigned new_activity;
+
+ write_lock(&trigger_data->lock);
+
+ if (!trigger_data->link_up || !trigger_data->net_dev || (trigger_data->mode & (MODE_TX | MODE_RX)) == 0) {
+ /* we don't need to do timer work, just reflect link state. */
+ led_set_brightness(trigger_data->led_cdev, ((trigger_data->mode & MODE_LINK) != 0 && trigger_data->link_up) ? LED_FULL : LED_OFF);
+ goto no_restart;
+ }
+
+ dev_stats = dev_get_stats(trigger_data->net_dev);
+ new_activity =
+ ((trigger_data->mode & MODE_TX) ? dev_stats->tx_packets : 0) +
+ ((trigger_data->mode & MODE_RX) ? dev_stats->rx_packets : 0);
+
+ if (trigger_data->mode & MODE_LINK) {
+ /* base state is ON (link present) */
+ /* if there's no link, we don't get this far and the LED is off */
+
+ /* OFF -> ON always */
+ /* ON -> OFF on activity */
+ if (trigger_data->led_cdev->brightness == LED_OFF) {
+ led_set_brightness(trigger_data->led_cdev, LED_FULL);
+ } else if (trigger_data->last_activity != new_activity) {
+ led_set_brightness(trigger_data->led_cdev, LED_OFF);
+ }
+ } else {
+ /* base state is OFF */
+ /* ON -> OFF always */
+ /* OFF -> ON on activity */
+ if (trigger_data->led_cdev->brightness == LED_FULL) {
+ led_set_brightness(trigger_data->led_cdev, LED_OFF);
+ } else if (trigger_data->last_activity != new_activity) {
+ led_set_brightness(trigger_data->led_cdev, LED_FULL);
+ }
+ }
+
+ trigger_data->last_activity = new_activity;
+ mod_timer(&trigger_data->timer, jiffies + trigger_data->interval);
+
+no_restart:
+ write_unlock(&trigger_data->lock);
+}
+
+static void netdev_trig_activate(struct led_classdev *led_cdev)
+{
+ struct led_netdev_data *trigger_data;
+ int rc;
+
+ trigger_data = kzalloc(sizeof(struct led_netdev_data), GFP_KERNEL);
+ if (!trigger_data)
+ return;
+
+ rwlock_init(&trigger_data->lock);
+
+ trigger_data->notifier.notifier_call = netdev_trig_notify;
+ trigger_data->notifier.priority = 10;
+
+ setup_timer(&trigger_data->timer, netdev_trig_timer, (unsigned long) trigger_data);
+
+ trigger_data->led_cdev = led_cdev;
+ trigger_data->net_dev = NULL;
+ trigger_data->device_name[0] = 0;
+
+ trigger_data->mode = 0;
+ trigger_data->interval = msecs_to_jiffies(50);
+ trigger_data->link_up = 0;
+ trigger_data->last_activity = 0;
+
+ led_cdev->trigger_data = trigger_data;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_device_name);
+ if (rc)
+ goto err_out;
+ rc = device_create_file(led_cdev->dev, &dev_attr_mode);
+ if (rc)
+ goto err_out_device_name;
+ rc = device_create_file(led_cdev->dev, &dev_attr_interval);
+ if (rc)
+ goto err_out_mode;
+
+ register_netdevice_notifier(&trigger_data->notifier);
+ return;
+
+err_out_mode:
+ device_remove_file(led_cdev->dev, &dev_attr_mode);
+err_out_device_name:
+ device_remove_file(led_cdev->dev, &dev_attr_device_name);
+err_out:
+ led_cdev->trigger_data = NULL;
+ kfree(trigger_data);
+}
+
+static void netdev_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ if (trigger_data) {
+ unregister_netdevice_notifier(&trigger_data->notifier);
+
+ device_remove_file(led_cdev->dev, &dev_attr_device_name);
+ device_remove_file(led_cdev->dev, &dev_attr_mode);
+ device_remove_file(led_cdev->dev, &dev_attr_interval);
+
+ write_lock(&trigger_data->lock);
+
+ if (trigger_data->net_dev) {
+ dev_put(trigger_data->net_dev);
+ trigger_data->net_dev = NULL;
+ }
+
+ write_unlock(&trigger_data->lock);
+
+ del_timer_sync(&trigger_data->timer);
+
+ kfree(trigger_data);
+ }
+}
+
+static struct led_trigger netdev_led_trigger = {
+ .name = "netdev",
+ .activate = netdev_trig_activate,
+ .deactivate = netdev_trig_deactivate,
+};
+
+static int __init netdev_trig_init(void)
+{
+ return led_trigger_register(&netdev_led_trigger);
+}
+
+static void __exit netdev_trig_exit(void)
+{
+ led_trigger_unregister(&netdev_led_trigger);
+}
+
+module_init(netdev_trig_init);
+module_exit(netdev_trig_exit);
+
+MODULE_AUTHOR("Oliver Jowett <oliver@opencloud.com>");
+MODULE_DESCRIPTION("Netdev LED trigger");
+MODULE_LICENSE("GPL");
diff --git a/target/linux/generic/files/drivers/leds/ledtrig-usbdev.c b/target/linux/generic/files/drivers/leds/ledtrig-usbdev.c
new file mode 100644
index 000000000..70b0e392a
--- /dev/null
+++ b/target/linux/generic/files/drivers/leds/ledtrig-usbdev.c
@@ -0,0 +1,348 @@
+/*
+ * LED USB device Trigger
+ *
+ * Toggles the LED to reflect the presence and activity of a USB device
+ * Copyright (C) Gabor Juhos <juhosg@openwrt.org>
+ *
+ * derived from ledtrig-netdev.c:
+ * Copyright 2007 Oliver Jowett <oliver@opencloud.com>
+ *
+ * ledtrig-netdev.c derived from ledtrig-timer.c:
+ * Copyright 2005-2006 Openedhand Ltd.
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/sysdev.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/usb.h>
+
+#include "leds.h"
+
+#define DEV_BUS_ID_SIZE 32
+
+/*
+ * Configurable sysfs attributes:
+ *
+ * device_name - name of the USB device to monitor
+ * activity_interval - duration of LED blink, in milliseconds
+ */
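+
+/*
+ * Example usage from userspace (illustrative only; the LED name and the
+ * "1-1" bus id below are placeholders, not values defined by this driver):
+ *
+ *   echo usbdev > /sys/class/leds/<led>/trigger
+ *   echo 1-1 > /sys/class/leds/<led>/device_name
+ *   echo 100 > /sys/class/leds/<led>/activity_interval
+ */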
+
+struct usbdev_trig_data {
+ rwlock_t lock;
+
+ struct timer_list timer;
+ struct notifier_block notifier;
+
+ struct led_classdev *led_cdev;
+ struct usb_device *usb_dev;
+
+ char device_name[DEV_BUS_ID_SIZE];
+ unsigned interval;
+ int last_urbnum;
+};
+
+static void usbdev_trig_update_state(struct usbdev_trig_data *td)
+{
+ if (td->usb_dev)
+ led_set_brightness(td->led_cdev, LED_FULL);
+ else
+ led_set_brightness(td->led_cdev, LED_OFF);
+
+ if (td->interval && td->usb_dev)
+ mod_timer(&td->timer, jiffies + td->interval);
+ else
+ del_timer(&td->timer);
+}
+
+static ssize_t usbdev_trig_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct usbdev_trig_data *td = led_cdev->trigger_data;
+
+ read_lock(&td->lock);
+ sprintf(buf, "%s\n", td->device_name);
+ read_unlock(&td->lock);
+
+ return strlen(buf) + 1;
+}
+
+static ssize_t usbdev_trig_name_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct usbdev_trig_data *td = led_cdev->trigger_data;
+
+ if (size >= DEV_BUS_ID_SIZE)
+ return -EINVAL;
+
+ write_lock(&td->lock);
+
+ strcpy(td->device_name, buf);
+ if (size > 0 && td->device_name[size - 1] == '\n')
+ td->device_name[size - 1] = 0;
+
+ if (td->device_name[0] != 0) {
+ struct usb_device *usb_dev;
+
+ /* check for existing device to update from */
+ usb_dev = usb_find_device_by_name(td->device_name);
+ if (usb_dev) {
+ if (td->usb_dev)
+ usb_put_dev(td->usb_dev);
+
+ td->usb_dev = usb_dev;
+ td->last_urbnum = atomic_read(&usb_dev->urbnum);
+ }
+
+ /* updates LEDs, may start timers */
+ usbdev_trig_update_state(td);
+ }
+
+ write_unlock(&td->lock);
+ return size;
+}
+
+static DEVICE_ATTR(device_name, 0644, usbdev_trig_name_show,
+ usbdev_trig_name_store);
+
+static ssize_t usbdev_trig_interval_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct usbdev_trig_data *td = led_cdev->trigger_data;
+
+ read_lock(&td->lock);
+ sprintf(buf, "%u\n", jiffies_to_msecs(td->interval));
+ read_unlock(&td->lock);
+
+ return strlen(buf) + 1;
+}
+
+static ssize_t usbdev_trig_interval_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct usbdev_trig_data *td = led_cdev->trigger_data;
+ int ret = -EINVAL;
+ char *after;
+ unsigned long value = simple_strtoul(buf, &after, 10);
+ size_t count = after - buf;
+
+ if (*after && isspace(*after))
+ count++;
+
+ if (count == size && value <= 10000) {
+ write_lock(&td->lock);
+ td->interval = msecs_to_jiffies(value);
+ usbdev_trig_update_state(td); /* resets timer */
+ write_unlock(&td->lock);
+ ret = count;
+ }
+
+ return ret;
+}
+
+static DEVICE_ATTR(activity_interval, 0644, usbdev_trig_interval_show,
+ usbdev_trig_interval_store);
+
+static int usbdev_trig_notify(struct notifier_block *nb,
+ unsigned long evt,
+ void *data)
+{
+ struct usb_device *usb_dev;
+ struct usbdev_trig_data *td;
+
+ if (evt != USB_DEVICE_ADD && evt != USB_DEVICE_REMOVE)
+ return NOTIFY_DONE;
+
+ usb_dev = data;
+ td = container_of(nb, struct usbdev_trig_data, notifier);
+
+ write_lock(&td->lock);
+
+ if (strcmp(dev_name(&usb_dev->dev), td->device_name))
+ goto done;
+
+ if (evt == USB_DEVICE_ADD) {
+ usb_get_dev(usb_dev);
+ if (td->usb_dev != NULL)
+ usb_put_dev(td->usb_dev);
+ td->usb_dev = usb_dev;
+ td->last_urbnum = atomic_read(&usb_dev->urbnum);
+ } else if (evt == USB_DEVICE_REMOVE) {
+ if (td->usb_dev != NULL) {
+ usb_put_dev(td->usb_dev);
+ td->usb_dev = NULL;
+ }
+ }
+
+ usbdev_trig_update_state(td);
+
+done:
+ write_unlock(&td->lock);
+ return NOTIFY_DONE;
+}
+
+/* here's the real work! */
+static void usbdev_trig_timer(unsigned long arg)
+{
+ struct usbdev_trig_data *td = (struct usbdev_trig_data *)arg;
+ int new_urbnum;
+
+ write_lock(&td->lock);
+
+ if (!td->usb_dev || td->interval == 0) {
+ /*
+ * we don't need to do timer work, just reflect device presence
+ */
+ if (td->usb_dev)
+ led_set_brightness(td->led_cdev, LED_FULL);
+ else
+ led_set_brightness(td->led_cdev, LED_OFF);
+
+ goto no_restart;
+ }
+
+ if (td->interval)
+ new_urbnum = atomic_read(&td->usb_dev->urbnum);
+ else
+ new_urbnum = 0;
+
+ if (td->usb_dev) {
+ /*
+ * Base state is ON (device is present). If there's no device,
+ * we don't get this far and the LED is off.
+ * OFF -> ON always
+ * ON -> OFF on activity
+ */
+ if (td->led_cdev->brightness == LED_OFF)
+ led_set_brightness(td->led_cdev, LED_FULL);
+ else if (td->last_urbnum != new_urbnum)
+ led_set_brightness(td->led_cdev, LED_OFF);
+ } else {
+ /*
+ * base state is OFF
+ * ON -> OFF always
+ * OFF -> ON on activity
+ */
+ if (td->led_cdev->brightness == LED_FULL)
+ led_set_brightness(td->led_cdev, LED_OFF);
+ else if (td->last_urbnum != new_urbnum)
+ led_set_brightness(td->led_cdev, LED_FULL);
+ }
+
+ td->last_urbnum = new_urbnum;
+ mod_timer(&td->timer, jiffies + td->interval);
+
+no_restart:
+ write_unlock(&td->lock);
+}
+
+static void usbdev_trig_activate(struct led_classdev *led_cdev)
+{
+ struct usbdev_trig_data *td;
+ int rc;
+
+ td = kzalloc(sizeof(struct usbdev_trig_data), GFP_KERNEL);
+ if (!td)
+ return;
+
+ rwlock_init(&td->lock);
+
+ td->notifier.notifier_call = usbdev_trig_notify;
+ td->notifier.priority = 10;
+
+ setup_timer(&td->timer, usbdev_trig_timer, (unsigned long) td);
+
+ td->led_cdev = led_cdev;
+ td->interval = msecs_to_jiffies(50);
+
+ led_cdev->trigger_data = td;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_device_name);
+ if (rc)
+ goto err_out;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_activity_interval);
+ if (rc)
+ goto err_out_device_name;
+
+ usb_register_notify(&td->notifier);
+ return;
+
+err_out_device_name:
+ device_remove_file(led_cdev->dev, &dev_attr_device_name);
+err_out:
+ led_cdev->trigger_data = NULL;
+ kfree(td);
+}
+
+static void usbdev_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct usbdev_trig_data *td = led_cdev->trigger_data;
+
+ if (td) {
+ usb_unregister_notify(&td->notifier);
+
+ device_remove_file(led_cdev->dev, &dev_attr_device_name);
+ device_remove_file(led_cdev->dev, &dev_attr_activity_interval);
+
+ write_lock(&td->lock);
+
+ if (td->usb_dev) {
+ usb_put_dev(td->usb_dev);
+ td->usb_dev = NULL;
+ }
+
+ write_unlock(&td->lock);
+
+ del_timer_sync(&td->timer);
+
+ kfree(td);
+ }
+}
+
+static struct led_trigger usbdev_led_trigger = {
+ .name = "usbdev",
+ .activate = usbdev_trig_activate,
+ .deactivate = usbdev_trig_deactivate,
+};
+
+static int __init usbdev_trig_init(void)
+{
+ return led_trigger_register(&usbdev_led_trigger);
+}
+
+static void __exit usbdev_trig_exit(void)
+{
+ led_trigger_unregister(&usbdev_led_trigger);
+}
+
+module_init(usbdev_trig_init);
+module_exit(usbdev_trig_exit);
+
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_DESCRIPTION("USB device LED trigger");
+MODULE_LICENSE("GPL v2");
diff --git a/target/linux/generic/files/drivers/mtd/myloader.c b/target/linux/generic/files/drivers/mtd/myloader.c
new file mode 100644
index 000000000..a13752dd9
--- /dev/null
+++ b/target/linux/generic/files/drivers/mtd/myloader.c
@@ -0,0 +1,186 @@
+/*
+ * Parse MyLoader-style flash partition tables and produce a Linux partition
+ * array to match.
+ *
+ * Copyright (C) 2007-2009 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was based on drivers/mtd/redboot.c
+ * Author: Red Hat, Inc. - David Woodhouse <dwmw2@cambridge.redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/byteorder/generic.h>
+#include <linux/myloader.h>
+
+#define BLOCK_LEN_MIN 0x10000
+#define PART_NAME_LEN 32
+
+struct part_data {
+ struct mylo_partition_table tab;
+ char names[MYLO_MAX_PARTITIONS][PART_NAME_LEN];
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+static int myloader_parse_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+#else
+static int myloader_parse_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ unsigned long origin)
+#endif
+{
+ struct part_data *buf;
+ struct mylo_partition_table *tab;
+ struct mylo_partition *part;
+ struct mtd_partition *mtd_parts;
+ struct mtd_partition *mtd_part;
+ int num_parts;
+ int ret, i;
+ size_t retlen;
+ char *names;
+ unsigned long offset;
+ unsigned long blocklen;
+
+ buf = vmalloc(sizeof(*buf));
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ tab = &buf->tab;
+
+ blocklen = master->erasesize;
+ if (blocklen < BLOCK_LEN_MIN)
+ blocklen = BLOCK_LEN_MIN;
+
+ offset = blocklen;
+
+ /* Find the partition table */
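+ /*
+ * The table is searched at block offsets 1 to 4; for example, with
+ * 64 KiB erase blocks (the BLOCK_LEN_MIN floor) the candidate offsets
+ * are 0x10000, 0x20000, 0x30000 and 0x40000.
+ */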
+ for (i = 0; i < 4; i++, offset += blocklen) {
+ printk(KERN_DEBUG "%s: searching for MyLoader partition table"
+ " at offset 0x%lx\n", master->name, offset);
+
+ ret = master->read(master, offset, sizeof(*buf), &retlen,
+ (void *)buf);
+ if (ret)
+ goto out_free_buf;
+
+ if (retlen != sizeof(*buf)) {
+ ret = -EIO;
+ goto out_free_buf;
+ }
+
+ /* Check for Partition Table magic number */
+ if (tab->magic == le32_to_cpu(MYLO_MAGIC_PARTITIONS))
+ break;
+
+ }
+
+ if (tab->magic != le32_to_cpu(MYLO_MAGIC_PARTITIONS)) {
+ printk(KERN_DEBUG "%s: no MyLoader partition table found\n",
+ master->name);
+ ret = 0;
+ goto out_free_buf;
+ }
+
+ /* The MyLoader and the Partition Table are always present */
+ num_parts = 2;
+
+ /* Detect number of used partitions */
+ for (i = 0; i < MYLO_MAX_PARTITIONS; i++) {
+ part = &tab->partitions[i];
+
+ if (le16_to_cpu(part->type) == PARTITION_TYPE_FREE)
+ continue;
+
+ num_parts++;
+ }
+
+ mtd_parts = kzalloc((num_parts * sizeof(*mtd_part) +
+ num_parts * PART_NAME_LEN), GFP_KERNEL);
+
+ if (!mtd_parts) {
+ ret = -ENOMEM;
+ goto out_free_buf;
+ }
+
+ mtd_part = mtd_parts;
+ names = (char *)&mtd_parts[num_parts];
+
+ strncpy(names, "myloader", PART_NAME_LEN);
+ mtd_part->name = names;
+ mtd_part->offset = 0;
+ mtd_part->size = offset;
+ mtd_part->mask_flags = MTD_WRITEABLE;
+ mtd_part++;
+ names += PART_NAME_LEN;
+
+ strncpy(names, "partition_table", PART_NAME_LEN);
+ mtd_part->name = names;
+ mtd_part->offset = offset;
+ mtd_part->size = blocklen;
+ mtd_part->mask_flags = MTD_WRITEABLE;
+ mtd_part++;
+ names += PART_NAME_LEN;
+
+ for (i = 0; i < MYLO_MAX_PARTITIONS; i++) {
+ part = &tab->partitions[i];
+
+ if (le16_to_cpu(part->type) == PARTITION_TYPE_FREE)
+ continue;
+
+ if ((buf->names[i][0]) && (buf->names[i][0] != '\xff'))
+ strncpy(names, buf->names[i], PART_NAME_LEN);
+ else
+ snprintf(names, PART_NAME_LEN, "partition%d", i);
+
+ mtd_part->offset = le32_to_cpu(part->addr);
+ mtd_part->size = le32_to_cpu(part->size);
+ mtd_part->name = names;
+ mtd_part++;
+ names += PART_NAME_LEN;
+ }
+
+ *pparts = mtd_parts;
+ ret = num_parts;
+
+ out_free_buf:
+ vfree(buf);
+ out:
+ return ret;
+}
+
+static struct mtd_part_parser myloader_mtd_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = myloader_parse_partitions,
+ .name = "MyLoader",
+};
+
+static int __init myloader_mtd_parser_init(void)
+{
+ return register_mtd_parser(&myloader_mtd_parser);
+}
+
+static void __exit myloader_mtd_parser_exit(void)
+{
+ deregister_mtd_parser(&myloader_mtd_parser);
+}
+
+module_init(myloader_mtd_parser_init);
+module_exit(myloader_mtd_parser_exit);
+
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_DESCRIPTION("Parsing code for MyLoader partition tables");
+MODULE_LICENSE("GPL v2");
diff --git a/target/linux/generic/files/drivers/net/phy/adm6996.c b/target/linux/generic/files/drivers/net/phy/adm6996.c
new file mode 100644
index 000000000..347c12970
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/adm6996.c
@@ -0,0 +1,737 @@
+/*
+ * ADM6996 switch driver
+ *
+ * swconfig interface based on ar8216.c
+ *
+ * Copyright (c) 2008 Felix Fietkau <nbd@openwrt.org>
+ * VLAN support Copyright (c) 2010, 2011 Peter Lebbing <peter@digitalbrains.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License v2 as published by the
+ * Free Software Foundation
+ */
+
+/*#define DEBUG 1*/
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/switch.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include "adm6996.h"
+
+MODULE_DESCRIPTION("Infineon ADM6996 Switch");
+MODULE_AUTHOR("Felix Fietkau, Peter Lebbing <peter@digitalbrains.com>");
+MODULE_LICENSE("GPL");
+
+enum adm6996_model {
+ ADM6996FC,
+ ADM6996M
+};
+
+static const char * const adm6996_model_name[] =
+{
+ "ADM6996FC",
+ "ADM6996M"
+};
+
+struct adm6996_priv {
+ struct switch_dev dev;
+ struct phy_device *phydev;
+
+ enum adm6996_model model;
+
+ bool enable_vlan;
+ bool vlan_enabled; /* Current hardware state */
+
+#ifdef DEBUG
+ u16 addr; /* Debugging: register address to operate on */
+#endif
+
+ u16 pvid[ADM_NUM_PORTS]; /* Primary VLAN ID */
+
+ u16 vlan_id[ADM_NUM_VLANS];
+ u8 vlan_table[ADM_NUM_VLANS]; /* bitmap, 1 = port is member */
+ u8 vlan_tagged[ADM_NUM_VLANS]; /* bitmap, 1 = tagged member */
+
+ struct mutex reg_mutex;
+
+ /* use an abstraction for regops; we want to add gpio support in the future */
+ u16 (*read)(struct phy_device *phydev, enum admreg reg);
+ void (*write)(struct phy_device *phydev, enum admreg reg, u16 val);
+};
+
+#define to_adm(_dev) container_of(_dev, struct adm6996_priv, dev)
+#define phy_to_adm(_phy) ((struct adm6996_priv *) (_phy)->priv)
+
+static inline u16
+r16(struct phy_device *pdev, enum admreg reg)
+{
+ return phy_to_adm(pdev)->read(pdev, reg);
+}
+
+static inline void
+w16(struct phy_device *pdev, enum admreg reg, u16 val)
+{
+ phy_to_adm(pdev)->write(pdev, reg, val);
+}
+
+
+static u16
+adm6996_read_mii_reg(struct phy_device *phydev, enum admreg reg)
+{
+ return phydev->bus->read(phydev->bus, PHYADDR(reg));
+}
+
+static void
+adm6996_write_mii_reg(struct phy_device *phydev, enum admreg reg, u16 val)
+{
+ phydev->bus->write(phydev->bus, PHYADDR(reg), val);
+}
+
+static int
+adm6996_set_enable_vlan(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct adm6996_priv *priv = to_adm(dev);
+
+ if (val->value.i > 1)
+ return -EINVAL;
+
+ priv->enable_vlan = val->value.i;
+
+ return 0;
+};
+
+static int
+adm6996_get_enable_vlan(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct adm6996_priv *priv = to_adm(dev);
+
+ val->value.i = priv->enable_vlan;
+
+ return 0;
+};
+
+#ifdef DEBUG
+
+static int
+adm6996_set_addr(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct adm6996_priv *priv = to_adm(dev);
+
+ if (val->value.i > 1023)
+ return -EINVAL;
+
+ priv->addr = val->value.i;
+
+ return 0;
+};
+
+static int
+adm6996_get_addr(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct adm6996_priv *priv = to_adm(dev);
+
+ val->value.i = priv->addr;
+
+ return 0;
+};
+
+static int
+adm6996_set_data(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct adm6996_priv *priv = to_adm(dev);
+
+ if (val->value.i > 65535)
+ return -EINVAL;
+
+ w16(priv->phydev, priv->addr, val->value.i);
+
+ return 0;
+};
+
+static int
+adm6996_get_data(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct adm6996_priv *priv = to_adm(dev);
+
+ val->value.i = r16(priv->phydev, priv->addr);
+
+ return 0;
+};
+
+#endif /* def DEBUG */
+
+static int
+adm6996_set_pvid(struct switch_dev *dev, int port, int vlan)
+{
+ struct adm6996_priv *priv = to_adm(dev);
+
+ dev_dbg (&priv->phydev->dev, "set_pvid port %d vlan %d\n", port
+ , vlan);
+
+ if (vlan > ADM_VLAN_MAX_ID)
+ return -EINVAL;
+
+ priv->pvid[port] = vlan;
+
+ return 0;
+}
+
+static int
+adm6996_get_pvid(struct switch_dev *dev, int port, int *vlan)
+{
+ struct adm6996_priv *priv = to_adm(dev);
+
+ dev_dbg (&priv->phydev->dev, "get_pvid port %d\n", port);
+ *vlan = priv->pvid[port];
+
+ return 0;
+}
+
+static int
+adm6996_set_vid(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct adm6996_priv *priv = to_adm(dev);
+
+ dev_dbg (&priv->phydev->dev, "set_vid port %d vid %d\n", val->port_vlan,
+ val->value.i);
+
+ if (val->value.i > ADM_VLAN_MAX_ID)
+ return -EINVAL;
+
+ priv->vlan_id[val->port_vlan] = val->value.i;
+
+ return 0;
+};
+
+static int
+adm6996_get_vid(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct adm6996_priv *priv = to_adm(dev);
+
+ dev_dbg (&priv->phydev->dev, "get_vid port %d\n", val->port_vlan);
+
+ val->value.i = priv->vlan_id[val->port_vlan];
+
+ return 0;
+};
+
+static int
+adm6996_get_ports(struct switch_dev *dev, struct switch_val *val)
+{
+ struct adm6996_priv *priv = to_adm(dev);
+ u8 ports = priv->vlan_table[val->port_vlan];
+ u8 tagged = priv->vlan_tagged[val->port_vlan];
+ int i;
+
+ dev_dbg (&priv->phydev->dev, "get_ports port_vlan %d\n",
+ val->port_vlan);
+
+ val->len = 0;
+
+ for (i = 0; i < ADM_NUM_PORTS; i++) {
+ struct switch_port *p;
+
+ if (!(ports & (1 << i)))
+ continue;
+
+ p = &val->value.ports[val->len++];
+ p->id = i;
+ if (tagged & (1 << i))
+ p->flags = (1 << SWITCH_PORT_FLAG_TAGGED);
+ else
+ p->flags = 0;
+ }
+
+ return 0;
+};
+
+static int
+adm6996_set_ports(struct switch_dev *dev, struct switch_val *val)
+{
+ struct adm6996_priv *priv = to_adm(dev);
+ u8 *ports = &priv->vlan_table[val->port_vlan];
+ u8 *tagged = &priv->vlan_tagged[val->port_vlan];
+ int i;
+
+ dev_dbg (&priv->phydev->dev, "set_ports port_vlan %d ports",
+ val->port_vlan);
+
+ *ports = 0;
+ *tagged = 0;
+
+ for (i = 0; i < val->len; i++) {
+ struct switch_port *p = &val->value.ports[i];
+
+#ifdef DEBUG
+ pr_cont(" %d%s", p->id,
+ ((p->flags & (1 << SWITCH_PORT_FLAG_TAGGED)) ? "T" :
+ ""));
+#endif
+
+ if (p->flags & (1 << SWITCH_PORT_FLAG_TAGGED))
+ *tagged |= (1 << p->id);
+
+ *ports |= (1 << p->id);
+ }
+
+#ifdef DEBUG
+ pr_cont("\n");
+#endif
+
+ return 0;
+};
+
+/*
+ * Precondition: reg_mutex must be held
+ */
+static void
+adm6996_enable_vlan(struct adm6996_priv *priv)
+{
+ u16 reg;
+
+ reg = r16(priv->phydev, ADM_OTBE_P2_PVID);
+ reg &= ~(ADM_OTBE_MASK);
+ w16(priv->phydev, ADM_OTBE_P2_PVID, reg);
+ reg = r16(priv->phydev, ADM_IFNTE);
+ reg &= ~(ADM_IFNTE_MASK);
+ w16(priv->phydev, ADM_IFNTE, reg);
+ reg = r16(priv->phydev, ADM_VID_CHECK);
+ reg |= ADM_VID_CHECK_MASK;
+ w16(priv->phydev, ADM_VID_CHECK, reg);
+ reg = r16(priv->phydev, ADM_SYSC0);
+ reg |= ADM_NTTE;
+ reg &= ~(ADM_RVID1);
+ w16(priv->phydev, ADM_SYSC0, reg);
+ reg = r16(priv->phydev, ADM_SYSC3);
+ reg |= ADM_TBV;
+ w16(priv->phydev, ADM_SYSC3, reg);
+
+};
+
+/*
+ * Disable VLANs
+ *
+ * Sets VLAN mapping for port-based VLAN with all ports connected to
+ * each other (this is also the power-on default).
+ *
+ * Precondition: reg_mutex must be held
+ */
+static void
+adm6996_disable_vlan(struct adm6996_priv *priv)
+{
+ u16 reg;
+ int i;
+
+ for (i = 0; i < ADM_NUM_PORTS; i++) {
+ reg = ADM_VLAN_FILT_MEMBER_MASK;
+ w16(priv->phydev, ADM_VLAN_FILT_L(i), reg);
+ reg = ADM_VLAN_FILT_VALID | ADM_VLAN_FILT_VID(1);
+ w16(priv->phydev, ADM_VLAN_FILT_H(i), reg);
+ }
+
+ reg = r16(priv->phydev, ADM_OTBE_P2_PVID);
+ reg |= ADM_OTBE_MASK;
+ w16(priv->phydev, ADM_OTBE_P2_PVID, reg);
+ reg = r16(priv->phydev, ADM_IFNTE);
+ reg |= ADM_IFNTE_MASK;
+ w16(priv->phydev, ADM_IFNTE, reg);
+ reg = r16(priv->phydev, ADM_VID_CHECK);
+ reg &= ~(ADM_VID_CHECK_MASK);
+ w16(priv->phydev, ADM_VID_CHECK, reg);
+ reg = r16(priv->phydev, ADM_SYSC0);
+ reg &= ~(ADM_NTTE);
+ reg |= ADM_RVID1;
+ w16(priv->phydev, ADM_SYSC0, reg);
+ reg = r16(priv->phydev, ADM_SYSC3);
+ reg &= ~(ADM_TBV);
+ w16(priv->phydev, ADM_SYSC3, reg);
+}
+
+/*
+ * Precondition: reg_mutex must be held
+ */
+static void
+adm6996_apply_port_pvids(struct adm6996_priv *priv)
+{
+ u16 reg;
+ int i;
+
+ for (i = 0; i < ADM_NUM_PORTS; i++) {
+ reg = r16(priv->phydev, adm_portcfg[i]);
+ reg &= ~(ADM_PORTCFG_PVID_MASK);
+ reg |= ADM_PORTCFG_PVID(priv->pvid[i]);
+ w16(priv->phydev, adm_portcfg[i], reg);
+ }
+
+ w16(priv->phydev, ADM_P0_PVID, ADM_P0_PVID_VAL(priv->pvid[0]));
+ w16(priv->phydev, ADM_P1_PVID, ADM_P1_PVID_VAL(priv->pvid[1]));
+ reg = r16(priv->phydev, ADM_OTBE_P2_PVID);
+ reg &= ~(ADM_P2_PVID_MASK);
+ reg |= ADM_P2_PVID_VAL(priv->pvid[2]);
+ w16(priv->phydev, ADM_OTBE_P2_PVID, reg);
+ reg = ADM_P3_PVID_VAL(priv->pvid[3]);
+ reg |= ADM_P4_PVID_VAL(priv->pvid[4]);
+ w16(priv->phydev, ADM_P3_P4_PVID, reg);
+ w16(priv->phydev, ADM_P5_PVID, ADM_P5_PVID_VAL(priv->pvid[5]));
+}
+
+/*
+ * Precondition: reg_mutex must be held
+ */
+static void
+adm6996_apply_vlan_filters(struct adm6996_priv *priv)
+{
+ u8 ports, tagged;
+ u16 vid, reg;
+ int i;
+
+ for (i = 0; i < ADM_NUM_VLANS; i++) {
+ vid = priv->vlan_id[i];
+ ports = priv->vlan_table[i];
+ tagged = priv->vlan_tagged[i];
+
+ if (ports == 0) {
+ /* Disable VLAN entry */
+ w16(priv->phydev, ADM_VLAN_FILT_H(i), 0);
+ w16(priv->phydev, ADM_VLAN_FILT_L(i), 0);
+ continue;
+ }
+
+ reg = ADM_VLAN_FILT_MEMBER(ports);
+ reg |= ADM_VLAN_FILT_TAGGED(tagged);
+ w16(priv->phydev, ADM_VLAN_FILT_L(i), reg);
+ reg = ADM_VLAN_FILT_VALID | ADM_VLAN_FILT_VID(vid);
+ w16(priv->phydev, ADM_VLAN_FILT_H(i), reg);
+ }
+}
+
+static int
+adm6996_hw_apply(struct switch_dev *dev)
+{
+ struct adm6996_priv *priv = to_adm(dev);
+
+ dev_dbg(&priv->phydev->dev, "hw_apply\n");
+
+ mutex_lock(&priv->reg_mutex);
+
+ if (!priv->enable_vlan) {
+ if (priv->vlan_enabled) {
+ adm6996_disable_vlan(priv);
+ priv->vlan_enabled = 0;
+ }
+ goto out;
+ }
+
+ if (!priv->vlan_enabled) {
+ adm6996_enable_vlan(priv);
+ priv->vlan_enabled = 1;
+ }
+
+ adm6996_apply_port_pvids(priv);
+ adm6996_apply_vlan_filters(priv);
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+/*
+ * Reset the switch
+ *
+ * The ADM6996 can't do a software-initiated reset, so we just initialise the
+ * registers we support in this driver.
+ *
+ * Precondition: reg_mutex must be held
+ */
+static void
+adm6996_perform_reset (struct adm6996_priv *priv)
+{
+ int i;
+
+ /* initialize port and vlan settings */
+ for (i = 0; i < ADM_NUM_PORTS - 1; i++) {
+ w16(priv->phydev, adm_portcfg[i], ADM_PORTCFG_INIT |
+ ADM_PORTCFG_PVID(0));
+ }
+ w16(priv->phydev, adm_portcfg[5], ADM_PORTCFG_CPU);
+
+ /* reset all PHY ports */
+ for (i = 0; i < ADM_PHY_PORTS; i++) {
+ w16(priv->phydev, ADM_PHY_PORT(i), ADM_PHYCFG_INIT);
+ }
+
+ priv->enable_vlan = 0;
+ priv->vlan_enabled = 0;
+
+ for (i = 0; i < ADM_NUM_PORTS; i++) {
+ priv->pvid[i] = 0;
+ }
+
+ for (i = 0; i < ADM_NUM_VLANS; i++) {
+ priv->vlan_id[i] = i;
+ priv->vlan_table[i] = 0;
+ priv->vlan_tagged[i] = 0;
+ }
+
+ if (priv->model == ADM6996M) {
+ /* Clear VLAN priority map so prio's are unused */
+ w16 (priv->phydev, ADM_VLAN_PRIOMAP, 0);
+
+ adm6996_disable_vlan(priv);
+ adm6996_apply_port_pvids(priv);
+ }
+}
+
+static int
+adm6996_reset_switch(struct switch_dev *dev)
+{
+ struct adm6996_priv *priv = to_adm(dev);
+
+ dev_dbg (&priv->phydev->dev, "reset\n");
+ mutex_lock(&priv->reg_mutex);
+ adm6996_perform_reset (priv);
+ mutex_unlock(&priv->reg_mutex);
+ return 0;
+}
+
+static struct switch_attr adm6996_globals[] = {
+ {
+ .type = SWITCH_TYPE_INT,
+ .name = "enable_vlan",
+ .description = "Enable VLANs",
+ .set = adm6996_set_enable_vlan,
+ .get = adm6996_get_enable_vlan,
+ },
+#ifdef DEBUG
+ {
+ .type = SWITCH_TYPE_INT,
+ .name = "addr",
+ .description =
+ "Direct register access: set register address (0 - 1023)",
+ .set = adm6996_set_addr,
+ .get = adm6996_get_addr,
+ },
+ {
+ .type = SWITCH_TYPE_INT,
+ .name = "data",
+ .description =
+ "Direct register access: read/write to register (0 - 65535)",
+ .set = adm6996_set_data,
+ .get = adm6996_get_data,
+ },
+#endif /* def DEBUG */
+};
+
+static struct switch_attr adm6996_port[] = {
+};
+
+static struct switch_attr adm6996_vlan[] = {
+ {
+ .type = SWITCH_TYPE_INT,
+ .name = "vid",
+ .description = "VLAN ID",
+ .set = adm6996_set_vid,
+ .get = adm6996_get_vid,
+ },
+};
+
+static const struct switch_dev_ops adm6996_ops = {
+ .attr_global = {
+ .attr = adm6996_globals,
+ .n_attr = ARRAY_SIZE(adm6996_globals),
+ },
+ .attr_port = {
+ .attr = adm6996_port,
+ .n_attr = ARRAY_SIZE(adm6996_port),
+ },
+ .attr_vlan = {
+ .attr = adm6996_vlan,
+ .n_attr = ARRAY_SIZE(adm6996_vlan),
+ },
+ .get_port_pvid = adm6996_get_pvid,
+ .set_port_pvid = adm6996_set_pvid,
+ .get_vlan_ports = adm6996_get_ports,
+ .set_vlan_ports = adm6996_set_ports,
+ .apply_config = adm6996_hw_apply,
+ .reset_switch = adm6996_reset_switch,
+};
+
+static int adm6996_config_init(struct phy_device *pdev)
+{
+ struct adm6996_priv *priv;
+ struct switch_dev *swdev;
+
+ int ret;
+ u16 test, old;
+
+ pdev->supported = ADVERTISED_100baseT_Full;
+ pdev->advertising = ADVERTISED_100baseT_Full;
+
+ if (pdev->addr != 0) {
+ pr_info ("%s: PHY overlaps ADM6996, providing fixed PHY 0x%x.\n"
+ , pdev->attached_dev->name, pdev->addr);
+ return 0;
+ }
+
+ priv = kzalloc(sizeof(struct adm6996_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
+ mutex_init(&priv->reg_mutex);
+ priv->phydev = pdev;
+ priv->read = adm6996_read_mii_reg;
+ priv->write = adm6996_write_mii_reg;
+ pdev->priv = priv;
+
+ /* Detect type of chip */
+ old = r16(pdev, ADM_VID_CHECK);
+ test = old ^ (1 << 12);
+ w16(pdev, ADM_VID_CHECK, test);
+ test ^= r16(pdev, ADM_VID_CHECK);
+ if (test & (1 << 12)) {
+ /*
+ * Bit 12 of this register is read-only.
+ * This is the FC model.
+ */
+ priv->model = ADM6996FC;
+ } else {
+ /* Bit 12 is read-write. This is the M model. */
+ priv->model = ADM6996M;
+ w16(pdev, ADM_VID_CHECK, old);
+ }
+
+ swdev = &priv->dev;
+ swdev->name = (adm6996_model_name[priv->model]);
+ swdev->cpu_port = ADM_CPU_PORT;
+ swdev->ports = ADM_NUM_PORTS;
+ swdev->vlans = ADM_NUM_VLANS;
+ swdev->ops = &adm6996_ops;
+
+ pr_info ("%s: %s model PHY found.\n", pdev->attached_dev->name,
+ swdev->name);
+
+ mutex_lock(&priv->reg_mutex);
+ adm6996_perform_reset (priv);
+ mutex_unlock(&priv->reg_mutex);
+
+ if (priv->model == ADM6996M) {
+ if ((ret = register_switch(swdev, pdev->attached_dev)) < 0) {
+ kfree(priv);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Warning: phydev->priv is NULL if phydev->addr != 0
+ */
+static int adm6996_read_status(struct phy_device *phydev)
+{
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_FULL;
+ phydev->link = 1;
+ return 0;
+}
+
+/*
+ * Warning: phydev->priv is NULL if phydev->addr != 0
+ */
+static int adm6996_config_aneg(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static int adm6996_fixup(struct phy_device *dev)
+{
+ struct mii_bus *bus = dev->bus;
+ u16 reg;
+
+ /* Our custom registers are at PHY addresses 0-10. Claim those. */
+ if (dev->addr > 10)
+ return 0;
+
+ /* look for the switch on the bus */
+ reg = bus->read(bus, PHYADDR(ADM_SIG0)) & ADM_SIG0_MASK;
+ if (reg != ADM_SIG0_VAL)
+ return 0;
+
+ reg = bus->read(bus, PHYADDR(ADM_SIG1)) & ADM_SIG1_MASK;
+ if (reg != ADM_SIG1_VAL)
+ return 0;
+
+ dev->phy_id = (ADM_SIG0_VAL << 16) | ADM_SIG1_VAL;
+
+ return 0;
+}
+
+static int adm6996_probe(struct phy_device *pdev)
+{
+ return 0;
+}
+
+static void adm6996_remove(struct phy_device *pdev)
+{
+ struct adm6996_priv *priv = phy_to_adm(pdev);
+
+ if (priv != NULL && priv->model == ADM6996M)
+ unregister_switch(&priv->dev);
+
+ kfree(priv);
+}
+
+
+static struct phy_driver adm6996_driver = {
+ .name = "Infineon ADM6996",
+ .phy_id = (ADM_SIG0_VAL << 16) | ADM_SIG1_VAL,
+ .phy_id_mask = 0xffffffff,
+ .features = PHY_BASIC_FEATURES,
+ .probe = adm6996_probe,
+ .remove = adm6996_remove,
+ .config_init = &adm6996_config_init,
+ .config_aneg = &adm6996_config_aneg,
+ .read_status = &adm6996_read_status,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init adm6996_init(void)
+{
+ phy_register_fixup_for_id(PHY_ANY_ID, adm6996_fixup);
+ return phy_driver_register(&adm6996_driver);
+}
+
+static void __exit adm6996_exit(void)
+{
+ phy_driver_unregister(&adm6996_driver);
+}
+
+module_init(adm6996_init);
+module_exit(adm6996_exit);
diff --git a/target/linux/generic/files/drivers/net/phy/adm6996.h b/target/linux/generic/files/drivers/net/phy/adm6996.h
new file mode 100644
index 000000000..6922dfcbb
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/adm6996.h
@@ -0,0 +1,162 @@
+/*
+ * ADM6996 switch driver
+ *
+ * Copyright (c) 2008 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (c) 2010,2011 Peter Lebbing <peter@digitalbrains.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License v2 as published by the
+ * Free Software Foundation
+ */
+#ifndef __ADM6996_H
+#define __ADM6996_H
+
+/*
+ * ADM_PHY_PORTS: Number of ports with a PHY.
+ * We only control ports 0 to 3; if port 4 is wired up at all, it is most
+ * likely connected to a separate MII and MAC for the WAN port rather than
+ * to the switch.
+ */
+#define ADM_PHY_PORTS 4
+#define ADM_NUM_PORTS 6
+#define ADM_CPU_PORT 5
+
+#define ADM_NUM_VLANS 16
+#define ADM_VLAN_MAX_ID 4094
+
+enum admreg {
+ ADM_EEPROM_BASE = 0x0,
+ ADM_P0_CFG = ADM_EEPROM_BASE + 1,
+ ADM_P1_CFG = ADM_EEPROM_BASE + 3,
+ ADM_P2_CFG = ADM_EEPROM_BASE + 5,
+ ADM_P3_CFG = ADM_EEPROM_BASE + 7,
+ ADM_P4_CFG = ADM_EEPROM_BASE + 8,
+ ADM_P5_CFG = ADM_EEPROM_BASE + 9,
+ ADM_SYSC0 = ADM_EEPROM_BASE + 0xa,
+ ADM_VLAN_PRIOMAP = ADM_EEPROM_BASE + 0xe,
+ ADM_SYSC3 = ADM_EEPROM_BASE + 0x11,
+ /* Input Force No Tag Enable */
+ ADM_IFNTE = ADM_EEPROM_BASE + 0x20,
+ ADM_VID_CHECK = ADM_EEPROM_BASE + 0x26,
+ ADM_P0_PVID = ADM_EEPROM_BASE + 0x28,
+ ADM_P1_PVID = ADM_EEPROM_BASE + 0x29,
+ /* Output Tag Bypass Enable and P2 PVID */
+ ADM_OTBE_P2_PVID = ADM_EEPROM_BASE + 0x2a,
+ ADM_P3_P4_PVID = ADM_EEPROM_BASE + 0x2b,
+ ADM_P5_PVID = ADM_EEPROM_BASE + 0x2c,
+ ADM_EEPROM_EXT_BASE = 0x40,
+#define ADM_VLAN_FILT_L(n) (ADM_EEPROM_EXT_BASE + 2 * (n))
+#define ADM_VLAN_FILT_H(n) (ADM_EEPROM_EXT_BASE + 1 + 2 * (n))
+ ADM_COUNTER_BASE = 0xa0,
+ ADM_SIG0 = ADM_COUNTER_BASE + 0,
+ ADM_SIG1 = ADM_COUNTER_BASE + 1,
+ ADM_PHY_BASE = 0x200,
+#define ADM_PHY_PORT(n) (ADM_PHY_BASE + (0x20 * n))
+};
+
+/* Chip identification patterns */
+#define ADM_SIG0_MASK 0xffff
+#define ADM_SIG0_VAL 0x1023
+#define ADM_SIG1_MASK 0xffff
+#define ADM_SIG1_VAL 0x0007
+
+enum {
+ ADM_PHYCFG_COLTST = (1 << 7), /* Enable collision test */
+ ADM_PHYCFG_DPLX = (1 << 8), /* Enable full duplex */
+ ADM_PHYCFG_ANEN_RST = (1 << 9), /* Restart auto negotiation (self clear) */
+ ADM_PHYCFG_ISO = (1 << 10), /* Isolate PHY */
+ ADM_PHYCFG_PDN = (1 << 11), /* Power down PHY */
+ ADM_PHYCFG_ANEN = (1 << 12), /* Enable auto negotiation */
+ ADM_PHYCFG_SPEED_100 = (1 << 13), /* Enable 100 Mbit/s */
+ ADM_PHYCFG_LPBK = (1 << 14), /* Enable loopback operation */
+ ADM_PHYCFG_RST = (1 << 15), /* Reset the port (self clear) */
+ ADM_PHYCFG_INIT = (
+ ADM_PHYCFG_RST |
+ ADM_PHYCFG_SPEED_100 |
+ ADM_PHYCFG_ANEN |
+ ADM_PHYCFG_ANEN_RST
+ )
+};
+
+enum {
+ ADM_PORTCFG_FC = (1 << 0), /* Enable 802.x flow control */
+ ADM_PORTCFG_AN = (1 << 1), /* Enable auto-negotiation */
+ ADM_PORTCFG_SPEED_100 = (1 << 2), /* Enable 100 Mbit/s */
+ ADM_PORTCFG_DPLX = (1 << 3), /* Enable full duplex */
+ ADM_PORTCFG_OT = (1 << 4), /* Output tagged packets */
+ ADM_PORTCFG_PD = (1 << 5), /* Port disable */
+ ADM_PORTCFG_TV_PRIO = (1 << 6), /* 0 = VLAN based priority
+ * 1 = TOS based priority */
+ ADM_PORTCFG_PPE = (1 << 7), /* Port based priority enable */
+ ADM_PORTCFG_PP_S = (1 << 8), /* Port based priority, 2 bits */
+ ADM_PORTCFG_PVID_BASE = (1 << 10), /* Primary VLAN id, 4 bits */
+ ADM_PORTCFG_FSE = (1 << 14), /* Fx select enable */
+ ADM_PORTCFG_CAM = (1 << 15), /* Crossover Auto MDIX */
+
+ ADM_PORTCFG_INIT = (
+ ADM_PORTCFG_FC |
+ ADM_PORTCFG_AN |
+ ADM_PORTCFG_SPEED_100 |
+ ADM_PORTCFG_DPLX |
+ ADM_PORTCFG_CAM
+ ),
+ ADM_PORTCFG_CPU = (
+ ADM_PORTCFG_FC |
+ ADM_PORTCFG_SPEED_100 |
+ ADM_PORTCFG_OT |
+ ADM_PORTCFG_DPLX
+ ),
+};
+
+#define ADM_PORTCFG_PPID(n) ((n & 0x3) << 8)
+#define ADM_PORTCFG_PVID(n) ((n & 0xf) << 10)
+#define ADM_PORTCFG_PVID_MASK (0xf << 10)
+
+#define ADM_IFNTE_MASK (0x3f << 9)
+#define ADM_VID_CHECK_MASK (0x3f << 6)
+
+#define ADM_P0_PVID_VAL(n) ((((n) & 0xff0) >> 4) << 0)
+#define ADM_P1_PVID_VAL(n) ((((n) & 0xff0) >> 4) << 0)
+#define ADM_P2_PVID_VAL(n) ((((n) & 0xff0) >> 4) << 0)
+#define ADM_P3_PVID_VAL(n) ((((n) & 0xff0) >> 4) << 0)
+#define ADM_P4_PVID_VAL(n) ((((n) & 0xff0) >> 4) << 8)
+#define ADM_P5_PVID_VAL(n) ((((n) & 0xff0) >> 4) << 0)
+#define ADM_P2_PVID_MASK 0xff
+
+#define ADM_OTBE(n) (((n) & 0x3f) << 8)
+#define ADM_OTBE_MASK (0x3f << 8)
+
+/* ADM_SYSC0 */
+enum {
+ ADM_NTTE = (1 << 2), /* New Tag Transmit Enable */
+ ADM_RVID1 = (1 << 8) /* Replace VLAN ID 1 */
+};
+
+/* Tag Based VLAN in ADM_SYSC3 */
+#define ADM_TBV (1 << 5)
+
+static const u8 adm_portcfg[] = {
+ [0] = ADM_P0_CFG,
+ [1] = ADM_P1_CFG,
+ [2] = ADM_P2_CFG,
+ [3] = ADM_P3_CFG,
+ [4] = ADM_P4_CFG,
+ [5] = ADM_P5_CFG,
+};
+
+/* Fields in ADM_VLAN_FILT_L(x) */
+#define ADM_VLAN_FILT_FID(n) (((n) & 0xf) << 12)
+#define ADM_VLAN_FILT_TAGGED(n) (((n) & 0x3f) << 6)
+#define ADM_VLAN_FILT_MEMBER(n) (((n) & 0x3f) << 0)
+#define ADM_VLAN_FILT_MEMBER_MASK 0x3f
+/* Fields in ADM_VLAN_FILT_H(x) */
+#define ADM_VLAN_FILT_VALID (1 << 15)
+#define ADM_VLAN_FILT_VID(n) (((n) & 0xfff) << 0)
+
+
+/*
+ * Split the register address into PHY id and register;
+ * they are combined again by the MDIO bus op.
+ */
+#define PHYADDR(_reg) ((_reg >> 5) & 0xff), (_reg & 0x1f)
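+
+/*
+ * Worked example: ADM_VID_CHECK is register 0x26, so PHYADDR(0x26)
+ * expands to phy id 0x01, reg 0x06 ((0x26 >> 5) & 0xff = 1 and
+ * 0x26 & 0x1f = 6).
+ */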
+
+#endif
diff --git a/target/linux/generic/files/drivers/net/phy/ar8216.c b/target/linux/generic/files/drivers/net/phy/ar8216.c
new file mode 100644
index 000000000..4df2dbbdb
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/ar8216.c
@@ -0,0 +1,1536 @@
+/*
+ * ar8216.c: AR8216 switch driver
+ *
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/if.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/bitops.h>
+#include <net/genetlink.h>
+#include <linux/switch.h>
+#include <linux/delay.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/lockdep.h>
+#include <linux/ar8216_platform.h>
+#include "ar8216.h"
+
+/* size of the vlan table */
+#define AR8X16_MAX_VLANS 128
+#define AR8X16_PROBE_RETRIES 10
+#define AR8X16_MAX_PORTS 8
+
+struct ar8216_priv;
+
+#define AR8XXX_CAP_GIGE BIT(0)
+
+enum {
+ AR8XXX_VER_AR8216 = 0x01,
+ AR8XXX_VER_AR8236 = 0x03,
+ AR8XXX_VER_AR8316 = 0x10,
+ AR8XXX_VER_AR8327 = 0x12,
+};
+
+struct ar8xxx_chip {
+ unsigned long caps;
+
+ int (*hw_init)(struct ar8216_priv *priv);
+ void (*init_globals)(struct ar8216_priv *priv);
+ void (*init_port)(struct ar8216_priv *priv, int port);
+ void (*setup_port)(struct ar8216_priv *priv, int port, u32 egress,
+ u32 ingress, u32 members, u32 pvid);
+ u32 (*read_port_status)(struct ar8216_priv *priv, int port);
+ int (*atu_flush)(struct ar8216_priv *priv);
+ void (*vtu_flush)(struct ar8216_priv *priv);
+ void (*vtu_load_vlan)(struct ar8216_priv *priv, u32 vid, u32 port_mask);
+};
+
+struct ar8216_priv {
+ struct switch_dev dev;
+ struct phy_device *phy;
+ u32 (*read)(struct ar8216_priv *priv, int reg);
+ void (*write)(struct ar8216_priv *priv, int reg, u32 val);
+ const struct net_device_ops *ndo_old;
+ struct net_device_ops ndo;
+ struct mutex reg_mutex;
+ u8 chip_ver;
+ u8 chip_rev;
+ const struct ar8xxx_chip *chip;
+ bool initialized;
+ bool port4_phy;
+ char buf[80];
+
+ bool init;
+ bool mii_lo_first;
+
+ /* all fields below are cleared on reset */
+ bool vlan;
+ u16 vlan_id[AR8X16_MAX_VLANS];
+ u8 vlan_table[AR8X16_MAX_VLANS];
+ u8 vlan_tagged;
+ u16 pvid[AR8X16_MAX_PORTS];
+};
+
+#define to_ar8216(_dev) container_of(_dev, struct ar8216_priv, dev)
+
+static inline bool ar8xxx_has_gige(struct ar8216_priv *priv)
+{
+ return priv->chip->caps & AR8XXX_CAP_GIGE;
+}
+
+static inline bool chip_is_ar8216(struct ar8216_priv *priv)
+{
+ return priv->chip_ver == AR8XXX_VER_AR8216;
+}
+
+static inline bool chip_is_ar8236(struct ar8216_priv *priv)
+{
+ return priv->chip_ver == AR8XXX_VER_AR8236;
+}
+
+static inline bool chip_is_ar8316(struct ar8216_priv *priv)
+{
+ return priv->chip_ver == AR8XXX_VER_AR8316;
+}
+
+static inline bool chip_is_ar8327(struct ar8216_priv *priv)
+{
+ return priv->chip_ver == AR8XXX_VER_AR8327;
+}
+
+static inline void
+split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
+{
+ regaddr >>= 1;
+ *r1 = regaddr & 0x1e;
+
+ regaddr >>= 5;
+ *r2 = regaddr & 0x7;
+
+ regaddr >>= 3;
+ *page = regaddr & 0x1ff;
+}
+
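+/*
+ * Worked example: split_addr() above maps switch register 0x100 to
+ * r1 = 0x00, r2 = 0x4, page = 0x0, so ar8216_mii_read() selects page 0
+ * via MDIO address 0x18 and then reads MDIO address 0x10 | 0x4 = 0x14,
+ * registers 0 (low word) and 1 (high word).
+ */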
+static u32
+ar8216_mii_read(struct ar8216_priv *priv, int reg)
+{
+ struct phy_device *phy = priv->phy;
+ struct mii_bus *bus = phy->bus;
+ u16 r1, r2, page;
+ u16 lo, hi;
+
+ split_addr((u32) reg, &r1, &r2, &page);
+
+ mutex_lock(&bus->mdio_lock);
+
+ bus->write(bus, 0x18, 0, page);
+ usleep_range(1000, 2000); /* wait for the page switch to propagate */
+ lo = bus->read(bus, 0x10 | r2, r1);
+ hi = bus->read(bus, 0x10 | r2, r1 + 1);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return (hi << 16) | lo;
+}
+
+static void
+ar8216_mii_write(struct ar8216_priv *priv, int reg, u32 val)
+{
+ struct phy_device *phy = priv->phy;
+ struct mii_bus *bus = phy->bus;
+ u16 r1, r2, r3;
+ u16 lo, hi;
+
+ split_addr((u32) reg, &r1, &r2, &r3);
+ lo = val & 0xffff;
+ hi = (u16) (val >> 16);
+
+ mutex_lock(&bus->mdio_lock);
+
+ bus->write(bus, 0x18, 0, r3);
+ usleep_range(1000, 2000); /* wait for the page switch to propagate */
+ if (priv->mii_lo_first) {
+ bus->write(bus, 0x10 | r2, r1, lo);
+ bus->write(bus, 0x10 | r2, r1 + 1, hi);
+ } else {
+ bus->write(bus, 0x10 | r2, r1 + 1, hi);
+ bus->write(bus, 0x10 | r2, r1, lo);
+ }
+
+ mutex_unlock(&bus->mdio_lock);
+}
+
+static void
+ar8216_phy_dbg_write(struct ar8216_priv *priv, int phy_addr,
+ u16 dbg_addr, u16 dbg_data)
+{
+ struct mii_bus *bus = priv->phy->bus;
+
+ mutex_lock(&bus->mdio_lock);
+ bus->write(bus, phy_addr, MII_ATH_DBG_ADDR, dbg_addr);
+ bus->write(bus, phy_addr, MII_ATH_DBG_DATA, dbg_data);
+ mutex_unlock(&bus->mdio_lock);
+}
+
+static void
+ar8216_phy_mmd_write(struct ar8216_priv *priv, int phy_addr, u16 addr, u16 data)
+{
+ struct mii_bus *bus = priv->phy->bus;
+
+ mutex_lock(&bus->mdio_lock);
+ bus->write(bus, phy_addr, MII_ATH_MMD_ADDR, addr);
+ bus->write(bus, phy_addr, MII_ATH_MMD_DATA, data);
+ mutex_unlock(&bus->mdio_lock);
+}
+
+static u32
+ar8216_rmw(struct ar8216_priv *priv, int reg, u32 mask, u32 val)
+{
+ u32 v;
+
+ lockdep_assert_held(&priv->reg_mutex);
+
+ v = priv->read(priv, reg);
+ v &= ~mask;
+ v |= val;
+ priv->write(priv, reg, v);
+
+ return v;
+}
+
+static void
+ar8216_read_port_link(struct ar8216_priv *priv, int port,
+ struct switch_port_link *link)
+{
+ u32 status;
+ u32 speed;
+
+ memset(link, '\0', sizeof(*link));
+
+ status = priv->chip->read_port_status(priv, port);
+
+ link->aneg = !!(status & AR8216_PORT_STATUS_LINK_AUTO);
+ if (link->aneg) {
+ link->link = !!(status & AR8216_PORT_STATUS_LINK_UP);
+ if (!link->link)
+ return;
+ } else {
+ link->link = true;
+ }
+
+ link->duplex = !!(status & AR8216_PORT_STATUS_DUPLEX);
+ link->tx_flow = !!(status & AR8216_PORT_STATUS_TXFLOW);
+ link->rx_flow = !!(status & AR8216_PORT_STATUS_RXFLOW);
+
+ speed = (status & AR8216_PORT_STATUS_SPEED) >>
+ AR8216_PORT_STATUS_SPEED_S;
+
+ switch (speed) {
+ case AR8216_PORT_SPEED_10M:
+ link->speed = SWITCH_PORT_SPEED_10;
+ break;
+ case AR8216_PORT_SPEED_100M:
+ link->speed = SWITCH_PORT_SPEED_100;
+ break;
+ case AR8216_PORT_SPEED_1000M:
+ link->speed = SWITCH_PORT_SPEED_1000;
+ break;
+ default:
+ link->speed = SWITCH_PORT_SPEED_UNKNOWN;
+ break;
+ }
+}
+
+static struct sk_buff *
+ar8216_mangle_tx(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ar8216_priv *priv = dev->phy_ptr;
+ unsigned char *buf;
+
+ if (unlikely(!priv))
+ goto error;
+
+ if (!priv->vlan)
+ goto send;
+
+ if (unlikely(skb_headroom(skb) < 2)) {
+ if (pskb_expand_head(skb, 2, 0, GFP_ATOMIC) < 0)
+ goto error;
+ }
+
+ buf = skb_push(skb, 2);
+ buf[0] = 0x10;
+ buf[1] = 0x80;
+
+send:
+ return skb;
+
+error:
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
+static void
+ar8216_mangle_rx(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ar8216_priv *priv;
+ unsigned char *buf;
+ int port, vlan;
+
+ priv = dev->phy_ptr;
+ if (!priv)
+ return;
+
+ /* don't strip the header if vlan mode is disabled */
+ if (!priv->vlan)
+ return;
+
+ /* strip header, get vlan id */
+ buf = skb->data;
+ skb_pull(skb, 2);
+
+ /* check for vlan header presence */
+ if ((buf[12 + 2] != 0x81) || (buf[13 + 2] != 0x00))
+ return;
+
+ port = buf[0] & 0xf;
+
+ /* no need to fix up packets coming from a tagged source */
+ if (priv->vlan_tagged & (1 << port))
+ return;
+
+ /* look up the port vid in the local table; the switch passes an invalid vlan id */
+ vlan = priv->vlan_id[priv->pvid[port]];
+
+ buf[14 + 2] &= 0xf0;
+ buf[14 + 2] |= vlan >> 8;
+ buf[15 + 2] = vlan & 0xff;
+}
+
+static int
+ar8216_wait_bit(struct ar8216_priv *priv, int reg, u32 mask, u32 val)
+{
+ int timeout = 20;
+ u32 t = 0;
+
+ while (1) {
+ t = priv->read(priv, reg);
+ if ((t & mask) == val)
+ return 0;
+
+ if (timeout-- <= 0)
+ break;
+
+ udelay(10);
+ }
+
+ pr_err("ar8216: timeout on reg %08x: %08x & %08x != %08x\n",
+ (unsigned int) reg, t, mask, val);
+ return -ETIMEDOUT;
+}
+
+static void
+ar8216_vtu_op(struct ar8216_priv *priv, u32 op, u32 val)
+{
+ if (ar8216_wait_bit(priv, AR8216_REG_VTU, AR8216_VTU_ACTIVE, 0))
+ return;
+ if ((op & AR8216_VTU_OP) == AR8216_VTU_OP_LOAD) {
+ val &= AR8216_VTUDATA_MEMBER;
+ val |= AR8216_VTUDATA_VALID;
+ priv->write(priv, AR8216_REG_VTU_DATA, val);
+ }
+ op |= AR8216_VTU_ACTIVE;
+ priv->write(priv, AR8216_REG_VTU, op);
+}
+
+static void
+ar8216_vtu_flush(struct ar8216_priv *priv)
+{
+ ar8216_vtu_op(priv, AR8216_VTU_OP_FLUSH, 0);
+}
+
+static void
+ar8216_vtu_load_vlan(struct ar8216_priv *priv, u32 vid, u32 port_mask)
+{
+ u32 op;
+
+ op = AR8216_VTU_OP_LOAD | (vid << AR8216_VTU_VID_S);
+ ar8216_vtu_op(priv, op, port_mask);
+}
+
+static int
+ar8216_atu_flush(struct ar8216_priv *priv)
+{
+ int ret;
+
+ ret = ar8216_wait_bit(priv, AR8216_REG_ATU, AR8216_ATU_ACTIVE, 0);
+ if (!ret)
+ priv->write(priv, AR8216_REG_ATU, AR8216_ATU_OP_FLUSH);
+
+ return ret;
+}
+
+static u32
+ar8216_read_port_status(struct ar8216_priv *priv, int port)
+{
+ return priv->read(priv, AR8216_REG_PORT_STATUS(port));
+}
+
+static void
+ar8216_setup_port(struct ar8216_priv *priv, int port, u32 egress, u32 ingress,
+ u32 members, u32 pvid)
+{
+ u32 header;
+
+ if (chip_is_ar8216(priv) && priv->vlan && port == AR8216_PORT_CPU)
+ header = AR8216_PORT_CTRL_HEADER;
+ else
+ header = 0;
+
+ ar8216_rmw(priv, AR8216_REG_PORT_CTRL(port),
+ AR8216_PORT_CTRL_LEARN | AR8216_PORT_CTRL_VLAN_MODE |
+ AR8216_PORT_CTRL_SINGLE_VLAN | AR8216_PORT_CTRL_STATE |
+ AR8216_PORT_CTRL_HEADER | AR8216_PORT_CTRL_LEARN_LOCK,
+ AR8216_PORT_CTRL_LEARN | header |
+ (egress << AR8216_PORT_CTRL_VLAN_MODE_S) |
+ (AR8216_PORT_STATE_FORWARD << AR8216_PORT_CTRL_STATE_S));
+
+ ar8216_rmw(priv, AR8216_REG_PORT_VLAN(port),
+ AR8216_PORT_VLAN_DEST_PORTS | AR8216_PORT_VLAN_MODE |
+ AR8216_PORT_VLAN_DEFAULT_ID,
+ (members << AR8216_PORT_VLAN_DEST_PORTS_S) |
+ (ingress << AR8216_PORT_VLAN_MODE_S) |
+ (pvid << AR8216_PORT_VLAN_DEFAULT_ID_S));
+}
+
+static int
+ar8216_hw_init(struct ar8216_priv *priv)
+{
+ return 0;
+}
+
+static void
+ar8216_init_globals(struct ar8216_priv *priv)
+{
+ /* standard atheros magic */
+ priv->write(priv, 0x38, 0xc000050e);
+
+ ar8216_rmw(priv, AR8216_REG_GLOBAL_CTRL,
+ AR8216_GCTRL_MTU, 1518 + 8 + 2);
+}
+
+static void
+ar8216_init_port(struct ar8216_priv *priv, int port)
+{
+ /* Enable port learning and tx */
+ priv->write(priv, AR8216_REG_PORT_CTRL(port),
+ AR8216_PORT_CTRL_LEARN |
+ (4 << AR8216_PORT_CTRL_STATE_S));
+
+ priv->write(priv, AR8216_REG_PORT_VLAN(port), 0);
+
+ if (port == AR8216_PORT_CPU) {
+ priv->write(priv, AR8216_REG_PORT_STATUS(port),
+ AR8216_PORT_STATUS_LINK_UP |
+ (ar8xxx_has_gige(priv) ?
+ AR8216_PORT_SPEED_1000M : AR8216_PORT_SPEED_100M) |
+ AR8216_PORT_STATUS_TXMAC |
+ AR8216_PORT_STATUS_RXMAC |
+ (chip_is_ar8316(priv) ? AR8216_PORT_STATUS_RXFLOW : 0) |
+ (chip_is_ar8316(priv) ? AR8216_PORT_STATUS_TXFLOW : 0) |
+ AR8216_PORT_STATUS_DUPLEX);
+ } else {
+ priv->write(priv, AR8216_REG_PORT_STATUS(port),
+ AR8216_PORT_STATUS_LINK_AUTO);
+ }
+}
+
+static const struct ar8xxx_chip ar8216_chip = {
+ .hw_init = ar8216_hw_init,
+ .init_globals = ar8216_init_globals,
+ .init_port = ar8216_init_port,
+ .setup_port = ar8216_setup_port,
+ .read_port_status = ar8216_read_port_status,
+ .atu_flush = ar8216_atu_flush,
+ .vtu_flush = ar8216_vtu_flush,
+ .vtu_load_vlan = ar8216_vtu_load_vlan,
+};
+
+static void
+ar8236_setup_port(struct ar8216_priv *priv, int port, u32 egress, u32 ingress,
+ u32 members, u32 pvid)
+{
+ ar8216_rmw(priv, AR8216_REG_PORT_CTRL(port),
+ AR8216_PORT_CTRL_LEARN | AR8216_PORT_CTRL_VLAN_MODE |
+ AR8216_PORT_CTRL_SINGLE_VLAN | AR8216_PORT_CTRL_STATE |
+ AR8216_PORT_CTRL_HEADER | AR8216_PORT_CTRL_LEARN_LOCK,
+ AR8216_PORT_CTRL_LEARN |
+ (egress << AR8216_PORT_CTRL_VLAN_MODE_S) |
+ (AR8216_PORT_STATE_FORWARD << AR8216_PORT_CTRL_STATE_S));
+
+ ar8216_rmw(priv, AR8236_REG_PORT_VLAN(port),
+ AR8236_PORT_VLAN_DEFAULT_ID,
+ (pvid << AR8236_PORT_VLAN_DEFAULT_ID_S));
+
+ ar8216_rmw(priv, AR8236_REG_PORT_VLAN2(port),
+ AR8236_PORT_VLAN2_VLAN_MODE |
+ AR8236_PORT_VLAN2_MEMBER,
+ (ingress << AR8236_PORT_VLAN2_VLAN_MODE_S) |
+ (members << AR8236_PORT_VLAN2_MEMBER_S));
+}
+
+static int
+ar8236_hw_init(struct ar8216_priv *priv)
+{
+ int i;
+ struct mii_bus *bus;
+
+ if (priv->initialized)
+ return 0;
+
+ /* Initialize the PHYs */
+ bus = priv->phy->bus;
+ for (i = 0; i < 5; i++) {
+ mdiobus_write(bus, i, MII_ADVERTISE,
+ ADVERTISE_ALL | ADVERTISE_PAUSE_CAP |
+ ADVERTISE_PAUSE_ASYM);
+ mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
+ }
+ msleep(1000);
+
+ priv->initialized = true;
+ return 0;
+}
+
+static void
+ar8236_init_globals(struct ar8216_priv *priv)
+{
+ /* enable jumbo frames */
+ ar8216_rmw(priv, AR8216_REG_GLOBAL_CTRL,
+ AR8316_GCTRL_MTU, 9018 + 8 + 2);
+}
+
+static const struct ar8xxx_chip ar8236_chip = {
+ .hw_init = ar8236_hw_init,
+ .init_globals = ar8236_init_globals,
+ .init_port = ar8216_init_port,
+ .setup_port = ar8236_setup_port,
+ .read_port_status = ar8216_read_port_status,
+ .atu_flush = ar8216_atu_flush,
+ .vtu_flush = ar8216_vtu_flush,
+ .vtu_load_vlan = ar8216_vtu_load_vlan,
+};
+
+static int
+ar8316_hw_init(struct ar8216_priv *priv)
+{
+ int i;
+ u32 val, newval;
+ struct mii_bus *bus;
+
+ val = priv->read(priv, 0x8);
+
+ if (priv->phy->interface == PHY_INTERFACE_MODE_RGMII) {
+ if (priv->port4_phy) {
+ /* value taken from Ubiquiti RouterStation Pro */
+ newval = 0x81461bea;
+ printk(KERN_INFO "ar8316: Using port 4 as PHY\n");
+ } else {
+ newval = 0x01261be2;
+ printk(KERN_INFO "ar8316: Using port 4 as switch port\n");
+ }
+ } else if (priv->phy->interface == PHY_INTERFACE_MODE_GMII) {
+ /* value taken from AVM Fritz!Box 7390 sources */
+ newval = 0x010e5b71;
+ } else {
+ /* no known value for phy interface */
+ printk(KERN_ERR "ar8316: unsupported mii mode: %d.\n",
+ priv->phy->interface);
+ return -EINVAL;
+ }
+
+ if (val == newval)
+ goto out;
+
+ priv->write(priv, 0x8, newval);
+
+ /* Initialize the ports */
+ bus = priv->phy->bus;
+ for (i = 0; i < 5; i++) {
+ if ((i == 4) && priv->port4_phy &&
+ priv->phy->interface == PHY_INTERFACE_MODE_RGMII) {
+ /* work around for phy4 rgmii mode */
+ ar8216_phy_dbg_write(priv, i, 0x12, 0x480c);
+ /* rx delay */
+ ar8216_phy_dbg_write(priv, i, 0x0, 0x824e);
+ /* tx delay */
+ ar8216_phy_dbg_write(priv, i, 0x5, 0x3d47);
+ msleep(1000);
+ }
+
+ /* initialize the port itself */
+ mdiobus_write(bus, i, MII_ADVERTISE,
+ ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ mdiobus_write(bus, i, MII_CTRL1000, ADVERTISE_1000FULL);
+ mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
+ msleep(1000);
+ }
+
+out:
+ priv->initialized = true;
+ return 0;
+}
+
+static void
+ar8316_init_globals(struct ar8216_priv *priv)
+{
+ /* standard atheros magic */
+ priv->write(priv, 0x38, 0xc000050e);
+
+ /* enable cpu port to receive multicast and broadcast frames */
+ priv->write(priv, AR8216_REG_FLOOD_MASK, 0x003f003f);
+
+ /* enable jumbo frames */
+ ar8216_rmw(priv, AR8216_REG_GLOBAL_CTRL,
+ AR8316_GCTRL_MTU, 9018 + 8 + 2);
+}
+
+static const struct ar8xxx_chip ar8316_chip = {
+ .caps = AR8XXX_CAP_GIGE,
+ .hw_init = ar8316_hw_init,
+ .init_globals = ar8316_init_globals,
+ .init_port = ar8216_init_port,
+ .setup_port = ar8216_setup_port,
+ .read_port_status = ar8216_read_port_status,
+ .atu_flush = ar8216_atu_flush,
+ .vtu_flush = ar8216_vtu_flush,
+ .vtu_load_vlan = ar8216_vtu_load_vlan,
+};
+
+static u32
+ar8327_get_pad_cfg(struct ar8327_pad_cfg *cfg)
+{
+ u32 t;
+
+ if (!cfg)
+ return 0;
+
+ t = 0;
+ switch (cfg->mode) {
+ case AR8327_PAD_NC:
+ break;
+
+ case AR8327_PAD_MAC2MAC_MII:
+ t = AR8327_PAD_MAC_MII_EN;
+ if (cfg->rxclk_sel)
+ t |= AR8327_PAD_MAC_MII_RXCLK_SEL;
+ if (cfg->txclk_sel)
+ t |= AR8327_PAD_MAC_MII_TXCLK_SEL;
+ break;
+
+ case AR8327_PAD_MAC2MAC_GMII:
+ t = AR8327_PAD_MAC_GMII_EN;
+ if (cfg->rxclk_sel)
+ t |= AR8327_PAD_MAC_GMII_RXCLK_SEL;
+ if (cfg->txclk_sel)
+ t |= AR8327_PAD_MAC_GMII_TXCLK_SEL;
+ break;
+
+ case AR8327_PAD_MAC_SGMII:
+ t = AR8327_PAD_SGMII_EN;
+ break;
+
+ case AR8327_PAD_MAC2PHY_MII:
+ t = AR8327_PAD_PHY_MII_EN;
+ if (cfg->rxclk_sel)
+ t |= AR8327_PAD_PHY_MII_RXCLK_SEL;
+ if (cfg->txclk_sel)
+ t |= AR8327_PAD_PHY_MII_TXCLK_SEL;
+ break;
+
+ case AR8327_PAD_MAC2PHY_GMII:
+ t = AR8327_PAD_PHY_GMII_EN;
+ if (cfg->pipe_rxclk_sel)
+ t |= AR8327_PAD_PHY_GMII_PIPE_RXCLK_SEL;
+ if (cfg->rxclk_sel)
+ t |= AR8327_PAD_PHY_GMII_RXCLK_SEL;
+ if (cfg->txclk_sel)
+ t |= AR8327_PAD_PHY_GMII_TXCLK_SEL;
+ break;
+
+ case AR8327_PAD_MAC_RGMII:
+ t = AR8327_PAD_RGMII_EN;
+ t |= cfg->txclk_delay_sel << AR8327_PAD_RGMII_TXCLK_DELAY_SEL_S;
+ t |= cfg->rxclk_delay_sel << AR8327_PAD_RGMII_RXCLK_DELAY_SEL_S;
+ if (cfg->rxclk_delay_en)
+ t |= AR8327_PAD_RGMII_RXCLK_DELAY_EN;
+ if (cfg->txclk_delay_en)
+ t |= AR8327_PAD_RGMII_TXCLK_DELAY_EN;
+ break;
+
+ case AR8327_PAD_PHY_GMII:
+ t = AR8327_PAD_PHYX_GMII_EN;
+ break;
+
+ case AR8327_PAD_PHY_RGMII:
+ t = AR8327_PAD_PHYX_RGMII_EN;
+ break;
+
+ case AR8327_PAD_PHY_MII:
+ t = AR8327_PAD_PHYX_MII_EN;
+ break;
+ }
+
+ return t;
+}
+
+static void
+ar8327_phy_fixup(struct ar8216_priv *priv, int phy)
+{
+ switch (priv->chip_rev) {
+ case 1:
+ /* For 100M waveform */
+ ar8216_phy_dbg_write(priv, phy, 0, 0x02ea);
+ /* Turn on Gigabit clock */
+ ar8216_phy_dbg_write(priv, phy, 0x3d, 0x68a0);
+ break;
+
+ case 2:
+ ar8216_phy_mmd_write(priv, phy, 0x7, 0x3c);
+ ar8216_phy_mmd_write(priv, phy, 0x4007, 0x0);
+ /* fallthrough */
+ case 4:
+ ar8216_phy_mmd_write(priv, phy, 0x3, 0x800d);
+ ar8216_phy_mmd_write(priv, phy, 0x4003, 0x803f);
+
+ ar8216_phy_dbg_write(priv, phy, 0x3d, 0x6860);
+ ar8216_phy_dbg_write(priv, phy, 0x5, 0x2c46);
+ ar8216_phy_dbg_write(priv, phy, 0x3c, 0x6000);
+ break;
+ }
+}
+
+static int
+ar8327_hw_init(struct ar8216_priv *priv)
+{
+ struct ar8327_platform_data *pdata;
+ struct ar8327_led_cfg *led_cfg;
+ struct mii_bus *bus;
+ u32 pos, new_pos;
+ u32 t;
+ int i;
+
+ pdata = priv->phy->dev.platform_data;
+ if (!pdata)
+ return -EINVAL;
+
+ t = ar8327_get_pad_cfg(pdata->pad0_cfg);
+ priv->write(priv, AR8327_REG_PAD0_MODE, t);
+ t = ar8327_get_pad_cfg(pdata->pad5_cfg);
+ priv->write(priv, AR8327_REG_PAD5_MODE, t);
+ t = ar8327_get_pad_cfg(pdata->pad6_cfg);
+ priv->write(priv, AR8327_REG_PAD6_MODE, t);
+
+ pos = priv->read(priv, AR8327_REG_POWER_ON_STRIP);
+ new_pos = pos;
+
+ led_cfg = pdata->led_cfg;
+ if (led_cfg) {
+ if (led_cfg->open_drain)
+ new_pos |= AR8327_POWER_ON_STRIP_LED_OPEN_EN;
+ else
+ new_pos &= ~AR8327_POWER_ON_STRIP_LED_OPEN_EN;
+
+ priv->write(priv, AR8327_REG_LED_CTRL0, led_cfg->led_ctrl0);
+ priv->write(priv, AR8327_REG_LED_CTRL1, led_cfg->led_ctrl1);
+ priv->write(priv, AR8327_REG_LED_CTRL2, led_cfg->led_ctrl2);
+ priv->write(priv, AR8327_REG_LED_CTRL3, led_cfg->led_ctrl3);
+ }
+
+ if (new_pos != pos) {
+ new_pos |= AR8327_POWER_ON_STRIP_POWER_ON_SEL;
+ priv->write(priv, AR8327_REG_POWER_ON_STRIP, new_pos);
+ }
+
+ bus = priv->phy->bus;
+ for (i = 0; i < AR8327_NUM_PHYS; i++) {
+ ar8327_phy_fixup(priv, i);
+
+ /* start aneg on the PHY */
+ mdiobus_write(bus, i, MII_ADVERTISE, ADVERTISE_ALL |
+ ADVERTISE_PAUSE_CAP |
+ ADVERTISE_PAUSE_ASYM);
+ mdiobus_write(bus, i, MII_CTRL1000, ADVERTISE_1000FULL);
+ mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
+ }
+
+ msleep(1000);
+
+ return 0;
+}
+
+static void
+ar8327_init_globals(struct ar8216_priv *priv)
+{
+ u32 t;
+
+ /* enable CPU port and disable mirror port */
+ t = AR8327_FWD_CTRL0_CPU_PORT_EN |
+ AR8327_FWD_CTRL0_MIRROR_PORT;
+ priv->write(priv, AR8327_REG_FWD_CTRL0, t);
+
+ /* forward multicast and broadcast frames to CPU */
+ t = (AR8327_PORTS_ALL << AR8327_FWD_CTRL1_UC_FLOOD_S) |
+ (AR8327_PORTS_ALL << AR8327_FWD_CTRL1_MC_FLOOD_S) |
+ (AR8327_PORTS_ALL << AR8327_FWD_CTRL1_BC_FLOOD_S);
+ priv->write(priv, AR8327_REG_FWD_CTRL1, t);
+
+ /* setup MTU */
+ ar8216_rmw(priv, AR8327_REG_MAX_FRAME_SIZE,
+ AR8327_MAX_FRAME_SIZE_MTU, 1518 + 8 + 2);
+}
+
+static void
+ar8327_init_cpuport(struct ar8216_priv *priv)
+{
+ struct ar8327_platform_data *pdata;
+ struct ar8327_port_cfg *cfg;
+ u32 t;
+
+ pdata = priv->phy->dev.platform_data;
+ if (!pdata)
+ return;
+
+ cfg = &pdata->cpuport_cfg;
+ if (!cfg->force_link) {
+ priv->write(priv, AR8327_REG_PORT_STATUS(AR8216_PORT_CPU),
+ AR8216_PORT_STATUS_LINK_AUTO);
+ return;
+ }
+
+ t = AR8216_PORT_STATUS_TXMAC | AR8216_PORT_STATUS_RXMAC;
+ t |= cfg->duplex ? AR8216_PORT_STATUS_DUPLEX : 0;
+ t |= cfg->rxpause ? AR8216_PORT_STATUS_RXFLOW : 0;
+ t |= cfg->txpause ? AR8216_PORT_STATUS_TXFLOW : 0;
+ switch (cfg->speed) {
+ case AR8327_PORT_SPEED_10:
+ t |= AR8216_PORT_SPEED_10M;
+ break;
+ case AR8327_PORT_SPEED_100:
+ t |= AR8216_PORT_SPEED_100M;
+ break;
+ case AR8327_PORT_SPEED_1000:
+ t |= AR8216_PORT_SPEED_1000M;
+ break;
+ }
+
+ priv->write(priv, AR8327_REG_PORT_STATUS(AR8216_PORT_CPU), t);
+}
+
+static void
+ar8327_init_port(struct ar8216_priv *priv, int port)
+{
+ u32 t;
+
+ if (port == AR8216_PORT_CPU) {
+ ar8327_init_cpuport(priv);
+ } else {
+ t = AR8216_PORT_STATUS_LINK_AUTO;
+ priv->write(priv, AR8327_REG_PORT_STATUS(port), t);
+ }
+
+ priv->write(priv, AR8327_REG_PORT_HEADER(port), 0);
+
+ priv->write(priv, AR8327_REG_PORT_VLAN0(port), 0);
+
+ t = AR8327_PORT_VLAN1_OUT_MODE_UNTOUCH << AR8327_PORT_VLAN1_OUT_MODE_S;
+ priv->write(priv, AR8327_REG_PORT_VLAN1(port), t);
+
+ t = AR8327_PORT_LOOKUP_LEARN;
+ t |= AR8216_PORT_STATE_FORWARD << AR8327_PORT_LOOKUP_STATE_S;
+ priv->write(priv, AR8327_REG_PORT_LOOKUP(port), t);
+}
+
+static u32
+ar8327_read_port_status(struct ar8216_priv *priv, int port)
+{
+ return priv->read(priv, AR8327_REG_PORT_STATUS(port));
+}
+
+static int
+ar8327_atu_flush(struct ar8216_priv *priv)
+{
+ int ret;
+
+ ret = ar8216_wait_bit(priv, AR8327_REG_ATU_FUNC,
+ AR8327_ATU_FUNC_BUSY, 0);
+ if (!ret)
+ priv->write(priv, AR8327_REG_ATU_FUNC,
+ AR8327_ATU_FUNC_OP_FLUSH);
+
+ return ret;
+}
+
+static void
+ar8327_vtu_op(struct ar8216_priv *priv, u32 op, u32 val)
+{
+ if (ar8216_wait_bit(priv, AR8327_REG_VTU_FUNC1,
+ AR8327_VTU_FUNC1_BUSY, 0))
+ return;
+
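+ /* a LOAD operation expects the entry data in VTU_FUNC0 before the op is issued */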
+ if ((op & AR8327_VTU_FUNC1_OP) == AR8327_VTU_FUNC1_OP_LOAD)
+ priv->write(priv, AR8327_REG_VTU_FUNC0, val);
+
+ op |= AR8327_VTU_FUNC1_BUSY;
+ priv->write(priv, AR8327_REG_VTU_FUNC1, op);
+}
+
+static void
+ar8327_vtu_flush(struct ar8216_priv *priv)
+{
+ ar8327_vtu_op(priv, AR8327_VTU_FUNC1_OP_FLUSH, 0);
+}
+
+static void
+ar8327_vtu_load_vlan(struct ar8216_priv *priv, u32 vid, u32 port_mask)
+{
+ u32 op;
+ u32 val;
+ int i;
+
+ op = AR8327_VTU_FUNC1_OP_LOAD | (vid << AR8327_VTU_FUNC1_VID_S);
+ val = AR8327_VTU_FUNC0_VALID | AR8327_VTU_FUNC0_IVL;
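+ /* select the egress tagging mode for each port that is a member of this VLAN */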
+ for (i = 0; i < AR8327_NUM_PORTS; i++) {
+ u32 mode;
+
+ if ((port_mask & BIT(i)) == 0)
+ mode = AR8327_VTU_FUNC0_EG_MODE_NOT;
+ else if (priv->vlan == 0)
+ mode = AR8327_VTU_FUNC0_EG_MODE_KEEP;
+ else if (priv->vlan_tagged & BIT(i))
+ mode = AR8327_VTU_FUNC0_EG_MODE_TAG;
+ else
+ mode = AR8327_VTU_FUNC0_EG_MODE_UNTAG;
+
+ val |= mode << AR8327_VTU_FUNC0_EG_MODE_S(i);
+ }
+ ar8327_vtu_op(priv, op, val);
+}
+
+static void
+ar8327_setup_port(struct ar8216_priv *priv, int port, u32 egress, u32 ingress,
+ u32 members, u32 pvid)
+{
+ u32 t;
+ u32 mode;
+
+ t = pvid << AR8327_PORT_VLAN0_DEF_SVID_S;
+ t |= pvid << AR8327_PORT_VLAN0_DEF_CVID_S;
+ priv->write(priv, AR8327_REG_PORT_VLAN0(port), t);
+
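+ /* translate the generic egress mode into the AR8327 specific output mode */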
+ mode = AR8327_PORT_VLAN1_OUT_MODE_UNMOD;
+ switch (egress) {
+ case AR8216_OUT_KEEP:
+ mode = AR8327_PORT_VLAN1_OUT_MODE_UNTOUCH;
+ break;
+ case AR8216_OUT_STRIP_VLAN:
+ mode = AR8327_PORT_VLAN1_OUT_MODE_UNTAG;
+ break;
+ case AR8216_OUT_ADD_VLAN:
+ mode = AR8327_PORT_VLAN1_OUT_MODE_TAG;
+ break;
+ }
+
+ t = AR8327_PORT_VLAN1_PORT_VLAN_PROP;
+ t |= mode << AR8327_PORT_VLAN1_OUT_MODE_S;
+ priv->write(priv, AR8327_REG_PORT_VLAN1(port), t);
+
+ t = members;
+ t |= AR8327_PORT_LOOKUP_LEARN;
+ t |= ingress << AR8327_PORT_LOOKUP_IN_MODE_S;
+ t |= AR8216_PORT_STATE_FORWARD << AR8327_PORT_LOOKUP_STATE_S;
+ priv->write(priv, AR8327_REG_PORT_LOOKUP(port), t);
+}
+
+static const struct ar8xxx_chip ar8327_chip = {
+ .caps = AR8XXX_CAP_GIGE,
+ .hw_init = ar8327_hw_init,
+ .init_globals = ar8327_init_globals,
+ .init_port = ar8327_init_port,
+ .setup_port = ar8327_setup_port,
+ .read_port_status = ar8327_read_port_status,
+ .atu_flush = ar8327_atu_flush,
+ .vtu_flush = ar8327_vtu_flush,
+ .vtu_load_vlan = ar8327_vtu_load_vlan,
+};
+
+static int
+ar8216_sw_set_vlan(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct ar8216_priv *priv = to_ar8216(dev);
+ priv->vlan = !!val->value.i;
+ return 0;
+}
+
+static int
+ar8216_sw_get_vlan(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct ar8216_priv *priv = to_ar8216(dev);
+ val->value.i = priv->vlan;
+ return 0;
+}
+
+
+static int
+ar8216_sw_set_pvid(struct switch_dev *dev, int port, int vlan)
+{
+ struct ar8216_priv *priv = to_ar8216(dev);
+
+ /* make sure no invalid PVIDs get set */
+
+ if (vlan >= dev->vlans)
+ return -EINVAL;
+
+ priv->pvid[port] = vlan;
+ return 0;
+}
+
+static int
+ar8216_sw_get_pvid(struct switch_dev *dev, int port, int *vlan)
+{
+ struct ar8216_priv *priv = to_ar8216(dev);
+ *vlan = priv->pvid[port];
+ return 0;
+}
+
+static int
+ar8216_sw_set_vid(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct ar8216_priv *priv = to_ar8216(dev);
+ priv->vlan_id[val->port_vlan] = val->value.i;
+ return 0;
+}
+
+static int
+ar8216_sw_get_vid(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct ar8216_priv *priv = to_ar8216(dev);
+ val->value.i = priv->vlan_id[val->port_vlan];
+ return 0;
+}
+
+static int
+ar8216_sw_get_port_link(struct switch_dev *dev, int port,
+ struct switch_port_link *link)
+{
+ struct ar8216_priv *priv = to_ar8216(dev);
+
+ ar8216_read_port_link(priv, port, link);
+ return 0;
+}
+
+static int
+ar8216_sw_get_ports(struct switch_dev *dev, struct switch_val *val)
+{
+ struct ar8216_priv *priv = to_ar8216(dev);
+ u8 ports = priv->vlan_table[val->port_vlan];
+ int i;
+
+ val->len = 0;
+ for (i = 0; i < dev->ports; i++) {
+ struct switch_port *p;
+
+ if (!(ports & (1 << i)))
+ continue;
+
+ p = &val->value.ports[val->len++];
+ p->id = i;
+ if (priv->vlan_tagged & (1 << i))
+ p->flags = (1 << SWITCH_PORT_FLAG_TAGGED);
+ else
+ p->flags = 0;
+ }
+ return 0;
+}
+
+static int
+ar8216_sw_set_ports(struct switch_dev *dev, struct switch_val *val)
+{
+ struct ar8216_priv *priv = to_ar8216(dev);
+ u8 *vt = &priv->vlan_table[val->port_vlan];
+ int i, j;
+
+ *vt = 0;
+ for (i = 0; i < val->len; i++) {
+ struct switch_port *p = &val->value.ports[i];
+
+ if (p->flags & (1 << SWITCH_PORT_FLAG_TAGGED)) {
+ priv->vlan_tagged |= (1 << p->id);
+ } else {
+ priv->vlan_tagged &= ~(1 << p->id);
+ priv->pvid[p->id] = val->port_vlan;
+
+ /* make sure that an untagged port does not
+ * appear in other vlans */
+ for (j = 0; j < AR8X16_MAX_VLANS; j++) {
+ if (j == val->port_vlan)
+ continue;
+ priv->vlan_table[j] &= ~(1 << p->id);
+ }
+ }
+
+ *vt |= 1 << p->id;
+ }
+ return 0;
+}
+
+static int
+ar8216_sw_hw_apply(struct switch_dev *dev)
+{
+ struct ar8216_priv *priv = to_ar8216(dev);
+ u8 portmask[AR8X16_MAX_PORTS];
+ int i, j;
+
+ mutex_lock(&priv->reg_mutex);
+ /* flush all vlan translation unit entries */
+ priv->chip->vtu_flush(priv);
+
+ memset(portmask, 0, sizeof(portmask));
+ if (!priv->init) {
+ /* calculate the port destination masks and load vlans
+ * into the vlan translation unit */
+ for (j = 0; j < AR8X16_MAX_VLANS; j++) {
+ u8 vp = priv->vlan_table[j];
+
+ if (!vp)
+ continue;
+
+ for (i = 0; i < dev->ports; i++) {
+ u8 mask = (1 << i);
+ if (vp & mask)
+ portmask[i] |= vp & ~mask;
+ }
+
+ priv->chip->vtu_load_vlan(priv, priv->vlan_id[j],
+ priv->vlan_table[j]);
+ }
+ } else {
+ /* initial setup:
+ * isolate all ports, but connect them to the cpu port */
+ for (i = 0; i < dev->ports; i++) {
+ if (i == AR8216_PORT_CPU)
+ continue;
+
+ portmask[i] = 1 << AR8216_PORT_CPU;
+ portmask[AR8216_PORT_CPU] |= (1 << i);
+ }
+ }
+
+ /* update the port destination mask registers and tag settings */
+ for (i = 0; i < dev->ports; i++) {
+ int egress, ingress;
+ int pvid;
+
+ if (priv->vlan) {
+ pvid = priv->vlan_id[priv->pvid[i]];
+ if (priv->vlan_tagged & (1 << i))
+ egress = AR8216_OUT_ADD_VLAN;
+ else
+ egress = AR8216_OUT_STRIP_VLAN;
+ ingress = AR8216_IN_SECURE;
+ } else {
+ pvid = i;
+ egress = AR8216_OUT_KEEP;
+ ingress = AR8216_IN_PORT_ONLY;
+ }
+
+ priv->chip->setup_port(priv, i, egress, ingress, portmask[i],
+ pvid);
+ }
+ mutex_unlock(&priv->reg_mutex);
+ return 0;
+}
+
+static int
+ar8216_sw_reset_switch(struct switch_dev *dev)
+{
+ struct ar8216_priv *priv = to_ar8216(dev);
+ int i;
+
+ mutex_lock(&priv->reg_mutex);
+ memset(&priv->vlan, 0, sizeof(struct ar8216_priv) -
+ offsetof(struct ar8216_priv, vlan));
+
+ for (i = 0; i < AR8X16_MAX_VLANS; i++)
+ priv->vlan_id[i] = i;
+
+ /* Configure all ports */
+ for (i = 0; i < dev->ports; i++)
+ priv->chip->init_port(priv, i);
+
+ priv->chip->init_globals(priv);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ar8216_sw_hw_apply(dev);
+}
+
+static struct switch_attr ar8216_globals[] = {
+ {
+ .type = SWITCH_TYPE_INT,
+ .name = "enable_vlan",
+ .description = "Enable VLAN mode",
+ .set = ar8216_sw_set_vlan,
+ .get = ar8216_sw_get_vlan,
+ .max = 1
+ },
+};
+
+static struct switch_attr ar8216_port[] = {
+};
+
+static struct switch_attr ar8216_vlan[] = {
+ {
+ .type = SWITCH_TYPE_INT,
+ .name = "vid",
+ .description = "VLAN ID (0-4094)",
+ .set = ar8216_sw_set_vid,
+ .get = ar8216_sw_get_vid,
+ .max = 4094,
+ },
+};
+
+static const struct switch_dev_ops ar8216_sw_ops = {
+ .attr_global = {
+ .attr = ar8216_globals,
+ .n_attr = ARRAY_SIZE(ar8216_globals),
+ },
+ .attr_port = {
+ .attr = ar8216_port,
+ .n_attr = ARRAY_SIZE(ar8216_port),
+ },
+ .attr_vlan = {
+ .attr = ar8216_vlan,
+ .n_attr = ARRAY_SIZE(ar8216_vlan),
+ },
+ .get_port_pvid = ar8216_sw_get_pvid,
+ .set_port_pvid = ar8216_sw_set_pvid,
+ .get_vlan_ports = ar8216_sw_get_ports,
+ .set_vlan_ports = ar8216_sw_set_ports,
+ .apply_config = ar8216_sw_hw_apply,
+ .reset_switch = ar8216_sw_reset_switch,
+ .get_port_link = ar8216_sw_get_port_link,
+};
+
+static int
+ar8216_id_chip(struct ar8216_priv *priv)
+{
+ u32 val;
+ u16 id;
+ int i;
+
+ val = ar8216_mii_read(priv, AR8216_REG_CTRL);
+ if (val == ~0)
+ return -ENODEV;
+
+ id = val & (AR8216_CTRL_REVISION | AR8216_CTRL_VERSION);
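+ /* re-read the ID register several times to make sure the value is stable */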
+ for (i = 0; i < AR8X16_PROBE_RETRIES; i++) {
+ u16 t;
+
+ val = ar8216_mii_read(priv, AR8216_REG_CTRL);
+ if (val == ~0)
+ return -ENODEV;
+
+ t = val & (AR8216_CTRL_REVISION | AR8216_CTRL_VERSION);
+ if (t != id)
+ return -ENODEV;
+ }
+
+ priv->chip_ver = (id & AR8216_CTRL_VERSION) >> AR8216_CTRL_VERSION_S;
+ priv->chip_rev = (id & AR8216_CTRL_REVISION);
+
+ switch (priv->chip_ver) {
+ case AR8XXX_VER_AR8216:
+ priv->chip = &ar8216_chip;
+ break;
+ case AR8XXX_VER_AR8236:
+ priv->chip = &ar8236_chip;
+ break;
+ case AR8XXX_VER_AR8316:
+ priv->chip = &ar8316_chip;
+ break;
+ case AR8XXX_VER_AR8327:
+ priv->mii_lo_first = true;
+ priv->chip = &ar8327_chip;
+ break;
+ default:
+ printk(KERN_DEBUG
+ "ar8216: Unknown Atheros device [ver=%d, rev=%d, phy_id=%04x%04x]\n",
+ priv->chip_ver, priv->chip_rev,
+ mdiobus_read(priv->phy->bus, priv->phy->addr, 2),
+ mdiobus_read(priv->phy->bus, priv->phy->addr, 3));
+
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int
+ar8216_config_init(struct phy_device *pdev)
+{
+ struct ar8216_priv *priv = pdev->priv;
+ struct net_device *dev = pdev->attached_dev;
+ struct switch_dev *swdev;
+ int ret;
+
+ if (!priv) {
+ priv = kzalloc(sizeof(struct ar8216_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+ }
+
+ priv->phy = pdev;
+
+ ret = ar8216_id_chip(priv);
+ if (ret)
+ goto err_free_priv;
+
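+ /* the switch is registered via PHY address 0 only; for other addresses just
+ * adjust the PHY features and handle the AR8316 port 4 special case */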
+ if (pdev->addr != 0) {
+ if (ar8xxx_has_gige(priv)) {
+ pdev->supported |= SUPPORTED_1000baseT_Full;
+ pdev->advertising |= ADVERTISED_1000baseT_Full;
+ }
+
+ if (chip_is_ar8316(priv)) {
+ /* check if we're attaching to the switch twice */
+ pdev = pdev->bus->phy_map[0];
+ if (!pdev) {
+ kfree(priv);
+ return 0;
+ }
+
+ /* switch device has not been initialized, reuse priv */
+ if (!pdev->priv) {
+ priv->port4_phy = true;
+ pdev->priv = priv;
+ return 0;
+ }
+
+ kfree(priv);
+
+ /* switch device has been initialized, reinit */
+ priv = pdev->priv;
+ priv->dev.ports = (AR8216_NUM_PORTS - 1);
+ priv->initialized = false;
+ priv->port4_phy = true;
+ ar8316_hw_init(priv);
+ return 0;
+ }
+
+ kfree(priv);
+ return 0;
+ }
+
+ if (ar8xxx_has_gige(priv))
+ pdev->supported = SUPPORTED_1000baseT_Full;
+ else
+ pdev->supported = SUPPORTED_100baseT_Full;
+ pdev->advertising = pdev->supported;
+
+ mutex_init(&priv->reg_mutex);
+ priv->read = ar8216_mii_read;
+ priv->write = ar8216_mii_write;
+
+ pdev->priv = priv;
+
+ swdev = &priv->dev;
+ swdev->cpu_port = AR8216_PORT_CPU;
+ swdev->ops = &ar8216_sw_ops;
+ swdev->ports = AR8216_NUM_PORTS;
+
+ if (chip_is_ar8316(priv)) {
+ swdev->name = "Atheros AR8316";
+ swdev->vlans = AR8X16_MAX_VLANS;
+
+ if (priv->port4_phy) {
+ /* port 5 connected to the other mac, therefore unusable */
+ swdev->ports = (AR8216_NUM_PORTS - 1);
+ }
+ } else if (chip_is_ar8236(priv)) {
+ swdev->name = "Atheros AR8236";
+ swdev->vlans = AR8216_NUM_VLANS;
+ swdev->ports = AR8216_NUM_PORTS;
+ } else if (chip_is_ar8327(priv)) {
+ swdev->name = "Atheros AR8327";
+ swdev->vlans = AR8X16_MAX_VLANS;
+ swdev->ports = AR8327_NUM_PORTS;
+ } else {
+ swdev->name = "Atheros AR8216";
+ swdev->vlans = AR8216_NUM_VLANS;
+ }
+
+ ret = register_switch(&priv->dev, pdev->attached_dev);
+ if (ret)
+ goto err_free_priv;
+
+ printk(KERN_INFO "%s: %s switch driver attached.\n",
+ pdev->attached_dev->name, swdev->name);
+
+ priv->init = true;
+
+ ret = priv->chip->hw_init(priv);
+ if (ret)
+ goto err_free_priv;
+
+ ret = ar8216_sw_reset_switch(&priv->dev);
+ if (ret)
+ goto err_free_priv;
+
+ dev->phy_ptr = priv;
+
+ /* VID fixup only needed on ar8216 */
+ if (chip_is_ar8216(priv) && pdev->addr == 0) {
+ dev->priv_flags |= IFF_NO_IP_ALIGN;
+ dev->eth_mangle_rx = ar8216_mangle_rx;
+ dev->eth_mangle_tx = ar8216_mangle_tx;
+ }
+
+ priv->init = false;
+
+ return 0;
+
+err_free_priv:
+ kfree(priv);
+ return ret;
+}
+
+static int
+ar8216_read_status(struct phy_device *phydev)
+{
+ struct ar8216_priv *priv = phydev->priv;
+ struct switch_port_link link;
+ int ret;
+
+ if (phydev->addr != 0)
+ return genphy_read_status(phydev);
+
+ ar8216_read_port_link(priv, phydev->addr, &link);
+ phydev->link = !!link.link;
+ if (!phydev->link)
+ return 0;
+
+ switch (link.speed) {
+ case SWITCH_PORT_SPEED_10:
+ phydev->speed = SPEED_10;
+ break;
+ case SWITCH_PORT_SPEED_100:
+ phydev->speed = SPEED_100;
+ break;
+ case SWITCH_PORT_SPEED_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ default:
+ phydev->speed = 0;
+ }
+ phydev->duplex = link.duplex ? DUPLEX_FULL : DUPLEX_HALF;
+
+ /* flush the address translation unit */
+ mutex_lock(&priv->reg_mutex);
+ ret = priv->chip->atu_flush(priv);
+ mutex_unlock(&priv->reg_mutex);
+
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
+
+ return ret;
+}
+
+static int
+ar8216_config_aneg(struct phy_device *phydev)
+{
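+ /* PHY address 0 represents the switch itself; no autonegotiation to configure */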
+ if (phydev->addr == 0)
+ return 0;
+
+ return genphy_config_aneg(phydev);
+}
+
+static int
+ar8216_probe(struct phy_device *pdev)
+{
+ struct ar8216_priv priv;
+
+ priv.phy = pdev;
+ return ar8216_id_chip(&priv);
+}
+
+static void
+ar8216_remove(struct phy_device *pdev)
+{
+ struct ar8216_priv *priv = pdev->priv;
+ struct net_device *dev = pdev->attached_dev;
+
+ if (!priv)
+ return;
+
+ dev->priv_flags &= ~IFF_NO_IP_ALIGN;
+ dev->eth_mangle_rx = NULL;
+ dev->eth_mangle_tx = NULL;
+
+ if (pdev->addr == 0)
+ unregister_switch(&priv->dev);
+ kfree(priv);
+}
+
+static struct phy_driver ar8216_driver = {
+ .phy_id = 0x004d0000,
+ .name = "Atheros AR8216/AR8236/AR8316",
+ .phy_id_mask = 0xffff0000,
+ .features = PHY_BASIC_FEATURES,
+ .probe = ar8216_probe,
+ .remove = ar8216_remove,
+ .config_init = &ar8216_config_init,
+ .config_aneg = &ar8216_config_aneg,
+ .read_status = &ar8216_read_status,
+ .driver = { .owner = THIS_MODULE },
+};
+
+int __init
+ar8216_init(void)
+{
+ return phy_driver_register(&ar8216_driver);
+}
+
+void __exit
+ar8216_exit(void)
+{
+ phy_driver_unregister(&ar8216_driver);
+}
+
+module_init(ar8216_init);
+module_exit(ar8216_exit);
+MODULE_LICENSE("GPL");
+
diff --git a/target/linux/generic/files/drivers/net/phy/ar8216.h b/target/linux/generic/files/drivers/net/phy/ar8216.h
new file mode 100644
index 000000000..8948c5205
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/ar8216.h
@@ -0,0 +1,341 @@
+/*
+ * ar8216.h: AR8216 switch driver
+ *
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __AR8216_H
+#define __AR8216_H
+
+#define BITS(_s, _n) (((1UL << (_n)) - 1) << (_s))
+
+#define AR8216_PORT_CPU 0
+#define AR8216_NUM_PORTS 6
+#define AR8216_NUM_VLANS 16
+#define AR8316_NUM_VLANS 4096
+
+/* Atheros specific MII registers */
+#define MII_ATH_MMD_ADDR 0x0d
+#define MII_ATH_MMD_DATA 0x0e
+#define MII_ATH_DBG_ADDR 0x1d
+#define MII_ATH_DBG_DATA 0x1e
+
+#define AR8216_REG_CTRL 0x0000
+#define AR8216_CTRL_REVISION BITS(0, 8)
+#define AR8216_CTRL_REVISION_S 0
+#define AR8216_CTRL_VERSION BITS(8, 8)
+#define AR8216_CTRL_VERSION_S 8
+#define AR8216_CTRL_RESET BIT(31)
+
+#define AR8216_REG_FLOOD_MASK 0x002C
+#define AR8216_FM_UNI_DEST_PORTS BITS(0, 6)
+#define AR8216_FM_MULTI_DEST_PORTS BITS(16, 6)
+
+#define AR8216_REG_GLOBAL_CTRL 0x0030
+#define AR8216_GCTRL_MTU BITS(0, 11)
+#define AR8236_GCTRL_MTU BITS(0, 14)
+#define AR8316_GCTRL_MTU BITS(0, 14)
+
+#define AR8216_REG_VTU 0x0040
+#define AR8216_VTU_OP BITS(0, 3)
+#define AR8216_VTU_OP_NOOP 0x0
+#define AR8216_VTU_OP_FLUSH 0x1
+#define AR8216_VTU_OP_LOAD 0x2
+#define AR8216_VTU_OP_PURGE 0x3
+#define AR8216_VTU_OP_REMOVE_PORT 0x4
+#define AR8216_VTU_ACTIVE BIT(3)
+#define AR8216_VTU_FULL BIT(4)
+#define AR8216_VTU_PORT BITS(8, 4)
+#define AR8216_VTU_PORT_S 8
+#define AR8216_VTU_VID BITS(16, 12)
+#define AR8216_VTU_VID_S 16
+#define AR8216_VTU_PRIO BITS(28, 3)
+#define AR8216_VTU_PRIO_S 28
+#define AR8216_VTU_PRIO_EN BIT(31)
+
+#define AR8216_REG_VTU_DATA 0x0044
+#define AR8216_VTUDATA_MEMBER BITS(0, 10)
+#define AR8236_VTUDATA_MEMBER BITS(0, 7)
+#define AR8216_VTUDATA_VALID BIT(11)
+
+#define AR8216_REG_ATU 0x0050
+#define AR8216_ATU_OP BITS(0, 3)
+#define AR8216_ATU_OP_NOOP 0x0
+#define AR8216_ATU_OP_FLUSH 0x1
+#define AR8216_ATU_OP_LOAD 0x2
+#define AR8216_ATU_OP_PURGE 0x3
+#define AR8216_ATU_OP_FLUSH_LOCKED 0x4
+#define AR8216_ATU_OP_FLUSH_UNICAST 0x5
+#define AR8216_ATU_OP_GET_NEXT 0x6
+#define AR8216_ATU_ACTIVE BIT(3)
+#define AR8216_ATU_PORT_NUM BITS(8, 4)
+#define AR8216_ATU_FULL_VIO BIT(12)
+#define AR8216_ATU_ADDR4 BITS(16, 8)
+#define AR8216_ATU_ADDR5 BITS(24, 8)
+
+#define AR8216_REG_ATU_DATA 0x0054
+#define AR8216_ATU_ADDR3 BITS(0, 8)
+#define AR8216_ATU_ADDR2 BITS(8, 8)
+#define AR8216_ATU_ADDR1 BITS(16, 8)
+#define AR8216_ATU_ADDR0 BITS(24, 8)
+
+#define AR8216_REG_ATU_CTRL 0x005C
+#define AR8216_ATU_CTRL_AGE_EN BIT(17)
+#define AR8216_ATU_CTRL_AGE_TIME BITS(0, 16)
+#define AR8216_ATU_CTRL_AGE_TIME_S 0
+
+#define AR8216_PORT_OFFSET(_i) (0x0100 * (_i + 1))
+#define AR8216_REG_PORT_STATUS(_i) (AR8216_PORT_OFFSET(_i) + 0x0000)
+#define AR8216_PORT_STATUS_SPEED BITS(0,2)
+#define AR8216_PORT_STATUS_SPEED_S 0
+#define AR8216_PORT_STATUS_TXMAC BIT(2)
+#define AR8216_PORT_STATUS_RXMAC BIT(3)
+#define AR8216_PORT_STATUS_TXFLOW BIT(4)
+#define AR8216_PORT_STATUS_RXFLOW BIT(5)
+#define AR8216_PORT_STATUS_DUPLEX BIT(6)
+#define AR8216_PORT_STATUS_LINK_UP BIT(8)
+#define AR8216_PORT_STATUS_LINK_AUTO BIT(9)
+#define AR8216_PORT_STATUS_LINK_PAUSE BIT(10)
+
+#define AR8216_REG_PORT_CTRL(_i) (AR8216_PORT_OFFSET(_i) + 0x0004)
+
+/* port forwarding state */
+#define AR8216_PORT_CTRL_STATE BITS(0, 3)
+#define AR8216_PORT_CTRL_STATE_S 0
+
+#define AR8216_PORT_CTRL_LEARN_LOCK BIT(7)
+
+/* egress 802.1q mode */
+#define AR8216_PORT_CTRL_VLAN_MODE BITS(8, 2)
+#define AR8216_PORT_CTRL_VLAN_MODE_S 8
+
+#define AR8216_PORT_CTRL_IGMP_SNOOP BIT(10)
+#define AR8216_PORT_CTRL_HEADER BIT(11)
+#define AR8216_PORT_CTRL_MAC_LOOP BIT(12)
+#define AR8216_PORT_CTRL_SINGLE_VLAN BIT(13)
+#define AR8216_PORT_CTRL_LEARN BIT(14)
+#define AR8216_PORT_CTRL_MIRROR_TX BIT(16)
+#define AR8216_PORT_CTRL_MIRROR_RX BIT(17)
+
+#define AR8216_REG_PORT_VLAN(_i) (AR8216_PORT_OFFSET(_i) + 0x0008)
+
+#define AR8216_PORT_VLAN_DEFAULT_ID BITS(0, 12)
+#define AR8216_PORT_VLAN_DEFAULT_ID_S 0
+
+#define AR8216_PORT_VLAN_DEST_PORTS BITS(16, 9)
+#define AR8216_PORT_VLAN_DEST_PORTS_S 16
+
+/* bit0 added to the priority field of egress frames */
+#define AR8216_PORT_VLAN_TX_PRIO BIT(27)
+
+/* port default priority */
+#define AR8216_PORT_VLAN_PRIORITY BITS(28, 2)
+#define AR8216_PORT_VLAN_PRIORITY_S 28
+
+/* ingress 802.1q mode */
+#define AR8216_PORT_VLAN_MODE BITS(30, 2)
+#define AR8216_PORT_VLAN_MODE_S 30
+
+#define AR8216_REG_PORT_RATE(_i) (AR8216_PORT_OFFSET(_i) + 0x000c)
+#define AR8216_REG_PORT_PRIO(_i) (AR8216_PORT_OFFSET(_i) + 0x0010)
+
+
+#define AR8236_REG_PORT_VLAN(_i) (AR8216_PORT_OFFSET((_i)) + 0x0008)
+#define AR8236_PORT_VLAN_DEFAULT_ID BITS(16, 12)
+#define AR8236_PORT_VLAN_DEFAULT_ID_S 16
+#define AR8236_PORT_VLAN_PRIORITY BITS(29, 3)
+#define AR8236_PORT_VLAN_PRIORITY_S 29
+
+#define AR8236_REG_PORT_VLAN2(_i) (AR8216_PORT_OFFSET((_i)) + 0x000c)
+#define AR8236_PORT_VLAN2_MEMBER BITS(16, 7)
+#define AR8236_PORT_VLAN2_MEMBER_S 16
+#define AR8236_PORT_VLAN2_TX_PRIO BIT(23)
+#define AR8236_PORT_VLAN2_VLAN_MODE BITS(30, 2)
+#define AR8236_PORT_VLAN2_VLAN_MODE_S 30
+
+#define AR8327_NUM_PORTS 7
+#define AR8327_NUM_PHYS 5
+#define AR8327_PORTS_ALL 0x7f
+
+#define AR8327_REG_MASK 0x000
+
+#define AR8327_REG_PAD0_MODE 0x004
+#define AR8327_REG_PAD5_MODE 0x008
+#define AR8327_REG_PAD6_MODE 0x00c
+#define AR8327_PAD_MAC_MII_RXCLK_SEL BIT(0)
+#define AR8327_PAD_MAC_MII_TXCLK_SEL BIT(1)
+#define AR8327_PAD_MAC_MII_EN BIT(2)
+#define AR8327_PAD_MAC_GMII_RXCLK_SEL BIT(4)
+#define AR8327_PAD_MAC_GMII_TXCLK_SEL BIT(5)
+#define AR8327_PAD_MAC_GMII_EN BIT(6)
+#define AR8327_PAD_SGMII_EN BIT(7)
+#define AR8327_PAD_PHY_MII_RXCLK_SEL BIT(8)
+#define AR8327_PAD_PHY_MII_TXCLK_SEL BIT(9)
+#define AR8327_PAD_PHY_MII_EN BIT(10)
+#define AR8327_PAD_PHY_GMII_PIPE_RXCLK_SEL BIT(11)
+#define AR8327_PAD_PHY_GMII_RXCLK_SEL BIT(12)
+#define AR8327_PAD_PHY_GMII_TXCLK_SEL BIT(13)
+#define AR8327_PAD_PHY_GMII_EN BIT(14)
+#define AR8327_PAD_PHYX_GMII_EN BIT(16)
+#define AR8327_PAD_PHYX_RGMII_EN BIT(17)
+#define AR8327_PAD_PHYX_MII_EN BIT(18)
+#define AR8327_PAD_RGMII_RXCLK_DELAY_SEL BITS(20, 2)
+#define AR8327_PAD_RGMII_RXCLK_DELAY_SEL_S 20
+#define AR8327_PAD_RGMII_TXCLK_DELAY_SEL BITS(22, 2)
+#define AR8327_PAD_RGMII_TXCLK_DELAY_SEL_S 22
+#define AR8327_PAD_RGMII_RXCLK_DELAY_EN BIT(24)
+#define AR8327_PAD_RGMII_TXCLK_DELAY_EN BIT(25)
+#define AR8327_PAD_RGMII_EN BIT(26)
+
+#define AR8327_REG_POWER_ON_STRIP 0x010
+#define AR8327_POWER_ON_STRIP_POWER_ON_SEL BIT(31)
+#define AR8327_POWER_ON_STRIP_LED_OPEN_EN BIT(24)
+
+#define AR8327_REG_INT_STATUS0 0x020
+#define AR8327_INT0_VT_DONE BIT(20)
+
+#define AR8327_REG_INT_STATUS1 0x024
+#define AR8327_REG_INT_MASK0 0x028
+#define AR8327_REG_INT_MASK1 0x02c
+#define AR8327_REG_SERVICE_TAG 0x048
+#define AR8327_REG_LED_CTRL0 0x050
+#define AR8327_REG_LED_CTRL1 0x054
+#define AR8327_REG_LED_CTRL2 0x058
+#define AR8327_REG_LED_CTRL3 0x05c
+#define AR8327_REG_MAC_ADDR0 0x060
+#define AR8327_REG_MAC_ADDR1 0x064
+
+#define AR8327_REG_MAX_FRAME_SIZE 0x078
+#define AR8327_MAX_FRAME_SIZE_MTU BITS(0, 14)
+
+#define AR8327_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
+
+#define AR8327_REG_HEADER_CTRL 0x098
+#define AR8327_REG_PORT_HEADER(_i) (0x09c + (_i) * 4)
+
+#define AR8327_REG_PORT_VLAN0(_i) (0x420 + (_i) * 0x8)
+#define AR8327_PORT_VLAN0_DEF_SVID BITS(0, 12)
+#define AR8327_PORT_VLAN0_DEF_SVID_S 0
+#define AR8327_PORT_VLAN0_DEF_CVID BITS(16, 12)
+#define AR8327_PORT_VLAN0_DEF_CVID_S 16
+
+#define AR8327_REG_PORT_VLAN1(_i) (0x424 + (_i) * 0x8)
+#define AR8327_PORT_VLAN1_PORT_VLAN_PROP BIT(6)
+#define AR8327_PORT_VLAN1_OUT_MODE BITS(12, 2)
+#define AR8327_PORT_VLAN1_OUT_MODE_S 12
+#define AR8327_PORT_VLAN1_OUT_MODE_UNMOD 0
+#define AR8327_PORT_VLAN1_OUT_MODE_UNTAG 1
+#define AR8327_PORT_VLAN1_OUT_MODE_TAG 2
+#define AR8327_PORT_VLAN1_OUT_MODE_UNTOUCH 3
+
+#define AR8327_REG_ATU_DATA0 0x600
+#define AR8327_REG_ATU_DATA1 0x604
+#define AR8327_REG_ATU_DATA2 0x608
+
+#define AR8327_REG_ATU_FUNC 0x60c
+#define AR8327_ATU_FUNC_OP BITS(0, 4)
+#define AR8327_ATU_FUNC_OP_NOOP 0x0
+#define AR8327_ATU_FUNC_OP_FLUSH 0x1
+#define AR8327_ATU_FUNC_OP_LOAD 0x2
+#define AR8327_ATU_FUNC_OP_PURGE 0x3
+#define AR8327_ATU_FUNC_OP_FLUSH_LOCKED 0x4
+#define AR8327_ATU_FUNC_OP_FLUSH_UNICAST 0x5
+#define AR8327_ATU_FUNC_OP_GET_NEXT 0x6
+#define AR8327_ATU_FUNC_OP_SEARCH_MAC 0x7
+#define AR8327_ATU_FUNC_OP_CHANGE_TRUNK 0x8
+#define AR8327_ATU_FUNC_BUSY BIT(31)
+
+#define AR8327_REG_VTU_FUNC0 0x0610
+#define AR8327_VTU_FUNC0_EG_MODE BITS(4, 14)
+#define AR8327_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2)
+#define AR8327_VTU_FUNC0_EG_MODE_KEEP 0
+#define AR8327_VTU_FUNC0_EG_MODE_UNTAG 1
+#define AR8327_VTU_FUNC0_EG_MODE_TAG 2
+#define AR8327_VTU_FUNC0_EG_MODE_NOT 3
+#define AR8327_VTU_FUNC0_IVL BIT(19)
+#define AR8327_VTU_FUNC0_VALID BIT(20)
+
+#define AR8327_REG_VTU_FUNC1 0x0614
+#define AR8327_VTU_FUNC1_OP BITS(0, 3)
+#define AR8327_VTU_FUNC1_OP_NOOP 0
+#define AR8327_VTU_FUNC1_OP_FLUSH 1
+#define AR8327_VTU_FUNC1_OP_LOAD 2
+#define AR8327_VTU_FUNC1_OP_PURGE 3
+#define AR8327_VTU_FUNC1_OP_REMOVE_PORT 4
+#define AR8327_VTU_FUNC1_OP_GET_NEXT 5
+#define AR8327_VTU_FUNC1_OP_GET_ONE 6
+#define AR8327_VTU_FUNC1_FULL BIT(4)
+#define AR8327_VTU_FUNC1_PORT BITS(8, 4)
+#define AR8327_VTU_FUNC1_PORT_S 8
+#define AR8327_VTU_FUNC1_VID BITS(16, 12)
+#define AR8327_VTU_FUNC1_VID_S 16
+#define AR8327_VTU_FUNC1_BUSY BIT(31)
+
+#define AR8327_REG_FWD_CTRL0 0x620
+#define AR8327_FWD_CTRL0_CPU_PORT_EN BIT(10)
+#define AR8327_FWD_CTRL0_MIRROR_PORT BITS(4, 4)
+#define AR8327_FWD_CTRL0_MIRROR_PORT_S 4
+
+#define AR8327_REG_FWD_CTRL1 0x624
+#define AR8327_FWD_CTRL1_UC_FLOOD BITS(0, 7)
+#define AR8327_FWD_CTRL1_UC_FLOOD_S 0
+#define AR8327_FWD_CTRL1_MC_FLOOD BITS(8, 7)
+#define AR8327_FWD_CTRL1_MC_FLOOD_S 8
+#define AR8327_FWD_CTRL1_BC_FLOOD BITS(16, 7)
+#define AR8327_FWD_CTRL1_BC_FLOOD_S 16
+#define AR8327_FWD_CTRL1_IGMP BITS(24, 7)
+#define AR8327_FWD_CTRL1_IGMP_S 24
+
+#define AR8327_REG_PORT_LOOKUP(_i) (0x660 + (_i) * 0xc)
+#define AR8327_PORT_LOOKUP_MEMBER BITS(0, 7)
+#define AR8327_PORT_LOOKUP_IN_MODE BITS(8, 2)
+#define AR8327_PORT_LOOKUP_IN_MODE_S 8
+#define AR8327_PORT_LOOKUP_STATE BITS(16, 3)
+#define AR8327_PORT_LOOKUP_STATE_S 16
+#define AR8327_PORT_LOOKUP_LEARN BIT(20)
+
+#define AR8327_REG_PORT_PRIO(_i) (0x664 + (_i) * 0xc)
+
+/* port speed */
+enum {
+ AR8216_PORT_SPEED_10M = 0,
+ AR8216_PORT_SPEED_100M = 1,
+ AR8216_PORT_SPEED_1000M = 2,
+ AR8216_PORT_SPEED_ERR = 3,
+};
+
+/* ingress 802.1q mode */
+enum {
+ AR8216_IN_PORT_ONLY = 0,
+ AR8216_IN_PORT_FALLBACK = 1,
+ AR8216_IN_VLAN_ONLY = 2,
+ AR8216_IN_SECURE = 3
+};
+
+/* egress 802.1q mode */
+enum {
+ AR8216_OUT_KEEP = 0,
+ AR8216_OUT_STRIP_VLAN = 1,
+ AR8216_OUT_ADD_VLAN = 2
+};
+
+/* port forwarding state */
+enum {
+ AR8216_PORT_STATE_DISABLED = 0,
+ AR8216_PORT_STATE_BLOCK = 1,
+ AR8216_PORT_STATE_LISTEN = 2,
+ AR8216_PORT_STATE_LEARN = 3,
+ AR8216_PORT_STATE_FORWARD = 4
+};
+
+#endif
diff --git a/target/linux/generic/files/drivers/net/phy/ip17xx.c b/target/linux/generic/files/drivers/net/phy/ip17xx.c
new file mode 100644
index 000000000..c82c39e05
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/ip17xx.c
@@ -0,0 +1,1410 @@
+/*
+ * ip17xx.c: Swconfig configuration for IC+ IP17xx switch family
+ *
+ * Copyright (C) 2008 Patrick Horn <patrick.horn@gmail.com>
+ * Copyright (C) 2008, 2010 Martin Mares <mj@ucw.cz>
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/delay.h>
+#include <linux/switch.h>
+#include <linux/device.h>
+
+#define MAX_VLANS 16
+#define MAX_PORTS 9
+#undef DUMP_MII_IO
+
+typedef struct ip17xx_reg {
+ u16 p; // phy
+ u16 m; // mii
+} reg;
+typedef char bitnum;
+
+#define NOTSUPPORTED {-1,-1}
+
+#define REG_SUPP(x) (((x).m != ((u16)-1)) && ((x).p != (u16)-1))
+
+struct ip17xx_state;
+
+/*********** CONSTANTS ***********/
+struct register_mappings {
+ char *NAME;
+ u16 MODEL_NO; // Compare to bits 4-9 of MII register 0,3.
+ bitnum NUM_PORTS;
+ bitnum CPU_PORT;
+
+/* The default VLAN for each port.
+ Default: 0x0001 for Ports 0,1,2,3
+ 0x0002 for Ports 4,5 */
+ reg VLAN_DEFAULT_TAG_REG[MAX_PORTS];
+
+/* These ports are tagged.
+ Default: 0x00 */
+ reg ADD_TAG_REG;
+ reg REMOVE_TAG_REG;
+ bitnum ADD_TAG_BIT[MAX_PORTS];
+/* These ports are untagged.
+ Default: 0x00 (i.e. do not alter any VLAN tags...)
+ May be set to 0 if the user disables VLANs. */
+ bitnum REMOVE_TAG_BIT[MAX_PORTS];
+
+/* Port M and Port N are on the same VLAN.
+ Default: All ports on all VLANs. */
+// Use register {29, 19+N/2}
+ reg VLAN_LOOKUP_REG;
+// Port 5 uses register {30, 18} but same as odd bits.
+ reg VLAN_LOOKUP_REG_5; // in a different register on IP175C.
+ bitnum VLAN_LOOKUP_EVEN_BIT[MAX_PORTS];
+ bitnum VLAN_LOOKUP_ODD_BIT[MAX_PORTS];
+
+/* This VLAN corresponds to which ports.
+ Default: 0x2f,0x30,0x3f,0x3f... */
+ reg TAG_VLAN_MASK_REG;
+ bitnum TAG_VLAN_MASK_EVEN_BIT[MAX_PORTS];
+ bitnum TAG_VLAN_MASK_ODD_BIT[MAX_PORTS];
+
+ int RESET_VAL;
+ reg RESET_REG;
+
+ reg MODE_REG;
+ int MODE_VAL;
+
+/* General flags */
+ reg ROUTER_CONTROL_REG;
+ reg VLAN_CONTROL_REG;
+ bitnum TAG_VLAN_BIT;
+ bitnum ROUTER_EN_BIT;
+ bitnum NUMLAN_GROUPS_MAX;
+ bitnum NUMLAN_GROUPS_BIT;
+
+ reg MII_REGISTER_EN;
+ bitnum MII_REGISTER_EN_BIT;
+
+ // set to 1 for 178C, 0 for 175C.
+ bitnum SIMPLE_VLAN_REGISTERS; // 175C has two vlans per register but 178C has only one.
+
+ // Pointers to functions which manipulate hardware state
+ int (*update_state)(struct ip17xx_state *state);
+ int (*set_vlan_mode)(struct ip17xx_state *state);
+ int (*reset)(struct ip17xx_state *state);
+};
+
+static int ip175c_update_state(struct ip17xx_state *state);
+static int ip175c_set_vlan_mode(struct ip17xx_state *state);
+static int ip175c_reset(struct ip17xx_state *state);
+
+static const struct register_mappings IP178C = {
+ .NAME = "IP178C",
+ .MODEL_NO = 0x18,
+ .VLAN_DEFAULT_TAG_REG = {
+ {30,3},{30,4},{30,5},{30,6},{30,7},{30,8},
+ {30,9},{30,10},{30,11},
+ },
+
+ .ADD_TAG_REG = {30,12},
+ .ADD_TAG_BIT = {0,1,2,3,4,5,6,7,8},
+ .REMOVE_TAG_REG = {30,13},
+ .REMOVE_TAG_BIT = {4,5,6,7,8,9,10,11,12},
+
+ .SIMPLE_VLAN_REGISTERS = 1,
+
+ .VLAN_LOOKUP_REG = {31,0},// +N
+ .VLAN_LOOKUP_REG_5 = NOTSUPPORTED, // not used with SIMPLE_VLAN_REGISTERS
+ .VLAN_LOOKUP_EVEN_BIT = {0,1,2,3,4,5,6,7,8},
+ .VLAN_LOOKUP_ODD_BIT = {0,1,2,3,4,5,6,7,8},
+
+ .TAG_VLAN_MASK_REG = {30,14}, // +N
+ .TAG_VLAN_MASK_EVEN_BIT = {0,1,2,3,4,5,6,7,8},
+ .TAG_VLAN_MASK_ODD_BIT = {0,1,2,3,4,5,6,7,8},
+
+ .RESET_VAL = 0x55AA,
+ .RESET_REG = {30,0},
+ .MODE_VAL = 0,
+ .MODE_REG = NOTSUPPORTED,
+
+ .ROUTER_CONTROL_REG = {30,30},
+ .ROUTER_EN_BIT = 11,
+ .NUMLAN_GROUPS_MAX = 8,
+ .NUMLAN_GROUPS_BIT = 8, // {0-2}
+
+ .VLAN_CONTROL_REG = {30,13},
+ .TAG_VLAN_BIT = 3,
+
+ .CPU_PORT = 8,
+ .NUM_PORTS = 9,
+
+ .MII_REGISTER_EN = NOTSUPPORTED,
+
+ .update_state = ip175c_update_state,
+ .set_vlan_mode = ip175c_set_vlan_mode,
+ .reset = ip175c_reset,
+};
+
+static const struct register_mappings IP175C = {
+ .NAME = "IP175C",
+ .MODEL_NO = 0x18,
+ .VLAN_DEFAULT_TAG_REG = {
+ {29,24},{29,25},{29,26},{29,27},{29,28},{29,30},
+ NOTSUPPORTED,NOTSUPPORTED,NOTSUPPORTED
+ },
+
+ .ADD_TAG_REG = {29,23},
+ .REMOVE_TAG_REG = {29,23},
+ .ADD_TAG_BIT = {11,12,13,14,15,1,-1,-1,-1},
+ .REMOVE_TAG_BIT = {6,7,8,9,10,0,-1,-1,-1},
+
+ .SIMPLE_VLAN_REGISTERS = 0,
+
+ .VLAN_LOOKUP_REG = {29,19},// +N/2
+ .VLAN_LOOKUP_REG_5 = {30,18},
+ .VLAN_LOOKUP_EVEN_BIT = {8,9,10,11,12,15,-1,-1,-1},
+ .VLAN_LOOKUP_ODD_BIT = {0,1,2,3,4,7,-1,-1,-1},
+
+ .TAG_VLAN_MASK_REG = {30,1}, // +N/2
+ .TAG_VLAN_MASK_EVEN_BIT = {0,1,2,3,4,5,-1,-1,-1},
+ .TAG_VLAN_MASK_ODD_BIT = {8,9,10,11,12,13,-1,-1,-1},
+
+ .RESET_VAL = 0x175C,
+ .RESET_REG = {30,0},
+ .MODE_VAL = 0x175C,
+ .MODE_REG = {29,31},
+
+ .ROUTER_CONTROL_REG = {30,9},
+ .ROUTER_EN_BIT = 3,
+ .NUMLAN_GROUPS_MAX = 8,
+ .NUMLAN_GROUPS_BIT = 0, // {0-2}
+
+ .VLAN_CONTROL_REG = {30,9},
+ .TAG_VLAN_BIT = 7,
+
+ .NUM_PORTS = 6,
+ .CPU_PORT = 5,
+
+ .MII_REGISTER_EN = NOTSUPPORTED,
+
+ .update_state = ip175c_update_state,
+ .set_vlan_mode = ip175c_set_vlan_mode,
+ .reset = ip175c_reset,
+};
+
+static const struct register_mappings IP175A = {
+ .NAME = "IP175A",
+ .MODEL_NO = 0x05,
+ .VLAN_DEFAULT_TAG_REG = {
+ {0,24},{0,25},{0,26},{0,27},{0,28},NOTSUPPORTED,
+ NOTSUPPORTED,NOTSUPPORTED,NOTSUPPORTED
+ },
+
+ .ADD_TAG_REG = {0,23},
+ .REMOVE_TAG_REG = {0,23},
+ .ADD_TAG_BIT = {11,12,13,14,15,-1,-1,-1,-1},
+ .REMOVE_TAG_BIT = {6,7,8,9,10,-1,-1,-1,-1},
+
+ .SIMPLE_VLAN_REGISTERS = 0,
+
+ // Only programmable via EEPROM
+ .VLAN_LOOKUP_REG = NOTSUPPORTED,// +N/2
+ .VLAN_LOOKUP_REG_5 = NOTSUPPORTED,
+ .VLAN_LOOKUP_EVEN_BIT = {8,9,10,11,12,-1,-1,-1,-1},
+ .VLAN_LOOKUP_ODD_BIT = {0,1,2,3,4,-1,-1,-1,-1},
+
+ .TAG_VLAN_MASK_REG = NOTSUPPORTED, // +N/2,
+ .TAG_VLAN_MASK_EVEN_BIT = {-1,-1,-1,-1,-1,-1,-1,-1,-1},
+ .TAG_VLAN_MASK_ODD_BIT = {-1,-1,-1,-1,-1,-1,-1,-1,-1},
+
+ .RESET_VAL = -1,
+ .RESET_REG = NOTSUPPORTED,
+ .MODE_VAL = 0,
+ .MODE_REG = NOTSUPPORTED,
+
+ .ROUTER_CONTROL_REG = NOTSUPPORTED,
+ .VLAN_CONTROL_REG = NOTSUPPORTED,
+ .TAG_VLAN_BIT = -1,
+ .ROUTER_EN_BIT = -1,
+ .NUMLAN_GROUPS_MAX = -1,
+ .NUMLAN_GROUPS_BIT = -1, // {0-2}
+
+ .NUM_PORTS = 5,
+ .CPU_PORT = 4,
+
+ .MII_REGISTER_EN = {0, 18},
+ .MII_REGISTER_EN_BIT = 7,
+
+ .update_state = ip175c_update_state,
+ .set_vlan_mode = ip175c_set_vlan_mode,
+ .reset = ip175c_reset,
+};
+
+
+static int ip175d_update_state(struct ip17xx_state *state);
+static int ip175d_set_vlan_mode(struct ip17xx_state *state);
+static int ip175d_reset(struct ip17xx_state *state);
+
+static const struct register_mappings IP175D = {
+ .NAME = "IP175D",
+ .MODEL_NO = 0x18,
+
+ // The IP175D has a completely different interface, so we leave most
+ // of the registers undefined and switch to different code paths.
+
+ .VLAN_DEFAULT_TAG_REG = {
+ NOTSUPPORTED,NOTSUPPORTED,NOTSUPPORTED,NOTSUPPORTED,
+ NOTSUPPORTED,NOTSUPPORTED,NOTSUPPORTED,NOTSUPPORTED,
+ },
+
+ .ADD_TAG_REG = NOTSUPPORTED,
+ .REMOVE_TAG_REG = NOTSUPPORTED,
+
+ .SIMPLE_VLAN_REGISTERS = 0,
+
+ .VLAN_LOOKUP_REG = NOTSUPPORTED,
+ .VLAN_LOOKUP_REG_5 = NOTSUPPORTED,
+ .TAG_VLAN_MASK_REG = NOTSUPPORTED,
+
+ .RESET_VAL = 0x175D,
+ .RESET_REG = {20,2},
+ .MODE_REG = NOTSUPPORTED,
+
+ .ROUTER_CONTROL_REG = NOTSUPPORTED,
+ .ROUTER_EN_BIT = -1,
+ .NUMLAN_GROUPS_BIT = -1,
+
+ .VLAN_CONTROL_REG = NOTSUPPORTED,
+ .TAG_VLAN_BIT = -1,
+
+ .NUM_PORTS = 6,
+ .CPU_PORT = 5,
+
+ .MII_REGISTER_EN = NOTSUPPORTED,
+
+ .update_state = ip175d_update_state,
+ .set_vlan_mode = ip175d_set_vlan_mode,
+ .reset = ip175d_reset,
+};
+
+struct ip17xx_state {
+ struct switch_dev dev;
+ struct mii_bus *mii_bus;
+ bool registered;
+
+ int router_mode; // ROUTER_EN
+ int vlan_enabled; // TAG_VLAN_EN
+ struct port_state {
+ u16 pvid;
+ unsigned int shareports;
+ } ports[MAX_PORTS];
+ unsigned int add_tag;
+ unsigned int remove_tag;
+ int num_vlans;
+ struct vlan_state {
+ unsigned int ports;
+ unsigned int tag; // VLAN tag (IP175D only)
+ } vlans[MAX_VLANS];
+ const struct register_mappings *regs;
+ reg proc_mii; // phy/reg for the low level register access via swconfig
+
+ char buf[80];
+};
+
+#define get_state(_dev) container_of((_dev), struct ip17xx_state, dev)
+
+static int ip_phy_read(struct ip17xx_state *state, int port, int reg)
+{
+ int val = mdiobus_read(state->mii_bus, port, reg);
+ if (val < 0)
+ pr_warning("IP17xx: Unable to get MII register %d,%d: error %d\n", port, reg, -val);
+#ifdef DUMP_MII_IO
+ else
+ pr_debug("IP17xx: Read MII(%d,%d) -> %04x\n", port, reg, val);
+#endif
+ return val;
+}
+
+static int ip_phy_write(struct ip17xx_state *state, int port, int reg, u16 val)
+{
+ int err;
+
+#ifdef DUMP_MII_IO
+ pr_debug("IP17xx: Write MII(%d,%d) <- %04x\n", port, reg, val);
+#endif
+ err = mdiobus_write(state->mii_bus, port, reg, val);
+ if (err < 0)
+ pr_warning("IP17xx: Unable to write MII register %d,%d: error %d\n", port, reg, -err);
+ return err;
+}
+
+static int ip_phy_write_masked(struct ip17xx_state *state, int port, int reg, unsigned int mask, unsigned int data)
+{
+ int val = ip_phy_read(state, port, reg);
+ if (val < 0)
+ return 0;
+ return ip_phy_write(state, port, reg, (val & ~mask) | data);
+}
+
+static int getPhy(struct ip17xx_state *state, reg mii)
+{
+ if (!REG_SUPP(mii))
+ return -EFAULT;
+ return ip_phy_read(state, mii.p, mii.m);
+}
+
+static int setPhy(struct ip17xx_state *state, reg mii, u16 value)
+{
+ int err;
+
+ if (!REG_SUPP(mii))
+ return -EFAULT;
+ err = ip_phy_write(state, mii.p, mii.m, value);
+ if (err < 0)
+ return err;
+ mdelay(2);
+ getPhy(state, mii);
+ return 0;
+}
+
+
+/**
+ * These two macros are to simplify the mapping of logical bits to the bits in hardware.
+ * NOTE: these macros will return if there is an error!
+ */
+
+#define GET_PORT_BITS(state, bits, addr, bit_lookup) \
+ do { \
+ int i, val = getPhy((state), (addr)); \
+ if (val < 0) \
+ return val; \
+ (bits) = 0; \
+ for (i = 0; i < MAX_PORTS; i++) { \
+ if ((bit_lookup)[i] == -1) continue; \
+ if (val & (1<<(bit_lookup)[i])) \
+ (bits) |= (1<<i); \
+ } \
+ } while (0)
+
+#define SET_PORT_BITS(state, bits, addr, bit_lookup) \
+ do { \
+ int i, val = getPhy((state), (addr)); \
+ if (val < 0) \
+ return val; \
+ for (i = 0; i < MAX_PORTS; i++) { \
+ unsigned int newmask = ((bits)&(1<<i)); \
+ if ((bit_lookup)[i] == -1) continue; \
+ val &= ~(1<<(bit_lookup)[i]); \
+ val |= ((newmask>>i)<<(bit_lookup)[i]); \
+ } \
+ val = setPhy((state), (addr), val); \
+ if (val < 0) \
+ return val; \
+ } while (0)
+
+
+static int get_model(struct ip17xx_state *state)
+{
+ int id1, id2;
+ int oui_id, model_no, rev_no, chip_no;
+
+ id1 = ip_phy_read(state, 0, 2);
+ id2 = ip_phy_read(state, 0, 3);
+ oui_id = (id1 << 6) | ((id2 >> 10) & 0x3f);
+ model_no = (id2 >> 4) & 0x3f;
+ rev_no = id2 & 0xf;
+ pr_debug("IP17xx: Identified oui=%06x model=%02x rev=%X\n", oui_id, model_no, rev_no);
+
+ if (oui_id != 0x0090c3) // No other oui_id should have reached us anyway
+ return -ENODEV;
+
+ if (model_no == IP175A.MODEL_NO) {
+ state->regs = &IP175A;
+ } else if (model_no == IP175C.MODEL_NO) {
+ /*
+ * Several models share the same model_no:
+ * 178C has more PHYs, so we check whether the device responds to a read from PHY5
+ * 175D has a new chip ID register
+ * 175C has neither
+ */
+ if (ip_phy_read(state, 5, 2) == 0x0243) {
+ state->regs = &IP178C;
+ } else {
+ chip_no = ip_phy_read(state, 20, 0);
+ pr_debug("IP17xx: Chip ID register reads %04x\n", chip_no);
+ if (chip_no == 0x175d) {
+ state->regs = &IP175D;
+ } else {
+ state->regs = &IP175C;
+ }
+ }
+ } else {
+ pr_warning("IP17xx: Found an unknown IC+ switch with model number %02x, revision %X.\n", model_no, rev_no);
+ return -EPERM;
+ }
+ return 0;
+}
+
+/*** Low-level functions for the older models ***/
+
+/** Only set vlan and router flags in the switch **/
+static int ip175c_set_flags(struct ip17xx_state *state)
+{
+ int val;
+
+ if (!REG_SUPP(state->regs->ROUTER_CONTROL_REG)) {
+ return 0;
+ }
+
+ val = getPhy(state, state->regs->ROUTER_CONTROL_REG);
+ if (val < 0) {
+ return val;
+ }
+ if (state->regs->ROUTER_EN_BIT >= 0) {
+ if (state->router_mode) {
+ val |= (1<<state->regs->ROUTER_EN_BIT);
+ } else {
+ val &= (~(1<<state->regs->ROUTER_EN_BIT));
+ }
+ }
+ if (state->regs->TAG_VLAN_BIT >= 0) {
+ if (state->vlan_enabled) {
+ val |= (1<<state->regs->TAG_VLAN_BIT);
+ } else {
+ val &= (~(1<<state->regs->TAG_VLAN_BIT));
+ }
+ }
+ if (state->regs->NUMLAN_GROUPS_BIT >= 0) {
+ val &= (~((state->regs->NUMLAN_GROUPS_MAX-1)<<state->regs->NUMLAN_GROUPS_BIT));
+ if (state->num_vlans > state->regs->NUMLAN_GROUPS_MAX) {
+ val |= state->regs->NUMLAN_GROUPS_MAX << state->regs->NUMLAN_GROUPS_BIT;
+ } else if (state->num_vlans >= 1) {
+ val |= (state->num_vlans-1) << state->regs->NUMLAN_GROUPS_BIT;
+ }
+ }
+ return setPhy(state, state->regs->ROUTER_CONTROL_REG, val);
+}
+
+/** Set all VLAN and port state. Usually you should call "correct_vlan_state" first. **/
+static int ip175c_set_state(struct ip17xx_state *state)
+{
+ int j;
+ int i;
+ SET_PORT_BITS(state, state->add_tag,
+ state->regs->ADD_TAG_REG, state->regs->ADD_TAG_BIT);
+ SET_PORT_BITS(state, state->remove_tag,
+ state->regs->REMOVE_TAG_REG, state->regs->REMOVE_TAG_BIT);
+
+ if (REG_SUPP(state->regs->VLAN_LOOKUP_REG)) {
+ for (j=0; j<state->regs->NUM_PORTS; j++) {
+ reg addr;
+ const bitnum *bit_lookup = (j%2==0)?
+ state->regs->VLAN_LOOKUP_EVEN_BIT:
+ state->regs->VLAN_LOOKUP_ODD_BIT;
+
+ addr = state->regs->VLAN_LOOKUP_REG;
+ if (state->regs->SIMPLE_VLAN_REGISTERS) {
+ addr.m += j;
+ } else {
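+ // ports are packed two per lookup register here; port 5 lives in VLAN_LOOKUP_REG_5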
+ switch (j) {
+ case 0:
+ case 1:
+ break;
+ case 2:
+ case 3:
+ addr.m+=1;
+ break;
+ case 4:
+ addr.m+=2;
+ break;
+ case 5:
+ addr = state->regs->VLAN_LOOKUP_REG_5;
+ break;
+ default:
+ addr.m = -1; // shouldn't get here, but...
+ break;
+ }
+ }
+ //printf("shareports for %d is %02X\n",j,state->ports[j].shareports);
+ if (REG_SUPP(addr)) {
+ SET_PORT_BITS(state, state->ports[j].shareports, addr, bit_lookup);
+ }
+ }
+ }
+ if (REG_SUPP(state->regs->TAG_VLAN_MASK_REG)) {
+ for (j=0; j<MAX_VLANS; j++) {
+ reg addr = state->regs->TAG_VLAN_MASK_REG;
+ const bitnum *bit_lookup = (j%2==0)?
+ state->regs->TAG_VLAN_MASK_EVEN_BIT:
+ state->regs->TAG_VLAN_MASK_ODD_BIT;
+ unsigned int vlan_mask;
+ if (state->regs->SIMPLE_VLAN_REGISTERS) {
+ addr.m += j;
+ } else {
+ addr.m += j/2;
+ }
+ vlan_mask = state->vlans[j].ports;
+ SET_PORT_BITS(state, vlan_mask, addr, bit_lookup);
+ }
+ }
+
+ for (i=0; i<MAX_PORTS; i++) {
+ if (REG_SUPP(state->regs->VLAN_DEFAULT_TAG_REG[i])) {
+ int err = setPhy(state, state->regs->VLAN_DEFAULT_TAG_REG[i],
+ state->ports[i].pvid);
+ if (err < 0) {
+ return err;
+ }
+ }
+ }
+
+ return ip175c_set_flags(state);
+}
+
+/**
+ * Uses only the VLAN port mask and the add tag mask to generate the other fields:
+ * which ports are part of the same VLAN, removing vlan tags, and VLAN tag ids.
+ */
+static void ip175c_correct_vlan_state(struct ip17xx_state *state)
+{
+ int i, j;
+ state->num_vlans = 0;
+ for (i=0; i<MAX_VLANS; i++) {
+ if (state->vlans[i].ports != 0) {
+ state->num_vlans = i+1; // Hack -- we need to store the "set" vlans somewhere...
+ }
+ }
+
+ for (i=0; i<state->regs->NUM_PORTS; i++) {
+ unsigned int portmask = (1<<i);
+ if (!state->vlan_enabled) {
+ // Share with everybody!
+ state->ports[i].shareports = (1<<state->regs->NUM_PORTS)-1;
+ continue;
+ }
+ state->ports[i].shareports = portmask;
+ for (j=0; j<MAX_VLANS; j++) {
+ if (state->vlans[j].ports & portmask)
+ state->ports[i].shareports |= state->vlans[j].ports;
+ }
+ }
+}
+
+static int ip175c_update_state(struct ip17xx_state *state)
+{
+ ip175c_correct_vlan_state(state);
+ return ip175c_set_state(state);
+}
+
+static int ip175c_set_vlan_mode(struct ip17xx_state *state)
+{
+ return ip175c_update_state(state);
+}
+
+static int ip175c_reset(struct ip17xx_state *state)
+{
+ int err;
+
+ if (REG_SUPP(state->regs->MODE_REG)) {
+ err = setPhy(state, state->regs->MODE_REG, state->regs->MODE_VAL);
+ if (err < 0)
+ return err;
+ err = getPhy(state, state->regs->MODE_REG);
+ if (err < 0)
+ return err;
+ }
+
+ return ip175c_update_state(state);
+}
+
+/*** Low-level functions for IP175D ***/
+
+static int ip175d_update_state(struct ip17xx_state *state)
+{
+ unsigned int filter_mask = 0;
+ unsigned int ports[16], add[16], rem[16];
+ int i, j;
+ int err = 0;
+
+ for (i = 0; i < 16; i++) {
+ ports[i] = 0;
+ add[i] = 0;
+ rem[i] = 0;
+ if (!state->vlan_enabled) {
+ err |= ip_phy_write(state, 22, 14+i, i+1); // default tags
+ ports[i] = 0x3f;
+ continue;
+ }
+ if (!state->vlans[i].tag) {
+ // Reset the filter
+ err |= ip_phy_write(state, 22, 14+i, 0); // tag
+ continue;
+ }
+ filter_mask |= 1 << i;
+ err |= ip_phy_write(state, 22, 14+i, state->vlans[i].tag);
+ ports[i] = state->vlans[i].ports;
+ for (j = 0; j < 6; j++) {
+ if (ports[i] & (1 << j)) {
+ if (state->add_tag & (1 << j))
+ add[i] |= 1 << j;
+ if (state->remove_tag & (1 << j))
+ rem[i] |= 1 << j;
+ }
+ }
+ }
+
+ // Port masks, tag adds and removals
+ for (i = 0; i < 8; i++) {
+ err |= ip_phy_write(state, 23, i, ports[2*i] | (ports[2*i+1] << 8));
+ err |= ip_phy_write(state, 23, 8+i, add[2*i] | (add[2*i+1] << 8));
+ err |= ip_phy_write(state, 23, 16+i, rem[2*i] | (rem[2*i+1] << 8));
+ }
+ err |= ip_phy_write(state, 22, 10, filter_mask);
+
+ // Default VLAN tag for each port
+ for (i = 0; i < 6; i++)
+ err |= ip_phy_write(state, 22, 4+i, state->vlans[state->ports[i].pvid].tag);
+
+ return (err ? -EIO : 0);
+}
+
+static int ip175d_set_vlan_mode(struct ip17xx_state *state)
+{
+ int i;
+ int err = 0;
+
+ if (state->vlan_enabled) {
+ // VLAN classification rules: tag-based VLANs, use VID to classify,
+ // drop packets that cannot be classified.
+ err |= ip_phy_write_masked(state, 22, 0, 0x3fff, 0x003f);
+
+ // Ingress rules: CFI=1 dropped, null VID is untagged, VID=1 passed,
+ // VID=0xfff discarded, admin both tagged and untagged, ingress
+ // filters enabled.
+ err |= ip_phy_write_masked(state, 22, 1, 0x0fff, 0x0c3f);
+
+ // Egress rules: IGMP processing off, keep VLAN header off
+ err |= ip_phy_write_masked(state, 22, 2, 0x0fff, 0x0000);
+ } else {
+ // VLAN classification rules: everything off & clear table
+ err |= ip_phy_write_masked(state, 22, 0, 0xbfff, 0x8000);
+
+ // Ingress and egress rules: set to defaults
+ err |= ip_phy_write_masked(state, 22, 1, 0x0fff, 0x0c3f);
+ err |= ip_phy_write_masked(state, 22, 2, 0x0fff, 0x0000);
+ }
+
+ // Reset default VLAN for each port to 0
+ for (i = 0; i < 6; i++)
+ state->ports[i].pvid = 0;
+
+ err |= ip175d_update_state(state);
+
+ return (err ? -EIO : 0);
+}
+
+static int ip175d_reset(struct ip17xx_state *state)
+{
+ int err = 0;
+
+ // Disable the special tagging mode
+ err |= ip_phy_write_masked(state, 21, 22, 0x0003, 0x0000);
+
+ // Set 802.1q protocol type
+ err |= ip_phy_write(state, 22, 3, 0x8100);
+
+ state->vlan_enabled = 0;
+ err |= ip175d_set_vlan_mode(state);
+
+ return (err ? -EIO : 0);
+}
+
+/*** High-level functions ***/
+
+static int ip17xx_get_enable_vlan(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+
+ val->value.i = state->vlan_enabled;
+ return 0;
+}
+
+static void ip17xx_reset_vlan_config(struct ip17xx_state *state)
+{
+ int i;
+
+ state->remove_tag = (state->vlan_enabled ? ((1<<state->regs->NUM_PORTS)-1) : 0x0000);
+ state->add_tag = 0x0000;
+ for (i = 0; i < MAX_VLANS; i++) {
+ state->vlans[i].ports = 0x0000;
+ state->vlans[i].tag = (i ? i : 16);
+ }
+ for (i = 0; i < MAX_PORTS; i++)
+ state->ports[i].pvid = 0;
+}
+
+static int ip17xx_set_enable_vlan(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+ int enable;
+
+ enable = val->value.i;
+ if (state->vlan_enabled == enable) {
+ // Do not change any state.
+ return 0;
+ }
+ state->vlan_enabled = enable;
+
+ // Otherwise, if we are switching state, set fields to a known default.
+ ip17xx_reset_vlan_config(state);
+
+ return state->regs->set_vlan_mode(state);
+}
+
+static int ip17xx_get_ports(struct switch_dev *dev, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+ int b;
+ int ind;
+ unsigned int ports;
+
+ if (val->port_vlan >= dev->vlans || val->port_vlan < 0)
+ return -EINVAL;
+
+ ports = state->vlans[val->port_vlan].ports;
+ b = 0;
+ ind = 0;
+ while (b < MAX_PORTS) {
+ if (ports&1) {
+ int istagged = ((state->add_tag >> b) & 1);
+ val->value.ports[ind].id = b;
+ val->value.ports[ind].flags = (istagged << SWITCH_PORT_FLAG_TAGGED);
+ ind++;
+ }
+ b++;
+ ports >>= 1;
+ }
+ val->len = ind;
+
+ return 0;
+}
+
+static int ip17xx_set_ports(struct switch_dev *dev, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+ int i;
+
+ if (val->port_vlan >= dev->vlans || val->port_vlan < 0)
+ return -EINVAL;
+
+ state->vlans[val->port_vlan].ports = 0;
+ for (i = 0; i < val->len; i++) {
+ unsigned int bitmask = (1<<val->value.ports[i].id);
+ state->vlans[val->port_vlan].ports |= bitmask;
+ if (val->value.ports[i].flags & (1<<SWITCH_PORT_FLAG_TAGGED)) {
+ state->add_tag |= bitmask;
+ state->remove_tag &= (~bitmask);
+ } else {
+ state->add_tag &= (~bitmask);
+ state->remove_tag |= bitmask;
+ }
+ }
+
+ return state->regs->update_state(state);
+}
+
+static int ip17xx_apply(struct switch_dev *dev)
+{
+ struct ip17xx_state *state = get_state(dev);
+
+ if (REG_SUPP(state->regs->MII_REGISTER_EN)) {
+ int val = getPhy(state, state->regs->MII_REGISTER_EN);
+ if (val < 0) {
+ return val;
+ }
+ val |= (1<<state->regs->MII_REGISTER_EN_BIT);
+ return setPhy(state, state->regs->MII_REGISTER_EN, val);
+ }
+ return 0;
+}
+
+static int ip17xx_reset(struct switch_dev *dev)
+{
+ struct ip17xx_state *state = get_state(dev);
+ int i, err;
+
+ if (REG_SUPP(state->regs->RESET_REG)) {
+ err = setPhy(state, state->regs->RESET_REG, state->regs->RESET_VAL);
+ if (err < 0)
+ return err;
+ err = getPhy(state, state->regs->RESET_REG);
+
+ /*
+ * Data sheet specifies reset period to be 2 msec.
+ * (I don't see any mention of the 2ms delay in the IP178C spec, only
+ * in IP175C, but it can't hurt.)
+ */
+ mdelay(2);
+ }
+
+ /* reset switch ports */
+ for (i = 0; i < state->regs->NUM_PORTS-1; i++) {
+ err = ip_phy_write(state, i, MII_BMCR, BMCR_RESET);
+ if (err < 0)
+ return err;
+ }
+
+ state->router_mode = 0;
+ state->vlan_enabled = 0;
+ ip17xx_reset_vlan_config(state);
+
+ return state->regs->reset(state);
+}
+
+static int ip17xx_get_tagged(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+
+ if (state->add_tag & (1<<val->port_vlan)) {
+ if (state->remove_tag & (1<<val->port_vlan))
+ val->value.i = 3; // shouldn't ever happen.
+ else
+ val->value.i = 1;
+ } else {
+ if (state->remove_tag & (1<<val->port_vlan))
+ val->value.i = 0;
+ else
+ val->value.i = 2;
+ }
+ return 0;
+}
+
+static int ip17xx_set_tagged(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+
+ state->add_tag &= ~(1<<val->port_vlan);
+ state->remove_tag &= ~(1<<val->port_vlan);
+
+ if (val->value.i == 0)
+ state->remove_tag |= (1<<val->port_vlan);
+ if (val->value.i == 1)
+ state->add_tag |= (1<<val->port_vlan);
+
+ return state->regs->update_state(state);
+}
+
+/** Get the current phy address */
+static int ip17xx_get_phy(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+
+ val->value.i = state->proc_mii.p;
+ return 0;
+}
+
+/** Set a new phy address for low level access to registers */
+static int ip17xx_set_phy(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+ int new_reg = val->value.i;
+
+ if (new_reg < 0 || new_reg > 31)
+ state->proc_mii.p = (u16)-1;
+ else
+ state->proc_mii.p = (u16)new_reg;
+ return 0;
+}
+
+/** Get the current register number */
+static int ip17xx_get_reg(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+
+ val->value.i = state->proc_mii.m;
+ return 0;
+}
+
+/** Set a new register address for low level access to registers */
+static int ip17xx_set_reg(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+ int new_reg = val->value.i;
+
+ if (new_reg < 0 || new_reg > 31)
+ state->proc_mii.m = (u16)-1;
+ else
+ state->proc_mii.m = (u16)new_reg;
+ return 0;
+}
+
+/** Get the register content of state->proc_mii */
+static int ip17xx_get_val(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+ int retval = -EINVAL;
+ if (REG_SUPP(state->proc_mii))
+ retval = getPhy(state, state->proc_mii);
+
+ if (retval < 0) {
+ return retval;
+ } else {
+ val->value.i = retval;
+ return 0;
+ }
+}
+
+/** Write a value to the register defined by phy/reg above */
+static int ip17xx_set_val(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+ int myval, err = -EINVAL;
+
+ myval = val->value.i;
+ if (myval <= 0xffff && myval >= 0 && REG_SUPP(state->proc_mii)) {
+ err = setPhy(state, state->proc_mii, (u16)myval);
+ }
+ return err;
+}
+
+static int ip17xx_read_name(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+ val->value.s = state->regs->NAME; // Just a const pointer, won't be freed by swconfig.
+ return 0;
+}
+
+static int ip17xx_get_tag(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+ int vlan = val->port_vlan;
+
+ if (vlan < 0 || vlan >= MAX_VLANS)
+ return -EINVAL;
+
+ val->value.i = state->vlans[vlan].tag;
+ return 0;
+}
+
+static int ip17xx_set_tag(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+ int vlan = val->port_vlan;
+ int tag = val->value.i;
+
+ if (vlan < 0 || vlan >= MAX_VLANS)
+ return -EINVAL;
+
+ if (tag < 0 || tag > 4095)
+ return -EINVAL;
+
+ state->vlans[vlan].tag = tag;
+ return state->regs->update_state(state);
+}
+
+static int ip17xx_set_port_speed(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+ int nr = val->port_vlan;
+ int ctrl;
+ int autoneg;
+ int speed;
+ if (val->value.i == 100) {
+ speed = 1;
+ autoneg = 0;
+ } else if (val->value.i == 10) {
+ speed = 0;
+ autoneg = 0;
+ } else {
+ autoneg = 1;
+ speed = 1;
+ }
+
+ /* Can't set speed for cpu port */
+ if (nr == state->regs->CPU_PORT)
+ return -EINVAL;
+
+ if (nr >= dev->ports || nr < 0)
+ return -EINVAL;
+
+ ctrl = ip_phy_read(state, nr, 0);
+ if (ctrl < 0)
+ return -EIO;
+
+ ctrl &= (~(1<<12));
+ ctrl &= (~(1<<13));
+ ctrl |= (autoneg<<12);
+ ctrl |= (speed<<13);
+
+ return ip_phy_write(state, nr, 0, ctrl);
+}
+
+static int ip17xx_get_port_speed(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+ int nr = val->port_vlan;
+ int speed, status;
+
+ if (nr == state->regs->CPU_PORT) {
+ val->value.i = 100;
+ return 0;
+ }
+
+ if (nr >= dev->ports || nr < 0)
+ return -EINVAL;
+
+ status = ip_phy_read(state, nr, 1);
+ speed = ip_phy_read(state, nr, 18);
+ if (status < 0 || speed < 0)
+ return -EIO;
+
+ if (status & 4)
+ val->value.i = ((speed & (1<<11)) ? 100 : 10);
+ else
+ val->value.i = 0;
+
+ return 0;
+}
+
+static int ip17xx_get_port_status(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+ int ctrl, speed, status;
+ int nr = val->port_vlan;
+ int len;
+ char *buf = state->buf; // fixed-length at 80.
+
+ if (nr == state->regs->CPU_PORT) {
+ sprintf(buf, "up, 100 Mbps, cpu port");
+ val->value.s = buf;
+ return 0;
+ }
+
+ if (nr >= dev->ports || nr < 0)
+ return -EINVAL;
+
+ ctrl = ip_phy_read(state, nr, 0);
+ status = ip_phy_read(state, nr, 1);
+ speed = ip_phy_read(state, nr, 18);
+ if (ctrl < 0 || status < 0 || speed < 0)
+ return -EIO;
+
+ if (status & 4)
+ len = sprintf(buf, "up, %d Mbps, %s duplex",
+ ((speed & (1<<11)) ? 100 : 10),
+ ((speed & (1<<10)) ? "full" : "half"));
+ else
+ len = sprintf(buf, "down");
+
+ if (ctrl & (1<<12)) {
+ len += sprintf(buf+len, ", auto-negotiate");
+ if (!(status & (1<<5)))
+ len += sprintf(buf+len, " (in progress)");
+ } else {
+ len += sprintf(buf+len, ", fixed speed (%d)",
+ ((ctrl & (1<<13)) ? 100 : 10));
+ }
+
+ buf[len] = '\0';
+ val->value.s = buf;
+ return 0;
+}
+
+static int ip17xx_get_pvid(struct switch_dev *dev, int port, int *val)
+{
+ struct ip17xx_state *state = get_state(dev);
+
+ *val = state->ports[port].pvid;
+ return 0;
+}
+
+static int ip17xx_set_pvid(struct switch_dev *dev, int port, int val)
+{
+ struct ip17xx_state *state = get_state(dev);
+
+ if (val < 0 || val >= MAX_VLANS)
+ return -EINVAL;
+
+ state->ports[port].pvid = val;
+ return state->regs->update_state(state);
+}
+
+
+enum Ports {
+ IP17XX_PORT_STATUS,
+ IP17XX_PORT_LINK,
+ IP17XX_PORT_TAGGED,
+ IP17XX_PORT_PVID,
+};
+
+enum Globals {
+ IP17XX_ENABLE_VLAN,
+ IP17XX_GET_NAME,
+ IP17XX_REGISTER_PHY,
+ IP17XX_REGISTER_MII,
+ IP17XX_REGISTER_VALUE,
+ IP17XX_REGISTER_ERRNO,
+};
+
+enum Vlans {
+ IP17XX_VLAN_TAG,
+};
+
+static const struct switch_attr ip17xx_global[] = {
+ [IP17XX_ENABLE_VLAN] = {
+ .id = IP17XX_ENABLE_VLAN,
+ .type = SWITCH_TYPE_INT,
+ .name = "enable_vlan",
+ .description = "Flag to enable or disable VLANs and tagging",
+ .get = ip17xx_get_enable_vlan,
+ .set = ip17xx_set_enable_vlan,
+ },
+ [IP17XX_GET_NAME] = {
+ .id = IP17XX_GET_NAME,
+ .type = SWITCH_TYPE_STRING,
+ .description = "Returns the type of IC+ chip.",
+ .name = "name",
+ .get = ip17xx_read_name,
+ .set = NULL,
+ },
+ /* jal: added for low level debugging etc. */
+ [IP17XX_REGISTER_PHY] = {
+ .id = IP17XX_REGISTER_PHY,
+ .type = SWITCH_TYPE_INT,
+ .description = "Direct register access: set PHY (0-4, or 29,30,31)",
+ .name = "phy",
+ .get = ip17xx_get_phy,
+ .set = ip17xx_set_phy,
+ },
+ [IP17XX_REGISTER_MII] = {
+ .id = IP17XX_REGISTER_MII,
+ .type = SWITCH_TYPE_INT,
+ .description = "Direct register access: set MII register number (0-31)",
+ .name = "reg",
+ .get = ip17xx_get_reg,
+ .set = ip17xx_set_reg,
+ },
+ [IP17XX_REGISTER_VALUE] = {
+ .id = IP17XX_REGISTER_VALUE,
+ .type = SWITCH_TYPE_INT,
+ .description = "Direct register access: read/write to register (0-65535)",
+ .name = "val",
+ .get = ip17xx_get_val,
+ .set = ip17xx_set_val,
+ },
+};
+
+static const struct switch_attr ip17xx_vlan[] = {
+ [IP17XX_VLAN_TAG] = {
+ .id = IP17XX_VLAN_TAG,
+ .type = SWITCH_TYPE_INT,
+ .description = "VLAN ID (0-4095) [IP175D only]",
+ .name = "vid",
+ .get = ip17xx_get_tag,
+ .set = ip17xx_set_tag,
+ }
+};
+
+static const struct switch_attr ip17xx_port[] = {
+ [IP17XX_PORT_STATUS] = {
+ .id = IP17XX_PORT_STATUS,
+ .type = SWITCH_TYPE_STRING,
+		.description = "Returns detailed port status",
+ .name = "status",
+ .get = ip17xx_get_port_status,
+ .set = NULL,
+ },
+ [IP17XX_PORT_LINK] = {
+ .id = IP17XX_PORT_LINK,
+ .type = SWITCH_TYPE_INT,
+ .description = "Link speed. Can write 0 for auto-negotiate, or 10 or 100",
+ .name = "link",
+ .get = ip17xx_get_port_speed,
+ .set = ip17xx_set_port_speed,
+ },
+ [IP17XX_PORT_TAGGED] = {
+		.id = IP17XX_PORT_TAGGED,
+ .type = SWITCH_TYPE_INT,
+ .description = "0 = untag, 1 = add tags, 2 = do not alter (This value is reset if vlans are altered)",
+ .name = "tagged",
+ .get = ip17xx_get_tagged,
+ .set = ip17xx_set_tagged,
+ },
+};
+
+static const struct switch_dev_ops ip17xx_ops = {
+ .attr_global = {
+ .attr = ip17xx_global,
+ .n_attr = ARRAY_SIZE(ip17xx_global),
+ },
+ .attr_port = {
+ .attr = ip17xx_port,
+ .n_attr = ARRAY_SIZE(ip17xx_port),
+ },
+ .attr_vlan = {
+ .attr = ip17xx_vlan,
+ .n_attr = ARRAY_SIZE(ip17xx_vlan),
+ },
+
+ .get_port_pvid = ip17xx_get_pvid,
+ .set_port_pvid = ip17xx_set_pvid,
+ .get_vlan_ports = ip17xx_get_ports,
+ .set_vlan_ports = ip17xx_set_ports,
+ .apply_config = ip17xx_apply,
+ .reset_switch = ip17xx_reset,
+};
+
+static int ip17xx_probe(struct phy_device *pdev)
+{
+ struct ip17xx_state *state;
+ struct switch_dev *dev;
+ int err;
+
+ /* We only attach to PHY 0, but use all available PHYs */
+ if (pdev->addr != 0)
+ return -ENODEV;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ dev = &state->dev;
+
+ pdev->priv = state;
+ state->mii_bus = pdev->bus;
+
+ err = get_model(state);
+ if (err < 0)
+ goto error;
+
+ dev->vlans = MAX_VLANS;
+ dev->cpu_port = state->regs->CPU_PORT;
+ dev->ports = state->regs->NUM_PORTS;
+ dev->name = state->regs->NAME;
+ dev->ops = &ip17xx_ops;
+
+ pr_info("IP17xx: Found %s at %s\n", dev->name, dev_name(&pdev->dev));
+ return 0;
+
+error:
+ kfree(state);
+ return err;
+}
+
+static int ip17xx_config_init(struct phy_device *pdev)
+{
+ struct ip17xx_state *state = pdev->priv;
+ struct net_device *dev = pdev->attached_dev;
+ int err;
+
+ err = register_switch(&state->dev, dev);
+ if (err < 0)
+ return err;
+
+ state->registered = true;
+ ip17xx_reset(&state->dev);
+ return 0;
+}
+
+static void ip17xx_remove(struct phy_device *pdev)
+{
+ struct ip17xx_state *state = pdev->priv;
+
+ if (state->registered)
+ unregister_switch(&state->dev);
+ kfree(state);
+}
+
+static int ip17xx_config_aneg(struct phy_device *pdev)
+{
+ return 0;
+}
+
+static int ip17xx_aneg_done(struct phy_device *pdev)
+{
+ return BMSR_ANEGCOMPLETE;
+}
+
+static int ip17xx_update_link(struct phy_device *pdev)
+{
+ pdev->link = 1;
+ return 0;
+}
+
+static int ip17xx_read_status(struct phy_device *pdev)
+{
+ pdev->speed = SPEED_100;
+ pdev->duplex = DUPLEX_FULL;
+ pdev->pause = pdev->asym_pause = 0;
+ pdev->link = 1;
+
+ return 0;
+}
+
+static struct phy_driver ip17xx_driver = {
+ .name = "IC+ IP17xx",
+ .phy_id = 0x02430c00,
+ .phy_id_mask = 0x0ffffc00,
+ .features = PHY_BASIC_FEATURES,
+ .probe = ip17xx_probe,
+ .remove = ip17xx_remove,
+ .config_init = ip17xx_config_init,
+ .config_aneg = ip17xx_config_aneg,
+ .aneg_done = ip17xx_aneg_done,
+ .update_link = ip17xx_update_link,
+ .read_status = ip17xx_read_status,
+ .driver = { .owner = THIS_MODULE },
+};
+
+static struct phy_driver ip175a_driver = {
+ .name = "IC+ IP175A",
+ .phy_id = 0x02430c50,
+ .phy_id_mask = 0x0ffffff0,
+ .features = PHY_BASIC_FEATURES,
+ .probe = ip17xx_probe,
+ .remove = ip17xx_remove,
+ .config_init = ip17xx_config_init,
+ .config_aneg = ip17xx_config_aneg,
+ .aneg_done = ip17xx_aneg_done,
+ .update_link = ip17xx_update_link,
+ .read_status = ip17xx_read_status,
+ .driver = { .owner = THIS_MODULE },
+};
+
+
+int __init ip17xx_init(void)
+{
+ int ret;
+
+ ret = phy_driver_register(&ip175a_driver);
+ if (ret < 0)
+ return ret;
+
+ return phy_driver_register(&ip17xx_driver);
+}
+
+void __exit ip17xx_exit(void)
+{
+ phy_driver_unregister(&ip17xx_driver);
+ phy_driver_unregister(&ip175a_driver);
+}
+
+MODULE_AUTHOR("Patrick Horn <patrick.horn@gmail.com>");
+MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
+MODULE_AUTHOR("Martin Mares <mj@ucw.cz>");
+MODULE_LICENSE("GPL");
+
+module_init(ip17xx_init);
+module_exit(ip17xx_exit);
diff --git a/target/linux/generic/files/drivers/net/phy/micrel.c b/target/linux/generic/files/drivers/net/phy/micrel.c
new file mode 100644
index 000000000..1499d4ace
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/micrel.c
@@ -0,0 +1,83 @@
+/*
+ * Driver for Micrel/Kendin PHYs
+ *
+ * Copyright (c) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/phy.h>
+
+#define KSZ_REG_INT_CTRL 0x1b
+
+#define KSZ_INT_LU_EN (1 << 8) /* enable Link Up interrupt */
+#define KSZ_INT_RF_EN (1 << 9) /* enable Remote Fault interrupt */
+#define KSZ_INT_LD_EN (1 << 10) /* enable Link Down interrupt */
+
+#define KSZ_INT_INIT (KSZ_INT_LU_EN | KSZ_INT_LD_EN)
+
+static int ksz8041_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
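+	/* The status bits of register 0x1b (low byte; the enable bits
+	 * defined above sit in the high byte) are cleared on read, so a
+	 * plain read is enough to acknowledge a pending interrupt. */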
+ err = phy_read(phydev, KSZ_REG_INT_CTRL);
+
+ return (err < 0) ? err : 0;
+}
+
+static int ksz8041_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, KSZ_REG_INT_CTRL,
+ KSZ_INT_INIT);
+ else
+ err = phy_write(phydev, KSZ_REG_INT_CTRL, 0);
+
+ return err;
+}
+
+static struct phy_driver ksz8041_phy_driver = {
+ .phy_id = 0x00221512,
+ .name = "Micrel KSZ8041",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = ksz8041_ack_interrupt,
+ .config_intr = ksz8041_config_intr,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init micrel_phy_init(void)
+{
+ return phy_driver_register(&ksz8041_phy_driver);
+}
+
+static void __exit micrel_phy_exit(void)
+{
+ phy_driver_unregister(&ksz8041_phy_driver);
+}
+
+#ifdef MODULE
+module_init(micrel_phy_init);
+module_exit(micrel_phy_exit);
+#else
+subsys_initcall(micrel_phy_init);
+#endif
+
+MODULE_DESCRIPTION("Micrel/Kendin PHY driver");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/target/linux/generic/files/drivers/net/phy/mvswitch.c b/target/linux/generic/files/drivers/net/phy/mvswitch.c
new file mode 100644
index 000000000..d754951ac
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/mvswitch.c
@@ -0,0 +1,422 @@
+/*
+ * Marvell 88E6060 switch driver
+ * Copyright (c) 2008 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License v2 as published by the
+ * Free Software Foundation
+ */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include "mvswitch.h"
+
+/* Undefine this to use trailer mode instead.
+ * I don't know if header mode works with all chips */
+#define HEADER_MODE 1
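+/* Header mode prepends a 2-byte Marvell header in front of the MAC header
+ * on the CPU port; trailer mode appends a 4-byte trailer instead. Both
+ * carry the VLAN/port information, see mvswitch.h for the field layout. */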
+
+MODULE_DESCRIPTION("Marvell 88E6060 Switch driver");
+MODULE_AUTHOR("Felix Fietkau");
+MODULE_LICENSE("GPL");
+
+#define MVSWITCH_MAGIC 0x88E6060
+
+struct mvswitch_priv {
+ netdev_features_t orig_features;
+ u8 vlans[16];
+};
+
+#define to_mvsw(_phy) ((struct mvswitch_priv *) (_phy)->priv)
+
+static inline u16
+r16(struct phy_device *phydev, int addr, int reg)
+{
+ return phydev->bus->read(phydev->bus, addr, reg);
+}
+
+static inline void
+w16(struct phy_device *phydev, int addr, int reg, u16 val)
+{
+ phydev->bus->write(phydev->bus, addr, reg, val);
+}
+
+
+static struct sk_buff *
+mvswitch_mangle_tx(struct net_device *dev, struct sk_buff *skb)
+{
+ struct mvswitch_priv *priv;
+ char *buf = NULL;
+ u16 vid;
+
+ priv = dev->phy_ptr;
+ if (unlikely(!priv))
+ goto error;
+
+ if (unlikely(skb->len < 16))
+ goto error;
+
+#ifdef HEADER_MODE
+ if (__vlan_hwaccel_get_tag(skb, &vid))
+ goto error;
+
+ if (skb_cloned(skb) || (skb->len <= 62) || (skb_headroom(skb) < MV_HEADER_SIZE)) {
+ if (pskb_expand_head(skb, MV_HEADER_SIZE, (skb->len < 62 ? 62 - skb->len : 0), GFP_ATOMIC))
+ goto error_expand;
+ if (skb->len < 62)
+ skb->len = 62;
+ }
+ buf = skb_push(skb, MV_HEADER_SIZE);
+#else
+ if (__vlan_get_tag(skb, &vid))
+ goto error;
+
+ if (unlikely((vid > 15 || !priv->vlans[vid])))
+ goto error;
+
+ if (skb->len <= 64) {
+ if (pskb_expand_head(skb, 0, 64 + MV_TRAILER_SIZE - skb->len, GFP_ATOMIC))
+ goto error_expand;
+
+ buf = skb->data + 64;
+ skb->len = 64 + MV_TRAILER_SIZE;
+ } else {
+ if (skb_cloned(skb) || unlikely(skb_tailroom(skb) < 4)) {
+ if (pskb_expand_head(skb, 0, 4, GFP_ATOMIC))
+ goto error_expand;
+ }
+ buf = skb_put(skb, 4);
+ }
+
+ /* move the ethernet header 4 bytes forward, overwriting the vlan tag */
+ memmove(skb->data + 4, skb->data, 12);
+ skb->data += 4;
+ skb->len -= 4;
+ skb->mac_header += 4;
+#endif
+
+ if (!buf)
+ goto error;
+
+
+#ifdef HEADER_MODE
+ /* prepend the tag */
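+	/* vid goes into bits 15:12, the destination port mask into the
+	 * low five bits (MV_HEADER_* in mvswitch.h) */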
+ *((__be16 *) buf) = cpu_to_be16(
+ ((vid << MV_HEADER_VLAN_S) & MV_HEADER_VLAN_M) |
+ ((priv->vlans[vid] << MV_HEADER_PORTS_S) & MV_HEADER_PORTS_M)
+ );
+#else
+ /* append the tag */
+ *((__be32 *) buf) = cpu_to_be32((
+ (MV_TRAILER_OVERRIDE << MV_TRAILER_FLAGS_S) |
+ ((priv->vlans[vid] & MV_TRAILER_PORTS_M) << MV_TRAILER_PORTS_S)
+ ));
+#endif
+
+ return skb;
+
+error_expand:
+ if (net_ratelimit())
+ printk("%s: failed to expand/update skb for the switch\n", dev->name);
+
+error:
+ /* any errors? drop the packet! */
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
+static void
+mvswitch_mangle_rx(struct net_device *dev, struct sk_buff *skb)
+{
+ struct mvswitch_priv *priv;
+ unsigned char *buf;
+ int vlan = -1;
+ int i;
+
+ priv = dev->phy_ptr;
+ if (WARN_ON_ONCE(!priv))
+ return;
+
+#ifdef HEADER_MODE
+ buf = skb->data;
+ skb_pull(skb, MV_HEADER_SIZE);
+#else
+ buf = skb->data + skb->len - MV_TRAILER_SIZE;
+ if (buf[0] != 0x80)
+ return;
+#endif
+
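+	/* byte 1 of the header (or trailer) carries the ingress port number */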
+ /* look for the vlan matching the incoming port */
+ for (i = 0; i < ARRAY_SIZE(priv->vlans); i++) {
+ if ((1 << buf[1]) & priv->vlans[i])
+ vlan = i;
+ }
+
+ if (vlan == -1)
+ return;
+
+ __vlan_hwaccel_put_tag(skb, vlan);
+}
+
+
+static int
+mvswitch_wait_mask(struct phy_device *pdev, int addr, int reg, u16 mask, u16 val)
+{
+ int i = 100;
+ u16 r;
+
+ do {
+ r = r16(pdev, addr, reg) & mask;
+ if (r == val)
+ return 0;
+ } while(--i > 0);
+ return -ETIMEDOUT;
+}
+
+static int
+mvswitch_config_init(struct phy_device *pdev)
+{
+ struct mvswitch_priv *priv = to_mvsw(pdev);
+ struct net_device *dev = pdev->attached_dev;
+ u8 vlmap = 0;
+ int i;
+
+ if (!dev)
+ return -EINVAL;
+
+ printk("%s: Marvell 88E6060 PHY driver attached.\n", dev->name);
+ pdev->supported = ADVERTISED_100baseT_Full;
+ pdev->advertising = ADVERTISED_100baseT_Full;
+ dev->phy_ptr = priv;
+ pdev->irq = PHY_POLL;
+#ifdef HEADER_MODE
+ dev->flags |= IFF_PROMISC;
+#endif
+
+ /* initialize default vlans */
+ for (i = 0; i < MV_PORTS; i++)
+ priv->vlans[(i == MV_WANPORT ? 2 : 1)] |= (1 << i);
+
+ /* before entering reset, disable all ports */
+ for (i = 0; i < MV_PORTS; i++)
+ w16(pdev, MV_PORTREG(CONTROL, i), 0x00);
+
+ msleep(2); /* wait for the status change to settle in */
+
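+	/* the ATU (Address Translation Unit) is the switch's MAC learning table */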
+ /* put the ATU in reset */
+ w16(pdev, MV_SWITCHREG(ATU_CTRL), MV_ATUCTL_RESET);
+
+ i = mvswitch_wait_mask(pdev, MV_SWITCHREG(ATU_CTRL), MV_ATUCTL_RESET, 0);
+ if (i < 0) {
+ printk("%s: Timeout waiting for the switch to reset.\n", dev->name);
+ return i;
+ }
+
+ /* set the ATU flags */
+ w16(pdev, MV_SWITCHREG(ATU_CTRL),
+ MV_ATUCTL_NO_LEARN |
+ MV_ATUCTL_ATU_1K |
+ MV_ATUCTL_AGETIME(MV_ATUCTL_AGETIME_MIN) /* minimum without disabling ageing */
+ );
+
+ /* initialize the cpu port */
+ w16(pdev, MV_PORTREG(CONTROL, MV_CPUPORT),
+#ifdef HEADER_MODE
+ MV_PORTCTRL_HEADER |
+#else
+ MV_PORTCTRL_RXTR |
+ MV_PORTCTRL_TXTR |
+#endif
+ MV_PORTCTRL_ENABLED
+ );
+ /* wait for the phy change to settle in */
+ msleep(2);
+ for (i = 0; i < MV_PORTS; i++) {
+ u8 pvid = 0;
+ int j;
+
+ vlmap = 0;
+
+ /* look for the matching vlan */
+ for (j = 0; j < ARRAY_SIZE(priv->vlans); j++) {
+ if (priv->vlans[j] & (1 << i)) {
+ vlmap = priv->vlans[j];
+ pvid = j;
+ }
+ }
+ /* leave port unconfigured if it's not part of a vlan */
+ if (!vlmap)
+ continue;
+
+ /* add the cpu port to the allowed destinations list */
+ vlmap |= (1 << MV_CPUPORT);
+
+ /* take port out of its own vlan destination map */
+ vlmap &= ~(1 << i);
+
+ /* apply vlan settings */
+ w16(pdev, MV_PORTREG(VLANMAP, i),
+ MV_PORTVLAN_PORTS(vlmap) |
+ MV_PORTVLAN_ID(i)
+ );
+
+ /* re-enable port */
+ w16(pdev, MV_PORTREG(CONTROL, i),
+ MV_PORTCTRL_ENABLED
+ );
+ }
+
+ w16(pdev, MV_PORTREG(VLANMAP, MV_CPUPORT),
+ MV_PORTVLAN_ID(MV_CPUPORT)
+ );
+
+ /* set the port association vector */
+ for (i = 0; i <= MV_PORTS; i++) {
+ w16(pdev, MV_PORTREG(ASSOC, i),
+ MV_PORTASSOC_PORTS(1 << i)
+ );
+ }
+
+ /* init switch control */
+ w16(pdev, MV_SWITCHREG(CTRL),
+ MV_SWITCHCTL_MSIZE |
+ MV_SWITCHCTL_DROP
+ );
+
+ dev->eth_mangle_rx = mvswitch_mangle_rx;
+ dev->eth_mangle_tx = mvswitch_mangle_tx;
+ priv->orig_features = dev->features;
+
+#ifdef HEADER_MODE
+ dev->priv_flags |= IFF_NO_IP_ALIGN;
+ dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
+#else
+ dev->features |= NETIF_F_HW_VLAN_RX;
+#endif
+
+ return 0;
+}
+
+static int
+mvswitch_read_status(struct phy_device *pdev)
+{
+ pdev->speed = SPEED_100;
+ pdev->duplex = DUPLEX_FULL;
+ pdev->link = 1;
+
+ /* XXX ugly workaround: we can't force the switch
+ * to gracefully handle hosts moving from one port to another,
+ * so we have to regularly clear the ATU database */
+
+ /* wait for the ATU to become available */
+ mvswitch_wait_mask(pdev, MV_SWITCHREG(ATU_OP), MV_ATUOP_INPROGRESS, 0);
+
+ /* flush the ATU */
+ w16(pdev, MV_SWITCHREG(ATU_OP),
+ MV_ATUOP_INPROGRESS |
+ MV_ATUOP_FLUSH_ALL
+ );
+
+ /* wait for operation to complete */
+ mvswitch_wait_mask(pdev, MV_SWITCHREG(ATU_OP), MV_ATUOP_INPROGRESS, 0);
+
+ return 0;
+}
+
+static int
+mvswitch_config_aneg(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static void
+mvswitch_remove(struct phy_device *pdev)
+{
+ struct mvswitch_priv *priv = to_mvsw(pdev);
+ struct net_device *dev = pdev->attached_dev;
+
+ dev->phy_ptr = NULL;
+ dev->eth_mangle_rx = NULL;
+ dev->eth_mangle_tx = NULL;
+ dev->features = priv->orig_features;
+ dev->priv_flags &= ~IFF_NO_IP_ALIGN;
+ kfree(priv);
+}
+
+static int
+mvswitch_probe(struct phy_device *pdev)
+{
+ struct mvswitch_priv *priv;
+
+ priv = kzalloc(sizeof(struct mvswitch_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
+ pdev->priv = priv;
+
+ return 0;
+}
+
+static int
+mvswitch_fixup(struct phy_device *dev)
+{
+ u16 reg;
+
+ if (dev->addr != 0x10)
+ return 0;
+
+ reg = dev->bus->read(dev->bus, MV_PORTREG(IDENT, 0)) & MV_IDENT_MASK;
+ if (reg != MV_IDENT_VALUE)
+ return 0;
+
+ dev->phy_id = MVSWITCH_MAGIC;
+ return 0;
+}
+
+
+static struct phy_driver mvswitch_driver = {
+ .name = "Marvell 88E6060",
+ .phy_id = MVSWITCH_MAGIC,
+ .phy_id_mask = 0xffffffff,
+ .features = PHY_BASIC_FEATURES,
+ .probe = &mvswitch_probe,
+ .remove = &mvswitch_remove,
+ .config_init = &mvswitch_config_init,
+ .config_aneg = &mvswitch_config_aneg,
+ .read_status = &mvswitch_read_status,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init
+mvswitch_init(void)
+{
+ phy_register_fixup_for_id(PHY_ANY_ID, mvswitch_fixup);
+ return phy_driver_register(&mvswitch_driver);
+}
+
+static void __exit
+mvswitch_exit(void)
+{
+ phy_driver_unregister(&mvswitch_driver);
+}
+
+module_init(mvswitch_init);
+module_exit(mvswitch_exit);
diff --git a/target/linux/generic/files/drivers/net/phy/mvswitch.h b/target/linux/generic/files/drivers/net/phy/mvswitch.h
new file mode 100644
index 000000000..1563eec4d
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/mvswitch.h
@@ -0,0 +1,145 @@
+/*
+ * Marvell 88E6060 switch driver
+ * Copyright (c) 2008 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License v2 as published by the
+ * Free Software Foundation
+ */
+#ifndef __MVSWITCH_H
+#define __MVSWITCH_H
+
+#define MV_HEADER_SIZE 2
+#define MV_HEADER_PORTS_M 0x001f
+#define MV_HEADER_PORTS_S 0
+#define MV_HEADER_VLAN_M 0xf000
+#define MV_HEADER_VLAN_S 12
+
+#define MV_TRAILER_SIZE 4
+#define MV_TRAILER_PORTS_M 0x1f
+#define MV_TRAILER_PORTS_S 16
+#define MV_TRAILER_FLAGS_S 24
+#define MV_TRAILER_OVERRIDE 0x80
+
+
+#define MV_PORTS 5
+#define MV_WANPORT 4
+#define MV_CPUPORT 5
+
+#define MV_BASE 0x10
+
+#define MV_PHYPORT_BASE (MV_BASE + 0x0)
+#define MV_PHYPORT(_n) (MV_PHYPORT_BASE + (_n))
+#define MV_SWITCHPORT_BASE (MV_BASE + 0x8)
+#define MV_SWITCHPORT(_n) (MV_SWITCHPORT_BASE + (_n))
+#define MV_SWITCHREGS (MV_BASE + 0xf)
+
+enum {
+ MV_PHY_CONTROL = 0x00,
+ MV_PHY_STATUS = 0x01,
+ MV_PHY_IDENT0 = 0x02,
+ MV_PHY_IDENT1 = 0x03,
+ MV_PHY_ANEG = 0x04,
+ MV_PHY_LINK_ABILITY = 0x05,
+ MV_PHY_ANEG_EXPAND = 0x06,
+ MV_PHY_XMIT_NEXTP = 0x07,
+ MV_PHY_LINK_NEXTP = 0x08,
+ MV_PHY_CONTROL1 = 0x10,
+ MV_PHY_STATUS1 = 0x11,
+ MV_PHY_INTR_EN = 0x12,
+ MV_PHY_INTR_STATUS = 0x13,
+ MV_PHY_INTR_PORT = 0x14,
+ MV_PHY_RECV_COUNTER = 0x16,
+ MV_PHY_LED_PARALLEL = 0x16,
+ MV_PHY_LED_STREAM = 0x17,
+ MV_PHY_LED_CTRL = 0x18,
+ MV_PHY_LED_OVERRIDE = 0x19,
+ MV_PHY_VCT_CTRL = 0x1a,
+ MV_PHY_VCT_STATUS = 0x1b,
+ MV_PHY_CONTROL2 = 0x1e
+};
+#define MV_PHYREG(_type, _port) MV_PHYPORT(_port), MV_PHY_##_type
+
+enum {
+ MV_PORT_STATUS = 0x00,
+ MV_PORT_IDENT = 0x03,
+ MV_PORT_CONTROL = 0x04,
+ MV_PORT_VLANMAP = 0x06,
+ MV_PORT_ASSOC = 0x0b,
+ MV_PORT_RXCOUNT = 0x10,
+ MV_PORT_TXCOUNT = 0x11,
+};
+#define MV_PORTREG(_type, _port) MV_SWITCHPORT(_port), MV_PORT_##_type
+
+enum {
+ MV_PORTCTRL_BLOCK = (1 << 0),
+ MV_PORTCTRL_LEARN = (2 << 0),
+ MV_PORTCTRL_ENABLED = (3 << 0),
+ MV_PORTCTRL_VLANTUN = (1 << 7), /* Enforce VLANs on packets */
+ MV_PORTCTRL_RXTR = (1 << 8), /* Enable Marvell packet trailer for ingress */
+ MV_PORTCTRL_HEADER = (1 << 11), /* Enable Marvell packet header mode for port */
+ MV_PORTCTRL_TXTR = (1 << 14), /* Enable Marvell packet trailer for egress */
+ MV_PORTCTRL_FORCEFL = (1 << 15), /* force flow control */
+};
+
+#define MV_PORTVLAN_ID(_n) (((_n) & 0xf) << 12)
+#define MV_PORTVLAN_PORTS(_n) ((_n) & 0x3f)
+
+#define MV_PORTASSOC_PORTS(_n) ((_n) & 0x1f)
+#define MV_PORTASSOC_MONITOR (1 << 15)
+
+enum {
+ MV_SWITCH_MAC0 = 0x01,
+ MV_SWITCH_MAC1 = 0x02,
+ MV_SWITCH_MAC2 = 0x03,
+ MV_SWITCH_CTRL = 0x04,
+ MV_SWITCH_ATU_CTRL = 0x0a,
+ MV_SWITCH_ATU_OP = 0x0b,
+ MV_SWITCH_ATU_DATA = 0x0c,
+ MV_SWITCH_ATU_MAC0 = 0x0d,
+ MV_SWITCH_ATU_MAC1 = 0x0e,
+ MV_SWITCH_ATU_MAC2 = 0x0f,
+};
+#define MV_SWITCHREG(_type) MV_SWITCHREGS, MV_SWITCH_##_type
+
+enum {
+ MV_SWITCHCTL_EEIE = (1 << 0), /* EEPROM interrupt enable */
+ MV_SWITCHCTL_PHYIE = (1 << 1), /* PHY interrupt enable */
+ MV_SWITCHCTL_ATUDONE= (1 << 2), /* ATU done interrupt enable */
+ MV_SWITCHCTL_ATUIE = (1 << 3), /* ATU interrupt enable */
+ MV_SWITCHCTL_CTRMODE= (1 << 8), /* statistics for rx and tx errors */
+ MV_SWITCHCTL_RELOAD = (1 << 9), /* reload registers from eeprom */
+ MV_SWITCHCTL_MSIZE = (1 << 10), /* increase maximum frame size */
+ MV_SWITCHCTL_DROP = (1 << 13), /* discard frames with excessive collisions */
+};
+
+enum {
+#define MV_ATUCTL_AGETIME_MIN 16
+#define MV_ATUCTL_AGETIME_MAX 4080
+#define MV_ATUCTL_AGETIME(_n) ((((_n) / 16) & 0xff) << 4)
+ MV_ATUCTL_ATU_256 = (0 << 12),
+ MV_ATUCTL_ATU_512 = (1 << 12),
+ MV_ATUCTL_ATU_1K = (2 << 12),
+ MV_ATUCTL_ATUMASK = (3 << 12),
+ MV_ATUCTL_NO_LEARN = (1 << 14),
+ MV_ATUCTL_RESET = (1 << 15),
+};
+
+enum {
+#define MV_ATUOP_DBNUM(_n) ((_n) & 0x0f)
+
+ MV_ATUOP_NOOP = (0 << 12),
+ MV_ATUOP_FLUSH_ALL = (1 << 12),
+ MV_ATUOP_FLUSH_U = (2 << 12),
+ MV_ATUOP_LOAD_DB = (3 << 12),
+ MV_ATUOP_GET_NEXT = (4 << 12),
+ MV_ATUOP_FLUSH_DB = (5 << 12),
+ MV_ATUOP_FLUSH_DB_UU= (6 << 12),
+
+ MV_ATUOP_INPROGRESS = (1 << 15),
+};
+
+#define MV_IDENT_MASK 0xfff0
+#define MV_IDENT_VALUE 0x0600
+
+#endif
diff --git a/target/linux/generic/files/drivers/net/phy/psb6970.c b/target/linux/generic/files/drivers/net/phy/psb6970.c
new file mode 100644
index 000000000..2fcd29901
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/psb6970.c
@@ -0,0 +1,438 @@
+/*
+ * Lantiq PSB6970 (Tantos) Switch driver
+ *
+ * Copyright (c) 2009,2010 Team Embedded.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License v2 as published by the
+ * Free Software Foundation.
+ *
+ * The switch programming done in this driver follows the
+ * "Ethernet Traffic Separation using VLAN" Application Note as
+ * published by Lantiq.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/switch.h>
+#include <linux/phy.h>
+
+#define PSB6970_MAX_VLANS 16
+#define PSB6970_NUM_PORTS 7
+#define PSB6970_DEFAULT_PORT_CPU 6
+#define PSB6970_IS_CPU_PORT(x) ((x) > 4)
+
+#define PHYADDR(_reg) ((_reg >> 5) & 0xff), (_reg & 0x1f)
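+/* Switch registers are flattened onto the MDIO bus: the upper bits of a
+ * register offset select the (pseudo) PHY address, the low 5 bits the MII
+ * register within it. */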
+
+/* --- Identification --- */
+#define PSB6970_CI0 0x0100
+#define PSB6970_CI0_MASK 0x000f
+#define PSB6970_CI1 0x0101
+#define PSB6970_CI1_VAL 0x2599
+#define PSB6970_CI1_MASK 0xffff
+
+/* --- VLAN filter table --- */
+#define PSB6970_VFxL(i) ((i)*2+0x10) /* VLAN Filter Low */
+#define PSB6970_VFxL_VV (1 << 15) /* VLAN_Valid */
+
+#define PSB6970_VFxH(i) ((i)*2+0x11) /* VLAN Filter High */
+#define PSB6970_VFxH_TM_SHIFT 7 /* Tagged Member */
+
+/* --- Port registers --- */
+#define PSB6970_EC(p) ((p)*0x20+2) /* Extended Control */
+#define PSB6970_EC_IFNTE (1 << 1) /* Input Force No Tag Enable */
+
+#define PSB6970_PBVM(p) ((p)*0x20+3) /* Port Base VLAN Map */
+#define PSB6970_PBVM_VMCE (1 << 8)
+#define PSB6970_PBVM_AOVTP (1 << 9)
+#define PSB6970_PBVM_VSD (1 << 10)
+#define PSB6970_PBVM_VC (1 << 11) /* VID Check with VID table */
+#define PSB6970_PBVM_TBVE (1 << 13) /* Tag-Based VLAN enable */
+
+#define PSB6970_DVID(p) ((p)*0x20+4) /* Default VLAN ID & Priority */
+
+struct psb6970_priv {
+ struct switch_dev dev;
+ struct phy_device *phy;
+ u16 (*read) (struct phy_device* phydev, int reg);
+ void (*write) (struct phy_device* phydev, int reg, u16 val);
+ struct mutex reg_mutex;
+
+ /* all fields below are cleared on reset */
+ bool vlan;
+ u16 vlan_id[PSB6970_MAX_VLANS];
+ u8 vlan_table[PSB6970_MAX_VLANS];
+ u8 vlan_tagged;
+ u16 pvid[PSB6970_NUM_PORTS];
+};
+
+#define to_psb6970(_dev) container_of(_dev, struct psb6970_priv, dev)
+
+static u16 psb6970_mii_read(struct phy_device *phydev, int reg)
+{
+ return phydev->bus->read(phydev->bus, PHYADDR(reg));
+}
+
+static void psb6970_mii_write(struct phy_device *phydev, int reg, u16 val)
+{
+ phydev->bus->write(phydev->bus, PHYADDR(reg), val);
+}
+
+static int
+psb6970_set_vlan(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct psb6970_priv *priv = to_psb6970(dev);
+ priv->vlan = !!val->value.i;
+ return 0;
+}
+
+static int
+psb6970_get_vlan(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct psb6970_priv *priv = to_psb6970(dev);
+ val->value.i = priv->vlan;
+ return 0;
+}
+
+static int psb6970_set_pvid(struct switch_dev *dev, int port, int vlan)
+{
+ struct psb6970_priv *priv = to_psb6970(dev);
+
+ /* make sure no invalid PVIDs get set */
+ if (vlan >= dev->vlans)
+ return -EINVAL;
+
+ priv->pvid[port] = vlan;
+ return 0;
+}
+
+static int psb6970_get_pvid(struct switch_dev *dev, int port, int *vlan)
+{
+ struct psb6970_priv *priv = to_psb6970(dev);
+ *vlan = priv->pvid[port];
+ return 0;
+}
+
+static int
+psb6970_set_vid(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct psb6970_priv *priv = to_psb6970(dev);
+ priv->vlan_id[val->port_vlan] = val->value.i;
+ return 0;
+}
+
+static int
+psb6970_get_vid(struct switch_dev *dev, const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct psb6970_priv *priv = to_psb6970(dev);
+ val->value.i = priv->vlan_id[val->port_vlan];
+ return 0;
+}
+
+static struct switch_attr psb6970_globals[] = {
+ {
+ .type = SWITCH_TYPE_INT,
+ .name = "enable_vlan",
+ .description = "Enable VLAN mode",
+ .set = psb6970_set_vlan,
+ .get = psb6970_get_vlan,
+ .max = 1},
+};
+
+static struct switch_attr psb6970_port[] = {
+};
+
+static struct switch_attr psb6970_vlan[] = {
+ {
+ .type = SWITCH_TYPE_INT,
+ .name = "vid",
+ .description = "VLAN ID (0-4094)",
+ .set = psb6970_set_vid,
+ .get = psb6970_get_vid,
+ .max = 4094,
+ },
+};
+
+static int psb6970_get_ports(struct switch_dev *dev, struct switch_val *val)
+{
+ struct psb6970_priv *priv = to_psb6970(dev);
+ u8 ports = priv->vlan_table[val->port_vlan];
+ int i;
+
+ val->len = 0;
+ for (i = 0; i < PSB6970_NUM_PORTS; i++) {
+ struct switch_port *p;
+
+ if (!(ports & (1 << i)))
+ continue;
+
+ p = &val->value.ports[val->len++];
+ p->id = i;
+ if (priv->vlan_tagged & (1 << i))
+ p->flags = (1 << SWITCH_PORT_FLAG_TAGGED);
+ else
+ p->flags = 0;
+ }
+ return 0;
+}
+
+static int psb6970_set_ports(struct switch_dev *dev, struct switch_val *val)
+{
+ struct psb6970_priv *priv = to_psb6970(dev);
+ u8 *vt = &priv->vlan_table[val->port_vlan];
+ int i, j;
+
+ *vt = 0;
+ for (i = 0; i < val->len; i++) {
+ struct switch_port *p = &val->value.ports[i];
+
+ if (p->flags & (1 << SWITCH_PORT_FLAG_TAGGED))
+ priv->vlan_tagged |= (1 << p->id);
+ else {
+ priv->vlan_tagged &= ~(1 << p->id);
+ priv->pvid[p->id] = val->port_vlan;
+
+ /* make sure that an untagged port does not
+ * appear in other vlans */
+ for (j = 0; j < PSB6970_MAX_VLANS; j++) {
+ if (j == val->port_vlan)
+ continue;
+ priv->vlan_table[j] &= ~(1 << p->id);
+ }
+ }
+
+ *vt |= 1 << p->id;
+ }
+ return 0;
+}
+
+static int psb6970_hw_apply(struct switch_dev *dev)
+{
+ struct psb6970_priv *priv = to_psb6970(dev);
+ int i, j;
+
+ mutex_lock(&priv->reg_mutex);
+
+ if (priv->vlan) {
+		/* program the configured vlans into the vlan translation unit */
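+		/* each filter entry spans two registers: VFxL holds the VID
+		 * plus a valid flag, VFxH the member port mask with the
+		 * tagged members repeated at bit 7 and up */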
+ for (j = 0; j < PSB6970_MAX_VLANS; j++) {
+ u8 vp = priv->vlan_table[j];
+
+ if (vp) {
+ priv->write(priv->phy, PSB6970_VFxL(j),
+ PSB6970_VFxL_VV | priv->vlan_id[j]);
+ priv->write(priv->phy, PSB6970_VFxH(j),
+					    ((vp & priv->vlan_tagged) << PSB6970_VFxH_TM_SHIFT) | vp);
+ } else /* clear VLAN Valid flag for unused vlans */
+ priv->write(priv->phy, PSB6970_VFxL(j), 0);
+
+ }
+ }
+
+ /* update the port destination mask registers and tag settings */
+ for (i = 0; i < PSB6970_NUM_PORTS; i++) {
+ int dvid = 1, pbvm = 0x7f | PSB6970_PBVM_VSD, ec = 0;
+
+ if (priv->vlan) {
+ ec = PSB6970_EC_IFNTE;
+ dvid = priv->vlan_id[priv->pvid[i]];
+ pbvm |= PSB6970_PBVM_TBVE | PSB6970_PBVM_VMCE;
+
+			if ((1 << i) & priv->vlan_tagged)
+ pbvm |= PSB6970_PBVM_AOVTP | PSB6970_PBVM_VC;
+ }
+
+ priv->write(priv->phy, PSB6970_PBVM(i), pbvm);
+
+ if (!PSB6970_IS_CPU_PORT(i)) {
+ priv->write(priv->phy, PSB6970_EC(i), ec);
+ priv->write(priv->phy, PSB6970_DVID(i), dvid);
+ }
+ }
+
+ mutex_unlock(&priv->reg_mutex);
+ return 0;
+}
+
+static int psb6970_reset_switch(struct switch_dev *dev)
+{
+ struct psb6970_priv *priv = to_psb6970(dev);
+ int i;
+
+ mutex_lock(&priv->reg_mutex);
+
+ memset(&priv->vlan, 0, sizeof(struct psb6970_priv) -
+ offsetof(struct psb6970_priv, vlan));
+
+ for (i = 0; i < PSB6970_MAX_VLANS; i++)
+ priv->vlan_id[i] = i;
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return psb6970_hw_apply(dev);
+}
+
+static const struct switch_dev_ops psb6970_ops = {
+ .attr_global = {
+ .attr = psb6970_globals,
+ .n_attr = ARRAY_SIZE(psb6970_globals),
+ },
+ .attr_port = {
+ .attr = psb6970_port,
+ .n_attr = ARRAY_SIZE(psb6970_port),
+ },
+ .attr_vlan = {
+ .attr = psb6970_vlan,
+ .n_attr = ARRAY_SIZE(psb6970_vlan),
+ },
+ .get_port_pvid = psb6970_get_pvid,
+ .set_port_pvid = psb6970_set_pvid,
+ .get_vlan_ports = psb6970_get_ports,
+ .set_vlan_ports = psb6970_set_ports,
+ .apply_config = psb6970_hw_apply,
+ .reset_switch = psb6970_reset_switch,
+};
+
+static int psb6970_config_init(struct phy_device *pdev)
+{
+ struct psb6970_priv *priv;
+ struct net_device *dev = pdev->attached_dev;
+ struct switch_dev *swdev;
+ int ret;
+
+ priv = kzalloc(sizeof(struct psb6970_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
+ priv->phy = pdev;
+
+ if (pdev->addr == 0)
+ printk(KERN_INFO "%s: psb6970 switch driver attached.\n",
+ pdev->attached_dev->name);
+
+ if (pdev->addr != 0) {
+ kfree(priv);
+ return 0;
+ }
+
+ pdev->supported = pdev->advertising = SUPPORTED_100baseT_Full;
+
+ mutex_init(&priv->reg_mutex);
+ priv->read = psb6970_mii_read;
+ priv->write = psb6970_mii_write;
+
+ pdev->priv = priv;
+
+ swdev = &priv->dev;
+ swdev->cpu_port = PSB6970_DEFAULT_PORT_CPU;
+ swdev->ops = &psb6970_ops;
+
+ swdev->name = "Lantiq PSB6970";
+ swdev->vlans = PSB6970_MAX_VLANS;
+ swdev->ports = PSB6970_NUM_PORTS;
+
+ if ((ret = register_switch(&priv->dev, pdev->attached_dev)) < 0) {
+ kfree(priv);
+ goto done;
+ }
+
+ ret = psb6970_reset_switch(&priv->dev);
+ if (ret) {
+ kfree(priv);
+ goto done;
+ }
+
+ dev->phy_ptr = priv;
+
+done:
+ return ret;
+}
+
+static int psb6970_read_status(struct phy_device *phydev)
+{
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_FULL;
+ phydev->link = 1;
+
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
+
+ return 0;
+}
+
+static int psb6970_config_aneg(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static int psb6970_probe(struct phy_device *pdev)
+{
+ return 0;
+}
+
+static void psb6970_remove(struct phy_device *pdev)
+{
+ struct psb6970_priv *priv = pdev->priv;
+
+ if (!priv)
+ return;
+
+ if (pdev->addr == 0)
+ unregister_switch(&priv->dev);
+ kfree(priv);
+}
+
+static int psb6970_fixup(struct phy_device *dev)
+{
+ struct mii_bus *bus = dev->bus;
+ u16 reg;
+
+ /* look for the switch on the bus */
+ reg = bus->read(bus, PHYADDR(PSB6970_CI1)) & PSB6970_CI1_MASK;
+ if (reg != PSB6970_CI1_VAL)
+ return 0;
+
+ dev->phy_id = (reg << 16);
+ dev->phy_id |= bus->read(bus, PHYADDR(PSB6970_CI0)) & PSB6970_CI0_MASK;
+
+ return 0;
+}
+
+static struct phy_driver psb6970_driver = {
+ .name = "Lantiq PSB6970",
+ .phy_id = PSB6970_CI1_VAL << 16,
+ .phy_id_mask = 0xffff0000,
+ .features = PHY_BASIC_FEATURES,
+ .probe = psb6970_probe,
+ .remove = psb6970_remove,
+ .config_init = &psb6970_config_init,
+ .config_aneg = &psb6970_config_aneg,
+ .read_status = &psb6970_read_status,
+ .driver = {.owner = THIS_MODULE},
+};
+
+int __init psb6970_init(void)
+{
+ phy_register_fixup_for_id(PHY_ANY_ID, psb6970_fixup);
+ return phy_driver_register(&psb6970_driver);
+}
+
+module_init(psb6970_init);
+
+void __exit psb6970_exit(void)
+{
+ phy_driver_unregister(&psb6970_driver);
+}
+
+module_exit(psb6970_exit);
+
+MODULE_DESCRIPTION("Lantiq PSB6970 Switch");
+MODULE_AUTHOR("Ithamar R. Adema <ithamar.adema@team-embedded.nl>");
+MODULE_LICENSE("GPL");
diff --git a/target/linux/generic/files/drivers/net/phy/rtl8306.c b/target/linux/generic/files/drivers/net/phy/rtl8306.c
new file mode 100644
index 000000000..78ded0ace
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/rtl8306.c
@@ -0,0 +1,1056 @@
+/*
+ * rtl8306.c: RTL8306S switch driver
+ *
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/if.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <net/genetlink.h>
+#include <linux/switch.h>
+#include <linux/delay.h>
+#include <linux/phy.h>
+
+//#define DEBUG 1
+
+/* Global (PHY0) */
+#define RTL8306_REG_PAGE 16
+#define RTL8306_REG_PAGE_LO (1 << 15)
+#define RTL8306_REG_PAGE_HI (1 << 1) /* inverted */
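+/* The four register pages are selected through PHY 0, register 16:
+ * bit 15 is the low page-select bit, bit 1 the (inverted) high bit. */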
+
+#define RTL8306_NUM_VLANS 16
+#define RTL8306_NUM_PORTS 6
+#define RTL8306_PORT_CPU 5
+#define RTL8306_NUM_PAGES 4
+#define RTL8306_NUM_REGS 32
+
+#define RTL_NAME_S "RTL8306S"
+#define RTL_NAME_SD "RTL8306SD"
+#define RTL_NAME_SDM "RTL8306SDM"
+#define RTL_NAME_UNKNOWN "RTL8306(unknown)"
+
+#define RTL8306_MAGIC 0x8306
+
+static LIST_HEAD(phydevs);
+
+struct rtl_priv {
+ struct list_head list;
+ struct switch_dev dev;
+ int page;
+ int type;
+ int do_cpu;
+ struct mii_bus *bus;
+ char hwname[sizeof(RTL_NAME_UNKNOWN)];
+ bool fixup;
+};
+
+struct rtl_phyregs {
+ int nway;
+ int speed;
+ int duplex;
+};
+
+#define to_rtl(_dev) container_of(_dev, struct rtl_priv, dev)
+
+enum {
+ RTL_TYPE_S,
+ RTL_TYPE_SD,
+ RTL_TYPE_SDM,
+};
+
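+/* Switch settings are described by a table of bit-field descriptors: each
+ * rtl_reg entry records the page/phy/register holding a field, its width
+ * and position, and whether its sense is inverted. rtl_get()/rtl_set()
+ * below translate attribute values to and from these fields. */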
+struct rtl_reg {
+ int page;
+ int phy;
+ int reg;
+ int bits;
+ int shift;
+ int inverted;
+};
+
+#define RTL_VLAN_REGOFS(name) \
+ (RTL_REG_VLAN1_##name - RTL_REG_VLAN0_##name)
+
+#define RTL_PORT_REGOFS(name) \
+ (RTL_REG_PORT1_##name - RTL_REG_PORT0_##name)
+
+#define RTL_PORT_REG(id, reg) \
+ (RTL_REG_PORT0_##reg + (id * RTL_PORT_REGOFS(reg)))
+
+#define RTL_VLAN_REG(id, reg) \
+ (RTL_REG_VLAN0_##reg + (id * RTL_VLAN_REGOFS(reg)))
+
+#define RTL_GLOBAL_REGATTR(reg) \
+ .id = RTL_REG_##reg, \
+ .type = SWITCH_TYPE_INT, \
+ .ofs = 0, \
+ .set = rtl_attr_set_int, \
+ .get = rtl_attr_get_int
+
+#define RTL_PORT_REGATTR(reg) \
+ .id = RTL_REG_PORT0_##reg, \
+ .type = SWITCH_TYPE_INT, \
+ .ofs = RTL_PORT_REGOFS(reg), \
+ .set = rtl_attr_set_port_int, \
+ .get = rtl_attr_get_port_int
+
+#define RTL_VLAN_REGATTR(reg) \
+ .id = RTL_REG_VLAN0_##reg, \
+ .type = SWITCH_TYPE_INT, \
+ .ofs = RTL_VLAN_REGOFS(reg), \
+ .set = rtl_attr_set_vlan_int, \
+ .get = rtl_attr_get_vlan_int
+
+enum rtl_regidx {
+ RTL_REG_CHIPID,
+ RTL_REG_CHIPVER,
+ RTL_REG_CHIPTYPE,
+ RTL_REG_CPUPORT,
+
+ RTL_REG_EN_CPUPORT,
+ RTL_REG_EN_TAG_OUT,
+ RTL_REG_EN_TAG_CLR,
+ RTL_REG_EN_TAG_IN,
+ RTL_REG_TRAP_CPU,
+ RTL_REG_TRUNK_PORTSEL,
+ RTL_REG_EN_TRUNK,
+ RTL_REG_RESET,
+
+ RTL_REG_VLAN_ENABLE,
+ RTL_REG_VLAN_FILTER,
+ RTL_REG_VLAN_TAG_ONLY,
+ RTL_REG_VLAN_TAG_AWARE,
+#define RTL_VLAN_ENUM(id) \
+ RTL_REG_VLAN##id##_VID, \
+ RTL_REG_VLAN##id##_PORTMASK
+ RTL_VLAN_ENUM(0),
+ RTL_VLAN_ENUM(1),
+ RTL_VLAN_ENUM(2),
+ RTL_VLAN_ENUM(3),
+ RTL_VLAN_ENUM(4),
+ RTL_VLAN_ENUM(5),
+ RTL_VLAN_ENUM(6),
+ RTL_VLAN_ENUM(7),
+ RTL_VLAN_ENUM(8),
+ RTL_VLAN_ENUM(9),
+ RTL_VLAN_ENUM(10),
+ RTL_VLAN_ENUM(11),
+ RTL_VLAN_ENUM(12),
+ RTL_VLAN_ENUM(13),
+ RTL_VLAN_ENUM(14),
+ RTL_VLAN_ENUM(15),
+#define RTL_PORT_ENUM(id) \
+ RTL_REG_PORT##id##_PVID, \
+ RTL_REG_PORT##id##_NULL_VID_REPLACE, \
+ RTL_REG_PORT##id##_NON_PVID_DISCARD, \
+ RTL_REG_PORT##id##_VID_INSERT, \
+ RTL_REG_PORT##id##_TAG_INSERT, \
+ RTL_REG_PORT##id##_LINK, \
+ RTL_REG_PORT##id##_SPEED, \
+ RTL_REG_PORT##id##_NWAY, \
+ RTL_REG_PORT##id##_NRESTART, \
+ RTL_REG_PORT##id##_DUPLEX, \
+ RTL_REG_PORT##id##_RXEN, \
+ RTL_REG_PORT##id##_TXEN
+ RTL_PORT_ENUM(0),
+ RTL_PORT_ENUM(1),
+ RTL_PORT_ENUM(2),
+ RTL_PORT_ENUM(3),
+ RTL_PORT_ENUM(4),
+ RTL_PORT_ENUM(5),
+};
+
+static const struct rtl_reg rtl_regs[] = {
+ [RTL_REG_CHIPID] = { 0, 4, 30, 16, 0, 0 },
+ [RTL_REG_CHIPVER] = { 0, 4, 31, 8, 0, 0 },
+ [RTL_REG_CHIPTYPE] = { 0, 4, 31, 2, 8, 0 },
+
+ /* CPU port number */
+ [RTL_REG_CPUPORT] = { 2, 4, 21, 3, 0, 0 },
+ /* Enable CPU port function */
+ [RTL_REG_EN_CPUPORT] = { 3, 2, 21, 1, 15, 1 },
+ /* Enable CPU port tag insertion */
+ [RTL_REG_EN_TAG_OUT] = { 3, 2, 21, 1, 12, 0 },
+ /* Enable CPU port tag removal */
+ [RTL_REG_EN_TAG_CLR] = { 3, 2, 21, 1, 11, 0 },
+ /* Enable CPU port tag checking */
+ [RTL_REG_EN_TAG_IN] = { 0, 4, 21, 1, 7, 0 },
+ [RTL_REG_EN_TRUNK] = { 0, 0, 19, 1, 11, 1 },
+ [RTL_REG_TRUNK_PORTSEL] = { 0, 0, 16, 1, 6, 1 },
+ [RTL_REG_RESET] = { 0, 0, 16, 1, 12, 0 },
+
+ [RTL_REG_TRAP_CPU] = { 3, 2, 22, 1, 6, 0 },
+
+ [RTL_REG_VLAN_TAG_ONLY] = { 0, 0, 16, 1, 8, 1 },
+ [RTL_REG_VLAN_FILTER] = { 0, 0, 16, 1, 9, 1 },
+ [RTL_REG_VLAN_TAG_AWARE] = { 0, 0, 16, 1, 10, 1 },
+ [RTL_REG_VLAN_ENABLE] = { 0, 0, 18, 1, 8, 1 },
+
+#define RTL_VLAN_REGS(id, phy, page, regofs) \
+ [RTL_REG_VLAN##id##_VID] = { page, phy, 25 + regofs, 12, 0, 0 }, \
+ [RTL_REG_VLAN##id##_PORTMASK] = { page, phy, 24 + regofs, 6, 0, 0 }
+ RTL_VLAN_REGS( 0, 0, 0, 0),
+ RTL_VLAN_REGS( 1, 1, 0, 0),
+ RTL_VLAN_REGS( 2, 2, 0, 0),
+ RTL_VLAN_REGS( 3, 3, 0, 0),
+ RTL_VLAN_REGS( 4, 4, 0, 0),
+ RTL_VLAN_REGS( 5, 0, 1, 2),
+ RTL_VLAN_REGS( 6, 1, 1, 2),
+ RTL_VLAN_REGS( 7, 2, 1, 2),
+ RTL_VLAN_REGS( 8, 3, 1, 2),
+ RTL_VLAN_REGS( 9, 4, 1, 2),
+ RTL_VLAN_REGS(10, 0, 1, 4),
+ RTL_VLAN_REGS(11, 1, 1, 4),
+ RTL_VLAN_REGS(12, 2, 1, 4),
+ RTL_VLAN_REGS(13, 3, 1, 4),
+ RTL_VLAN_REGS(14, 4, 1, 4),
+ RTL_VLAN_REGS(15, 0, 1, 6),
+
+#define REG_PORT_SETTING(port, phy) \
+ [RTL_REG_PORT##port##_SPEED] = { 0, phy, 0, 1, 13, 0 }, \
+ [RTL_REG_PORT##port##_NWAY] = { 0, phy, 0, 1, 12, 0 }, \
+ [RTL_REG_PORT##port##_NRESTART] = { 0, phy, 0, 1, 9, 0 }, \
+ [RTL_REG_PORT##port##_DUPLEX] = { 0, phy, 0, 1, 8, 0 }, \
+ [RTL_REG_PORT##port##_TXEN] = { 0, phy, 24, 1, 11, 0 }, \
+ [RTL_REG_PORT##port##_RXEN] = { 0, phy, 24, 1, 10, 0 }, \
+ [RTL_REG_PORT##port##_LINK] = { 0, phy, 1, 1, 2, 0 }, \
+ [RTL_REG_PORT##port##_NULL_VID_REPLACE] = { 0, phy, 22, 1, 12, 0 }, \
+ [RTL_REG_PORT##port##_NON_PVID_DISCARD] = { 0, phy, 22, 1, 11, 0 }, \
+ [RTL_REG_PORT##port##_VID_INSERT] = { 0, phy, 22, 2, 9, 0 }, \
+ [RTL_REG_PORT##port##_TAG_INSERT] = { 0, phy, 22, 2, 0, 0 }
+
+ REG_PORT_SETTING(0, 0),
+ REG_PORT_SETTING(1, 1),
+ REG_PORT_SETTING(2, 2),
+ REG_PORT_SETTING(3, 3),
+ REG_PORT_SETTING(4, 4),
+ REG_PORT_SETTING(5, 6),
+
+#define REG_PORT_PVID(phy, page, regofs) \
+ { page, phy, 24 + regofs, 4, 12, 0 }
+ [RTL_REG_PORT0_PVID] = REG_PORT_PVID(0, 0, 0),
+ [RTL_REG_PORT1_PVID] = REG_PORT_PVID(1, 0, 0),
+ [RTL_REG_PORT2_PVID] = REG_PORT_PVID(2, 0, 0),
+ [RTL_REG_PORT3_PVID] = REG_PORT_PVID(3, 0, 0),
+ [RTL_REG_PORT4_PVID] = REG_PORT_PVID(4, 0, 0),
+ [RTL_REG_PORT5_PVID] = REG_PORT_PVID(0, 1, 2),
+};
+
+
+static inline void
+rtl_set_page(struct rtl_priv *priv, unsigned int page)
+{
+ struct mii_bus *bus = priv->bus;
+ u16 pgsel;
+
+ if (priv->fixup)
+ return;
+
+ if (priv->page == page)
+ return;
+
+ BUG_ON(page > RTL8306_NUM_PAGES);
+ pgsel = bus->read(bus, 0, RTL8306_REG_PAGE);
+ pgsel &= ~(RTL8306_REG_PAGE_LO | RTL8306_REG_PAGE_HI);
+ if (page & (1 << 0))
+ pgsel |= RTL8306_REG_PAGE_LO;
+ if (!(page & (1 << 1))) /* bit is inverted */
+ pgsel |= RTL8306_REG_PAGE_HI;
+ bus->write(bus, 0, RTL8306_REG_PAGE, pgsel);
+}
+
+static inline int
+rtl_w16(struct switch_dev *dev, unsigned int page, unsigned int phy, unsigned int reg, u16 val)
+{
+ struct rtl_priv *priv = to_rtl(dev);
+ struct mii_bus *bus = priv->bus;
+
+ rtl_set_page(priv, page);
+ bus->write(bus, phy, reg, val);
+ bus->read(bus, phy, reg); /* flush */
+ return 0;
+}
+
+static inline int
+rtl_r16(struct switch_dev *dev, unsigned int page, unsigned int phy, unsigned int reg)
+{
+ struct rtl_priv *priv = to_rtl(dev);
+ struct mii_bus *bus = priv->bus;
+
+ rtl_set_page(priv, page);
+ return bus->read(bus, phy, reg);
+}
+
+static inline u16
+rtl_rmw(struct switch_dev *dev, unsigned int page, unsigned int phy, unsigned int reg, u16 mask, u16 val)
+{
+ struct rtl_priv *priv = to_rtl(dev);
+ struct mii_bus *bus = priv->bus;
+ u16 r;
+
+ rtl_set_page(priv, page);
+ r = bus->read(bus, phy, reg);
+ r &= ~mask;
+ r |= val;
+ bus->write(bus, phy, reg, r);
+ return bus->read(bus, phy, reg); /* flush */
+}
+
+
+static inline int
+rtl_get(struct switch_dev *dev, enum rtl_regidx s)
+{
+ const struct rtl_reg *r = &rtl_regs[s];
+ u16 val;
+
+ BUG_ON(s >= ARRAY_SIZE(rtl_regs));
+ if (r->bits == 0) /* unimplemented */
+ return 0;
+
+ val = rtl_r16(dev, r->page, r->phy, r->reg);
+
+ if (r->shift > 0)
+ val >>= r->shift;
+
+ if (r->inverted)
+ val = ~val;
+
+ val &= (1 << r->bits) - 1;
+
+ return val;
+}
+
+static int
+rtl_set(struct switch_dev *dev, enum rtl_regidx s, unsigned int val)
+{
+ const struct rtl_reg *r = &rtl_regs[s];
+ u16 mask = 0xffff;
+
+ BUG_ON(s >= ARRAY_SIZE(rtl_regs));
+
+ if (r->bits == 0) /* unimplemented */
+ return 0;
+
+ if (r->shift > 0)
+ val <<= r->shift;
+
+ if (r->inverted)
+ val = ~val;
+
+ if (r->bits != 16) {
+ mask = (1 << r->bits) - 1;
+ mask <<= r->shift;
+ }
+ val &= mask;
+ return rtl_rmw(dev, r->page, r->phy, r->reg, mask, val);
+}
+
+static void
+rtl_phy_save(struct switch_dev *dev, int port, struct rtl_phyregs *regs)
+{
+ regs->nway = rtl_get(dev, RTL_PORT_REG(port, NWAY));
+ regs->speed = rtl_get(dev, RTL_PORT_REG(port, SPEED));
+ regs->duplex = rtl_get(dev, RTL_PORT_REG(port, DUPLEX));
+}
+
+static void
+rtl_phy_restore(struct switch_dev *dev, int port, struct rtl_phyregs *regs)
+{
+ rtl_set(dev, RTL_PORT_REG(port, NWAY), regs->nway);
+ rtl_set(dev, RTL_PORT_REG(port, SPEED), regs->speed);
+ rtl_set(dev, RTL_PORT_REG(port, DUPLEX), regs->duplex);
+}
+
+static void
+rtl_port_set_enable(struct switch_dev *dev, int port, int enabled)
+{
+ rtl_set(dev, RTL_PORT_REG(port, RXEN), enabled);
+ rtl_set(dev, RTL_PORT_REG(port, TXEN), enabled);
+
+ if ((port >= 5) || !enabled)
+ return;
+
+ /* restart autonegotiation if enabled */
+ rtl_set(dev, RTL_PORT_REG(port, NRESTART), 1);
+}
+
+static int
+rtl_hw_apply(struct switch_dev *dev)
+{
+ int i;
+ int trunk_en, trunk_psel;
+ struct rtl_phyregs port5;
+
+ rtl_phy_save(dev, 5, &port5);
+
+ /* disable rx/tx from PHYs */
+ for (i = 0; i < RTL8306_NUM_PORTS - 1; i++) {
+ rtl_port_set_enable(dev, i, 0);
+ }
+
+ /* save trunking status */
+ trunk_en = rtl_get(dev, RTL_REG_EN_TRUNK);
+ trunk_psel = rtl_get(dev, RTL_REG_TRUNK_PORTSEL);
+
+ /* trunk port 3 and 4
+ * XXX: Big WTF, but RealTek seems to do it */
+ rtl_set(dev, RTL_REG_EN_TRUNK, 1);
+ rtl_set(dev, RTL_REG_TRUNK_PORTSEL, 1);
+
+ /* execute the software reset */
+ rtl_set(dev, RTL_REG_RESET, 1);
+
+ /* wait for the reset to complete,
+ * but don't wait for too long */
+ for (i = 0; i < 10; i++) {
+ if (rtl_get(dev, RTL_REG_RESET) == 0)
+ break;
+
+ msleep(1);
+ }
+
+ /* enable rx/tx from PHYs */
+ for (i = 0; i < RTL8306_NUM_PORTS - 1; i++) {
+ rtl_port_set_enable(dev, i, 1);
+ }
+
+ /* restore trunking settings */
+ rtl_set(dev, RTL_REG_EN_TRUNK, trunk_en);
+ rtl_set(dev, RTL_REG_TRUNK_PORTSEL, trunk_psel);
+ rtl_phy_restore(dev, 5, &port5);
+
+ return 0;
+}
+
+static void
+rtl_hw_init(struct switch_dev *dev)
+{
+ struct rtl_priv *priv = to_rtl(dev);
+ int cpu_mask = 1 << dev->cpu_port;
+ int i;
+
+ rtl_set(dev, RTL_REG_VLAN_ENABLE, 0);
+ rtl_set(dev, RTL_REG_VLAN_FILTER, 0);
+ rtl_set(dev, RTL_REG_EN_TRUNK, 0);
+ rtl_set(dev, RTL_REG_TRUNK_PORTSEL, 0);
+
+ /* initialize cpu port settings */
+ if (priv->do_cpu) {
+ rtl_set(dev, RTL_REG_CPUPORT, dev->cpu_port);
+ rtl_set(dev, RTL_REG_EN_CPUPORT, 1);
+ } else {
+ rtl_set(dev, RTL_REG_CPUPORT, 7);
+ rtl_set(dev, RTL_REG_EN_CPUPORT, 0);
+ }
+ rtl_set(dev, RTL_REG_EN_TAG_OUT, 0);
+ rtl_set(dev, RTL_REG_EN_TAG_IN, 0);
+ rtl_set(dev, RTL_REG_EN_TAG_CLR, 0);
+
+ /* reset all vlans */
+ for (i = 0; i < RTL8306_NUM_VLANS; i++) {
+ rtl_set(dev, RTL_VLAN_REG(i, VID), i);
+ rtl_set(dev, RTL_VLAN_REG(i, PORTMASK), 0);
+ }
+
+ /* default to port isolation */
+ for (i = 0; i < RTL8306_NUM_PORTS; i++) {
+ unsigned long mask;
+
+ if ((1 << i) == cpu_mask)
+			mask = ((1 << RTL8306_NUM_PORTS) - 1) & ~cpu_mask; /* all ports except the cpu port */
+ else
+ mask = cpu_mask | (1 << i);
+
+ rtl_set(dev, RTL_VLAN_REG(i, PORTMASK), mask);
+ rtl_set(dev, RTL_PORT_REG(i, PVID), i);
+ rtl_set(dev, RTL_PORT_REG(i, NULL_VID_REPLACE), 1);
+ rtl_set(dev, RTL_PORT_REG(i, VID_INSERT), 1);
+ rtl_set(dev, RTL_PORT_REG(i, TAG_INSERT), 3);
+ }
+ rtl_hw_apply(dev);
+}
+
+#ifdef DEBUG
+static int
+rtl_set_use_cpuport(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct rtl_priv *priv = to_rtl(dev);
+ priv->do_cpu = val->value.i;
+ rtl_hw_init(dev);
+ return 0;
+}
+
+static int
+rtl_get_use_cpuport(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct rtl_priv *priv = to_rtl(dev);
+ val->value.i = priv->do_cpu;
+ return 0;
+}
+
+static int
+rtl_set_cpuport(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ dev->cpu_port = val->value.i;
+ rtl_hw_init(dev);
+ return 0;
+}
+
+static int
+rtl_get_cpuport(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ val->value.i = dev->cpu_port;
+ return 0;
+}
+#endif
+
+static int
+rtl_reset(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ rtl_hw_init(dev);
+ return 0;
+}
+
+static int
+rtl_attr_set_int(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ int idx = attr->id + (val->port_vlan * attr->ofs);
+ struct rtl_phyregs port;
+
+ if (attr->id >= ARRAY_SIZE(rtl_regs))
+ return -EINVAL;
+
+ if ((attr->max > 0) && (val->value.i > attr->max))
+ return -EINVAL;
+
+ /* access to phy register 22 on port 4/5
+ * needs phy status save/restore */
+ if ((val->port_vlan > 3) &&
+ (rtl_regs[idx].reg == 22) &&
+ (rtl_regs[idx].page == 0)) {
+
+ rtl_phy_save(dev, val->port_vlan, &port);
+ rtl_set(dev, idx, val->value.i);
+ rtl_phy_restore(dev, val->port_vlan, &port);
+ } else {
+ rtl_set(dev, idx, val->value.i);
+ }
+
+ return 0;
+}
+
+static int
+rtl_attr_get_int(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ int idx = attr->id + (val->port_vlan * attr->ofs);
+
+ if (idx >= ARRAY_SIZE(rtl_regs))
+ return -EINVAL;
+
+ val->value.i = rtl_get(dev, idx);
+ return 0;
+}
+
+static int
+rtl_attr_set_port_int(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ if (val->port_vlan >= RTL8306_NUM_PORTS)
+ return -EINVAL;
+
+ return rtl_attr_set_int(dev, attr, val);
+}
+
+static int
+rtl_attr_get_port_int(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ if (val->port_vlan >= RTL8306_NUM_PORTS)
+ return -EINVAL;
+ return rtl_attr_get_int(dev, attr, val);
+}
+
+static int
+rtl_attr_set_vlan_int(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ if (val->port_vlan >= dev->vlans)
+ return -EINVAL;
+
+ return rtl_attr_set_int(dev, attr, val);
+}
+
+static int
+rtl_attr_get_vlan_int(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ if (val->port_vlan >= dev->vlans)
+ return -EINVAL;
+
+ return rtl_attr_get_int(dev, attr, val);
+}
+
+static int
+rtl_get_ports(struct switch_dev *dev, struct switch_val *val)
+{
+ unsigned int i, mask;
+
+ mask = rtl_get(dev, RTL_VLAN_REG(val->port_vlan, PORTMASK));
+ for (i = 0; i < RTL8306_NUM_PORTS; i++) {
+ struct switch_port *port;
+
+ if (!(mask & (1 << i)))
+ continue;
+
+ port = &val->value.ports[val->len];
+ port->id = i;
+ port->flags = 0;
+ val->len++;
+ }
+
+ return 0;
+}
+
+static int
+rtl_set_vlan(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct rtl_priv *priv = to_rtl(dev);
+ struct rtl_phyregs port;
+ int en = val->value.i;
+ int i;
+
+ rtl_set(dev, RTL_REG_EN_TAG_OUT, en && priv->do_cpu);
+ rtl_set(dev, RTL_REG_EN_TAG_IN, en && priv->do_cpu);
+ rtl_set(dev, RTL_REG_EN_TAG_CLR, en && priv->do_cpu);
+ rtl_set(dev, RTL_REG_VLAN_TAG_AWARE, en);
+ if (en)
+ rtl_set(dev, RTL_REG_VLAN_FILTER, en);
+
+ for (i = 0; i < RTL8306_NUM_PORTS; i++) {
+ if (i > 3)
+			rtl_phy_save(dev, i, &port);
+ rtl_set(dev, RTL_PORT_REG(i, NULL_VID_REPLACE), 1);
+ rtl_set(dev, RTL_PORT_REG(i, VID_INSERT), (en ? (i == dev->cpu_port ? 0 : 1) : 1));
+ rtl_set(dev, RTL_PORT_REG(i, TAG_INSERT), (en ? (i == dev->cpu_port ? 2 : 1) : 3));
+ if (i > 3)
+			rtl_phy_restore(dev, i, &port);
+ }
+ rtl_set(dev, RTL_REG_VLAN_ENABLE, en);
+
+ return 0;
+}
+
+static int
+rtl_get_vlan(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+	val->value.i = rtl_get(dev, RTL_REG_VLAN_ENABLE);
+	return 0;
+}
+
+static int
+rtl_set_ports(struct switch_dev *dev, struct switch_val *val)
+{
+ unsigned int mask = 0;
+ unsigned int oldmask;
+ int i;
+
+ for(i = 0; i < val->len; i++)
+ {
+ struct switch_port *port = &val->value.ports[i];
+ bool tagged = false;
+
+ mask |= (1 << port->id);
+
+ if (port->id == dev->cpu_port)
+ continue;
+
+ if ((i == dev->cpu_port) ||
+ (port->flags & (1 << SWITCH_PORT_FLAG_TAGGED)))
+ tagged = true;
+
+ /* fix up PVIDs for added ports */
+ if (!tagged)
+ rtl_set(dev, RTL_PORT_REG(port->id, PVID), val->port_vlan);
+
+ rtl_set(dev, RTL_PORT_REG(port->id, NON_PVID_DISCARD), (tagged ? 0 : 1));
+ rtl_set(dev, RTL_PORT_REG(port->id, VID_INSERT), (tagged ? 0 : 1));
+ rtl_set(dev, RTL_PORT_REG(port->id, TAG_INSERT), (tagged ? 2 : 1));
+ }
+
+ oldmask = rtl_get(dev, RTL_VLAN_REG(val->port_vlan, PORTMASK));
+ rtl_set(dev, RTL_VLAN_REG(val->port_vlan, PORTMASK), mask);
+
+ /* fix up PVIDs for removed ports, default to last vlan */
+ oldmask &= ~mask;
+ for (i = 0; i < RTL8306_NUM_PORTS; i++) {
+ if (!(oldmask & (1 << i)))
+ continue;
+
+ if (i == dev->cpu_port)
+ continue;
+
+ if (rtl_get(dev, RTL_PORT_REG(i, PVID)) == val->port_vlan)
+ rtl_set(dev, RTL_PORT_REG(i, PVID), dev->vlans - 1);
+ }
+
+ return 0;
+}
+
+static struct switch_attr rtl_globals[] = {
+ {
+ .type = SWITCH_TYPE_INT,
+ .name = "reset",
+ .description = "Reset the switch",
+ .set = rtl_reset,
+ },
+ {
+ .type = SWITCH_TYPE_INT,
+ .name = "enable_vlan",
+ .description = "Enable VLAN mode",
+ .max = 1,
+ .set = rtl_set_vlan,
+ .get = rtl_get_vlan,
+ },
+ {
+ RTL_GLOBAL_REGATTR(EN_TRUNK),
+ .name = "trunk",
+ .description = "Enable port trunking",
+ .max = 1,
+ },
+ {
+ RTL_GLOBAL_REGATTR(TRUNK_PORTSEL),
+ .name = "trunk_sel",
+		.description = "Select port pair for trunking (0: ports 0 and 1, 1: ports 3 and 4)",
+ .max = 1,
+ },
+#ifdef DEBUG
+ {
+ RTL_GLOBAL_REGATTR(VLAN_FILTER),
+ .name = "vlan_filter",
+ .description = "Filter incoming packets for allowed VLANS",
+ .max = 1,
+ },
+ {
+ .type = SWITCH_TYPE_INT,
+ .name = "cpuport",
+ .description = "CPU Port",
+ .set = rtl_set_cpuport,
+ .get = rtl_get_cpuport,
+ .max = RTL8306_NUM_PORTS,
+ },
+ {
+ .type = SWITCH_TYPE_INT,
+ .name = "use_cpuport",
+ .description = "CPU Port handling flag",
+ .set = rtl_set_use_cpuport,
+ .get = rtl_get_use_cpuport,
+ .max = RTL8306_NUM_PORTS,
+ },
+ {
+ RTL_GLOBAL_REGATTR(TRAP_CPU),
+ .name = "trap_cpu",
+ .description = "VLAN trap to CPU",
+ .max = 1,
+ },
+ {
+ RTL_GLOBAL_REGATTR(VLAN_TAG_AWARE),
+ .name = "vlan_tag_aware",
+ .description = "Enable VLAN tag awareness",
+ .max = 1,
+ },
+ {
+ RTL_GLOBAL_REGATTR(VLAN_TAG_ONLY),
+ .name = "tag_only",
+ .description = "Only accept tagged packets",
+ .max = 1,
+ },
+#endif
+};
+static struct switch_attr rtl_port[] = {
+ {
+ RTL_PORT_REGATTR(PVID),
+ .name = "pvid",
+ .description = "Port VLAN ID",
+ .max = RTL8306_NUM_VLANS - 1,
+ },
+ {
+ RTL_PORT_REGATTR(LINK),
+ .name = "link",
+ .description = "get the current link state",
+ .max = 1,
+ .set = NULL,
+ },
+#ifdef DEBUG
+ {
+ RTL_PORT_REGATTR(NULL_VID_REPLACE),
+ .name = "null_vid",
+ .description = "NULL VID gets replaced by port default vid",
+ .max = 1,
+ },
+ {
+ RTL_PORT_REGATTR(NON_PVID_DISCARD),
+ .name = "non_pvid_discard",
+ .description = "discard packets with VID != PVID",
+ .max = 1,
+ },
+ {
+ RTL_PORT_REGATTR(VID_INSERT),
+ .name = "vid_insert_remove",
+		.description = "how the switch inserts and removes VIDs",
+ .max = 3,
+ },
+ {
+ RTL_PORT_REGATTR(TAG_INSERT),
+ .name = "tag_insert",
+ .description = "tag insertion handling",
+ .max = 3,
+ },
+#endif
+ {
+ RTL_PORT_REGATTR(SPEED),
+ .name = "speed",
+ .description = "current link speed",
+ .max = 1,
+ },
+ {
+ RTL_PORT_REGATTR(NWAY),
+ .name = "nway",
+ .description = "enable autonegotiation",
+ .max = 1,
+ },
+};
+
+static struct switch_attr rtl_vlan[] = {
+ {
+ RTL_VLAN_REGATTR(VID),
+ .name = "vid",
+ .description = "VLAN ID (1-4095)",
+ .max = 4095,
+ },
+};
+
+static const struct switch_dev_ops rtl8306_ops = {
+ .attr_global = {
+ .attr = rtl_globals,
+ .n_attr = ARRAY_SIZE(rtl_globals),
+ },
+ .attr_port = {
+ .attr = rtl_port,
+ .n_attr = ARRAY_SIZE(rtl_port),
+ },
+ .attr_vlan = {
+ .attr = rtl_vlan,
+ .n_attr = ARRAY_SIZE(rtl_vlan),
+ },
+
+ .get_vlan_ports = rtl_get_ports,
+ .set_vlan_ports = rtl_set_ports,
+ .apply_config = rtl_hw_apply,
+};
+
+static int
+rtl8306_config_init(struct phy_device *pdev)
+{
+ struct net_device *netdev = pdev->attached_dev;
+ struct rtl_priv *priv = pdev->priv;
+ struct switch_dev *dev = &priv->dev;
+ struct switch_val val;
+ unsigned int chipid, chipver, chiptype;
+ int err;
+
+ /* Only init the switch for the primary PHY */
+ if (pdev->addr != 0)
+ return 0;
+
+ val.value.i = 1;
+ priv->dev.cpu_port = RTL8306_PORT_CPU;
+ priv->dev.ports = RTL8306_NUM_PORTS;
+ priv->dev.vlans = RTL8306_NUM_VLANS;
+ priv->dev.ops = &rtl8306_ops;
+ priv->do_cpu = 0;
+ priv->page = -1;
+ priv->bus = pdev->bus;
+
+ chipid = rtl_get(dev, RTL_REG_CHIPID);
+ chipver = rtl_get(dev, RTL_REG_CHIPVER);
+ chiptype = rtl_get(dev, RTL_REG_CHIPTYPE);
+ switch(chiptype) {
+ case 0:
+ case 2:
+ strncpy(priv->hwname, RTL_NAME_S, sizeof(priv->hwname));
+ priv->type = RTL_TYPE_S;
+ break;
+ case 1:
+ strncpy(priv->hwname, RTL_NAME_SD, sizeof(priv->hwname));
+ priv->type = RTL_TYPE_SD;
+ break;
+ case 3:
+ strncpy(priv->hwname, RTL_NAME_SDM, sizeof(priv->hwname));
+ priv->type = RTL_TYPE_SDM;
+ break;
+ default:
+ strncpy(priv->hwname, RTL_NAME_UNKNOWN, sizeof(priv->hwname));
+ break;
+ }
+
+ dev->name = priv->hwname;
+ rtl_hw_init(dev);
+
+ printk(KERN_INFO "Registering %s switch with Chip ID: 0x%04x, version: 0x%04x\n", priv->hwname, chipid, chipver);
+
+ err = register_switch(dev, netdev);
+ if (err < 0) {
+ kfree(priv);
+ return err;
+ }
+
+ return 0;
+}
+
+
+static int
+rtl8306_fixup(struct phy_device *pdev)
+{
+ struct rtl_priv priv;
+ u16 chipid;
+
+ /* Attach to primary LAN port and WAN port */
+ if (pdev->addr != 0 && pdev->addr != 4)
+ return 0;
+
+ memset(&priv, 0, sizeof(priv));
+ priv.fixup = true;
+ priv.page = -1;
+ priv.bus = pdev->bus;
+ chipid = rtl_get(&priv.dev, RTL_REG_CHIPID);
+ if (chipid == 0x5988)
+ pdev->phy_id = RTL8306_MAGIC;
+
+ return 0;
+}
+
+static int
+rtl8306_probe(struct phy_device *pdev)
+{
+ struct rtl_priv *priv;
+
+ list_for_each_entry(priv, &phydevs, list) {
+ /*
+ * share one rtl_priv instance between virtual phy
+ * devices on the same bus
+ */
+ if (priv->bus == pdev->bus)
+ goto found;
+ }
+ priv = kzalloc(sizeof(struct rtl_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->bus = pdev->bus;
+
+found:
+ pdev->priv = priv;
+ return 0;
+}
+
+static void
+rtl8306_remove(struct phy_device *pdev)
+{
+ struct rtl_priv *priv = pdev->priv;
+ unregister_switch(&priv->dev);
+ kfree(priv);
+}
+
+static int
+rtl8306_config_aneg(struct phy_device *pdev)
+{
+ struct rtl_priv *priv = pdev->priv;
+
+ /* Only for WAN */
+ if (pdev->addr == 0)
+ return 0;
+
+ /* Restart autonegotiation */
+ rtl_set(&priv->dev, RTL_PORT_REG(4, NWAY), 1);
+ rtl_set(&priv->dev, RTL_PORT_REG(4, NRESTART), 1);
+
+ return 0;
+}
+
+static int
+rtl8306_read_status(struct phy_device *pdev)
+{
+ struct rtl_priv *priv = pdev->priv;
+ struct switch_dev *dev = &priv->dev;
+
+ if (pdev->addr == 4) {
+ /* WAN */
+ pdev->speed = rtl_get(dev, RTL_PORT_REG(4, SPEED)) ? SPEED_100 : SPEED_10;
+ pdev->duplex = rtl_get(dev, RTL_PORT_REG(4, DUPLEX)) ? DUPLEX_FULL : DUPLEX_HALF;
+ pdev->link = !!rtl_get(dev, RTL_PORT_REG(4, LINK));
+ } else {
+ /* LAN */
+ pdev->speed = SPEED_100;
+ pdev->duplex = DUPLEX_FULL;
+ pdev->link = 1;
+ }
+
+ /*
+ * Bypass generic PHY status read,
+ * it doesn't work with this switch
+ */
+ if (pdev->link) {
+ pdev->state = PHY_RUNNING;
+ netif_carrier_on(pdev->attached_dev);
+ pdev->adjust_link(pdev->attached_dev);
+ } else {
+ pdev->state = PHY_NOLINK;
+ netif_carrier_off(pdev->attached_dev);
+ pdev->adjust_link(pdev->attached_dev);
+ }
+
+ return 0;
+}
+
+
+static struct phy_driver rtl8306_driver = {
+ .name = "Realtek RTL8306S",
+ .flags = PHY_HAS_MAGICANEG,
+ .phy_id = RTL8306_MAGIC,
+ .phy_id_mask = 0xffffffff,
+ .features = PHY_BASIC_FEATURES,
+ .probe = &rtl8306_probe,
+ .remove = &rtl8306_remove,
+ .config_init = &rtl8306_config_init,
+ .config_aneg = &rtl8306_config_aneg,
+ .read_status = &rtl8306_read_status,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+
+static int __init
+rtl_init(void)
+{
+ phy_register_fixup_for_id(PHY_ANY_ID, rtl8306_fixup);
+ return phy_driver_register(&rtl8306_driver);
+}
+
+static void __exit
+rtl_exit(void)
+{
+ phy_driver_unregister(&rtl8306_driver);
+}
+
+module_init(rtl_init);
+module_exit(rtl_exit);
+MODULE_LICENSE("GPL");
+
diff --git a/target/linux/generic/files/drivers/net/phy/rtl8366_smi.c b/target/linux/generic/files/drivers/net/phy/rtl8366_smi.c
new file mode 100644
index 000000000..be85274e7
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/rtl8366_smi.c
@@ -0,0 +1,1375 @@
+/*
+ * Realtek RTL8366 SMI interface driver
+ *
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/rtl8366.h>
+
+#ifdef CONFIG_RTL8366_SMI_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+
+#include "rtl8366_smi.h"
+
+#define RTL8366_SMI_ACK_RETRY_COUNT 5
+
+#define RTL8366_SMI_HW_STOP_DELAY 25 /* msecs */
+#define RTL8366_SMI_HW_START_DELAY 100 /* msecs */
+
+static inline void rtl8366_smi_clk_delay(struct rtl8366_smi *smi)
+{
+ ndelay(smi->clk_delay);
+}
+
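+/* Generate the SMI bus start condition by bit-banging the SDA/SCK lines */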
+static void rtl8366_smi_start(struct rtl8366_smi *smi)
+{
+ unsigned int sda = smi->gpio_sda;
+ unsigned int sck = smi->gpio_sck;
+
+ /*
+ * Set GPIO pins to output mode, with initial state:
+ * SCK = 0, SDA = 1
+ */
+ gpio_direction_output(sck, 0);
+ gpio_direction_output(sda, 1);
+ rtl8366_smi_clk_delay(smi);
+
+ /* CLK 1: 0 -> 1, 1 -> 0 */
+ gpio_set_value(sck, 1);
+ rtl8366_smi_clk_delay(smi);
+ gpio_set_value(sck, 0);
+ rtl8366_smi_clk_delay(smi);
+
+	/* CLK 2: pull SDA low while SCK is high, release it after SCK falls */
+ gpio_set_value(sck, 1);
+ rtl8366_smi_clk_delay(smi);
+ gpio_set_value(sda, 0);
+ rtl8366_smi_clk_delay(smi);
+ gpio_set_value(sck, 0);
+ rtl8366_smi_clk_delay(smi);
+ gpio_set_value(sda, 1);
+}
+
+static void rtl8366_smi_stop(struct rtl8366_smi *smi)
+{
+ unsigned int sda = smi->gpio_sda;
+ unsigned int sck = smi->gpio_sck;
+
+ rtl8366_smi_clk_delay(smi);
+ gpio_set_value(sda, 0);
+ gpio_set_value(sck, 1);
+ rtl8366_smi_clk_delay(smi);
+ gpio_set_value(sda, 1);
+ rtl8366_smi_clk_delay(smi);
+ gpio_set_value(sck, 1);
+ rtl8366_smi_clk_delay(smi);
+ gpio_set_value(sck, 0);
+ rtl8366_smi_clk_delay(smi);
+ gpio_set_value(sck, 1);
+
+	/* add one extra clock pulse */
+ rtl8366_smi_clk_delay(smi);
+ gpio_set_value(sck, 0);
+ rtl8366_smi_clk_delay(smi);
+ gpio_set_value(sck, 1);
+
+ /* set GPIO pins to input mode */
+ gpio_direction_input(sda);
+ gpio_direction_input(sck);
+}
+
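+/* Shift out 'len' bits of 'data' on SDA, most significant bit first */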
+static void rtl8366_smi_write_bits(struct rtl8366_smi *smi, u32 data, u32 len)
+{
+ unsigned int sda = smi->gpio_sda;
+ unsigned int sck = smi->gpio_sck;
+
+ for (; len > 0; len--) {
+ rtl8366_smi_clk_delay(smi);
+
+ /* prepare data */
+		gpio_set_value(sda, !!(data & (1 << (len - 1))));
+ rtl8366_smi_clk_delay(smi);
+
+ /* clocking */
+ gpio_set_value(sck, 1);
+ rtl8366_smi_clk_delay(smi);
+ gpio_set_value(sck, 0);
+ }
+}
+
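+/* Shift in 'len' bits from SDA into 'data', most significant bit first */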
+static void rtl8366_smi_read_bits(struct rtl8366_smi *smi, u32 len, u32 *data)
+{
+ unsigned int sda = smi->gpio_sda;
+ unsigned int sck = smi->gpio_sck;
+
+ gpio_direction_input(sda);
+
+ for (*data = 0; len > 0; len--) {
+ u32 u;
+
+ rtl8366_smi_clk_delay(smi);
+
+ /* clocking */
+ gpio_set_value(sck, 1);
+ rtl8366_smi_clk_delay(smi);
+ u = !!gpio_get_value(sda);
+ gpio_set_value(sck, 0);
+
+ *data |= (u << (len - 1));
+ }
+
+ gpio_direction_output(sda, 0);
+}
+
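+/* Poll SDA for the slave's ACK bit, giving up after a fixed number of retries */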
+static int rtl8366_smi_wait_for_ack(struct rtl8366_smi *smi)
+{
+ int retry_cnt;
+
+ retry_cnt = 0;
+ do {
+ u32 ack;
+
+ rtl8366_smi_read_bits(smi, 1, &ack);
+ if (ack == 0)
+ break;
+
+ if (++retry_cnt > RTL8366_SMI_ACK_RETRY_COUNT) {
+ dev_err(smi->parent, "ACK timeout\n");
+ return -ETIMEDOUT;
+ }
+ } while (1);
+
+ return 0;
+}
+
+static int rtl8366_smi_write_byte(struct rtl8366_smi *smi, u8 data)
+{
+ rtl8366_smi_write_bits(smi, data, 8);
+ return rtl8366_smi_wait_for_ack(smi);
+}
+
+static int rtl8366_smi_write_byte_noack(struct rtl8366_smi *smi, u8 data)
+{
+ rtl8366_smi_write_bits(smi, data, 8);
+ return 0;
+}
+
+static int rtl8366_smi_read_byte0(struct rtl8366_smi *smi, u8 *data)
+{
+ u32 t;
+
+ /* read data */
+ rtl8366_smi_read_bits(smi, 8, &t);
+ *data = (t & 0xff);
+
+ /* send an ACK */
+ rtl8366_smi_write_bits(smi, 0x00, 1);
+
+ return 0;
+}
+
+static int rtl8366_smi_read_byte1(struct rtl8366_smi *smi, u8 *data)
+{
+ u32 t;
+
+ /* read data */
+ rtl8366_smi_read_bits(smi, 8, &t);
+ *data = (t & 0xff);
+
+ /* send an ACK */
+ rtl8366_smi_write_bits(smi, 0x01, 1);
+
+ return 0;
+}
+
+int rtl8366_smi_read_reg(struct rtl8366_smi *smi, u32 addr, u32 *data)
+{
+ unsigned long flags;
+ u8 lo = 0;
+ u8 hi = 0;
+ int ret;
+
+ spin_lock_irqsave(&smi->lock, flags);
+
+ rtl8366_smi_start(smi);
+
+ /* send READ command */
+ ret = rtl8366_smi_write_byte(smi, smi->cmd_read);
+ if (ret)
+ goto out;
+
+ /* set ADDR[7:0] */
+ ret = rtl8366_smi_write_byte(smi, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* set ADDR[15:8] */
+ ret = rtl8366_smi_write_byte(smi, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* read DATA[7:0] */
+ rtl8366_smi_read_byte0(smi, &lo);
+ /* read DATA[15:8] */
+ rtl8366_smi_read_byte1(smi, &hi);
+
+ *data = ((u32) lo) | (((u32) hi) << 8);
+
+ ret = 0;
+
+ out:
+ rtl8366_smi_stop(smi);
+ spin_unlock_irqrestore(&smi->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rtl8366_smi_read_reg);
+
+static int __rtl8366_smi_write_reg(struct rtl8366_smi *smi,
+ u32 addr, u32 data, bool ack)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&smi->lock, flags);
+
+ rtl8366_smi_start(smi);
+
+ /* send WRITE command */
+ ret = rtl8366_smi_write_byte(smi, smi->cmd_write);
+ if (ret)
+ goto out;
+
+ /* set ADDR[7:0] */
+ ret = rtl8366_smi_write_byte(smi, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* set ADDR[15:8] */
+ ret = rtl8366_smi_write_byte(smi, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* write DATA[7:0] */
+ ret = rtl8366_smi_write_byte(smi, data & 0xff);
+ if (ret)
+ goto out;
+
+ /* write DATA[15:8] */
+ if (ack)
+ ret = rtl8366_smi_write_byte(smi, data >> 8);
+ else
+ ret = rtl8366_smi_write_byte_noack(smi, data >> 8);
+ if (ret)
+ goto out;
+
+ ret = 0;
+
+ out:
+ rtl8366_smi_stop(smi);
+ spin_unlock_irqrestore(&smi->lock, flags);
+
+ return ret;
+}
+
+int rtl8366_smi_write_reg(struct rtl8366_smi *smi, u32 addr, u32 data)
+{
+ return __rtl8366_smi_write_reg(smi, addr, data, true);
+}
+EXPORT_SYMBOL_GPL(rtl8366_smi_write_reg);
+
+int rtl8366_smi_write_reg_noack(struct rtl8366_smi *smi, u32 addr, u32 data)
+{
+ return __rtl8366_smi_write_reg(smi, addr, data, false);
+}
+EXPORT_SYMBOL_GPL(rtl8366_smi_write_reg_noack);
+
+int rtl8366_smi_rmwr(struct rtl8366_smi *smi, u32 addr, u32 mask, u32 data)
+{
+ u32 t;
+ int err;
+
+ err = rtl8366_smi_read_reg(smi, addr, &t);
+ if (err)
+ return err;
+
+ err = rtl8366_smi_write_reg(smi, addr, (t & ~mask) | data);
+ return err;
+
+}
+EXPORT_SYMBOL_GPL(rtl8366_smi_rmwr);
+
+static int rtl8366_reset(struct rtl8366_smi *smi)
+{
+ if (smi->hw_reset) {
+ smi->hw_reset(true);
+ msleep(RTL8366_SMI_HW_STOP_DELAY);
+ smi->hw_reset(false);
+ msleep(RTL8366_SMI_HW_START_DELAY);
+ return 0;
+ }
+
+ return smi->ops->reset_chip(smi);
+}
+
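+/*
+ * Check whether any port still references the given VLAN member config
+ * entry; *used is set to 1 if so, 0 otherwise.
+ */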
+static int rtl8366_mc_is_used(struct rtl8366_smi *smi, int mc_index, int *used)
+{
+ int err;
+ int i;
+
+ *used = 0;
+ for (i = 0; i < smi->num_ports; i++) {
+ int index = 0;
+
+ err = smi->ops->get_mc_index(smi, i, &index);
+ if (err)
+ return err;
+
+ if (mc_index == index) {
+ *used = 1;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int rtl8366_set_vlan(struct rtl8366_smi *smi, int vid, u32 member,
+ u32 untag, u32 fid)
+{
+ struct rtl8366_vlan_4k vlan4k;
+ int err;
+ int i;
+
+ /* Update the 4K table */
+ err = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (err)
+ return err;
+
+ vlan4k.member = member;
+ vlan4k.untag = untag;
+ vlan4k.fid = fid;
+ err = smi->ops->set_vlan_4k(smi, &vlan4k);
+ if (err)
+ return err;
+
+ /* Try to find an existing MC entry for this VID */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ struct rtl8366_vlan_mc vlanmc;
+
+ err = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ if (err)
+ return err;
+
+ if (vid == vlanmc.vid) {
+ /* update the MC entry */
+ vlanmc.member = member;
+ vlanmc.untag = untag;
+ vlanmc.fid = fid;
+
+ err = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int rtl8366_get_pvid(struct rtl8366_smi *smi, int port, int *val)
+{
+ struct rtl8366_vlan_mc vlanmc;
+ int err;
+ int index;
+
+ err = smi->ops->get_mc_index(smi, port, &index);
+ if (err)
+ return err;
+
+ err = smi->ops->get_vlan_mc(smi, index, &vlanmc);
+ if (err)
+ return err;
+
+ *val = vlanmc.vid;
+ return 0;
+}
+
+static int rtl8366_set_pvid(struct rtl8366_smi *smi, unsigned port,
+ unsigned vid)
+{
+ struct rtl8366_vlan_mc vlanmc;
+ struct rtl8366_vlan_4k vlan4k;
+ int err;
+ int i;
+
+ /* Try to find an existing MC entry for this VID */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ err = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ if (err)
+ return err;
+
+ if (vid == vlanmc.vid) {
+ err = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (err)
+ return err;
+
+ err = smi->ops->set_mc_index(smi, port, i);
+ return err;
+ }
+ }
+
+ /* We have no MC entry for this VID, try to find an empty one */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ err = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ if (err)
+ return err;
+
+ if (vlanmc.vid == 0 && vlanmc.member == 0) {
+ /* Update the entry from the 4K table */
+ err = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (err)
+ return err;
+
+ vlanmc.vid = vid;
+ vlanmc.member = vlan4k.member;
+ vlanmc.untag = vlan4k.untag;
+ vlanmc.fid = vlan4k.fid;
+ err = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (err)
+ return err;
+
+ err = smi->ops->set_mc_index(smi, port, i);
+ return err;
+ }
+ }
+
+ /* MC table is full, try to find an unused entry and replace it */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ int used;
+
+ err = rtl8366_mc_is_used(smi, i, &used);
+ if (err)
+ return err;
+
+ if (!used) {
+ /* Update the entry from the 4K table */
+ err = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (err)
+ return err;
+
+ vlanmc.vid = vid;
+ vlanmc.member = vlan4k.member;
+ vlanmc.untag = vlan4k.untag;
+ vlanmc.fid = vlan4k.fid;
+ err = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (err)
+ return err;
+
+ err = smi->ops->set_mc_index(smi, port, i);
+ return err;
+ }
+ }
+
+ dev_err(smi->parent,
+ "all VLAN member configurations are in use\n");
+
+ return -ENOSPC;
+}
+
+int rtl8366_enable_vlan(struct rtl8366_smi *smi, int enable)
+{
+ int err;
+
+ err = smi->ops->enable_vlan(smi, enable);
+ if (err)
+ return err;
+
+ smi->vlan_enabled = enable;
+
+ if (!enable) {
+ smi->vlan4k_enabled = 0;
+ err = smi->ops->enable_vlan4k(smi, enable);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
+
+static int rtl8366_enable_vlan4k(struct rtl8366_smi *smi, int enable)
+{
+ int err;
+
+ if (enable) {
+ err = smi->ops->enable_vlan(smi, enable);
+ if (err)
+ return err;
+
+ smi->vlan_enabled = enable;
+ }
+
+ err = smi->ops->enable_vlan4k(smi, enable);
+ if (err)
+ return err;
+
+ smi->vlan4k_enabled = enable;
+ return 0;
+}
+
+int rtl8366_enable_all_ports(struct rtl8366_smi *smi, int enable)
+{
+ int port;
+ int err;
+
+ for (port = 0; port < smi->num_ports; port++) {
+ err = smi->ops->enable_port(smi, port, enable);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_enable_all_ports);
+
+int rtl8366_reset_vlan(struct rtl8366_smi *smi)
+{
+ struct rtl8366_vlan_mc vlanmc;
+ int err;
+ int i;
+
+ rtl8366_enable_vlan(smi, 0);
+ rtl8366_enable_vlan4k(smi, 0);
+
+ /* clear VLAN member configurations */
+ vlanmc.vid = 0;
+ vlanmc.priority = 0;
+ vlanmc.member = 0;
+ vlanmc.untag = 0;
+ vlanmc.fid = 0;
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ err = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
+
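+/*
+ * Default VLAN layout: each port gets its own VLAN (VID = port number + 1)
+ * shared only with the CPU port, which is a member of all of them.
+ */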
+static int rtl8366_init_vlan(struct rtl8366_smi *smi)
+{
+ int port;
+ int err;
+
+ err = rtl8366_reset_vlan(smi);
+ if (err)
+ return err;
+
+ for (port = 0; port < smi->num_ports; port++) {
+ u32 mask;
+
+ if (port == smi->cpu_port)
+ mask = (1 << smi->num_ports) - 1;
+ else
+ mask = (1 << port) | (1 << smi->cpu_port);
+
+ err = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0);
+ if (err)
+ return err;
+
+ err = rtl8366_set_pvid(smi, port, (port + 1));
+ if (err)
+ return err;
+ }
+
+ return rtl8366_enable_vlan(smi, 1);
+}
+
+#ifdef CONFIG_RTL8366_SMI_DEBUG_FS
+int rtl8366_debugfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_debugfs_open);
+
+static ssize_t rtl8366_read_debugfs_vlan_mc(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtl8366_smi *smi = (struct rtl8366_smi *)file->private_data;
+ int i, len = 0;
+ char *buf = smi->buf;
+
+ len += snprintf(buf + len, sizeof(smi->buf) - len,
+ "%2s %6s %4s %6s %6s %3s\n",
+ "id", "vid","prio", "member", "untag", "fid");
+
+ for (i = 0; i < smi->num_vlan_mc; ++i) {
+ struct rtl8366_vlan_mc vlanmc;
+
+ smi->ops->get_vlan_mc(smi, i, &vlanmc);
+
+ len += snprintf(buf + len, sizeof(smi->buf) - len,
+ "%2d %6d %4d 0x%04x 0x%04x %3d\n",
+ i, vlanmc.vid, vlanmc.priority,
+ vlanmc.member, vlanmc.untag, vlanmc.fid);
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+#define RTL8366_VLAN4K_PAGE_SIZE 64
+#define RTL8366_VLAN4K_NUM_PAGES (4096 / RTL8366_VLAN4K_PAGE_SIZE)
+
+static ssize_t rtl8366_read_debugfs_vlan_4k(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtl8366_smi *smi = (struct rtl8366_smi *)file->private_data;
+ int i, len = 0;
+ int offset;
+ char *buf = smi->buf;
+
+ if (smi->dbg_vlan_4k_page >= RTL8366_VLAN4K_NUM_PAGES) {
+ len += snprintf(buf + len, sizeof(smi->buf) - len,
+ "invalid page: %u\n", smi->dbg_vlan_4k_page);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ }
+
+ len += snprintf(buf + len, sizeof(smi->buf) - len,
+ "%4s %6s %6s %3s\n",
+ "vid", "member", "untag", "fid");
+
+ offset = RTL8366_VLAN4K_PAGE_SIZE * smi->dbg_vlan_4k_page;
+ for (i = 0; i < RTL8366_VLAN4K_PAGE_SIZE; i++) {
+ struct rtl8366_vlan_4k vlan4k;
+
+ smi->ops->get_vlan_4k(smi, offset + i, &vlan4k);
+
+ len += snprintf(buf + len, sizeof(smi->buf) - len,
+ "%4d 0x%04x 0x%04x %3d\n",
+ vlan4k.vid, vlan4k.member,
+ vlan4k.untag, vlan4k.fid);
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t rtl8366_read_debugfs_pvid(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtl8366_smi *smi = (struct rtl8366_smi *)file->private_data;
+ char *buf = smi->buf;
+ int len = 0;
+ int i;
+
+ len += snprintf(buf + len, sizeof(smi->buf) - len, "%4s %4s\n",
+ "port", "pvid");
+
+ for (i = 0; i < smi->num_ports; i++) {
+ int pvid;
+ int err;
+
+ err = rtl8366_get_pvid(smi, i, &pvid);
+ if (err)
+ len += snprintf(buf + len, sizeof(smi->buf) - len,
+ "%4d error\n", i);
+ else
+ len += snprintf(buf + len, sizeof(smi->buf) - len,
+ "%4d %4d\n", i, pvid);
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t rtl8366_read_debugfs_reg(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtl8366_smi *smi = (struct rtl8366_smi *)file->private_data;
+ u32 t, reg = smi->dbg_reg;
+ int err, len = 0;
+ char *buf = smi->buf;
+
+ memset(buf, '\0', sizeof(smi->buf));
+
+ err = rtl8366_smi_read_reg(smi, reg, &t);
+ if (err) {
+ len += snprintf(buf, sizeof(smi->buf),
+ "Read failed (reg: 0x%04x)\n", reg);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ }
+
+ len += snprintf(buf, sizeof(smi->buf), "reg = 0x%04x, val = 0x%04x\n",
+ reg, t);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t rtl8366_write_debugfs_reg(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtl8366_smi *smi = (struct rtl8366_smi *)file->private_data;
+ unsigned long data;
+ u32 reg = smi->dbg_reg;
+ int err;
+ size_t len;
+ char *buf = smi->buf;
+
+ len = min(count, sizeof(smi->buf) - 1);
+ if (copy_from_user(buf, user_buf, len)) {
+ dev_err(smi->parent, "copy from user failed\n");
+ return -EFAULT;
+ }
+
+ buf[len] = '\0';
+ if (len > 0 && buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+
+ if (strict_strtoul(buf, 16, &data)) {
+ dev_err(smi->parent, "Invalid reg value %s\n", buf);
+ } else {
+ err = rtl8366_smi_write_reg(smi, reg, data);
+ if (err) {
+ dev_err(smi->parent,
+ "writing reg 0x%04x val 0x%04lx failed\n",
+ reg, data);
+ }
+ }
+
+ return count;
+}
+
+static ssize_t rtl8366_read_debugfs_mibs(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtl8366_smi *smi = file->private_data;
+ int i, j, len = 0;
+ char *buf = smi->buf;
+
+ len += snprintf(buf + len, sizeof(smi->buf) - len, "%-36s",
+ "Counter");
+
+ for (i = 0; i < smi->num_ports; i++) {
+ char port_buf[10];
+
+ snprintf(port_buf, sizeof(port_buf), "Port %d", i);
+ len += snprintf(buf + len, sizeof(smi->buf) - len, " %12s",
+ port_buf);
+ }
+ len += snprintf(buf + len, sizeof(smi->buf) - len, "\n");
+
+ for (i = 0; i < smi->num_mib_counters; i++) {
+ len += snprintf(buf + len, sizeof(smi->buf) - len, "%-36s ",
+ smi->mib_counters[i].name);
+ for (j = 0; j < smi->num_ports; j++) {
+ unsigned long long counter = 0;
+
+ if (!smi->ops->get_mib_counter(smi, i, j, &counter))
+ len += snprintf(buf + len,
+ sizeof(smi->buf) - len,
+ "%12llu ", counter);
+ else
+ len += snprintf(buf + len,
+ sizeof(smi->buf) - len,
+ "%12s ", "error");
+ }
+ len += snprintf(buf + len, sizeof(smi->buf) - len, "\n");
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_rtl8366_regs = {
+ .read = rtl8366_read_debugfs_reg,
+ .write = rtl8366_write_debugfs_reg,
+ .open = rtl8366_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static const struct file_operations fops_rtl8366_vlan_mc = {
+ .read = rtl8366_read_debugfs_vlan_mc,
+ .open = rtl8366_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static const struct file_operations fops_rtl8366_vlan_4k = {
+ .read = rtl8366_read_debugfs_vlan_4k,
+ .open = rtl8366_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static const struct file_operations fops_rtl8366_pvid = {
+ .read = rtl8366_read_debugfs_pvid,
+ .open = rtl8366_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static const struct file_operations fops_rtl8366_mibs = {
+ .read = rtl8366_read_debugfs_mibs,
+ .open = rtl8366_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static void rtl8366_debugfs_init(struct rtl8366_smi *smi)
+{
+ struct dentry *node;
+ struct dentry *root;
+
+ if (!smi->debugfs_root)
+ smi->debugfs_root = debugfs_create_dir(dev_name(smi->parent),
+ NULL);
+
+ if (!smi->debugfs_root) {
+ dev_err(smi->parent, "Unable to create debugfs dir\n");
+ return;
+ }
+ root = smi->debugfs_root;
+
+ node = debugfs_create_x16("reg", S_IRUGO | S_IWUSR, root,
+ &smi->dbg_reg);
+ if (!node) {
+ dev_err(smi->parent, "Creating debugfs file '%s' failed\n",
+ "reg");
+ return;
+ }
+
+ node = debugfs_create_file("val", S_IRUGO | S_IWUSR, root, smi,
+ &fops_rtl8366_regs);
+ if (!node) {
+ dev_err(smi->parent, "Creating debugfs file '%s' failed\n",
+ "val");
+ return;
+ }
+
+ node = debugfs_create_file("vlan_mc", S_IRUSR, root, smi,
+ &fops_rtl8366_vlan_mc);
+ if (!node) {
+ dev_err(smi->parent, "Creating debugfs file '%s' failed\n",
+ "vlan_mc");
+ return;
+ }
+
+ node = debugfs_create_u8("vlan_4k_page", S_IRUGO | S_IWUSR, root,
+ &smi->dbg_vlan_4k_page);
+ if (!node) {
+ dev_err(smi->parent, "Creating debugfs file '%s' failed\n",
+ "vlan_4k_page");
+ return;
+ }
+
+ node = debugfs_create_file("vlan_4k", S_IRUSR, root, smi,
+ &fops_rtl8366_vlan_4k);
+ if (!node) {
+ dev_err(smi->parent, "Creating debugfs file '%s' failed\n",
+ "vlan_4k");
+ return;
+ }
+
+ node = debugfs_create_file("pvid", S_IRUSR, root, smi,
+ &fops_rtl8366_pvid);
+ if (!node) {
+ dev_err(smi->parent, "Creating debugfs file '%s' failed\n",
+ "pvid");
+ return;
+ }
+
+ node = debugfs_create_file("mibs", S_IRUSR, smi->debugfs_root, smi,
+ &fops_rtl8366_mibs);
+ if (!node)
+ dev_err(smi->parent, "Creating debugfs file '%s' failed\n",
+ "mibs");
+}
+
+static void rtl8366_debugfs_remove(struct rtl8366_smi *smi)
+{
+ if (smi->debugfs_root) {
+ debugfs_remove_recursive(smi->debugfs_root);
+ smi->debugfs_root = NULL;
+ }
+}
+#else
+static inline void rtl8366_debugfs_init(struct rtl8366_smi *smi) {}
+static inline void rtl8366_debugfs_remove(struct rtl8366_smi *smi) {}
+#endif /* CONFIG_RTL8366_SMI_DEBUG_FS */
+
+static int rtl8366_smi_mii_init(struct rtl8366_smi *smi)
+{
+ int ret;
+ int i;
+
+ smi->mii_bus = mdiobus_alloc();
+ if (smi->mii_bus == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ smi->mii_bus->priv = (void *) smi;
+ smi->mii_bus->name = dev_name(smi->parent);
+ smi->mii_bus->read = smi->ops->mii_read;
+ smi->mii_bus->write = smi->ops->mii_write;
+ snprintf(smi->mii_bus->id, MII_BUS_ID_SIZE, "%s",
+ dev_name(smi->parent));
+ smi->mii_bus->parent = smi->parent;
+ smi->mii_bus->phy_mask = ~(0x1f);
+ smi->mii_bus->irq = smi->mii_irq;
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ smi->mii_irq[i] = PHY_POLL;
+
+ ret = mdiobus_register(smi->mii_bus);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+ err_free:
+ mdiobus_free(smi->mii_bus);
+ err:
+ return ret;
+}
+
+static void rtl8366_smi_mii_cleanup(struct rtl8366_smi *smi)
+{
+ mdiobus_unregister(smi->mii_bus);
+ mdiobus_free(smi->mii_bus);
+}
+
+int rtl8366_sw_reset_switch(struct switch_dev *dev)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ int err;
+
+ err = rtl8366_reset(smi);
+ if (err)
+ return err;
+
+ err = smi->ops->setup(smi);
+ if (err)
+ return err;
+
+ err = rtl8366_reset_vlan(smi);
+ if (err)
+ return err;
+
+ err = rtl8366_enable_vlan(smi, 1);
+ if (err)
+ return err;
+
+ return rtl8366_enable_all_ports(smi, 1);
+}
+EXPORT_SYMBOL_GPL(rtl8366_sw_reset_switch);
+
+int rtl8366_sw_get_port_pvid(struct switch_dev *dev, int port, int *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ return rtl8366_get_pvid(smi, port, val);
+}
+EXPORT_SYMBOL_GPL(rtl8366_sw_get_port_pvid);
+
+int rtl8366_sw_set_port_pvid(struct switch_dev *dev, int port, int val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ return rtl8366_set_pvid(smi, port, val);
+}
+EXPORT_SYMBOL_GPL(rtl8366_sw_set_port_pvid);
+
+int rtl8366_sw_get_port_mib(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ int i, len = 0;
+ unsigned long long counter = 0;
+ char *buf = smi->buf;
+
+ if (val->port_vlan >= smi->num_ports)
+ return -EINVAL;
+
+ len += snprintf(buf + len, sizeof(smi->buf) - len,
+ "Port %d MIB counters\n",
+ val->port_vlan);
+
+ for (i = 0; i < smi->num_mib_counters; ++i) {
+ len += snprintf(buf + len, sizeof(smi->buf) - len,
+ "%-36s: ", smi->mib_counters[i].name);
+ if (!smi->ops->get_mib_counter(smi, i, val->port_vlan,
+ &counter))
+ len += snprintf(buf + len, sizeof(smi->buf) - len,
+ "%llu\n", counter);
+ else
+ len += snprintf(buf + len, sizeof(smi->buf) - len,
+ "%s\n", "error");
+ }
+
+ val->value.s = buf;
+ val->len = len;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_sw_get_port_mib);
+
+int rtl8366_sw_get_vlan_info(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ int i;
+ u32 len = 0;
+ struct rtl8366_vlan_4k vlan4k;
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ char *buf = smi->buf;
+ int err;
+
+ if (!smi->ops->is_vlan_valid(smi, val->port_vlan))
+ return -EINVAL;
+
+ memset(buf, '\0', sizeof(smi->buf));
+
+ err = smi->ops->get_vlan_4k(smi, val->port_vlan, &vlan4k);
+ if (err)
+ return err;
+
+ len += snprintf(buf + len, sizeof(smi->buf) - len,
+ "VLAN %d: Ports: '", vlan4k.vid);
+
+ for (i = 0; i < smi->num_ports; i++) {
+ if (!(vlan4k.member & (1 << i)))
+ continue;
+
+ len += snprintf(buf + len, sizeof(smi->buf) - len, "%d%s", i,
+ (vlan4k.untag & (1 << i)) ? "" : "t");
+ }
+
+ len += snprintf(buf + len, sizeof(smi->buf) - len,
+ "', members=%04x, untag=%04x, fid=%u",
+ vlan4k.member, vlan4k.untag, vlan4k.fid);
+
+ val->value.s = buf;
+ val->len = len;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_sw_get_vlan_info);
+
+int rtl8366_sw_get_vlan_ports(struct switch_dev *dev, struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ struct switch_port *port;
+ struct rtl8366_vlan_4k vlan4k;
+ int i;
+
+ if (!smi->ops->is_vlan_valid(smi, val->port_vlan))
+ return -EINVAL;
+
+ smi->ops->get_vlan_4k(smi, val->port_vlan, &vlan4k);
+
+ port = &val->value.ports[0];
+ val->len = 0;
+ for (i = 0; i < smi->num_ports; i++) {
+ if (!(vlan4k.member & BIT(i)))
+ continue;
+
+ port->id = i;
+ port->flags = (vlan4k.untag & BIT(i)) ?
+ 0 : BIT(SWITCH_PORT_FLAG_TAGGED);
+ val->len++;
+ port++;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_sw_get_vlan_ports);
+
+int rtl8366_sw_set_vlan_ports(struct switch_dev *dev, struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ struct switch_port *port;
+ u32 member = 0;
+ u32 untag = 0;
+ int err;
+ int i;
+
+ if (!smi->ops->is_vlan_valid(smi, val->port_vlan))
+ return -EINVAL;
+
+ port = &val->value.ports[0];
+ for (i = 0; i < val->len; i++, port++) {
+ member |= BIT(port->id);
+
+ if (!(port->flags & BIT(SWITCH_PORT_FLAG_TAGGED)))
+ untag |= BIT(port->id);
+
+ /*
+ * To ensure that we have a valid MC entry for this VLAN,
+ * initialize the port VLAN ID here.
+ */
+ err = rtl8366_set_pvid(smi, port->id, val->port_vlan);
+ if (err < 0)
+ return err;
+ }
+
+ return rtl8366_set_vlan(smi, val->port_vlan, member, untag, 0);
+}
+EXPORT_SYMBOL_GPL(rtl8366_sw_set_vlan_ports);
+
+int rtl8366_sw_get_vlan_fid(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_vlan_4k vlan4k;
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ int err;
+
+ if (!smi->ops->is_vlan_valid(smi, val->port_vlan))
+ return -EINVAL;
+
+ err = smi->ops->get_vlan_4k(smi, val->port_vlan, &vlan4k);
+ if (err)
+ return err;
+
+ val->value.i = vlan4k.fid;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_sw_get_vlan_fid);
+
+int rtl8366_sw_set_vlan_fid(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_vlan_4k vlan4k;
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ int err;
+
+ if (!smi->ops->is_vlan_valid(smi, val->port_vlan))
+ return -EINVAL;
+
+ if (val->value.i < 0 || val->value.i > attr->max)
+ return -EINVAL;
+
+ err = smi->ops->get_vlan_4k(smi, val->port_vlan, &vlan4k);
+ if (err)
+ return err;
+
+ return rtl8366_set_vlan(smi, val->port_vlan,
+ vlan4k.member,
+ vlan4k.untag,
+ val->value.i);
+}
+EXPORT_SYMBOL_GPL(rtl8366_sw_set_vlan_fid);
+
+int rtl8366_sw_get_vlan_enable(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+
+ if (attr->ofs > 2)
+ return -EINVAL;
+
+ if (attr->ofs == 1)
+ val->value.i = smi->vlan_enabled;
+ else
+ val->value.i = smi->vlan4k_enabled;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_sw_get_vlan_enable);
+
+int rtl8366_sw_set_vlan_enable(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ int err;
+
+ if (attr->ofs > 2)
+ return -EINVAL;
+
+ if (attr->ofs == 1)
+ err = rtl8366_enable_vlan(smi, val->value.i);
+ else
+ err = rtl8366_enable_vlan4k(smi, val->value.i);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(rtl8366_sw_set_vlan_enable);
+
+struct rtl8366_smi *rtl8366_smi_alloc(struct device *parent)
+{
+ struct rtl8366_smi *smi;
+
+ BUG_ON(!parent);
+
+ smi = kzalloc(sizeof(*smi), GFP_KERNEL);
+ if (!smi) {
+ dev_err(parent, "no memory for private data\n");
+ return NULL;
+ }
+
+ smi->parent = parent;
+ return smi;
+}
+EXPORT_SYMBOL_GPL(rtl8366_smi_alloc);
+
+static int __rtl8366_smi_init(struct rtl8366_smi *smi, const char *name)
+{
+ int err;
+
+ err = gpio_request(smi->gpio_sda, name);
+ if (err) {
+ printk(KERN_ERR "rtl8366_smi: gpio_request failed for %u, err=%d\n",
+ smi->gpio_sda, err);
+ goto err_out;
+ }
+
+ err = gpio_request(smi->gpio_sck, name);
+ if (err) {
+ printk(KERN_ERR "rtl8366_smi: gpio_request failed for %u, err=%d\n",
+ smi->gpio_sck, err);
+ goto err_free_sda;
+ }
+
+ spin_lock_init(&smi->lock);
+
+ /* start the switch */
+ if (smi->hw_reset) {
+ smi->hw_reset(false);
+ msleep(RTL8366_SMI_HW_START_DELAY);
+ }
+
+ return 0;
+
+ err_free_sda:
+ gpio_free(smi->gpio_sda);
+ err_out:
+ return err;
+}
+
+static void __rtl8366_smi_cleanup(struct rtl8366_smi *smi)
+{
+ if (smi->hw_reset)
+ smi->hw_reset(true);
+
+ gpio_free(smi->gpio_sck);
+ gpio_free(smi->gpio_sda);
+}
+
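+/*
+ * Read the chip ID register (0x5c) over a temporary bit-banged SMI
+ * instance to distinguish the RTL8366S (0x6027) from the RTL8366RB (0x5937).
+ */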
+enum rtl8366_type rtl8366_smi_detect(struct rtl8366_platform_data *pdata)
+{
+ static struct rtl8366_smi smi;
+ enum rtl8366_type type = RTL8366_TYPE_UNKNOWN;
+ u32 reg = 0;
+
+ memset(&smi, 0, sizeof(smi));
+ smi.gpio_sda = pdata->gpio_sda;
+ smi.gpio_sck = pdata->gpio_sck;
+ smi.clk_delay = 10;
+ smi.cmd_read = 0xa9;
+ smi.cmd_write = 0xa8;
+
+ if (__rtl8366_smi_init(&smi, "rtl8366"))
+ goto out;
+
+ if (rtl8366_smi_read_reg(&smi, 0x5c, &reg))
+ goto cleanup;
+
+ switch(reg) {
+ case 0x6027:
+ printk("Found an RTL8366S switch\n");
+ type = RTL8366_TYPE_S;
+ break;
+ case 0x5937:
+ printk("Found an RTL8366RB switch\n");
+ type = RTL8366_TYPE_RB;
+ break;
+ default:
+ printk("Found an Unknown RTL8366 switch (id=0x%04x)\n", reg);
+ break;
+ }
+
+cleanup:
+ __rtl8366_smi_cleanup(&smi);
+out:
+ return type;
+}
+
+int rtl8366_smi_init(struct rtl8366_smi *smi)
+{
+ int err;
+
+ if (!smi->ops)
+ return -EINVAL;
+
+ err = __rtl8366_smi_init(smi, dev_name(smi->parent));
+ if (err)
+ goto err_out;
+
+ dev_info(smi->parent, "using GPIO pins %u (SDA) and %u (SCK)\n",
+ smi->gpio_sda, smi->gpio_sck);
+
+ err = smi->ops->detect(smi);
+ if (err) {
+ dev_err(smi->parent, "chip detection failed, err=%d\n", err);
+ goto err_free_sck;
+ }
+
+ err = rtl8366_reset(smi);
+ if (err)
+ goto err_free_sck;
+
+ err = smi->ops->setup(smi);
+ if (err) {
+ dev_err(smi->parent, "chip setup failed, err=%d\n", err);
+ goto err_free_sck;
+ }
+
+ err = rtl8366_init_vlan(smi);
+ if (err) {
+ dev_err(smi->parent, "VLAN initialization failed, err=%d\n",
+ err);
+ goto err_free_sck;
+ }
+
+ err = rtl8366_enable_all_ports(smi, 1);
+ if (err)
+ goto err_free_sck;
+
+ err = rtl8366_smi_mii_init(smi);
+ if (err)
+ goto err_free_sck;
+
+ rtl8366_debugfs_init(smi);
+
+ return 0;
+
+ err_free_sck:
+ __rtl8366_smi_cleanup(smi);
+ err_out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(rtl8366_smi_init);
+
+void rtl8366_smi_cleanup(struct rtl8366_smi *smi)
+{
+ rtl8366_debugfs_remove(smi);
+ rtl8366_smi_mii_cleanup(smi);
+ __rtl8366_smi_cleanup(smi);
+}
+EXPORT_SYMBOL_GPL(rtl8366_smi_cleanup);
+
+MODULE_DESCRIPTION("Realtek RTL8366 SMI interface driver");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/target/linux/generic/files/drivers/net/phy/rtl8366_smi.h b/target/linux/generic/files/drivers/net/phy/rtl8366_smi.h
new file mode 100644
index 000000000..700f83c1a
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/rtl8366_smi.h
@@ -0,0 +1,149 @@
+/*
+ * Realtek RTL8366 SMI interface driver defines
+ *
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _RTL8366_SMI_H
+#define _RTL8366_SMI_H
+
+#include <linux/phy.h>
+#include <linux/switch.h>
+
+struct rtl8366_smi_ops;
+struct rtl8366_vlan_ops;
+struct mii_bus;
+struct dentry;
+struct inode;
+struct file;
+
+struct rtl8366_mib_counter {
+ unsigned base;
+ unsigned offset;
+ unsigned length;
+ const char *name;
+};
+
+struct rtl8366_smi {
+ struct device *parent;
+ unsigned int gpio_sda;
+ unsigned int gpio_sck;
+ void (*hw_reset)(bool active);
+ unsigned int clk_delay; /* ns */
+ u8 cmd_read;
+ u8 cmd_write;
+ spinlock_t lock;
+ struct mii_bus *mii_bus;
+ int mii_irq[PHY_MAX_ADDR];
+ struct switch_dev sw_dev;
+
+ unsigned int cpu_port;
+ unsigned int num_ports;
+ unsigned int num_vlan_mc;
+ unsigned int num_mib_counters;
+ struct rtl8366_mib_counter *mib_counters;
+
+ struct rtl8366_smi_ops *ops;
+
+ int vlan_enabled;
+ int vlan4k_enabled;
+
+ char buf[4096];
+#ifdef CONFIG_RTL8366_SMI_DEBUG_FS
+ struct dentry *debugfs_root;
+ u16 dbg_reg;
+ u8 dbg_vlan_4k_page;
+#endif
+};
+
+struct rtl8366_vlan_mc {
+ u16 vid;
+ u16 untag;
+ u16 member;
+ u8 fid;
+ u8 priority;
+};
+
+struct rtl8366_vlan_4k {
+ u16 vid;
+ u16 untag;
+ u16 member;
+ u8 fid;
+};
+
+struct rtl8366_smi_ops {
+ int (*detect)(struct rtl8366_smi *smi);
+ int (*reset_chip)(struct rtl8366_smi *smi);
+ int (*setup)(struct rtl8366_smi *smi);
+
+ int (*mii_read)(struct mii_bus *bus, int addr, int reg);
+ int (*mii_write)(struct mii_bus *bus, int addr, int reg, u16 val);
+
+ int (*get_vlan_mc)(struct rtl8366_smi *smi, u32 index,
+ struct rtl8366_vlan_mc *vlanmc);
+ int (*set_vlan_mc)(struct rtl8366_smi *smi, u32 index,
+ const struct rtl8366_vlan_mc *vlanmc);
+ int (*get_vlan_4k)(struct rtl8366_smi *smi, u32 vid,
+ struct rtl8366_vlan_4k *vlan4k);
+ int (*set_vlan_4k)(struct rtl8366_smi *smi,
+ const struct rtl8366_vlan_4k *vlan4k);
+ int (*get_mc_index)(struct rtl8366_smi *smi, int port, int *val);
+ int (*set_mc_index)(struct rtl8366_smi *smi, int port, int index);
+ int (*get_mib_counter)(struct rtl8366_smi *smi, int counter,
+ int port, unsigned long long *val);
+ int (*is_vlan_valid)(struct rtl8366_smi *smi, unsigned vlan);
+ int (*enable_vlan)(struct rtl8366_smi *smi, int enable);
+ int (*enable_vlan4k)(struct rtl8366_smi *smi, int enable);
+ int (*enable_port)(struct rtl8366_smi *smi, int port, int enable);
+};
+
+struct rtl8366_smi *rtl8366_smi_alloc(struct device *parent);
+int rtl8366_smi_init(struct rtl8366_smi *smi);
+void rtl8366_smi_cleanup(struct rtl8366_smi *smi);
+int rtl8366_smi_write_reg(struct rtl8366_smi *smi, u32 addr, u32 data);
+int rtl8366_smi_write_reg_noack(struct rtl8366_smi *smi, u32 addr, u32 data);
+int rtl8366_smi_read_reg(struct rtl8366_smi *smi, u32 addr, u32 *data);
+int rtl8366_smi_rmwr(struct rtl8366_smi *smi, u32 addr, u32 mask, u32 data);
+
+int rtl8366_reset_vlan(struct rtl8366_smi *smi);
+int rtl8366_enable_vlan(struct rtl8366_smi *smi, int enable);
+int rtl8366_enable_all_ports(struct rtl8366_smi *smi, int enable);
+
+#ifdef CONFIG_RTL8366_SMI_DEBUG_FS
+int rtl8366_debugfs_open(struct inode *inode, struct file *file);
+#endif
+
+static inline struct rtl8366_smi *sw_to_rtl8366_smi(struct switch_dev *sw)
+{
+ return container_of(sw, struct rtl8366_smi, sw_dev);
+}
+
+int rtl8366_sw_reset_switch(struct switch_dev *dev);
+int rtl8366_sw_get_port_pvid(struct switch_dev *dev, int port, int *val);
+int rtl8366_sw_set_port_pvid(struct switch_dev *dev, int port, int val);
+int rtl8366_sw_get_port_mib(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val);
+int rtl8366_sw_get_vlan_info(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val);
+int rtl8366_sw_get_vlan_fid(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val);
+int rtl8366_sw_set_vlan_fid(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val);
+int rtl8366_sw_get_vlan_ports(struct switch_dev *dev, struct switch_val *val);
+int rtl8366_sw_set_vlan_ports(struct switch_dev *dev, struct switch_val *val);
+int rtl8366_sw_get_vlan_enable(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val);
+int rtl8366_sw_set_vlan_enable(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val);
+
+#endif /* _RTL8366_SMI_H */
diff --git a/target/linux/generic/files/drivers/net/phy/rtl8366rb.c b/target/linux/generic/files/drivers/net/phy/rtl8366rb.c
new file mode 100644
index 000000000..775949792
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/rtl8366rb.c
@@ -0,0 +1,1271 @@
+/*
+ * Platform driver for the Realtek RTL8366RB ethernet switch
+ *
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/rtl8366.h>
+
+#include "rtl8366_smi.h"
+
+#define RTL8366RB_DRIVER_DESC "Realtek RTL8366RB ethernet switch driver"
+#define RTL8366RB_DRIVER_VER "0.2.3"
+
+#define RTL8366RB_PHY_NO_MAX 4
+#define RTL8366RB_PHY_PAGE_MAX 7
+#define RTL8366RB_PHY_ADDR_MAX 31
+
+/* Switch Global Configuration register */
+#define RTL8366RB_SGCR 0x0000
+#define RTL8366RB_SGCR_EN_BC_STORM_CTRL BIT(0)
+#define RTL8366RB_SGCR_MAX_LENGTH(_x)		((_x) << 4)
+#define RTL8366RB_SGCR_MAX_LENGTH_MASK RTL8366RB_SGCR_MAX_LENGTH(0x3)
+#define RTL8366RB_SGCR_MAX_LENGTH_1522 RTL8366RB_SGCR_MAX_LENGTH(0x0)
+#define RTL8366RB_SGCR_MAX_LENGTH_1536 RTL8366RB_SGCR_MAX_LENGTH(0x1)
+#define RTL8366RB_SGCR_MAX_LENGTH_1552 RTL8366RB_SGCR_MAX_LENGTH(0x2)
+#define RTL8366RB_SGCR_MAX_LENGTH_9216 RTL8366RB_SGCR_MAX_LENGTH(0x3)
+#define RTL8366RB_SGCR_EN_VLAN BIT(13)
+#define RTL8366RB_SGCR_EN_VLAN_4KTB BIT(14)
+
+/* Port Enable Control register */
+#define RTL8366RB_PECR 0x0001
+
+/* Switch Security Control registers */
+#define RTL8366RB_SSCR0 0x0002
+#define RTL8366RB_SSCR1 0x0003
+#define RTL8366RB_SSCR2 0x0004
+#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0)
+
+#define RTL8366RB_RESET_CTRL_REG 0x0100
+#define RTL8366RB_CHIP_CTRL_RESET_HW 1
+#define RTL8366RB_CHIP_CTRL_RESET_SW (1 << 1)
+
+#define RTL8366RB_CHIP_VERSION_CTRL_REG 0x050A
+#define RTL8366RB_CHIP_VERSION_MASK 0xf
+#define RTL8366RB_CHIP_ID_REG 0x0509
+#define RTL8366RB_CHIP_ID_8366 0x5937
+
+/* PHY registers control */
+#define RTL8366RB_PHY_ACCESS_CTRL_REG 0x8000
+#define RTL8366RB_PHY_ACCESS_DATA_REG 0x8002
+
+#define RTL8366RB_PHY_CTRL_READ 1
+#define RTL8366RB_PHY_CTRL_WRITE 0
+
+#define RTL8366RB_PHY_REG_MASK 0x1f
+#define RTL8366RB_PHY_PAGE_OFFSET 5
+#define RTL8366RB_PHY_PAGE_MASK (0xf << 5)
+#define RTL8366RB_PHY_NO_OFFSET 9
+#define RTL8366RB_PHY_NO_MASK (0x1f << 9)
+
+#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
+
+/* LED control registers */
+#define RTL8366RB_LED_BLINKRATE_REG 0x0430
+#define RTL8366RB_LED_BLINKRATE_BIT 0
+#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
+
+#define RTL8366RB_LED_CTRL_REG 0x0431
+#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
+#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
+
+#define RTL8366RB_MIB_COUNT 33
+#define RTL8366RB_GLOBAL_MIB_COUNT 1
+#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
+#define RTL8366RB_MIB_COUNTER_BASE 0x1000
+#define RTL8366RB_MIB_CTRL_REG 0x13F0
+#define RTL8366RB_MIB_CTRL_USER_MASK 0x0FFC
+#define RTL8366RB_MIB_CTRL_BUSY_MASK BIT(0)
+#define RTL8366RB_MIB_CTRL_RESET_MASK BIT(1)
+#define RTL8366RB_MIB_CTRL_PORT_RESET(_p) BIT(2 + (_p))
+#define RTL8366RB_MIB_CTRL_GLOBAL_RESET BIT(11)
+
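+/* Each 16-bit register holds the 4-bit member config index of four ports */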
+#define RTL8366RB_PORT_VLAN_CTRL_BASE 0x0063
+#define RTL8366RB_PORT_VLAN_CTRL_REG(_p) \
+ (RTL8366RB_PORT_VLAN_CTRL_BASE + (_p) / 4)
+#define RTL8366RB_PORT_VLAN_CTRL_MASK 0xf
+#define RTL8366RB_PORT_VLAN_CTRL_SHIFT(_p) (4 * ((_p) % 4))
+
+
+#define RTL8366RB_VLAN_TABLE_READ_BASE 0x018C
+#define RTL8366RB_VLAN_TABLE_WRITE_BASE 0x0185
+
+
+#define RTL8366RB_TABLE_ACCESS_CTRL_REG 0x0180
+#define RTL8366RB_TABLE_VLAN_READ_CTRL 0x0E01
+#define RTL8366RB_TABLE_VLAN_WRITE_CTRL 0x0F01
+
+#define RTL8366RB_VLAN_MC_BASE(_x) (0x0020 + (_x) * 3)
+
+
+#define RTL8366RB_PORT_LINK_STATUS_BASE 0x0014
+#define RTL8366RB_PORT_STATUS_SPEED_MASK 0x0003
+#define RTL8366RB_PORT_STATUS_DUPLEX_MASK 0x0004
+#define RTL8366RB_PORT_STATUS_LINK_MASK 0x0010
+#define RTL8366RB_PORT_STATUS_TXPAUSE_MASK 0x0020
+#define RTL8366RB_PORT_STATUS_RXPAUSE_MASK 0x0040
+#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
+
+
+#define RTL8366RB_PORT_NUM_CPU 5
+#define RTL8366RB_NUM_PORTS 6
+#define RTL8366RB_NUM_VLANS 16
+#define RTL8366RB_NUM_LEDGROUPS 4
+#define RTL8366RB_NUM_VIDS 4096
+#define RTL8366RB_PRIORITYMAX 7
+#define RTL8366RB_FIDMAX 7
+
+
+#define RTL8366RB_PORT_1 (1 << 0) /* In userspace port 0 */
+#define RTL8366RB_PORT_2 (1 << 1) /* In userspace port 1 */
+#define RTL8366RB_PORT_3 (1 << 2) /* In userspace port 2 */
+#define RTL8366RB_PORT_4 (1 << 3) /* In userspace port 3 */
+#define RTL8366RB_PORT_5 (1 << 4) /* In userspace port 4 */
+
+#define RTL8366RB_PORT_CPU (1 << 5) /* CPU port */
+
+#define RTL8366RB_PORT_ALL (RTL8366RB_PORT_1 | \
+ RTL8366RB_PORT_2 | \
+ RTL8366RB_PORT_3 | \
+ RTL8366RB_PORT_4 | \
+ RTL8366RB_PORT_5 | \
+ RTL8366RB_PORT_CPU)
+
+#define RTL8366RB_PORT_ALL_BUT_CPU (RTL8366RB_PORT_1 | \
+ RTL8366RB_PORT_2 | \
+ RTL8366RB_PORT_3 | \
+ RTL8366RB_PORT_4 | \
+ RTL8366RB_PORT_5)
+
+#define RTL8366RB_PORT_ALL_EXTERNAL (RTL8366RB_PORT_1 | \
+ RTL8366RB_PORT_2 | \
+ RTL8366RB_PORT_3 | \
+ RTL8366RB_PORT_4)
+
+#define RTL8366RB_PORT_ALL_INTERNAL RTL8366RB_PORT_CPU
+
+#define RTL8366RB_VLAN_VID_MASK 0xfff
+#define RTL8366RB_VLAN_PRIORITY_SHIFT 12
+#define RTL8366RB_VLAN_PRIORITY_MASK 0x7
+#define RTL8366RB_VLAN_UNTAG_SHIFT 8
+#define RTL8366RB_VLAN_UNTAG_MASK 0xff
+#define RTL8366RB_VLAN_MEMBER_MASK 0xff
+#define RTL8366RB_VLAN_FID_MASK 0x7
+
+
+/* Port ingress bandwidth control */
+#define RTL8366RB_IB_BASE 0x0200
+#define RTL8366RB_IB_REG(pnum) (RTL8366RB_IB_BASE + pnum)
+#define RTL8366RB_IB_BDTH_MASK 0x3fff
+#define RTL8366RB_IB_PREIFG_OFFSET 14
+#define RTL8366RB_IB_PREIFG_MASK (1 << RTL8366RB_IB_PREIFG_OFFSET)
+
+/* Port egress bandwidth control */
+#define RTL8366RB_EB_BASE 0x02d1
+#define RTL8366RB_EB_REG(pnum) (RTL8366RB_EB_BASE + pnum)
+#define RTL8366RB_EB_BDTH_MASK 0x3fff
+#define RTL8366RB_EB_PREIFG_REG 0x02f8
+#define RTL8366RB_EB_PREIFG_OFFSET 9
+#define RTL8366RB_EB_PREIFG_MASK (1 << RTL8366RB_EB_PREIFG_OFFSET)
+
+#define RTL8366RB_BDTH_SW_MAX 1048512
+#define RTL8366RB_BDTH_UNIT 64
+#define RTL8366RB_BDTH_REG_DEFAULT 16383
+
+/* QOS */
+#define RTL8366RB_QOS_BIT 15
+#define RTL8366RB_QOS_MASK (1 << RTL8366RB_QOS_BIT)
+/* Include/Exclude Preamble and IFG (20 bytes). 0:Exclude, 1:Include. */
+#define RTL8366RB_QOS_DEFAULT_PREIFG 1
+
+
+static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
+ { 0, 0, 4, "IfInOctets" },
+ { 0, 4, 4, "EtherStatsOctets" },
+ { 0, 8, 2, "EtherStatsUnderSizePkts" },
+ { 0, 10, 2, "EtherFragments" },
+ { 0, 12, 2, "EtherStatsPkts64Octets" },
+ { 0, 14, 2, "EtherStatsPkts65to127Octets" },
+ { 0, 16, 2, "EtherStatsPkts128to255Octets" },
+ { 0, 18, 2, "EtherStatsPkts256to511Octets" },
+ { 0, 20, 2, "EtherStatsPkts512to1023Octets" },
+ { 0, 22, 2, "EtherStatsPkts1024to1518Octets" },
+ { 0, 24, 2, "EtherOversizeStats" },
+ { 0, 26, 2, "EtherStatsJabbers" },
+ { 0, 28, 2, "IfInUcastPkts" },
+ { 0, 30, 2, "EtherStatsMulticastPkts" },
+ { 0, 32, 2, "EtherStatsBroadcastPkts" },
+ { 0, 34, 2, "EtherStatsDropEvents" },
+ { 0, 36, 2, "Dot3StatsFCSErrors" },
+ { 0, 38, 2, "Dot3StatsSymbolErrors" },
+ { 0, 40, 2, "Dot3InPauseFrames" },
+ { 0, 42, 2, "Dot3ControlInUnknownOpcodes" },
+ { 0, 44, 4, "IfOutOctets" },
+ { 0, 48, 2, "Dot3StatsSingleCollisionFrames" },
+ { 0, 50, 2, "Dot3StatMultipleCollisionFrames" },
+ { 0, 52, 2, "Dot3sDeferredTransmissions" },
+ { 0, 54, 2, "Dot3StatsLateCollisions" },
+ { 0, 56, 2, "EtherStatsCollisions" },
+ { 0, 58, 2, "Dot3StatsExcessiveCollisions" },
+ { 0, 60, 2, "Dot3OutPauseFrames" },
+ { 0, 62, 2, "Dot1dBasePortDelayExceededDiscards" },
+ { 0, 64, 2, "Dot1dTpPortInDiscards" },
+ { 0, 66, 2, "IfOutUcastPkts" },
+ { 0, 68, 2, "IfOutMulticastPkts" },
+ { 0, 70, 2, "IfOutBroadcastPkts" },
+};
+
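+/*
+ * Register access helpers for the setup paths below; note that these
+ * macros return from the calling function on error.
+ */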
+#define REG_WR(_smi, _reg, _val) \
+ do { \
+ err = rtl8366_smi_write_reg(_smi, _reg, _val); \
+ if (err) \
+ return err; \
+ } while (0)
+
+#define REG_RMW(_smi, _reg, _mask, _val) \
+ do { \
+ err = rtl8366_smi_rmwr(_smi, _reg, _mask, _val); \
+ if (err) \
+ return err; \
+ } while (0)
+
+static int rtl8366rb_reset_chip(struct rtl8366_smi *smi)
+{
+ int timeout = 10;
+ u32 data;
+
+ rtl8366_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG,
+ RTL8366RB_CHIP_CTRL_RESET_HW);
+ do {
+ msleep(1);
+ if (rtl8366_smi_read_reg(smi, RTL8366RB_RESET_CTRL_REG, &data))
+ return -EIO;
+
+ if (!(data & RTL8366RB_CHIP_CTRL_RESET_HW))
+ break;
+ } while (--timeout);
+
+ if (!timeout) {
+ printk("Timeout waiting for the switch to reset\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int rtl8366rb_setup(struct rtl8366_smi *smi)
+{
+ int err;
+
+ /* set maximum packet length to 1536 bytes */
+ REG_RMW(smi, RTL8366RB_SGCR, RTL8366RB_SGCR_MAX_LENGTH_MASK,
+ RTL8366RB_SGCR_MAX_LENGTH_1536);
+
+ /* enable learning for all ports */
+ REG_WR(smi, RTL8366RB_SSCR0, 0);
+
+ /* enable auto ageing for all ports */
+ REG_WR(smi, RTL8366RB_SSCR1, 0);
+
+ /*
+ * discard VLAN tagged packets if the port is not a member of
+	 * the VLAN with which the packet is associated.
+ */
+ REG_WR(smi, RTL8366RB_VLAN_INGRESS_CTRL2_REG, RTL8366RB_PORT_ALL);
+
+ /* don't drop packets whose DA has not been learned */
+ REG_RMW(smi, RTL8366RB_SSCR2, RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0);
+
+ return 0;
+}
+
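+/* The internal PHYs are accessed indirectly through the switch registers */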
+static int rtl8366rb_read_phy_reg(struct rtl8366_smi *smi,
+ u32 phy_no, u32 page, u32 addr, u32 *data)
+{
+ u32 reg;
+ int ret;
+
+ if (phy_no > RTL8366RB_PHY_NO_MAX)
+ return -EINVAL;
+
+ if (page > RTL8366RB_PHY_PAGE_MAX)
+ return -EINVAL;
+
+ if (addr > RTL8366RB_PHY_ADDR_MAX)
+ return -EINVAL;
+
+ ret = rtl8366_smi_write_reg(smi, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_READ);
+ if (ret)
+ return ret;
+
+ reg = 0x8000 | (1 << (phy_no + RTL8366RB_PHY_NO_OFFSET)) |
+ ((page << RTL8366RB_PHY_PAGE_OFFSET) & RTL8366RB_PHY_PAGE_MASK) |
+ (addr & RTL8366RB_PHY_REG_MASK);
+
+ ret = rtl8366_smi_write_reg(smi, reg, 0);
+ if (ret)
+ return ret;
+
+ ret = rtl8366_smi_read_reg(smi, RTL8366RB_PHY_ACCESS_DATA_REG, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8366rb_write_phy_reg(struct rtl8366_smi *smi,
+ u32 phy_no, u32 page, u32 addr, u32 data)
+{
+ u32 reg;
+ int ret;
+
+ if (phy_no > RTL8366RB_PHY_NO_MAX)
+ return -EINVAL;
+
+ if (page > RTL8366RB_PHY_PAGE_MAX)
+ return -EINVAL;
+
+ if (addr > RTL8366RB_PHY_ADDR_MAX)
+ return -EINVAL;
+
+ ret = rtl8366_smi_write_reg(smi, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
+ if (ret)
+ return ret;
+
+ reg = 0x8000 | (1 << (phy_no + RTL8366RB_PHY_NO_OFFSET)) |
+ ((page << RTL8366RB_PHY_PAGE_OFFSET) & RTL8366RB_PHY_PAGE_MASK) |
+ (addr & RTL8366RB_PHY_REG_MASK);
+
+ ret = rtl8366_smi_write_reg(smi, reg, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8366rb_get_mib_counter(struct rtl8366_smi *smi, int counter,
+ int port, unsigned long long *val)
+{
+ int i;
+ int err;
+ u32 addr, data;
+ u64 mibvalue;
+
+ if (port > RTL8366RB_NUM_PORTS || counter >= RTL8366RB_MIB_COUNT)
+ return -EINVAL;
+
+ addr = RTL8366RB_MIB_COUNTER_BASE +
+ RTL8366RB_MIB_COUNTER_PORT_OFFSET * (port) +
+ rtl8366rb_mib_counters[counter].offset;
+
+ /*
+	 * Write the counter address first; the ASIC will then latch the
+	 * 64-bit counter value so that it can be read back below.
+	 */
+	data = 0; /* the written data is discarded by the ASIC */
+ err = rtl8366_smi_write_reg(smi, addr, data);
+ if (err)
+ return err;
+
+ /* read MIB control register */
+ err = rtl8366_smi_read_reg(smi, RTL8366RB_MIB_CTRL_REG, &data);
+ if (err)
+ return err;
+
+ if (data & RTL8366RB_MIB_CTRL_BUSY_MASK)
+ return -EBUSY;
+
+ if (data & RTL8366RB_MIB_CTRL_RESET_MASK)
+ return -EIO;
+
+ mibvalue = 0;
+ for (i = rtl8366rb_mib_counters[counter].length; i > 0; i--) {
+ err = rtl8366_smi_read_reg(smi, addr + (i - 1), &data);
+ if (err)
+ return err;
+
+ mibvalue = (mibvalue << 16) | (data & 0xFFFF);
+ }
+
+ *val = mibvalue;
+ return 0;
+}
+
+static int rtl8366rb_get_vlan_4k(struct rtl8366_smi *smi, u32 vid,
+ struct rtl8366_vlan_4k *vlan4k)
+{
+ u32 data[3];
+ int err;
+ int i;
+
+ memset(vlan4k, '\0', sizeof(struct rtl8366_vlan_4k));
+
+ if (vid >= RTL8366RB_NUM_VIDS)
+ return -EINVAL;
+
+ /* write VID */
+ err = rtl8366_smi_write_reg(smi, RTL8366RB_VLAN_TABLE_WRITE_BASE,
+ vid & RTL8366RB_VLAN_VID_MASK);
+ if (err)
+ return err;
+
+ /* write table access control word */
+ err = rtl8366_smi_write_reg(smi, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ RTL8366RB_TABLE_VLAN_READ_CTRL);
+ if (err)
+ return err;
+
+ for (i = 0; i < 3; i++) {
+ err = rtl8366_smi_read_reg(smi,
+ RTL8366RB_VLAN_TABLE_READ_BASE + i,
+ &data[i]);
+ if (err)
+ return err;
+ }
+
+ vlan4k->vid = vid;
+ vlan4k->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
+ RTL8366RB_VLAN_UNTAG_MASK;
+ vlan4k->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
+ vlan4k->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
+
+ return 0;
+}
+
+static int rtl8366rb_set_vlan_4k(struct rtl8366_smi *smi,
+ const struct rtl8366_vlan_4k *vlan4k)
+{
+ u32 data[3];
+ int err;
+ int i;
+
+ if (vlan4k->vid >= RTL8366RB_NUM_VIDS ||
+ vlan4k->member > RTL8366RB_VLAN_MEMBER_MASK ||
+ vlan4k->untag > RTL8366RB_VLAN_UNTAG_MASK ||
+ vlan4k->fid > RTL8366RB_FIDMAX)
+ return -EINVAL;
+
+ data[0] = vlan4k->vid & RTL8366RB_VLAN_VID_MASK;
+ data[1] = (vlan4k->member & RTL8366RB_VLAN_MEMBER_MASK) |
+ ((vlan4k->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
+ RTL8366RB_VLAN_UNTAG_SHIFT);
+ data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK;
+
+ for (i = 0; i < 3; i++) {
+ err = rtl8366_smi_write_reg(smi,
+ RTL8366RB_VLAN_TABLE_WRITE_BASE + i,
+ data[i]);
+ if (err)
+ return err;
+ }
+
+ /* write table access control word */
+ err = rtl8366_smi_write_reg(smi, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ RTL8366RB_TABLE_VLAN_WRITE_CTRL);
+
+ return err;
+}
+
+static int rtl8366rb_get_vlan_mc(struct rtl8366_smi *smi, u32 index,
+ struct rtl8366_vlan_mc *vlanmc)
+{
+ u32 data[3];
+ int err;
+ int i;
+
+ memset(vlanmc, '\0', sizeof(struct rtl8366_vlan_mc));
+
+ if (index >= RTL8366RB_NUM_VLANS)
+ return -EINVAL;
+
+ for (i = 0; i < 3; i++) {
+ err = rtl8366_smi_read_reg(smi,
+ RTL8366RB_VLAN_MC_BASE(index) + i,
+ &data[i]);
+ if (err)
+ return err;
+ }
+
+ vlanmc->vid = data[0] & RTL8366RB_VLAN_VID_MASK;
+ vlanmc->priority = (data[0] >> RTL8366RB_VLAN_PRIORITY_SHIFT) &
+ RTL8366RB_VLAN_PRIORITY_MASK;
+ vlanmc->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
+ RTL8366RB_VLAN_UNTAG_MASK;
+ vlanmc->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
+ vlanmc->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
+
+ return 0;
+}
+
+static int rtl8366rb_set_vlan_mc(struct rtl8366_smi *smi, u32 index,
+ const struct rtl8366_vlan_mc *vlanmc)
+{
+ u32 data[3];
+ int err;
+ int i;
+
+ if (index >= RTL8366RB_NUM_VLANS ||
+ vlanmc->vid >= RTL8366RB_NUM_VIDS ||
+ vlanmc->priority > RTL8366RB_PRIORITYMAX ||
+ vlanmc->member > RTL8366RB_VLAN_MEMBER_MASK ||
+ vlanmc->untag > RTL8366RB_VLAN_UNTAG_MASK ||
+ vlanmc->fid > RTL8366RB_FIDMAX)
+ return -EINVAL;
+
+ data[0] = (vlanmc->vid & RTL8366RB_VLAN_VID_MASK) |
+ ((vlanmc->priority & RTL8366RB_VLAN_PRIORITY_MASK) <<
+ RTL8366RB_VLAN_PRIORITY_SHIFT);
+ data[1] = (vlanmc->member & RTL8366RB_VLAN_MEMBER_MASK) |
+ ((vlanmc->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
+ RTL8366RB_VLAN_UNTAG_SHIFT);
+ data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK;
+
+ for (i = 0; i < 3; i++) {
+ err = rtl8366_smi_write_reg(smi,
+ RTL8366RB_VLAN_MC_BASE(index) + i,
+ data[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int rtl8366rb_get_mc_index(struct rtl8366_smi *smi, int port, int *val)
+{
+ u32 data;
+ int err;
+
+ if (port >= RTL8366RB_NUM_PORTS)
+ return -EINVAL;
+
+ err = rtl8366_smi_read_reg(smi, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ &data);
+ if (err)
+ return err;
+
+ *val = (data >> RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)) &
+ RTL8366RB_PORT_VLAN_CTRL_MASK;
+
+ return 0;
+
+}
+
+static int rtl8366rb_set_mc_index(struct rtl8366_smi *smi, int port, int index)
+{
+ if (port >= RTL8366RB_NUM_PORTS || index >= RTL8366RB_NUM_VLANS)
+ return -EINVAL;
+
+ return rtl8366_smi_rmwr(smi, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ RTL8366RB_PORT_VLAN_CTRL_MASK <<
+ RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
+ (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
+ RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
+}
+
+static int rtl8366rb_is_vlan_valid(struct rtl8366_smi *smi, unsigned vlan)
+{
+ unsigned max = RTL8366RB_NUM_VLANS;
+
+ if (smi->vlan4k_enabled)
+ max = RTL8366RB_NUM_VIDS - 1;
+
+ if (vlan == 0 || vlan >= max)
+ return 0;
+
+ return 1;
+}
+
+static int rtl8366rb_enable_vlan(struct rtl8366_smi *smi, int enable)
+{
+ return rtl8366_smi_rmwr(smi, RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
+ (enable) ? RTL8366RB_SGCR_EN_VLAN : 0);
+}
+
+static int rtl8366rb_enable_vlan4k(struct rtl8366_smi *smi, int enable)
+{
+ return rtl8366_smi_rmwr(smi, RTL8366RB_SGCR,
+ RTL8366RB_SGCR_EN_VLAN_4KTB,
+ (enable) ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
+}
+
+static int rtl8366rb_enable_port(struct rtl8366_smi *smi, int port, int enable)
+{
+ return rtl8366_smi_rmwr(smi, RTL8366RB_PECR, (1 << port),
+ (enable) ? 0 : (1 << port));
+}
+
+static int rtl8366rb_sw_reset_mibs(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+
+ return rtl8366_smi_rmwr(smi, RTL8366RB_MIB_CTRL_REG, 0,
+ RTL8366RB_MIB_CTRL_GLOBAL_RESET);
+}
+
+static int rtl8366rb_sw_get_blinkrate(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data;
+
+ rtl8366_smi_read_reg(smi, RTL8366RB_LED_BLINKRATE_REG, &data);
+
+ val->value.i = (data & (RTL8366RB_LED_BLINKRATE_MASK));
+
+ return 0;
+}
+
+static int rtl8366rb_sw_set_blinkrate(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+
+ if (val->value.i >= 6)
+ return -EINVAL;
+
+ return rtl8366_smi_rmwr(smi, RTL8366RB_LED_BLINKRATE_REG,
+ RTL8366RB_LED_BLINKRATE_MASK,
+ val->value.i);
+}
+
+static int rtl8366rb_sw_get_learning_enable(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data;
+
+ rtl8366_smi_read_reg(smi, RTL8366RB_SSCR0, &data);
+ val->value.i = !data;
+
+ return 0;
+}
+
+
+static int rtl8366rb_sw_set_learning_enable(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 portmask = 0;
+ int err = 0;
+
+ if (!val->value.i)
+ portmask = RTL8366RB_PORT_ALL;
+
+ /* set learning for all ports */
+ REG_WR(smi, RTL8366RB_SSCR0, portmask);
+
+ /* set auto ageing for all ports */
+ REG_WR(smi, RTL8366RB_SSCR1, portmask);
+
+ return 0;
+}
+
+static int rtl8366rb_sw_get_port_link(struct switch_dev *dev,
+ int port,
+ struct switch_port_link *link)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data = 0;
+ u32 speed;
+
+ if (port >= RTL8366RB_NUM_PORTS)
+ return -EINVAL;
+
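+	/*
+	 * Each link status register holds two ports; odd numbered ports
+	 * use the upper byte.
+	 */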
+ rtl8366_smi_read_reg(smi, RTL8366RB_PORT_LINK_STATUS_BASE + (port / 2),
+ &data);
+
+ if (port % 2)
+ data = data >> 8;
+
+ link->link = !!(data & RTL8366RB_PORT_STATUS_LINK_MASK);
+ if (!link->link)
+ return 0;
+
+ link->duplex = !!(data & RTL8366RB_PORT_STATUS_DUPLEX_MASK);
+ link->rx_flow = !!(data & RTL8366RB_PORT_STATUS_RXPAUSE_MASK);
+ link->tx_flow = !!(data & RTL8366RB_PORT_STATUS_TXPAUSE_MASK);
+ link->aneg = !!(data & RTL8366RB_PORT_STATUS_AN_MASK);
+
+ speed = (data & RTL8366RB_PORT_STATUS_SPEED_MASK);
+ switch (speed) {
+ case 0:
+ link->speed = SWITCH_PORT_SPEED_10;
+ break;
+ case 1:
+ link->speed = SWITCH_PORT_SPEED_100;
+ break;
+ case 2:
+ link->speed = SWITCH_PORT_SPEED_1000;
+ break;
+ default:
+ link->speed = SWITCH_PORT_SPEED_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static int rtl8366rb_sw_set_port_led(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data;
+ u32 mask;
+ u32 reg;
+
+ if (val->port_vlan >= RTL8366RB_NUM_PORTS)
+ return -EINVAL;
+
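+	/* the CPU port LED mode is kept in the upper nibble of the blinkrate register */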
+ if (val->port_vlan == RTL8366RB_PORT_NUM_CPU) {
+ reg = RTL8366RB_LED_BLINKRATE_REG;
+ mask = 0xF << 4;
+ data = val->value.i << 4;
+ } else {
+ reg = RTL8366RB_LED_CTRL_REG;
+		mask = 0xF << (val->port_vlan * 4);
+ data = val->value.i << (val->port_vlan * 4);
+ }
+
+ return rtl8366_smi_rmwr(smi, reg, mask, data);
+}
+
+static int rtl8366rb_sw_get_port_led(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data = 0;
+
+ if (val->port_vlan >= RTL8366RB_NUM_LEDGROUPS)
+ return -EINVAL;
+
+ rtl8366_smi_read_reg(smi, RTL8366RB_LED_CTRL_REG, &data);
+ val->value.i = (data >> (val->port_vlan * 4)) & 0x000F;
+
+ return 0;
+}
+
+static int rtl8366rb_sw_set_port_disable(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 mask, data;
+
+ if (val->port_vlan >= RTL8366RB_NUM_PORTS)
+ return -EINVAL;
+
+	mask = 1 << val->port_vlan;
+ if (val->value.i)
+ data = mask;
+ else
+ data = 0;
+
+ return rtl8366_smi_rmwr(smi, RTL8366RB_PECR, mask, data);
+}
+
+static int rtl8366rb_sw_get_port_disable(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data;
+
+ if (val->port_vlan >= RTL8366RB_NUM_PORTS)
+ return -EINVAL;
+
+ rtl8366_smi_read_reg(smi, RTL8366RB_PECR, &data);
+ if (data & (1 << val->port_vlan))
+ val->value.i = 1;
+ else
+ val->value.i = 0;
+
+ return 0;
+}
+
+static int rtl8366rb_sw_set_port_rate_in(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+
+ if (val->port_vlan >= RTL8366RB_NUM_PORTS)
+ return -EINVAL;
+
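+	/* convert the requested rate from kbps into hardware bandwidth units */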
+ if (val->value.i > 0 && val->value.i < RTL8366RB_BDTH_SW_MAX)
+ val->value.i = (val->value.i - 1) / RTL8366RB_BDTH_UNIT;
+ else
+ val->value.i = RTL8366RB_BDTH_REG_DEFAULT;
+
+ return rtl8366_smi_rmwr(smi, RTL8366RB_IB_REG(val->port_vlan),
+ RTL8366RB_IB_BDTH_MASK | RTL8366RB_IB_PREIFG_MASK,
+ val->value.i |
+ (RTL8366RB_QOS_DEFAULT_PREIFG << RTL8366RB_IB_PREIFG_OFFSET));
+}
+
+static int rtl8366rb_sw_get_port_rate_in(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data;
+
+ if (val->port_vlan >= RTL8366RB_NUM_PORTS)
+ return -EINVAL;
+
+ rtl8366_smi_read_reg(smi, RTL8366RB_IB_REG(val->port_vlan), &data);
+ data &= RTL8366RB_IB_BDTH_MASK;
+ if (data < RTL8366RB_IB_BDTH_MASK)
+ data += 1;
+
+ val->value.i = (int)data * RTL8366RB_BDTH_UNIT;
+
+ return 0;
+}
+
+static int rtl8366rb_sw_set_port_rate_out(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+
+ if (val->port_vlan >= RTL8366RB_NUM_PORTS)
+ return -EINVAL;
+
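+	/* apply the default PREIFG setting used for egress bandwidth control */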
+ rtl8366_smi_rmwr(smi, RTL8366RB_EB_PREIFG_REG,
+ RTL8366RB_EB_PREIFG_MASK,
+ (RTL8366RB_QOS_DEFAULT_PREIFG << RTL8366RB_EB_PREIFG_OFFSET));
+
+ if (val->value.i > 0 && val->value.i < RTL8366RB_BDTH_SW_MAX)
+ val->value.i = (val->value.i - 1) / RTL8366RB_BDTH_UNIT;
+ else
+ val->value.i = RTL8366RB_BDTH_REG_DEFAULT;
+
+ return rtl8366_smi_rmwr(smi, RTL8366RB_EB_REG(val->port_vlan),
+				RTL8366RB_EB_BDTH_MASK, val->value.i);
+}
+
+static int rtl8366rb_sw_get_port_rate_out(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data;
+
+ if (val->port_vlan >= RTL8366RB_NUM_PORTS)
+ return -EINVAL;
+
+ rtl8366_smi_read_reg(smi, RTL8366RB_EB_REG(val->port_vlan), &data);
+ data &= RTL8366RB_EB_BDTH_MASK;
+ if (data < RTL8366RB_EB_BDTH_MASK)
+ data += 1;
+
+ val->value.i = (int)data * RTL8366RB_BDTH_UNIT;
+
+ return 0;
+}
+
+static int rtl8366rb_sw_set_qos_enable(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data;
+
+ if (val->value.i)
+ data = RTL8366RB_QOS_MASK;
+ else
+ data = 0;
+
+ return rtl8366_smi_rmwr(smi, RTL8366RB_SGCR, RTL8366RB_QOS_MASK, data);
+}
+
+static int rtl8366rb_sw_get_qos_enable(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data;
+
+ rtl8366_smi_read_reg(smi, RTL8366RB_SGCR, &data);
+ if (data & RTL8366RB_QOS_MASK)
+ val->value.i = 1;
+ else
+ val->value.i = 0;
+
+ return 0;
+}
+
+static int rtl8366rb_sw_reset_port_mibs(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+
+ if (val->port_vlan >= RTL8366RB_NUM_PORTS)
+ return -EINVAL;
+
+ return rtl8366_smi_rmwr(smi, RTL8366RB_MIB_CTRL_REG, 0,
+ RTL8366RB_MIB_CTRL_PORT_RESET(val->port_vlan));
+}
+
+static struct switch_attr rtl8366rb_globals[] = {
+ {
+ .type = SWITCH_TYPE_INT,
+ .name = "enable_learning",
+ .description = "Enable learning, enable aging",
+ .set = rtl8366rb_sw_set_learning_enable,
+ .get = rtl8366rb_sw_get_learning_enable,
+ .max = 1
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "enable_vlan",
+ .description = "Enable VLAN mode",
+ .set = rtl8366_sw_set_vlan_enable,
+ .get = rtl8366_sw_get_vlan_enable,
+ .max = 1,
+ .ofs = 1
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "enable_vlan4k",
+ .description = "Enable VLAN 4K mode",
+ .set = rtl8366_sw_set_vlan_enable,
+ .get = rtl8366_sw_get_vlan_enable,
+ .max = 1,
+ .ofs = 2
+ }, {
+ .type = SWITCH_TYPE_NOVAL,
+ .name = "reset_mibs",
+ .description = "Reset all MIB counters",
+ .set = rtl8366rb_sw_reset_mibs,
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "blinkrate",
+ .description = "Get/Set LED blinking rate (0 = 43ms, 1 = 84ms,"
+ " 2 = 120ms, 3 = 170ms, 4 = 340ms, 5 = 670ms)",
+ .set = rtl8366rb_sw_set_blinkrate,
+ .get = rtl8366rb_sw_get_blinkrate,
+ .max = 5
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "enable_qos",
+ .description = "Enable QOS",
+ .set = rtl8366rb_sw_set_qos_enable,
+ .get = rtl8366rb_sw_get_qos_enable,
+ .max = 1
+ },
+};
+
+static struct switch_attr rtl8366rb_port[] = {
+ {
+ .type = SWITCH_TYPE_NOVAL,
+ .name = "reset_mib",
+ .description = "Reset single port MIB counters",
+ .set = rtl8366rb_sw_reset_port_mibs,
+ }, {
+ .type = SWITCH_TYPE_STRING,
+ .name = "mib",
+ .description = "Get MIB counters for port",
+ .max = 33,
+ .set = NULL,
+ .get = rtl8366_sw_get_port_mib,
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "led",
+ .description = "Get/Set port group (0 - 3) led mode (0 - 15)",
+ .max = 15,
+ .set = rtl8366rb_sw_set_port_led,
+ .get = rtl8366rb_sw_get_port_led,
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "disable",
+ .description = "Get/Set port state (enabled or disabled)",
+ .max = 1,
+ .set = rtl8366rb_sw_set_port_disable,
+ .get = rtl8366rb_sw_get_port_disable,
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "rate_in",
+ .description = "Get/Set port ingress (incoming) bandwidth limit in kbps",
+ .max = RTL8366RB_BDTH_SW_MAX,
+ .set = rtl8366rb_sw_set_port_rate_in,
+ .get = rtl8366rb_sw_get_port_rate_in,
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "rate_out",
+ .description = "Get/Set port egress (outgoing) bandwidth limit in kbps",
+ .max = RTL8366RB_BDTH_SW_MAX,
+ .set = rtl8366rb_sw_set_port_rate_out,
+ .get = rtl8366rb_sw_get_port_rate_out,
+ },
+};
+
+static struct switch_attr rtl8366rb_vlan[] = {
+ {
+ .type = SWITCH_TYPE_STRING,
+ .name = "info",
+ .description = "Get vlan information",
+ .max = 1,
+ .set = NULL,
+ .get = rtl8366_sw_get_vlan_info,
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "fid",
+ .description = "Get/Set vlan FID",
+ .max = RTL8366RB_FIDMAX,
+ .set = rtl8366_sw_set_vlan_fid,
+ .get = rtl8366_sw_get_vlan_fid,
+ },
+};
+
+static const struct switch_dev_ops rtl8366_ops = {
+ .attr_global = {
+ .attr = rtl8366rb_globals,
+ .n_attr = ARRAY_SIZE(rtl8366rb_globals),
+ },
+ .attr_port = {
+ .attr = rtl8366rb_port,
+ .n_attr = ARRAY_SIZE(rtl8366rb_port),
+ },
+ .attr_vlan = {
+ .attr = rtl8366rb_vlan,
+ .n_attr = ARRAY_SIZE(rtl8366rb_vlan),
+ },
+
+ .get_vlan_ports = rtl8366_sw_get_vlan_ports,
+ .set_vlan_ports = rtl8366_sw_set_vlan_ports,
+ .get_port_pvid = rtl8366_sw_get_port_pvid,
+ .set_port_pvid = rtl8366_sw_set_port_pvid,
+ .reset_switch = rtl8366_sw_reset_switch,
+ .get_port_link = rtl8366rb_sw_get_port_link,
+};
+
+static int rtl8366rb_switch_init(struct rtl8366_smi *smi)
+{
+ struct switch_dev *dev = &smi->sw_dev;
+ int err;
+
+ dev->name = "RTL8366RB";
+ dev->cpu_port = RTL8366RB_PORT_NUM_CPU;
+ dev->ports = RTL8366RB_NUM_PORTS;
+ dev->vlans = RTL8366RB_NUM_VIDS;
+ dev->ops = &rtl8366_ops;
+ dev->alias = dev_name(smi->parent);
+
+ err = register_switch(dev, NULL);
+ if (err)
+ dev_err(smi->parent, "switch registration failed\n");
+
+ return err;
+}
+
+static void rtl8366rb_switch_cleanup(struct rtl8366_smi *smi)
+{
+ unregister_switch(&smi->sw_dev);
+}
+
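+/* mii_bus read callback; on error return 0xffff, as a read of an absent PHY would */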
+static int rtl8366rb_mii_read(struct mii_bus *bus, int addr, int reg)
+{
+ struct rtl8366_smi *smi = bus->priv;
+ u32 val = 0;
+ int err;
+
+ err = rtl8366rb_read_phy_reg(smi, addr, 0, reg, &val);
+ if (err)
+ return 0xffff;
+
+ return val;
+}
+
+static int rtl8366rb_mii_write(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+ struct rtl8366_smi *smi = bus->priv;
+ u32 t;
+ int err;
+
+ err = rtl8366rb_write_phy_reg(smi, addr, 0, reg, val);
+ /* flush write */
+ (void) rtl8366rb_read_phy_reg(smi, addr, 0, reg, &t);
+
+ return err;
+}
+
+static int rtl8366rb_detect(struct rtl8366_smi *smi)
+{
+ u32 chip_id = 0;
+ u32 chip_ver = 0;
+ int ret;
+
+ ret = rtl8366_smi_read_reg(smi, RTL8366RB_CHIP_ID_REG, &chip_id);
+ if (ret) {
+ dev_err(smi->parent, "unable to read chip id\n");
+ return ret;
+ }
+
+ switch (chip_id) {
+ case RTL8366RB_CHIP_ID_8366:
+ break;
+ default:
+ dev_err(smi->parent, "unknown chip id (%04x)\n", chip_id);
+ return -ENODEV;
+ }
+
+ ret = rtl8366_smi_read_reg(smi, RTL8366RB_CHIP_VERSION_CTRL_REG,
+ &chip_ver);
+ if (ret) {
+ dev_err(smi->parent, "unable to read chip version\n");
+ return ret;
+ }
+
+ dev_info(smi->parent, "RTL%04x ver. %u chip found\n",
+ chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK);
+
+ return 0;
+}
+
+static struct rtl8366_smi_ops rtl8366rb_smi_ops = {
+ .detect = rtl8366rb_detect,
+ .reset_chip = rtl8366rb_reset_chip,
+ .setup = rtl8366rb_setup,
+
+ .mii_read = rtl8366rb_mii_read,
+ .mii_write = rtl8366rb_mii_write,
+
+ .get_vlan_mc = rtl8366rb_get_vlan_mc,
+ .set_vlan_mc = rtl8366rb_set_vlan_mc,
+ .get_vlan_4k = rtl8366rb_get_vlan_4k,
+ .set_vlan_4k = rtl8366rb_set_vlan_4k,
+ .get_mc_index = rtl8366rb_get_mc_index,
+ .set_mc_index = rtl8366rb_set_mc_index,
+ .get_mib_counter = rtl8366rb_get_mib_counter,
+ .is_vlan_valid = rtl8366rb_is_vlan_valid,
+ .enable_vlan = rtl8366rb_enable_vlan,
+ .enable_vlan4k = rtl8366rb_enable_vlan4k,
+ .enable_port = rtl8366rb_enable_port,
+};
+
+static int __devinit rtl8366rb_probe(struct platform_device *pdev)
+{
+ static int rtl8366_smi_version_printed;
+ struct rtl8366_platform_data *pdata;
+ struct rtl8366_smi *smi;
+ int err;
+
+ if (!rtl8366_smi_version_printed++)
+ printk(KERN_NOTICE RTL8366RB_DRIVER_DESC
+ " version " RTL8366RB_DRIVER_VER"\n");
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data specified\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ smi = rtl8366_smi_alloc(&pdev->dev);
+ if (!smi) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ smi->gpio_sda = pdata->gpio_sda;
+ smi->gpio_sck = pdata->gpio_sck;
+ smi->hw_reset = pdata->hw_reset;
+
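+	/* SMI bus timing and command opcodes used by this chip family */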
+ smi->clk_delay = 10;
+ smi->cmd_read = 0xa9;
+ smi->cmd_write = 0xa8;
+ smi->ops = &rtl8366rb_smi_ops;
+ smi->cpu_port = RTL8366RB_PORT_NUM_CPU;
+ smi->num_ports = RTL8366RB_NUM_PORTS;
+ smi->num_vlan_mc = RTL8366RB_NUM_VLANS;
+ smi->mib_counters = rtl8366rb_mib_counters;
+ smi->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
+
+ err = rtl8366_smi_init(smi);
+ if (err)
+ goto err_free_smi;
+
+ platform_set_drvdata(pdev, smi);
+
+ err = rtl8366rb_switch_init(smi);
+ if (err)
+ goto err_clear_drvdata;
+
+ return 0;
+
+ err_clear_drvdata:
+ platform_set_drvdata(pdev, NULL);
+ rtl8366_smi_cleanup(smi);
+ err_free_smi:
+ kfree(smi);
+ err_out:
+ return err;
+}
+
+static int __devexit rtl8366rb_remove(struct platform_device *pdev)
+{
+ struct rtl8366_smi *smi = platform_get_drvdata(pdev);
+
+ if (smi) {
+ rtl8366rb_switch_cleanup(smi);
+ platform_set_drvdata(pdev, NULL);
+ rtl8366_smi_cleanup(smi);
+ kfree(smi);
+ }
+
+ return 0;
+}
+
+static struct platform_driver rtl8366rb_driver = {
+ .driver = {
+ .name = RTL8366RB_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = rtl8366rb_probe,
+ .remove = __devexit_p(rtl8366rb_remove),
+};
+
+static int __init rtl8366rb_module_init(void)
+{
+ return platform_driver_register(&rtl8366rb_driver);
+}
+module_init(rtl8366rb_module_init);
+
+static void __exit rtl8366rb_module_exit(void)
+{
+ platform_driver_unregister(&rtl8366rb_driver);
+}
+module_exit(rtl8366rb_module_exit);
+
+MODULE_DESCRIPTION(RTL8366RB_DRIVER_DESC);
+MODULE_VERSION(RTL8366RB_DRIVER_VER);
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_AUTHOR("Antti Seppälä <a.seppala@gmail.com>");
+MODULE_AUTHOR("Roman Yeryomin <roman@advem.lv>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" RTL8366RB_DRIVER_NAME);
diff --git a/target/linux/generic/files/drivers/net/phy/rtl8366s.c b/target/linux/generic/files/drivers/net/phy/rtl8366s.c
new file mode 100644
index 000000000..77427d6c9
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/rtl8366s.c
@@ -0,0 +1,1150 @@
+/*
+ * Platform driver for the Realtek RTL8366S ethernet switch
+ *
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/rtl8366.h>
+
+#include "rtl8366_smi.h"
+
+#define RTL8366S_DRIVER_DESC "Realtek RTL8366S ethernet switch driver"
+#define RTL8366S_DRIVER_VER "0.2.2"
+
+#define RTL8366S_PHY_NO_MAX 4
+#define RTL8366S_PHY_PAGE_MAX 7
+#define RTL8366S_PHY_ADDR_MAX 31
+
+/* Switch Global Configuration register */
+#define RTL8366S_SGCR 0x0000
+#define RTL8366S_SGCR_EN_BC_STORM_CTRL BIT(0)
+#define RTL8366S_SGCR_MAX_LENGTH(_x) (_x << 4)
+#define RTL8366S_SGCR_MAX_LENGTH_MASK RTL8366S_SGCR_MAX_LENGTH(0x3)
+#define RTL8366S_SGCR_MAX_LENGTH_1522 RTL8366S_SGCR_MAX_LENGTH(0x0)
+#define RTL8366S_SGCR_MAX_LENGTH_1536 RTL8366S_SGCR_MAX_LENGTH(0x1)
+#define RTL8366S_SGCR_MAX_LENGTH_1552 RTL8366S_SGCR_MAX_LENGTH(0x2)
+#define RTL8366S_SGCR_MAX_LENGTH_16000 RTL8366S_SGCR_MAX_LENGTH(0x3)
+#define RTL8366S_SGCR_EN_VLAN BIT(13)
+
+/* Port Enable Control register */
+#define RTL8366S_PECR 0x0001
+
+/* Switch Security Control registers */
+#define RTL8366S_SSCR0 0x0002
+#define RTL8366S_SSCR1 0x0003
+#define RTL8366S_SSCR2 0x0004
+#define RTL8366S_SSCR2_DROP_UNKNOWN_DA BIT(0)
+
+#define RTL8366S_RESET_CTRL_REG 0x0100
+#define RTL8366S_CHIP_CTRL_RESET_HW 1
+#define RTL8366S_CHIP_CTRL_RESET_SW (1 << 1)
+
+#define RTL8366S_CHIP_VERSION_CTRL_REG 0x0104
+#define RTL8366S_CHIP_VERSION_MASK 0xf
+#define RTL8366S_CHIP_ID_REG 0x0105
+#define RTL8366S_CHIP_ID_8366 0x8366
+
+/* PHY registers control */
+#define RTL8366S_PHY_ACCESS_CTRL_REG 0x8028
+#define RTL8366S_PHY_ACCESS_DATA_REG 0x8029
+
+#define RTL8366S_PHY_CTRL_READ 1
+#define RTL8366S_PHY_CTRL_WRITE 0
+
+#define RTL8366S_PHY_REG_MASK 0x1f
+#define RTL8366S_PHY_PAGE_OFFSET 5
+#define RTL8366S_PHY_PAGE_MASK (0x7 << 5)
+#define RTL8366S_PHY_NO_OFFSET 9
+#define RTL8366S_PHY_NO_MASK (0x1f << 9)
+
+/* LED control registers */
+#define RTL8366S_LED_BLINKRATE_REG 0x0420
+#define RTL8366S_LED_BLINKRATE_BIT 0
+#define RTL8366S_LED_BLINKRATE_MASK 0x0007
+
+#define RTL8366S_LED_CTRL_REG 0x0421
+#define RTL8366S_LED_0_1_CTRL_REG 0x0422
+#define RTL8366S_LED_2_3_CTRL_REG 0x0423
+
+#define RTL8366S_MIB_COUNT 33
+#define RTL8366S_GLOBAL_MIB_COUNT 1
+#define RTL8366S_MIB_COUNTER_PORT_OFFSET 0x0040
+#define RTL8366S_MIB_COUNTER_BASE 0x1000
+#define RTL8366S_MIB_COUNTER_PORT_OFFSET2 0x0008
+#define RTL8366S_MIB_COUNTER_BASE2 0x1180
+#define RTL8366S_MIB_CTRL_REG 0x11F0
+#define RTL8366S_MIB_CTRL_USER_MASK 0x01FF
+#define RTL8366S_MIB_CTRL_BUSY_MASK 0x0001
+#define RTL8366S_MIB_CTRL_RESET_MASK 0x0002
+
+#define RTL8366S_MIB_CTRL_GLOBAL_RESET_MASK 0x0004
+#define RTL8366S_MIB_CTRL_PORT_RESET_BIT 0x0003
+#define RTL8366S_MIB_CTRL_PORT_RESET_MASK 0x01FC
+
+
+#define RTL8366S_PORT_VLAN_CTRL_BASE 0x0058
+#define RTL8366S_PORT_VLAN_CTRL_REG(_p) \
+ (RTL8366S_PORT_VLAN_CTRL_BASE + (_p) / 4)
+#define RTL8366S_PORT_VLAN_CTRL_MASK 0xf
+#define RTL8366S_PORT_VLAN_CTRL_SHIFT(_p) (4 * ((_p) % 4))
+
+
+#define RTL8366S_VLAN_TABLE_READ_BASE 0x018B
+#define RTL8366S_VLAN_TABLE_WRITE_BASE 0x0185
+
+#define RTL8366S_VLAN_TB_CTRL_REG 0x010F
+
+#define RTL8366S_TABLE_ACCESS_CTRL_REG 0x0180
+#define RTL8366S_TABLE_VLAN_READ_CTRL 0x0E01
+#define RTL8366S_TABLE_VLAN_WRITE_CTRL 0x0F01
+
+#define RTL8366S_VLAN_MC_BASE(_x) (0x0016 + (_x) * 2)
+
+#define RTL8366S_VLAN_MEMBERINGRESS_REG 0x0379
+
+#define RTL8366S_PORT_LINK_STATUS_BASE 0x0060
+#define RTL8366S_PORT_STATUS_SPEED_MASK 0x0003
+#define RTL8366S_PORT_STATUS_DUPLEX_MASK 0x0004
+#define RTL8366S_PORT_STATUS_LINK_MASK 0x0010
+#define RTL8366S_PORT_STATUS_TXPAUSE_MASK 0x0020
+#define RTL8366S_PORT_STATUS_RXPAUSE_MASK 0x0040
+#define RTL8366S_PORT_STATUS_AN_MASK 0x0080
+
+
+#define RTL8366S_PORT_NUM_CPU 5
+#define RTL8366S_NUM_PORTS 6
+#define RTL8366S_NUM_VLANS 16
+#define RTL8366S_NUM_LEDGROUPS 4
+#define RTL8366S_NUM_VIDS 4096
+#define RTL8366S_PRIORITYMAX 7
+#define RTL8366S_FIDMAX 7
+
+
+#define RTL8366S_PORT_1 (1 << 0) /* In userspace port 0 */
+#define RTL8366S_PORT_2 (1 << 1) /* In userspace port 1 */
+#define RTL8366S_PORT_3 (1 << 2) /* In userspace port 2 */
+#define RTL8366S_PORT_4 (1 << 3) /* In userspace port 3 */
+
+#define RTL8366S_PORT_UNKNOWN (1 << 4) /* No known connection */
+#define RTL8366S_PORT_CPU (1 << 5) /* CPU port */
+
+#define RTL8366S_PORT_ALL (RTL8366S_PORT_1 | \
+ RTL8366S_PORT_2 | \
+ RTL8366S_PORT_3 | \
+ RTL8366S_PORT_4 | \
+ RTL8366S_PORT_UNKNOWN | \
+ RTL8366S_PORT_CPU)
+
+#define RTL8366S_PORT_ALL_BUT_CPU (RTL8366S_PORT_1 | \
+ RTL8366S_PORT_2 | \
+ RTL8366S_PORT_3 | \
+ RTL8366S_PORT_4 | \
+ RTL8366S_PORT_UNKNOWN)
+
+#define RTL8366S_PORT_ALL_EXTERNAL (RTL8366S_PORT_1 | \
+ RTL8366S_PORT_2 | \
+ RTL8366S_PORT_3 | \
+ RTL8366S_PORT_4)
+
+#define RTL8366S_PORT_ALL_INTERNAL (RTL8366S_PORT_UNKNOWN | \
+ RTL8366S_PORT_CPU)
+
+#define RTL8366S_VLAN_VID_MASK 0xfff
+#define RTL8366S_VLAN_PRIORITY_SHIFT 12
+#define RTL8366S_VLAN_PRIORITY_MASK 0x7
+#define RTL8366S_VLAN_MEMBER_MASK 0x3f
+#define RTL8366S_VLAN_UNTAG_SHIFT 6
+#define RTL8366S_VLAN_UNTAG_MASK 0x3f
+#define RTL8366S_VLAN_FID_SHIFT 12
+#define RTL8366S_VLAN_FID_MASK 0x7
+
+static struct rtl8366_mib_counter rtl8366s_mib_counters[] = {
+ { 0, 0, 4, "IfInOctets" },
+ { 0, 4, 4, "EtherStatsOctets" },
+ { 0, 8, 2, "EtherStatsUnderSizePkts" },
+ { 0, 10, 2, "EtherFragments" },
+ { 0, 12, 2, "EtherStatsPkts64Octets" },
+ { 0, 14, 2, "EtherStatsPkts65to127Octets" },
+ { 0, 16, 2, "EtherStatsPkts128to255Octets" },
+ { 0, 18, 2, "EtherStatsPkts256to511Octets" },
+ { 0, 20, 2, "EtherStatsPkts512to1023Octets" },
+ { 0, 22, 2, "EtherStatsPkts1024to1518Octets" },
+ { 0, 24, 2, "EtherOversizeStats" },
+ { 0, 26, 2, "EtherStatsJabbers" },
+ { 0, 28, 2, "IfInUcastPkts" },
+ { 0, 30, 2, "EtherStatsMulticastPkts" },
+ { 0, 32, 2, "EtherStatsBroadcastPkts" },
+ { 0, 34, 2, "EtherStatsDropEvents" },
+ { 0, 36, 2, "Dot3StatsFCSErrors" },
+ { 0, 38, 2, "Dot3StatsSymbolErrors" },
+ { 0, 40, 2, "Dot3InPauseFrames" },
+ { 0, 42, 2, "Dot3ControlInUnknownOpcodes" },
+ { 0, 44, 4, "IfOutOctets" },
+ { 0, 48, 2, "Dot3StatsSingleCollisionFrames" },
+ { 0, 50, 2, "Dot3StatMultipleCollisionFrames" },
+ { 0, 52, 2, "Dot3sDeferredTransmissions" },
+ { 0, 54, 2, "Dot3StatsLateCollisions" },
+ { 0, 56, 2, "EtherStatsCollisions" },
+ { 0, 58, 2, "Dot3StatsExcessiveCollisions" },
+ { 0, 60, 2, "Dot3OutPauseFrames" },
+ { 0, 62, 2, "Dot1dBasePortDelayExceededDiscards" },
+
+ /*
+ * The following counters are accessible at a different
+ * base address.
+ */
+ { 1, 0, 2, "Dot1dTpPortInDiscards" },
+ { 1, 2, 2, "IfOutUcastPkts" },
+ { 1, 4, 2, "IfOutMulticastPkts" },
+ { 1, 6, 2, "IfOutBroadcastPkts" },
+};
+
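+/*
+ * Register access helpers: on error they return from the calling function,
+ * so callers must provide a local 'err' variable.
+ */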
+#define REG_WR(_smi, _reg, _val) \
+ do { \
+ err = rtl8366_smi_write_reg(_smi, _reg, _val); \
+ if (err) \
+ return err; \
+ } while (0)
+
+#define REG_RMW(_smi, _reg, _mask, _val) \
+ do { \
+ err = rtl8366_smi_rmwr(_smi, _reg, _mask, _val); \
+ if (err) \
+ return err; \
+ } while (0)
+
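+/* Issue a hardware reset and poll until the reset bit clears (up to ~10 ms). */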
+static int rtl8366s_reset_chip(struct rtl8366_smi *smi)
+{
+ int timeout = 10;
+ u32 data;
+
+ rtl8366_smi_write_reg_noack(smi, RTL8366S_RESET_CTRL_REG,
+ RTL8366S_CHIP_CTRL_RESET_HW);
+ do {
+ msleep(1);
+ if (rtl8366_smi_read_reg(smi, RTL8366S_RESET_CTRL_REG, &data))
+ return -EIO;
+
+ if (!(data & RTL8366S_CHIP_CTRL_RESET_HW))
+ break;
+ } while (--timeout);
+
+ if (!timeout) {
+		dev_err(smi->parent, "timeout waiting for the switch to reset\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int rtl8366s_setup(struct rtl8366_smi *smi)
+{
+ struct rtl8366_platform_data *pdata;
+ int err;
+
+ pdata = smi->parent->platform_data;
+ if (pdata->num_initvals && pdata->initvals) {
+ unsigned i;
+
+ dev_info(smi->parent, "applying initvals\n");
+ for (i = 0; i < pdata->num_initvals; i++)
+ REG_WR(smi, pdata->initvals[i].reg,
+ pdata->initvals[i].val);
+ }
+
+ /* set maximum packet length to 1536 bytes */
+ REG_RMW(smi, RTL8366S_SGCR, RTL8366S_SGCR_MAX_LENGTH_MASK,
+ RTL8366S_SGCR_MAX_LENGTH_1536);
+
+ /* enable learning for all ports */
+ REG_WR(smi, RTL8366S_SSCR0, 0);
+
+ /* enable auto ageing for all ports */
+ REG_WR(smi, RTL8366S_SSCR1, 0);
+
+	/*
+	 * Discard VLAN-tagged packets if the port is not a member of
+	 * the VLAN with which the packet is associated.
+	 */
+ REG_WR(smi, RTL8366S_VLAN_MEMBERINGRESS_REG, RTL8366S_PORT_ALL);
+
+ /* don't drop packets whose DA has not been learned */
+ REG_RMW(smi, RTL8366S_SSCR2, RTL8366S_SSCR2_DROP_UNKNOWN_DA, 0);
+
+ return 0;
+}
+
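+/*
+ * PHY registers are accessed indirectly: select read or write mode via the
+ * access control register, then address a pseudo register that encodes the
+ * PHY number, page and register. Read results appear in the data register.
+ */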
+static int rtl8366s_read_phy_reg(struct rtl8366_smi *smi,
+ u32 phy_no, u32 page, u32 addr, u32 *data)
+{
+ u32 reg;
+ int ret;
+
+ if (phy_no > RTL8366S_PHY_NO_MAX)
+ return -EINVAL;
+
+ if (page > RTL8366S_PHY_PAGE_MAX)
+ return -EINVAL;
+
+ if (addr > RTL8366S_PHY_ADDR_MAX)
+ return -EINVAL;
+
+ ret = rtl8366_smi_write_reg(smi, RTL8366S_PHY_ACCESS_CTRL_REG,
+ RTL8366S_PHY_CTRL_READ);
+ if (ret)
+ return ret;
+
+ reg = 0x8000 | (1 << (phy_no + RTL8366S_PHY_NO_OFFSET)) |
+ ((page << RTL8366S_PHY_PAGE_OFFSET) & RTL8366S_PHY_PAGE_MASK) |
+ (addr & RTL8366S_PHY_REG_MASK);
+
+ ret = rtl8366_smi_write_reg(smi, reg, 0);
+ if (ret)
+ return ret;
+
+ ret = rtl8366_smi_read_reg(smi, RTL8366S_PHY_ACCESS_DATA_REG, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8366s_write_phy_reg(struct rtl8366_smi *smi,
+ u32 phy_no, u32 page, u32 addr, u32 data)
+{
+ u32 reg;
+ int ret;
+
+ if (phy_no > RTL8366S_PHY_NO_MAX)
+ return -EINVAL;
+
+ if (page > RTL8366S_PHY_PAGE_MAX)
+ return -EINVAL;
+
+ if (addr > RTL8366S_PHY_ADDR_MAX)
+ return -EINVAL;
+
+ ret = rtl8366_smi_write_reg(smi, RTL8366S_PHY_ACCESS_CTRL_REG,
+ RTL8366S_PHY_CTRL_WRITE);
+ if (ret)
+ return ret;
+
+ reg = 0x8000 | (1 << (phy_no + RTL8366S_PHY_NO_OFFSET)) |
+ ((page << RTL8366S_PHY_PAGE_OFFSET) & RTL8366S_PHY_PAGE_MASK) |
+ (addr & RTL8366S_PHY_REG_MASK);
+
+ ret = rtl8366_smi_write_reg(smi, reg, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8366_get_mib_counter(struct rtl8366_smi *smi, int counter,
+ int port, unsigned long long *val)
+{
+ int i;
+ int err;
+ u32 addr, data;
+ u64 mibvalue;
+
+ if (port > RTL8366S_NUM_PORTS || counter >= RTL8366S_MIB_COUNT)
+ return -EINVAL;
+
+ switch (rtl8366s_mib_counters[counter].base) {
+ case 0:
+ addr = RTL8366S_MIB_COUNTER_BASE +
+ RTL8366S_MIB_COUNTER_PORT_OFFSET * port;
+ break;
+
+ case 1:
+ addr = RTL8366S_MIB_COUNTER_BASE2 +
+ RTL8366S_MIB_COUNTER_PORT_OFFSET2 * port;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ addr += rtl8366s_mib_counters[counter].offset;
+
+	/*
+	 * Write to the counter address first; the ASIC then prepares the
+	 * 64-bit counter value to be retrieved.
+	 */
+	data = 0; /* the written data is discarded by the ASIC */
+ err = rtl8366_smi_write_reg(smi, addr, data);
+ if (err)
+ return err;
+
+ /* read MIB control register */
+ err = rtl8366_smi_read_reg(smi, RTL8366S_MIB_CTRL_REG, &data);
+ if (err)
+ return err;
+
+ if (data & RTL8366S_MIB_CTRL_BUSY_MASK)
+ return -EBUSY;
+
+ if (data & RTL8366S_MIB_CTRL_RESET_MASK)
+ return -EIO;
+
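+	/* assemble the counter 16 bits at a time, highest word first */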
+ mibvalue = 0;
+ for (i = rtl8366s_mib_counters[counter].length; i > 0; i--) {
+ err = rtl8366_smi_read_reg(smi, addr + (i - 1), &data);
+ if (err)
+ return err;
+
+ mibvalue = (mibvalue << 16) | (data & 0xFFFF);
+ }
+
+ *val = mibvalue;
+ return 0;
+}
+
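+/*
+ * The 4k VLAN table is read indirectly: write the VID, issue a read command
+ * through the table access control register, then fetch the entry from the
+ * table read registers.
+ */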
+static int rtl8366s_get_vlan_4k(struct rtl8366_smi *smi, u32 vid,
+ struct rtl8366_vlan_4k *vlan4k)
+{
+ u32 data[2];
+ int err;
+ int i;
+
+ memset(vlan4k, '\0', sizeof(struct rtl8366_vlan_4k));
+
+ if (vid >= RTL8366S_NUM_VIDS)
+ return -EINVAL;
+
+ /* write VID */
+ err = rtl8366_smi_write_reg(smi, RTL8366S_VLAN_TABLE_WRITE_BASE,
+ vid & RTL8366S_VLAN_VID_MASK);
+ if (err)
+ return err;
+
+ /* write table access control word */
+ err = rtl8366_smi_write_reg(smi, RTL8366S_TABLE_ACCESS_CTRL_REG,
+ RTL8366S_TABLE_VLAN_READ_CTRL);
+ if (err)
+ return err;
+
+ for (i = 0; i < 2; i++) {
+ err = rtl8366_smi_read_reg(smi,
+ RTL8366S_VLAN_TABLE_READ_BASE + i,
+ &data[i]);
+ if (err)
+ return err;
+ }
+
+ vlan4k->vid = vid;
+ vlan4k->untag = (data[1] >> RTL8366S_VLAN_UNTAG_SHIFT) &
+ RTL8366S_VLAN_UNTAG_MASK;
+ vlan4k->member = data[1] & RTL8366S_VLAN_MEMBER_MASK;
+ vlan4k->fid = (data[1] >> RTL8366S_VLAN_FID_SHIFT) &
+ RTL8366S_VLAN_FID_MASK;
+
+ return 0;
+}
+
+static int rtl8366s_set_vlan_4k(struct rtl8366_smi *smi,
+ const struct rtl8366_vlan_4k *vlan4k)
+{
+ u32 data[2];
+ int err;
+ int i;
+
+ if (vlan4k->vid >= RTL8366S_NUM_VIDS ||
+ vlan4k->member > RTL8366S_VLAN_MEMBER_MASK ||
+ vlan4k->untag > RTL8366S_VLAN_UNTAG_MASK ||
+ vlan4k->fid > RTL8366S_FIDMAX)
+ return -EINVAL;
+
+ data[0] = vlan4k->vid & RTL8366S_VLAN_VID_MASK;
+ data[1] = (vlan4k->member & RTL8366S_VLAN_MEMBER_MASK) |
+ ((vlan4k->untag & RTL8366S_VLAN_UNTAG_MASK) <<
+ RTL8366S_VLAN_UNTAG_SHIFT) |
+ ((vlan4k->fid & RTL8366S_VLAN_FID_MASK) <<
+ RTL8366S_VLAN_FID_SHIFT);
+
+ for (i = 0; i < 2; i++) {
+ err = rtl8366_smi_write_reg(smi,
+ RTL8366S_VLAN_TABLE_WRITE_BASE + i,
+ data[i]);
+ if (err)
+ return err;
+ }
+
+ /* write table access control word */
+ err = rtl8366_smi_write_reg(smi, RTL8366S_TABLE_ACCESS_CTRL_REG,
+ RTL8366S_TABLE_VLAN_WRITE_CTRL);
+
+ return err;
+}
+
+static int rtl8366s_get_vlan_mc(struct rtl8366_smi *smi, u32 index,
+ struct rtl8366_vlan_mc *vlanmc)
+{
+ u32 data[2];
+ int err;
+ int i;
+
+ memset(vlanmc, '\0', sizeof(struct rtl8366_vlan_mc));
+
+ if (index >= RTL8366S_NUM_VLANS)
+ return -EINVAL;
+
+ for (i = 0; i < 2; i++) {
+ err = rtl8366_smi_read_reg(smi,
+ RTL8366S_VLAN_MC_BASE(index) + i,
+ &data[i]);
+ if (err)
+ return err;
+ }
+
+ vlanmc->vid = data[0] & RTL8366S_VLAN_VID_MASK;
+ vlanmc->priority = (data[0] >> RTL8366S_VLAN_PRIORITY_SHIFT) &
+ RTL8366S_VLAN_PRIORITY_MASK;
+ vlanmc->untag = (data[1] >> RTL8366S_VLAN_UNTAG_SHIFT) &
+ RTL8366S_VLAN_UNTAG_MASK;
+ vlanmc->member = data[1] & RTL8366S_VLAN_MEMBER_MASK;
+ vlanmc->fid = (data[1] >> RTL8366S_VLAN_FID_SHIFT) &
+ RTL8366S_VLAN_FID_MASK;
+
+ return 0;
+}
+
+static int rtl8366s_set_vlan_mc(struct rtl8366_smi *smi, u32 index,
+ const struct rtl8366_vlan_mc *vlanmc)
+{
+ u32 data[2];
+ int err;
+ int i;
+
+ if (index >= RTL8366S_NUM_VLANS ||
+ vlanmc->vid >= RTL8366S_NUM_VIDS ||
+ vlanmc->priority > RTL8366S_PRIORITYMAX ||
+ vlanmc->member > RTL8366S_VLAN_MEMBER_MASK ||
+ vlanmc->untag > RTL8366S_VLAN_UNTAG_MASK ||
+ vlanmc->fid > RTL8366S_FIDMAX)
+ return -EINVAL;
+
+ data[0] = (vlanmc->vid & RTL8366S_VLAN_VID_MASK) |
+ ((vlanmc->priority & RTL8366S_VLAN_PRIORITY_MASK) <<
+ RTL8366S_VLAN_PRIORITY_SHIFT);
+ data[1] = (vlanmc->member & RTL8366S_VLAN_MEMBER_MASK) |
+ ((vlanmc->untag & RTL8366S_VLAN_UNTAG_MASK) <<
+ RTL8366S_VLAN_UNTAG_SHIFT) |
+ ((vlanmc->fid & RTL8366S_VLAN_FID_MASK) <<
+ RTL8366S_VLAN_FID_SHIFT);
+
+ for (i = 0; i < 2; i++) {
+ err = rtl8366_smi_write_reg(smi,
+ RTL8366S_VLAN_MC_BASE(index) + i,
+ data[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int rtl8366s_get_mc_index(struct rtl8366_smi *smi, int port, int *val)
+{
+ u32 data;
+ int err;
+
+ if (port >= RTL8366S_NUM_PORTS)
+ return -EINVAL;
+
+ err = rtl8366_smi_read_reg(smi, RTL8366S_PORT_VLAN_CTRL_REG(port),
+ &data);
+ if (err)
+ return err;
+
+ *val = (data >> RTL8366S_PORT_VLAN_CTRL_SHIFT(port)) &
+ RTL8366S_PORT_VLAN_CTRL_MASK;
+
+ return 0;
+}
+
+static int rtl8366s_set_mc_index(struct rtl8366_smi *smi, int port, int index)
+{
+ if (port >= RTL8366S_NUM_PORTS || index >= RTL8366S_NUM_VLANS)
+ return -EINVAL;
+
+ return rtl8366_smi_rmwr(smi, RTL8366S_PORT_VLAN_CTRL_REG(port),
+ RTL8366S_PORT_VLAN_CTRL_MASK <<
+ RTL8366S_PORT_VLAN_CTRL_SHIFT(port),
+ (index & RTL8366S_PORT_VLAN_CTRL_MASK) <<
+ RTL8366S_PORT_VLAN_CTRL_SHIFT(port));
+}
+
+static int rtl8366s_enable_vlan(struct rtl8366_smi *smi, int enable)
+{
+ return rtl8366_smi_rmwr(smi, RTL8366S_SGCR, RTL8366S_SGCR_EN_VLAN,
+ (enable) ? RTL8366S_SGCR_EN_VLAN : 0);
+}
+
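+/* Unlike the RB variant, the 4k VLAN table is toggled via a dedicated register. */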
+static int rtl8366s_enable_vlan4k(struct rtl8366_smi *smi, int enable)
+{
+ return rtl8366_smi_rmwr(smi, RTL8366S_VLAN_TB_CTRL_REG,
+ 1, (enable) ? 1 : 0);
+}
+
+static int rtl8366s_is_vlan_valid(struct rtl8366_smi *smi, unsigned vlan)
+{
+ unsigned max = RTL8366S_NUM_VLANS;
+
+ if (smi->vlan4k_enabled)
+ max = RTL8366S_NUM_VIDS - 1;
+
+ if (vlan == 0 || vlan >= max)
+ return 0;
+
+ return 1;
+}
+
+static int rtl8366s_enable_port(struct rtl8366_smi *smi, int port, int enable)
+{
+ return rtl8366_smi_rmwr(smi, RTL8366S_PECR, (1 << port),
+ (enable) ? 0 : (1 << port));
+}
+
+static int rtl8366s_sw_reset_mibs(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+
+	return rtl8366_smi_rmwr(smi, RTL8366S_MIB_CTRL_REG, 0,
+				RTL8366S_MIB_CTRL_GLOBAL_RESET_MASK);
+}
+
+static int rtl8366s_sw_get_blinkrate(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data;
+
+ rtl8366_smi_read_reg(smi, RTL8366S_LED_BLINKRATE_REG, &data);
+
+ val->value.i = (data & (RTL8366S_LED_BLINKRATE_MASK));
+
+ return 0;
+}
+
+static int rtl8366s_sw_set_blinkrate(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+
+ if (val->value.i >= 6)
+ return -EINVAL;
+
+ return rtl8366_smi_rmwr(smi, RTL8366S_LED_BLINKRATE_REG,
+ RTL8366S_LED_BLINKRATE_MASK,
+ val->value.i);
+}
+
+static int rtl8366s_sw_get_max_length(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data;
+
+ rtl8366_smi_read_reg(smi, RTL8366S_SGCR, &data);
+
+ val->value.i = ((data & (RTL8366S_SGCR_MAX_LENGTH_MASK)) >> 4);
+
+ return 0;
+}
+
+static int rtl8366s_sw_set_max_length(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ char length_code;
+
+ switch (val->value.i) {
+ case 0:
+ length_code = RTL8366S_SGCR_MAX_LENGTH_1522;
+ break;
+ case 1:
+ length_code = RTL8366S_SGCR_MAX_LENGTH_1536;
+ break;
+ case 2:
+ length_code = RTL8366S_SGCR_MAX_LENGTH_1552;
+ break;
+ case 3:
+ length_code = RTL8366S_SGCR_MAX_LENGTH_16000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rtl8366_smi_rmwr(smi, RTL8366S_SGCR,
+ RTL8366S_SGCR_MAX_LENGTH_MASK,
+ length_code);
+}
+
+static int rtl8366s_sw_get_learning_enable(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data;
+
+	rtl8366_smi_read_reg(smi, RTL8366S_SSCR0, &data);
+ val->value.i = !data;
+
+ return 0;
+}
+
+static int rtl8366s_sw_set_learning_enable(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 portmask = 0;
+ int err = 0;
+
+ if (!val->value.i)
+ portmask = RTL8366S_PORT_ALL;
+
+ /* set learning for all ports */
+ REG_WR(smi, RTL8366S_SSCR0, portmask);
+
+ /* set auto ageing for all ports */
+ REG_WR(smi, RTL8366S_SSCR1, portmask);
+
+ return 0;
+}
+
+static int rtl8366s_sw_get_port_link(struct switch_dev *dev,
+ int port,
+ struct switch_port_link *link)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data = 0;
+ u32 speed;
+
+ if (port >= RTL8366S_NUM_PORTS)
+ return -EINVAL;
+
+ rtl8366_smi_read_reg(smi, RTL8366S_PORT_LINK_STATUS_BASE + (port / 2),
+ &data);
+
+ if (port % 2)
+ data = data >> 8;
+
+ link->link = !!(data & RTL8366S_PORT_STATUS_LINK_MASK);
+ if (!link->link)
+ return 0;
+
+ link->duplex = !!(data & RTL8366S_PORT_STATUS_DUPLEX_MASK);
+ link->rx_flow = !!(data & RTL8366S_PORT_STATUS_RXPAUSE_MASK);
+ link->tx_flow = !!(data & RTL8366S_PORT_STATUS_TXPAUSE_MASK);
+ link->aneg = !!(data & RTL8366S_PORT_STATUS_AN_MASK);
+
+ speed = (data & RTL8366S_PORT_STATUS_SPEED_MASK);
+ switch (speed) {
+ case 0:
+ link->speed = SWITCH_PORT_SPEED_10;
+ break;
+ case 1:
+ link->speed = SWITCH_PORT_SPEED_100;
+ break;
+ case 2:
+ link->speed = SWITCH_PORT_SPEED_1000;
+ break;
+ default:
+ link->speed = SWITCH_PORT_SPEED_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static int rtl8366s_sw_set_port_led(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data;
+ u32 mask;
+ u32 reg;
+
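+	/* port 4 has no known connection on this chip, reject it */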
+ if (val->port_vlan >= RTL8366S_NUM_PORTS ||
+ (1 << val->port_vlan) == RTL8366S_PORT_UNKNOWN)
+ return -EINVAL;
+
+ if (val->port_vlan == RTL8366S_PORT_NUM_CPU) {
+ reg = RTL8366S_LED_BLINKRATE_REG;
+ mask = 0xF << 4;
+ data = val->value.i << 4;
+ } else {
+ reg = RTL8366S_LED_CTRL_REG;
+		mask = 0xF << (val->port_vlan * 4);
+ data = val->value.i << (val->port_vlan * 4);
+ }
+
+ return rtl8366_smi_rmwr(smi, reg, mask, data);
+}
+
+static int rtl8366s_sw_get_port_led(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data = 0;
+
+ if (val->port_vlan >= RTL8366S_NUM_LEDGROUPS)
+ return -EINVAL;
+
+ rtl8366_smi_read_reg(smi, RTL8366S_LED_CTRL_REG, &data);
+ val->value.i = (data >> (val->port_vlan * 4)) & 0x000F;
+
+ return 0;
+}
+
+static int rtl8366s_sw_reset_port_mibs(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+
+ if (val->port_vlan >= RTL8366S_NUM_PORTS)
+ return -EINVAL;
+
+ return rtl8366_smi_rmwr(smi, RTL8366S_MIB_CTRL_REG,
+ 0, (1 << (val->port_vlan + 3)));
+}
+
+static struct switch_attr rtl8366s_globals[] = {
+ {
+ .type = SWITCH_TYPE_INT,
+ .name = "enable_learning",
+ .description = "Enable learning, enable aging",
+ .set = rtl8366s_sw_set_learning_enable,
+ .get = rtl8366s_sw_get_learning_enable,
+ .max = 1,
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "enable_vlan",
+ .description = "Enable VLAN mode",
+ .set = rtl8366_sw_set_vlan_enable,
+ .get = rtl8366_sw_get_vlan_enable,
+ .max = 1,
+ .ofs = 1
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "enable_vlan4k",
+ .description = "Enable VLAN 4K mode",
+ .set = rtl8366_sw_set_vlan_enable,
+ .get = rtl8366_sw_get_vlan_enable,
+ .max = 1,
+ .ofs = 2
+ }, {
+ .type = SWITCH_TYPE_NOVAL,
+ .name = "reset_mibs",
+ .description = "Reset all MIB counters",
+ .set = rtl8366s_sw_reset_mibs,
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "blinkrate",
+ .description = "Get/Set LED blinking rate (0 = 43ms, 1 = 84ms,"
+ " 2 = 120ms, 3 = 170ms, 4 = 340ms, 5 = 670ms)",
+ .set = rtl8366s_sw_set_blinkrate,
+ .get = rtl8366s_sw_get_blinkrate,
+ .max = 5
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "max_length",
+ .description = "Get/Set the maximum length of valid packets"
+ " (0 = 1522, 1 = 1536, 2 = 1552, 3 = 16000 (9216?))",
+ .set = rtl8366s_sw_set_max_length,
+ .get = rtl8366s_sw_get_max_length,
+ .max = 3,
+ },
+};
+
+static struct switch_attr rtl8366s_port[] = {
+ {
+ .type = SWITCH_TYPE_NOVAL,
+ .name = "reset_mib",
+ .description = "Reset single port MIB counters",
+ .set = rtl8366s_sw_reset_port_mibs,
+ }, {
+ .type = SWITCH_TYPE_STRING,
+ .name = "mib",
+ .description = "Get MIB counters for port",
+ .max = 33,
+ .set = NULL,
+ .get = rtl8366_sw_get_port_mib,
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "led",
+ .description = "Get/Set port group (0 - 3) led mode (0 - 15)",
+ .max = 15,
+ .set = rtl8366s_sw_set_port_led,
+ .get = rtl8366s_sw_get_port_led,
+ },
+};
+
+static struct switch_attr rtl8366s_vlan[] = {
+ {
+ .type = SWITCH_TYPE_STRING,
+ .name = "info",
+ .description = "Get vlan information",
+ .max = 1,
+ .set = NULL,
+ .get = rtl8366_sw_get_vlan_info,
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "fid",
+ .description = "Get/Set vlan FID",
+ .max = RTL8366S_FIDMAX,
+ .set = rtl8366_sw_set_vlan_fid,
+ .get = rtl8366_sw_get_vlan_fid,
+ },
+};
+
+static const struct switch_dev_ops rtl8366_ops = {
+ .attr_global = {
+ .attr = rtl8366s_globals,
+ .n_attr = ARRAY_SIZE(rtl8366s_globals),
+ },
+ .attr_port = {
+ .attr = rtl8366s_port,
+ .n_attr = ARRAY_SIZE(rtl8366s_port),
+ },
+ .attr_vlan = {
+ .attr = rtl8366s_vlan,
+ .n_attr = ARRAY_SIZE(rtl8366s_vlan),
+ },
+
+ .get_vlan_ports = rtl8366_sw_get_vlan_ports,
+ .set_vlan_ports = rtl8366_sw_set_vlan_ports,
+ .get_port_pvid = rtl8366_sw_get_port_pvid,
+ .set_port_pvid = rtl8366_sw_set_port_pvid,
+ .reset_switch = rtl8366_sw_reset_switch,
+ .get_port_link = rtl8366s_sw_get_port_link,
+};
+
+static int rtl8366s_switch_init(struct rtl8366_smi *smi)
+{
+ struct switch_dev *dev = &smi->sw_dev;
+ int err;
+
+ dev->name = "RTL8366S";
+ dev->cpu_port = RTL8366S_PORT_NUM_CPU;
+ dev->ports = RTL8366S_NUM_PORTS;
+ dev->vlans = RTL8366S_NUM_VIDS;
+ dev->ops = &rtl8366_ops;
+ dev->alias = dev_name(smi->parent);
+
+ err = register_switch(dev, NULL);
+ if (err)
+ dev_err(smi->parent, "switch registration failed\n");
+
+ return err;
+}
+
+static void rtl8366s_switch_cleanup(struct rtl8366_smi *smi)
+{
+ unregister_switch(&smi->sw_dev);
+}
+
+static int rtl8366s_mii_read(struct mii_bus *bus, int addr, int reg)
+{
+ struct rtl8366_smi *smi = bus->priv;
+ u32 val = 0;
+ int err;
+
+ err = rtl8366s_read_phy_reg(smi, addr, 0, reg, &val);
+ if (err)
+ return 0xffff;
+
+ return val;
+}
+
+static int rtl8366s_mii_write(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+ struct rtl8366_smi *smi = bus->priv;
+ u32 t;
+ int err;
+
+ err = rtl8366s_write_phy_reg(smi, addr, 0, reg, val);
+ /* flush write */
+ (void) rtl8366s_read_phy_reg(smi, addr, 0, reg, &t);
+
+ return err;
+}
+
+static int rtl8366s_detect(struct rtl8366_smi *smi)
+{
+ u32 chip_id = 0;
+ u32 chip_ver = 0;
+ int ret;
+
+ ret = rtl8366_smi_read_reg(smi, RTL8366S_CHIP_ID_REG, &chip_id);
+ if (ret) {
+ dev_err(smi->parent, "unable to read chip id\n");
+ return ret;
+ }
+
+ switch (chip_id) {
+ case RTL8366S_CHIP_ID_8366:
+ break;
+ default:
+ dev_err(smi->parent, "unknown chip id (%04x)\n", chip_id);
+ return -ENODEV;
+ }
+
+ ret = rtl8366_smi_read_reg(smi, RTL8366S_CHIP_VERSION_CTRL_REG,
+ &chip_ver);
+ if (ret) {
+ dev_err(smi->parent, "unable to read chip version\n");
+ return ret;
+ }
+
+ dev_info(smi->parent, "RTL%04x ver. %u chip found\n",
+ chip_id, chip_ver & RTL8366S_CHIP_VERSION_MASK);
+
+ return 0;
+}
+
+static struct rtl8366_smi_ops rtl8366s_smi_ops = {
+ .detect = rtl8366s_detect,
+ .reset_chip = rtl8366s_reset_chip,
+ .setup = rtl8366s_setup,
+
+ .mii_read = rtl8366s_mii_read,
+ .mii_write = rtl8366s_mii_write,
+
+ .get_vlan_mc = rtl8366s_get_vlan_mc,
+ .set_vlan_mc = rtl8366s_set_vlan_mc,
+ .get_vlan_4k = rtl8366s_get_vlan_4k,
+ .set_vlan_4k = rtl8366s_set_vlan_4k,
+ .get_mc_index = rtl8366s_get_mc_index,
+ .set_mc_index = rtl8366s_set_mc_index,
+ .get_mib_counter = rtl8366_get_mib_counter,
+ .is_vlan_valid = rtl8366s_is_vlan_valid,
+ .enable_vlan = rtl8366s_enable_vlan,
+ .enable_vlan4k = rtl8366s_enable_vlan4k,
+ .enable_port = rtl8366s_enable_port,
+};
+
+static int __devinit rtl8366s_probe(struct platform_device *pdev)
+{
+ static int rtl8366_smi_version_printed;
+ struct rtl8366_platform_data *pdata;
+ struct rtl8366_smi *smi;
+ int err;
+
+ if (!rtl8366_smi_version_printed++)
+ printk(KERN_NOTICE RTL8366S_DRIVER_DESC
+ " version " RTL8366S_DRIVER_VER"\n");
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data specified\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ smi = rtl8366_smi_alloc(&pdev->dev);
+ if (!smi) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ smi->gpio_sda = pdata->gpio_sda;
+ smi->gpio_sck = pdata->gpio_sck;
+ smi->hw_reset = pdata->hw_reset;
+
+ smi->clk_delay = 10;
+ smi->cmd_read = 0xa9;
+ smi->cmd_write = 0xa8;
+ smi->ops = &rtl8366s_smi_ops;
+ smi->cpu_port = RTL8366S_PORT_NUM_CPU;
+ smi->num_ports = RTL8366S_NUM_PORTS;
+ smi->num_vlan_mc = RTL8366S_NUM_VLANS;
+ smi->mib_counters = rtl8366s_mib_counters;
+ smi->num_mib_counters = ARRAY_SIZE(rtl8366s_mib_counters);
+
+ err = rtl8366_smi_init(smi);
+ if (err)
+ goto err_free_smi;
+
+ platform_set_drvdata(pdev, smi);
+
+ err = rtl8366s_switch_init(smi);
+ if (err)
+ goto err_clear_drvdata;
+
+ return 0;
+
+ err_clear_drvdata:
+ platform_set_drvdata(pdev, NULL);
+ rtl8366_smi_cleanup(smi);
+ err_free_smi:
+ kfree(smi);
+ err_out:
+ return err;
+}
+
+static int __devexit rtl8366s_remove(struct platform_device *pdev)
+{
+ struct rtl8366_smi *smi = platform_get_drvdata(pdev);
+
+ if (smi) {
+ rtl8366s_switch_cleanup(smi);
+ platform_set_drvdata(pdev, NULL);
+ rtl8366_smi_cleanup(smi);
+ kfree(smi);
+ }
+
+ return 0;
+}
+
+static struct platform_driver rtl8366s_driver = {
+ .driver = {
+ .name = RTL8366S_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = rtl8366s_probe,
+ .remove = __devexit_p(rtl8366s_remove),
+};
+
+static int __init rtl8366s_module_init(void)
+{
+ return platform_driver_register(&rtl8366s_driver);
+}
+module_init(rtl8366s_module_init);
+
+static void __exit rtl8366s_module_exit(void)
+{
+ platform_driver_unregister(&rtl8366s_driver);
+}
+module_exit(rtl8366s_module_exit);
+
+MODULE_DESCRIPTION(RTL8366S_DRIVER_DESC);
+MODULE_VERSION(RTL8366S_DRIVER_VER);
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_AUTHOR("Antti Seppälä <a.seppala@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" RTL8366S_DRIVER_NAME);
diff --git a/target/linux/generic/files/drivers/net/phy/rtl8367.c b/target/linux/generic/files/drivers/net/phy/rtl8367.c
new file mode 100644
index 000000000..ee2a04701
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/rtl8367.c
@@ -0,0 +1,1775 @@
+/*
+ * Platform driver for the Realtek RTL8367R/M ethernet switches
+ *
+ * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/rtl8367.h>
+
+#include "rtl8366_smi.h"
+
+#define RTL8367_RESET_DELAY	1000	/* msecs */
+
+#define RTL8367_PHY_ADDR_MAX 8
+#define RTL8367_PHY_REG_MAX 31
+
+#define RTL8367_VID_MASK 0xffff
+#define RTL8367_FID_MASK 0xfff
+#define RTL8367_UNTAG_MASK 0xffff
+#define RTL8367_MEMBER_MASK 0xffff
+
+#define RTL8367_PORT_CFG_REG(_p) (0x000e + 0x20 * (_p))
+#define RTL8367_PORT_CFG_EGRESS_MODE_SHIFT 4
+#define RTL8367_PORT_CFG_EGRESS_MODE_MASK 0x3
+#define RTL8367_PORT_CFG_EGRESS_MODE_ORIGINAL 0
+#define RTL8367_PORT_CFG_EGRESS_MODE_KEEP 1
+#define RTL8367_PORT_CFG_EGRESS_MODE_PRI 2
+#define RTL8367_PORT_CFG_EGRESS_MODE_REAL 3
+
+#define RTL8367_BYPASS_LINE_RATE_REG 0x03f7
+
+#define RTL8367_TA_CTRL_REG 0x0500
+#define RTL8367_TA_CTRL_STATUS BIT(12)
+#define RTL8367_TA_CTRL_METHOD BIT(5)
+#define RTL8367_TA_CTRL_CMD_SHIFT 4
+#define RTL8367_TA_CTRL_CMD_READ 0
+#define RTL8367_TA_CTRL_CMD_WRITE 1
+#define RTL8367_TA_CTRL_TABLE_SHIFT 0
+#define RTL8367_TA_CTRL_TABLE_ACLRULE 1
+#define RTL8367_TA_CTRL_TABLE_ACLACT 2
+#define RTL8367_TA_CTRL_TABLE_CVLAN 3
+#define RTL8367_TA_CTRL_TABLE_L2 4
+#define RTL8367_TA_CTRL_CVLAN_READ \
+ ((RTL8367_TA_CTRL_CMD_READ << RTL8367_TA_CTRL_CMD_SHIFT) | \
+ RTL8367_TA_CTRL_TABLE_CVLAN)
+#define RTL8367_TA_CTRL_CVLAN_WRITE \
+ ((RTL8367_TA_CTRL_CMD_WRITE << RTL8367_TA_CTRL_CMD_SHIFT) | \
+ RTL8367_TA_CTRL_TABLE_CVLAN)
+
+#define RTL8367_TA_ADDR_REG 0x0501
+#define RTL8367_TA_ADDR_MASK 0x3fff
+
+#define RTL8367_TA_DATA_REG(_x) (0x0503 + (_x))
+#define RTL8367_TA_VLAN_DATA_SIZE 4
+#define RTL8367_TA_VLAN_VID_MASK RTL8367_VID_MASK
+#define RTL8367_TA_VLAN_MEMBER_SHIFT 0
+#define RTL8367_TA_VLAN_MEMBER_MASK RTL8367_MEMBER_MASK
+#define RTL8367_TA_VLAN_FID_SHIFT 0
+#define RTL8367_TA_VLAN_FID_MASK RTL8367_FID_MASK
+#define RTL8367_TA_VLAN_UNTAG1_SHIFT 14
+#define RTL8367_TA_VLAN_UNTAG1_MASK 0x3
+#define RTL8367_TA_VLAN_UNTAG2_SHIFT 0
+#define RTL8367_TA_VLAN_UNTAG2_MASK 0x3fff
+
+#define RTL8367_VLAN_PVID_CTRL_REG(_p) (0x0700 + (_p) / 2)
+#define RTL8367_VLAN_PVID_CTRL_MASK 0x1f
+#define RTL8367_VLAN_PVID_CTRL_SHIFT(_p) (8 * ((_p) % 2))
+
+#define RTL8367_VLAN_MC_BASE(_x) (0x0728 + (_x) * 4)
+#define RTL8367_VLAN_MC_DATA_SIZE 4
+#define RTL8367_VLAN_MC_MEMBER_SHIFT 0
+#define RTL8367_VLAN_MC_MEMBER_MASK RTL8367_MEMBER_MASK
+#define RTL8367_VLAN_MC_FID_SHIFT 0
+#define RTL8367_VLAN_MC_FID_MASK RTL8367_FID_MASK
+#define RTL8367_VLAN_MC_EVID_SHIFT 0
+#define RTL8367_VLAN_MC_EVID_MASK RTL8367_VID_MASK
+
+#define RTL8367_VLAN_CTRL_REG 0x07a8
+#define RTL8367_VLAN_CTRL_ENABLE BIT(0)
+
+#define RTL8367_VLAN_INGRESS_REG 0x07a9
+
+#define RTL8367_PORT_ISOLATION_REG(_p) (0x08a2 + (_p))
+
+#define RTL8367_MIB_COUNTER_REG(_x) (0x1000 + (_x))
+
+#define RTL8367_MIB_ADDRESS_REG 0x1004
+
+#define RTL8367_MIB_CTRL_REG(_x) (0x1005 + (_x))
+#define RTL8367_MIB_CTRL_GLOBAL_RESET_MASK BIT(11)
+#define RTL8367_MIB_CTRL_QM_RESET_MASK BIT(10)
+#define RTL8367_MIB_CTRL_PORT_RESET_MASK(_p) BIT(2 + (_p))
+#define RTL8367_MIB_CTRL_RESET_MASK BIT(1)
+#define RTL8367_MIB_CTRL_BUSY_MASK BIT(0)
+
+#define RTL8367_MIB_COUNT 36
+#define RTL8367_MIB_COUNTER_PORT_OFFSET 0x0050
+
+#define RTL8367_SWC0_REG 0x1200
+#define RTL8367_SWC0_MAX_LENGTH_SHIFT 13
+#define RTL8367_SWC0_MAX_LENGTH(_x) ((_x) << 13)
+#define RTL8367_SWC0_MAX_LENGTH_MASK RTL8367_SWC0_MAX_LENGTH(0x3)
+#define RTL8367_SWC0_MAX_LENGTH_1522 RTL8367_SWC0_MAX_LENGTH(0)
+#define RTL8367_SWC0_MAX_LENGTH_1536 RTL8367_SWC0_MAX_LENGTH(1)
+#define RTL8367_SWC0_MAX_LENGTH_1552 RTL8367_SWC0_MAX_LENGTH(2)
+#define RTL8367_SWC0_MAX_LENGTH_16000 RTL8367_SWC0_MAX_LENGTH(3)
+
+#define RTL8367_CHIP_NUMBER_REG 0x1300
+
+#define RTL8367_CHIP_VER_REG 0x1301
+#define RTL8367_CHIP_VER_RLVID_SHIFT 12
+#define RTL8367_CHIP_VER_RLVID_MASK 0xf
+#define RTL8367_CHIP_VER_MCID_SHIFT 8
+#define RTL8367_CHIP_VER_MCID_MASK 0xf
+#define RTL8367_CHIP_VER_BOID_SHIFT 4
+#define RTL8367_CHIP_VER_BOID_MASK 0xf
+
+#define RTL8367_CHIP_MODE_REG 0x1302
+#define RTL8367_CHIP_MODE_MASK 0x7
+
+#define RTL8367_CHIP_DEBUG0_REG 0x1303
+#define RTL8367_CHIP_DEBUG0_DUMMY0(_x) BIT(8 + (_x))
+
+#define RTL8367_CHIP_DEBUG1_REG 0x1304
+
+#define RTL8367_DIS_REG 0x1305
+#define RTL8367_DIS_SKIP_MII_RXER(_x) BIT(12 + (_x))
+#define RTL8367_DIS_RGMII_SHIFT(_x) (4 * (_x))
+#define RTL8367_DIS_RGMII_MASK 0x7
+
+#define RTL8367_EXT_RGMXF_REG(_x) (0x1306 + (_x))
+#define RTL8367_EXT_RGMXF_DUMMY0_SHIFT 5
+#define RTL8367_EXT_RGMXF_DUMMY0_MASK 0x7ff
+#define RTL8367_EXT_RGMXF_TXDELAY_SHIFT 3
+#define RTL8367_EXT_RGMXF_TXDELAY_MASK 1
+#define RTL8367_EXT_RGMXF_RXDELAY_MASK 0x7
+
+#define RTL8367_DI_FORCE_REG(_x) (0x1310 + (_x))
+#define RTL8367_DI_FORCE_MODE BIT(12)
+#define RTL8367_DI_FORCE_NWAY BIT(7)
+#define RTL8367_DI_FORCE_TXPAUSE BIT(6)
+#define RTL8367_DI_FORCE_RXPAUSE BIT(5)
+#define RTL8367_DI_FORCE_LINK BIT(4)
+#define RTL8367_DI_FORCE_DUPLEX BIT(2)
+#define RTL8367_DI_FORCE_SPEED_MASK 3
+#define RTL8367_DI_FORCE_SPEED_10 0
+#define RTL8367_DI_FORCE_SPEED_100 1
+#define RTL8367_DI_FORCE_SPEED_1000 2
+
+#define RTL8367_MAC_FORCE_REG(_x) (0x1312 + (_x))
+
+#define RTL8367_CHIP_RESET_REG 0x1322
+#define RTL8367_CHIP_RESET_SW BIT(1)
+#define RTL8367_CHIP_RESET_HW BIT(0)
+
+#define RTL8367_PORT_STATUS_REG(_p) (0x1352 + (_p))
+#define RTL8367_PORT_STATUS_NWAY BIT(7)
+#define RTL8367_PORT_STATUS_TXPAUSE BIT(6)
+#define RTL8367_PORT_STATUS_RXPAUSE BIT(5)
+#define RTL8367_PORT_STATUS_LINK BIT(4)
+#define RTL8367_PORT_STATUS_DUPLEX BIT(2)
+#define RTL8367_PORT_STATUS_SPEED_MASK 0x0003
+#define RTL8367_PORT_STATUS_SPEED_10 0
+#define RTL8367_PORT_STATUS_SPEED_100 1
+#define RTL8367_PORT_STATUS_SPEED_1000 2
+
+#define RTL8367_RTL_NO_REG 0x13c0
+#define RTL8367_RTL_NO_8367R 0x3670
+#define RTL8367_RTL_NO_8367M 0x3671
+
+#define RTL8367_RTL_VER_REG 0x13c1
+#define RTL8367_RTL_VER_MASK 0xf
+
+#define RTL8367_RTL_MAGIC_ID_REG 0x13c2
+#define RTL8367_RTL_MAGIC_ID_VAL 0x0249
+
+#define RTL8367_LED_SYS_CONFIG_REG 0x1b00
+#define RTL8367_LED_MODE_REG 0x1b02
+#define RTL8367_LED_MODE_RATE_M 0x7
+#define RTL8367_LED_MODE_RATE_S 1
+
+#define RTL8367_LED_CONFIG_REG 0x1b03
+#define RTL8367_LED_CONFIG_DATA_S 12
+#define RTL8367_LED_CONFIG_DATA_M 0x3
+#define RTL8367_LED_CONFIG_SEL BIT(14)
+#define RTL8367_LED_CONFIG_LED_CFG_M 0xf
+
+#define RTL8367_PARA_LED_IO_EN1_REG 0x1b24
+#define RTL8367_PARA_LED_IO_EN2_REG 0x1b25
+#define RTL8367_PARA_LED_IO_EN_PMASK 0xff
+
+#define RTL8367_IA_CTRL_REG 0x1f00
+#define RTL8367_IA_CTRL_RW(_x) ((_x) << 1)
+#define RTL8367_IA_CTRL_RW_READ RTL8367_IA_CTRL_RW(0)
+#define RTL8367_IA_CTRL_RW_WRITE RTL8367_IA_CTRL_RW(1)
+#define RTL8367_IA_CTRL_CMD_MASK BIT(0)
+
+#define RTL8367_IA_STATUS_REG 0x1f01
+#define RTL8367_IA_STATUS_PHY_BUSY BIT(2)
+#define RTL8367_IA_STATUS_SDS_BUSY BIT(1)
+#define RTL8367_IA_STATUS_MDX_BUSY BIT(0)
+
+#define RTL8367_IA_ADDRESS_REG 0x1f02
+
+#define RTL8367_IA_WRITE_DATA_REG 0x1f03
+#define RTL8367_IA_READ_DATA_REG 0x1f04
+
+#define RTL8367_INTERNAL_PHY_REG(_a, _r) (0x2000 + 32 * (_a) + (_r))
+
+#define RTL8367_CPU_PORT_NUM 9
+#define RTL8367_NUM_PORTS 10
+#define RTL8367_NUM_VLANS 32
+#define RTL8367_NUM_LEDGROUPS 4
+#define RTL8367_NUM_VIDS 4096
+#define RTL8367_PRIORITYMAX 7
+#define RTL8367_FIDMAX 7
+
+#define RTL8367_PORT_0 BIT(0)
+#define RTL8367_PORT_1 BIT(1)
+#define RTL8367_PORT_2 BIT(2)
+#define RTL8367_PORT_3 BIT(3)
+#define RTL8367_PORT_4 BIT(4)
+#define RTL8367_PORT_5 BIT(5)
+#define RTL8367_PORT_6 BIT(6)
+#define RTL8367_PORT_7 BIT(7)
+#define RTL8367_PORT_E1 BIT(8) /* external port 1 */
+#define RTL8367_PORT_E0 BIT(9) /* external port 0 */
+
+#define RTL8367_PORTS_ALL \
+ (RTL8367_PORT_0 | RTL8367_PORT_1 | RTL8367_PORT_2 | \
+ RTL8367_PORT_3 | RTL8367_PORT_4 | RTL8367_PORT_5 | \
+ RTL8367_PORT_6 | RTL8367_PORT_7 | RTL8367_PORT_E1 | \
+ RTL8367_PORT_E0)
+
+#define RTL8367_PORTS_ALL_BUT_CPU \
+ (RTL8367_PORT_0 | RTL8367_PORT_1 | RTL8367_PORT_2 | \
+ RTL8367_PORT_3 | RTL8367_PORT_4 | RTL8367_PORT_5 | \
+ RTL8367_PORT_6 | RTL8367_PORT_7 | RTL8367_PORT_E1)
+
+struct rtl8367_initval {
+ u16 reg;
+ u16 val;
+};
+
+static struct rtl8366_mib_counter rtl8367_mib_counters[] = {
+ { 0, 0, 4, "IfInOctets" },
+ { 0, 4, 2, "Dot3StatsFCSErrors" },
+ { 0, 6, 2, "Dot3StatsSymbolErrors" },
+ { 0, 8, 2, "Dot3InPauseFrames" },
+ { 0, 10, 2, "Dot3ControlInUnknownOpcodes" },
+ { 0, 12, 2, "EtherStatsFragments" },
+ { 0, 14, 2, "EtherStatsJabbers" },
+ { 0, 16, 2, "IfInUcastPkts" },
+ { 0, 18, 2, "EtherStatsDropEvents" },
+ { 0, 20, 4, "EtherStatsOctets" },
+
+ { 0, 24, 2, "EtherStatsUnderSizePkts" },
+ { 0, 26, 2, "EtherOversizeStats" },
+ { 0, 28, 2, "EtherStatsPkts64Octets" },
+ { 0, 30, 2, "EtherStatsPkts65to127Octets" },
+ { 0, 32, 2, "EtherStatsPkts128to255Octets" },
+ { 0, 34, 2, "EtherStatsPkts256to511Octets" },
+ { 0, 36, 2, "EtherStatsPkts512to1023Octets" },
+ { 0, 38, 2, "EtherStatsPkts1024to1518Octets" },
+ { 0, 40, 2, "EtherStatsMulticastPkts" },
+ { 0, 42, 2, "EtherStatsBroadcastPkts" },
+
+ { 0, 44, 4, "IfOutOctets" },
+
+ { 0, 48, 2, "Dot3StatsSingleCollisionFrames" },
+ { 0, 50, 2, "Dot3StatMultipleCollisionFrames" },
+ { 0, 52, 2, "Dot3sDeferredTransmissions" },
+ { 0, 54, 2, "Dot3StatsLateCollisions" },
+ { 0, 56, 2, "EtherStatsCollisions" },
+ { 0, 58, 2, "Dot3StatsExcessiveCollisions" },
+ { 0, 60, 2, "Dot3OutPauseFrames" },
+ { 0, 62, 2, "Dot1dBasePortDelayExceededDiscards" },
+ { 0, 64, 2, "Dot1dTpPortInDiscards" },
+ { 0, 66, 2, "IfOutUcastPkts" },
+ { 0, 68, 2, "IfOutMulticastPkts" },
+ { 0, 70, 2, "IfOutBroadcastPkts" },
+ { 0, 72, 2, "OutOampduPkts" },
+ { 0, 74, 2, "InOampduPkts" },
+ { 0, 76, 2, "PktgenPkts" },
+};
+
+#define REG_RD(_smi, _reg, _val) \
+ do { \
+ err = rtl8366_smi_read_reg(_smi, _reg, _val); \
+ if (err) \
+ return err; \
+ } while (0)
+
+#define REG_WR(_smi, _reg, _val) \
+ do { \
+ err = rtl8366_smi_write_reg(_smi, _reg, _val); \
+ if (err) \
+ return err; \
+ } while (0)
+
+#define REG_RMW(_smi, _reg, _mask, _val) \
+ do { \
+ err = rtl8366_smi_rmwr(_smi, _reg, _mask, _val); \
+ if (err) \
+ return err; \
+ } while (0)
+
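+/*
+ * Opaque register/value initialization sequences; the suffixes presumably
+ * distinguish the chip revisions they apply to.
+ */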
+static const struct rtl8367_initval rtl8367_initvals_0_0[] = {
+ {0x133f, 0x0030}, {0x133e, 0x000e}, {0x221f, 0x0000}, {0x2215, 0x1006},
+ {0x221f, 0x0005}, {0x2200, 0x00c6}, {0x221f, 0x0007}, {0x221e, 0x0048},
+ {0x2215, 0x6412}, {0x2216, 0x6412}, {0x2217, 0x6412}, {0x2218, 0x6412},
+ {0x2219, 0x6412}, {0x221A, 0x6412}, {0x221f, 0x0001}, {0x220c, 0xdbf0},
+ {0x2209, 0x2576}, {0x2207, 0x287E}, {0x220A, 0x68E5}, {0x221D, 0x3DA4},
+ {0x221C, 0xE7F7}, {0x2214, 0x7F52}, {0x2218, 0x7FCE}, {0x2208, 0x04B7},
+ {0x2206, 0x4072}, {0x2210, 0xF05E}, {0x221B, 0xB414}, {0x221F, 0x0003},
+ {0x221A, 0x06A6}, {0x2210, 0xF05E}, {0x2213, 0x06EB}, {0x2212, 0xF4D2},
+ {0x220E, 0xE120}, {0x2200, 0x7C00}, {0x2202, 0x5FD0}, {0x220D, 0x0207},
+ {0x221f, 0x0002}, {0x2205, 0x0978}, {0x2202, 0x8C01}, {0x2207, 0x3620},
+ {0x221C, 0x0001}, {0x2203, 0x0420}, {0x2204, 0x80C8}, {0x133e, 0x0ede},
+ {0x221f, 0x0002}, {0x220c, 0x0073}, {0x220d, 0xEB65}, {0x220e, 0x51d1},
+ {0x220f, 0x5dcb}, {0x2210, 0x3044}, {0x2211, 0x1800}, {0x2212, 0x7E00},
+ {0x2213, 0x0000}, {0x133f, 0x0010}, {0x133e, 0x0ffe}, {0x207f, 0x0002},
+ {0x2074, 0x3D22}, {0x2075, 0x2000}, {0x2076, 0x6040}, {0x2077, 0x0000},
+ {0x2078, 0x0f0a}, {0x2079, 0x50AB}, {0x207a, 0x0000}, {0x207b, 0x0f0f},
+ {0x205f, 0x0002}, {0x2054, 0xFF00}, {0x2055, 0x000A}, {0x2056, 0x000A},
+ {0x2057, 0x0005}, {0x2058, 0x0005}, {0x2059, 0x0000}, {0x205A, 0x0005},
+ {0x205B, 0x0005}, {0x205C, 0x0005}, {0x209f, 0x0002}, {0x2094, 0x00AA},
+ {0x2095, 0x00AA}, {0x2096, 0x00AA}, {0x2097, 0x00AA}, {0x2098, 0x0055},
+ {0x2099, 0x00AA}, {0x209A, 0x00AA}, {0x209B, 0x00AA}, {0x1363, 0x8354},
+ {0x1270, 0x3333}, {0x1271, 0x3333}, {0x1272, 0x3333}, {0x1330, 0x00DB},
+ {0x1203, 0xff00}, {0x1200, 0x7fc4}, {0x121d, 0x1006}, {0x121e, 0x03e8},
+ {0x121f, 0x02b3}, {0x1220, 0x028f}, {0x1221, 0x029b}, {0x1222, 0x0277},
+ {0x1223, 0x02b3}, {0x1224, 0x028f}, {0x1225, 0x029b}, {0x1226, 0x0277},
+ {0x1227, 0x00c0}, {0x1228, 0x00b4}, {0x122f, 0x00c0}, {0x1230, 0x00b4},
+ {0x1229, 0x0020}, {0x122a, 0x000c}, {0x1231, 0x0030}, {0x1232, 0x0024},
+ {0x0219, 0x0032}, {0x0200, 0x03e8}, {0x0201, 0x03e8}, {0x0202, 0x03e8},
+ {0x0203, 0x03e8}, {0x0204, 0x03e8}, {0x0205, 0x03e8}, {0x0206, 0x03e8},
+ {0x0207, 0x03e8}, {0x0218, 0x0032}, {0x0208, 0x029b}, {0x0209, 0x029b},
+ {0x020a, 0x029b}, {0x020b, 0x029b}, {0x020c, 0x029b}, {0x020d, 0x029b},
+ {0x020e, 0x029b}, {0x020f, 0x029b}, {0x0210, 0x029b}, {0x0211, 0x029b},
+ {0x0212, 0x029b}, {0x0213, 0x029b}, {0x0214, 0x029b}, {0x0215, 0x029b},
+ {0x0216, 0x029b}, {0x0217, 0x029b}, {0x0900, 0x0000}, {0x0901, 0x0000},
+ {0x0902, 0x0000}, {0x0903, 0x0000}, {0x0865, 0x3210}, {0x087b, 0x0000},
+ {0x087c, 0xff00}, {0x087d, 0x0000}, {0x087e, 0x0000}, {0x0801, 0x0100},
+ {0x0802, 0x0100}, {0x1700, 0x014C}, {0x0301, 0x00FF}, {0x12AA, 0x0096},
+ {0x133f, 0x0030}, {0x133e, 0x000e}, {0x221f, 0x0005}, {0x2200, 0x00C4},
+ {0x221f, 0x0000}, {0x2210, 0x05EF}, {0x2204, 0x05E1}, {0x2200, 0x1340},
+ {0x133f, 0x0010}, {0x20A0, 0x1940}, {0x20C0, 0x1940}, {0x20E0, 0x1940},
+};
+
+static const struct rtl8367_initval rtl8367_initvals_0_1[] = {
+ {0x133f, 0x0030}, {0x133e, 0x000e}, {0x221f, 0x0000}, {0x2215, 0x1006},
+ {0x221f, 0x0005}, {0x2200, 0x00c6}, {0x221f, 0x0007}, {0x221e, 0x0048},
+ {0x2215, 0x6412}, {0x2216, 0x6412}, {0x2217, 0x6412}, {0x2218, 0x6412},
+ {0x2219, 0x6412}, {0x221A, 0x6412}, {0x221f, 0x0001}, {0x220c, 0xdbf0},
+ {0x2209, 0x2576}, {0x2207, 0x287E}, {0x220A, 0x68E5}, {0x221D, 0x3DA4},
+ {0x221C, 0xE7F7}, {0x2214, 0x7F52}, {0x2218, 0x7FCE}, {0x2208, 0x04B7},
+ {0x2206, 0x4072}, {0x2210, 0xF05E}, {0x221B, 0xB414}, {0x221F, 0x0003},
+ {0x221A, 0x06A6}, {0x2210, 0xF05E}, {0x2213, 0x06EB}, {0x2212, 0xF4D2},
+ {0x220E, 0xE120}, {0x2200, 0x7C00}, {0x2202, 0x5FD0}, {0x220D, 0x0207},
+ {0x221f, 0x0002}, {0x2205, 0x0978}, {0x2202, 0x8C01}, {0x2207, 0x3620},
+ {0x221C, 0x0001}, {0x2203, 0x0420}, {0x2204, 0x80C8}, {0x133e, 0x0ede},
+ {0x221f, 0x0002}, {0x220c, 0x0073}, {0x220d, 0xEB65}, {0x220e, 0x51d1},
+ {0x220f, 0x5dcb}, {0x2210, 0x3044}, {0x2211, 0x1800}, {0x2212, 0x7E00},
+ {0x2213, 0x0000}, {0x133f, 0x0010}, {0x133e, 0x0ffe}, {0x207f, 0x0002},
+ {0x2074, 0x3D22}, {0x2075, 0x2000}, {0x2076, 0x6040}, {0x2077, 0x0000},
+ {0x2078, 0x0f0a}, {0x2079, 0x50AB}, {0x207a, 0x0000}, {0x207b, 0x0f0f},
+ {0x205f, 0x0002}, {0x2054, 0xFF00}, {0x2055, 0x000A}, {0x2056, 0x000A},
+ {0x2057, 0x0005}, {0x2058, 0x0005}, {0x2059, 0x0000}, {0x205A, 0x0005},
+ {0x205B, 0x0005}, {0x205C, 0x0005}, {0x209f, 0x0002}, {0x2094, 0x00AA},
+ {0x2095, 0x00AA}, {0x2096, 0x00AA}, {0x2097, 0x00AA}, {0x2098, 0x0055},
+ {0x2099, 0x00AA}, {0x209A, 0x00AA}, {0x209B, 0x00AA}, {0x1363, 0x8354},
+ {0x1270, 0x3333}, {0x1271, 0x3333}, {0x1272, 0x3333}, {0x1330, 0x00DB},
+ {0x1203, 0xff00}, {0x1200, 0x7fc4}, {0x121d, 0x1b06}, {0x121e, 0x07f0},
+ {0x121f, 0x0438}, {0x1220, 0x040f}, {0x1221, 0x040f}, {0x1222, 0x03eb},
+ {0x1223, 0x0438}, {0x1224, 0x040f}, {0x1225, 0x040f}, {0x1226, 0x03eb},
+ {0x1227, 0x0144}, {0x1228, 0x0138}, {0x122f, 0x0144}, {0x1230, 0x0138},
+ {0x1229, 0x0020}, {0x122a, 0x000c}, {0x1231, 0x0030}, {0x1232, 0x0024},
+ {0x0219, 0x0032}, {0x0200, 0x07d0}, {0x0201, 0x07d0}, {0x0202, 0x07d0},
+ {0x0203, 0x07d0}, {0x0204, 0x07d0}, {0x0205, 0x07d0}, {0x0206, 0x07d0},
+ {0x0207, 0x07d0}, {0x0218, 0x0032}, {0x0208, 0x0190}, {0x0209, 0x0190},
+ {0x020a, 0x0190}, {0x020b, 0x0190}, {0x020c, 0x0190}, {0x020d, 0x0190},
+ {0x020e, 0x0190}, {0x020f, 0x0190}, {0x0210, 0x0190}, {0x0211, 0x0190},
+ {0x0212, 0x0190}, {0x0213, 0x0190}, {0x0214, 0x0190}, {0x0215, 0x0190},
+ {0x0216, 0x0190}, {0x0217, 0x0190}, {0x0900, 0x0000}, {0x0901, 0x0000},
+ {0x0902, 0x0000}, {0x0903, 0x0000}, {0x0865, 0x3210}, {0x087b, 0x0000},
+ {0x087c, 0xff00}, {0x087d, 0x0000}, {0x087e, 0x0000}, {0x0801, 0x0100},
+ {0x0802, 0x0100}, {0x1700, 0x0125}, {0x0301, 0x00FF}, {0x12AA, 0x0096},
+ {0x133f, 0x0030}, {0x133e, 0x000e}, {0x221f, 0x0005}, {0x2200, 0x00C4},
+ {0x221f, 0x0000}, {0x2210, 0x05EF}, {0x2204, 0x05E1}, {0x2200, 0x1340},
+ {0x133f, 0x0010},
+};
+
+static const struct rtl8367_initval rtl8367_initvals_1_0[] = {
+ {0x1B24, 0x0000}, {0x1B25, 0x0000}, {0x1B26, 0x0000}, {0x1B27, 0x0000},
+ {0x207F, 0x0002}, {0x2079, 0x0200}, {0x207F, 0x0000}, {0x133F, 0x0030},
+ {0x133E, 0x000E}, {0x221F, 0x0005}, {0x2201, 0x0700}, {0x2205, 0x8B82},
+ {0x2206, 0x05CB}, {0x221F, 0x0002}, {0x2204, 0x80C2}, {0x2205, 0x0938},
+ {0x221F, 0x0003}, {0x2212, 0xC4D2}, {0x220D, 0x0207}, {0x221F, 0x0001},
+ {0x2207, 0x267E}, {0x221C, 0xE5F7}, {0x221B, 0x0424}, {0x221F, 0x0007},
+ {0x221E, 0x0040}, {0x2218, 0x0000}, {0x221F, 0x0007}, {0x221E, 0x002C},
+ {0x2218, 0x008B}, {0x221F, 0x0005}, {0x2205, 0xFFF6}, {0x2206, 0x0080},
+ {0x2205, 0x8000}, {0x2206, 0xF8E0}, {0x2206, 0xE000}, {0x2206, 0xE1E0},
+ {0x2206, 0x01AC}, {0x2206, 0x2408}, {0x2206, 0xE08B}, {0x2206, 0x84F7},
+ {0x2206, 0x20E4}, {0x2206, 0x8B84}, {0x2206, 0xFC05}, {0x2206, 0xF8FA},
+ {0x2206, 0xEF69}, {0x2206, 0xE08B}, {0x2206, 0x86AC}, {0x2206, 0x201A},
+ {0x2206, 0xBF80}, {0x2206, 0x59D0}, {0x2206, 0x2402}, {0x2206, 0x803D},
+ {0x2206, 0xE0E0}, {0x2206, 0xE4E1}, {0x2206, 0xE0E5}, {0x2206, 0x5806},
+ {0x2206, 0x68C0}, {0x2206, 0xD1D2}, {0x2206, 0xE4E0}, {0x2206, 0xE4E5},
+ {0x2206, 0xE0E5}, {0x2206, 0xEF96}, {0x2206, 0xFEFC}, {0x2206, 0x05FB},
+ {0x2206, 0x0BFB}, {0x2206, 0x58FF}, {0x2206, 0x9E11}, {0x2206, 0x06F0},
+ {0x2206, 0x0C81}, {0x2206, 0x8AE0}, {0x2206, 0x0019}, {0x2206, 0x1B89},
+ {0x2206, 0xCFEB}, {0x2206, 0x19EB}, {0x2206, 0x19B0}, {0x2206, 0xEFFF},
+ {0x2206, 0x0BFF}, {0x2206, 0x0425}, {0x2206, 0x0807}, {0x2206, 0x2640},
+ {0x2206, 0x7227}, {0x2206, 0x267E}, {0x2206, 0x2804}, {0x2206, 0xB729},
+ {0x2206, 0x2576}, {0x2206, 0x2A68}, {0x2206, 0xE52B}, {0x2206, 0xAD00},
+ {0x2206, 0x2CDB}, {0x2206, 0xF02D}, {0x2206, 0x67BB}, {0x2206, 0x2E7B},
+ {0x2206, 0x0F2F}, {0x2206, 0x7365}, {0x2206, 0x31AC}, {0x2206, 0xCC32},
+ {0x2206, 0x2300}, {0x2206, 0x332D}, {0x2206, 0x1734}, {0x2206, 0x7F52},
+ {0x2206, 0x3510}, {0x2206, 0x0036}, {0x2206, 0x0600}, {0x2206, 0x370C},
+ {0x2206, 0xC038}, {0x2206, 0x7FCE}, {0x2206, 0x3CE5}, {0x2206, 0xF73D},
+ {0x2206, 0x3DA4}, {0x2206, 0x6530}, {0x2206, 0x3E67}, {0x2206, 0x0053},
+ {0x2206, 0x69D2}, {0x2206, 0x0F6A}, {0x2206, 0x012C}, {0x2206, 0x6C2B},
+ {0x2206, 0x136E}, {0x2206, 0xE100}, {0x2206, 0x6F12}, {0x2206, 0xF771},
+ {0x2206, 0x006B}, {0x2206, 0x7306}, {0x2206, 0xEB74}, {0x2206, 0x94C7},
+ {0x2206, 0x7698}, {0x2206, 0x0A77}, {0x2206, 0x5000}, {0x2206, 0x788A},
+ {0x2206, 0x1579}, {0x2206, 0x7F6F}, {0x2206, 0x7A06}, {0x2206, 0xA600},
+ {0x2205, 0x8B90}, {0x2206, 0x8000}, {0x2205, 0x8B92}, {0x2206, 0x8000},
+ {0x2205, 0x8B94}, {0x2206, 0x8014}, {0x2208, 0xFFFA}, {0x2202, 0x3C65},
+ {0x2205, 0xFFF6}, {0x2206, 0x00F7}, {0x221F, 0x0000}, {0x221F, 0x0007},
+ {0x221E, 0x0042}, {0x2218, 0x0000}, {0x221E, 0x002D}, {0x2218, 0xF010},
+ {0x221E, 0x0020}, {0x2215, 0x0000}, {0x221E, 0x0023}, {0x2216, 0x8000},
+ {0x221F, 0x0000}, {0x133F, 0x0010}, {0x133E, 0x0FFE}, {0x1362, 0x0115},
+ {0x1363, 0x0002}, {0x1363, 0x0000}, {0x1306, 0x000C}, {0x1307, 0x000C},
+ {0x1303, 0x0067}, {0x1304, 0x4444}, {0x1203, 0xFF00}, {0x1200, 0x7FC4},
+ {0x121D, 0x7D16}, {0x121E, 0x03E8}, {0x121F, 0x024E}, {0x1220, 0x0230},
+ {0x1221, 0x0244}, {0x1222, 0x0226}, {0x1223, 0x024E}, {0x1224, 0x0230},
+ {0x1225, 0x0244}, {0x1226, 0x0226}, {0x1227, 0x00C0}, {0x1228, 0x00B4},
+ {0x122F, 0x00C0}, {0x1230, 0x00B4}, {0x0208, 0x03E8}, {0x0209, 0x03E8},
+ {0x020A, 0x03E8}, {0x020B, 0x03E8}, {0x020C, 0x03E8}, {0x020D, 0x03E8},
+ {0x020E, 0x03E8}, {0x020F, 0x03E8}, {0x0210, 0x03E8}, {0x0211, 0x03E8},
+ {0x0212, 0x03E8}, {0x0213, 0x03E8}, {0x0214, 0x03E8}, {0x0215, 0x03E8},
+ {0x0216, 0x03E8}, {0x0217, 0x03E8}, {0x0900, 0x0000}, {0x0901, 0x0000},
+ {0x0902, 0x0000}, {0x0903, 0x0000}, {0x0865, 0x3210}, {0x087B, 0x0000},
+ {0x087C, 0xFF00}, {0x087D, 0x0000}, {0x087E, 0x0000}, {0x0801, 0x0100},
+ {0x0802, 0x0100}, {0x0A20, 0x2040}, {0x0A21, 0x2040}, {0x0A22, 0x2040},
+ {0x0A23, 0x2040}, {0x0A24, 0x2040}, {0x0A28, 0x2040}, {0x0A29, 0x2040},
+ {0x133F, 0x0030}, {0x133E, 0x000E}, {0x221F, 0x0000}, {0x2200, 0x1340},
+ {0x221F, 0x0000}, {0x133F, 0x0010}, {0x133E, 0x0FFE}, {0x20A0, 0x1940},
+ {0x20C0, 0x1940}, {0x20E0, 0x1940}, {0x130c, 0x0050},
+};
+
+static const struct rtl8367_initval rtl8367_initvals_1_1[] = {
+ {0x1B24, 0x0000}, {0x1B25, 0x0000}, {0x1B26, 0x0000}, {0x1B27, 0x0000},
+ {0x207F, 0x0002}, {0x2079, 0x0200}, {0x207F, 0x0000}, {0x133F, 0x0030},
+ {0x133E, 0x000E}, {0x221F, 0x0005}, {0x2201, 0x0700}, {0x2205, 0x8B82},
+ {0x2206, 0x05CB}, {0x221F, 0x0002}, {0x2204, 0x80C2}, {0x2205, 0x0938},
+ {0x221F, 0x0003}, {0x2212, 0xC4D2}, {0x220D, 0x0207}, {0x221F, 0x0001},
+ {0x2207, 0x267E}, {0x221C, 0xE5F7}, {0x221B, 0x0424}, {0x221F, 0x0007},
+ {0x221E, 0x0040}, {0x2218, 0x0000}, {0x221F, 0x0007}, {0x221E, 0x002C},
+ {0x2218, 0x008B}, {0x221F, 0x0005}, {0x2205, 0xFFF6}, {0x2206, 0x0080},
+ {0x2205, 0x8000}, {0x2206, 0xF8E0}, {0x2206, 0xE000}, {0x2206, 0xE1E0},
+ {0x2206, 0x01AC}, {0x2206, 0x2408}, {0x2206, 0xE08B}, {0x2206, 0x84F7},
+ {0x2206, 0x20E4}, {0x2206, 0x8B84}, {0x2206, 0xFC05}, {0x2206, 0xF8FA},
+ {0x2206, 0xEF69}, {0x2206, 0xE08B}, {0x2206, 0x86AC}, {0x2206, 0x201A},
+ {0x2206, 0xBF80}, {0x2206, 0x59D0}, {0x2206, 0x2402}, {0x2206, 0x803D},
+ {0x2206, 0xE0E0}, {0x2206, 0xE4E1}, {0x2206, 0xE0E5}, {0x2206, 0x5806},
+ {0x2206, 0x68C0}, {0x2206, 0xD1D2}, {0x2206, 0xE4E0}, {0x2206, 0xE4E5},
+ {0x2206, 0xE0E5}, {0x2206, 0xEF96}, {0x2206, 0xFEFC}, {0x2206, 0x05FB},
+ {0x2206, 0x0BFB}, {0x2206, 0x58FF}, {0x2206, 0x9E11}, {0x2206, 0x06F0},
+ {0x2206, 0x0C81}, {0x2206, 0x8AE0}, {0x2206, 0x0019}, {0x2206, 0x1B89},
+ {0x2206, 0xCFEB}, {0x2206, 0x19EB}, {0x2206, 0x19B0}, {0x2206, 0xEFFF},
+ {0x2206, 0x0BFF}, {0x2206, 0x0425}, {0x2206, 0x0807}, {0x2206, 0x2640},
+ {0x2206, 0x7227}, {0x2206, 0x267E}, {0x2206, 0x2804}, {0x2206, 0xB729},
+ {0x2206, 0x2576}, {0x2206, 0x2A68}, {0x2206, 0xE52B}, {0x2206, 0xAD00},
+ {0x2206, 0x2CDB}, {0x2206, 0xF02D}, {0x2206, 0x67BB}, {0x2206, 0x2E7B},
+ {0x2206, 0x0F2F}, {0x2206, 0x7365}, {0x2206, 0x31AC}, {0x2206, 0xCC32},
+ {0x2206, 0x2300}, {0x2206, 0x332D}, {0x2206, 0x1734}, {0x2206, 0x7F52},
+ {0x2206, 0x3510}, {0x2206, 0x0036}, {0x2206, 0x0600}, {0x2206, 0x370C},
+ {0x2206, 0xC038}, {0x2206, 0x7FCE}, {0x2206, 0x3CE5}, {0x2206, 0xF73D},
+ {0x2206, 0x3DA4}, {0x2206, 0x6530}, {0x2206, 0x3E67}, {0x2206, 0x0053},
+ {0x2206, 0x69D2}, {0x2206, 0x0F6A}, {0x2206, 0x012C}, {0x2206, 0x6C2B},
+ {0x2206, 0x136E}, {0x2206, 0xE100}, {0x2206, 0x6F12}, {0x2206, 0xF771},
+ {0x2206, 0x006B}, {0x2206, 0x7306}, {0x2206, 0xEB74}, {0x2206, 0x94C7},
+ {0x2206, 0x7698}, {0x2206, 0x0A77}, {0x2206, 0x5000}, {0x2206, 0x788A},
+ {0x2206, 0x1579}, {0x2206, 0x7F6F}, {0x2206, 0x7A06}, {0x2206, 0xA600},
+ {0x2205, 0x8B90}, {0x2206, 0x8000}, {0x2205, 0x8B92}, {0x2206, 0x8000},
+ {0x2205, 0x8B94}, {0x2206, 0x8014}, {0x2208, 0xFFFA}, {0x2202, 0x3C65},
+ {0x2205, 0xFFF6}, {0x2206, 0x00F7}, {0x221F, 0x0000}, {0x221F, 0x0007},
+ {0x221E, 0x0042}, {0x2218, 0x0000}, {0x221E, 0x002D}, {0x2218, 0xF010},
+ {0x221E, 0x0020}, {0x2215, 0x0000}, {0x221E, 0x0023}, {0x2216, 0x8000},
+ {0x221F, 0x0000}, {0x133F, 0x0010}, {0x133E, 0x0FFE}, {0x1362, 0x0115},
+ {0x1363, 0x0002}, {0x1363, 0x0000}, {0x1306, 0x000C}, {0x1307, 0x000C},
+ {0x1303, 0x0067}, {0x1304, 0x4444}, {0x1203, 0xFF00}, {0x1200, 0x7FC4},
+ {0x0900, 0x0000}, {0x0901, 0x0000}, {0x0902, 0x0000}, {0x0903, 0x0000},
+ {0x0865, 0x3210}, {0x087B, 0x0000}, {0x087C, 0xFF00}, {0x087D, 0x0000},
+ {0x087E, 0x0000}, {0x0801, 0x0100}, {0x0802, 0x0100}, {0x0A20, 0x2040},
+ {0x0A21, 0x2040}, {0x0A22, 0x2040}, {0x0A23, 0x2040}, {0x0A24, 0x2040},
+ {0x0A25, 0x2040}, {0x0A26, 0x2040}, {0x0A27, 0x2040}, {0x0A28, 0x2040},
+ {0x0A29, 0x2040}, {0x133F, 0x0030}, {0x133E, 0x000E}, {0x221F, 0x0000},
+ {0x2200, 0x1340}, {0x221F, 0x0000}, {0x133F, 0x0010}, {0x133E, 0x0FFE},
+ {0x1B03, 0x0876},
+};
+
+static const struct rtl8367_initval rtl8367_initvals_2_0[] = {
+ {0x1b24, 0x0000}, {0x1b25, 0x0000}, {0x1b26, 0x0000}, {0x1b27, 0x0000},
+ {0x133f, 0x0030}, {0x133e, 0x000e}, {0x221f, 0x0007}, {0x221e, 0x0048},
+ {0x2219, 0x4012}, {0x221f, 0x0003}, {0x2201, 0x3554}, {0x2202, 0x63e8},
+ {0x2203, 0x99c2}, {0x2204, 0x0113}, {0x2205, 0x303e}, {0x220d, 0x0207},
+ {0x220e, 0xe100}, {0x221f, 0x0007}, {0x221e, 0x0040}, {0x2218, 0x0000},
+ {0x221f, 0x0007}, {0x221e, 0x002c}, {0x2218, 0x008b}, {0x221f, 0x0005},
+ {0x2205, 0xfff6}, {0x2206, 0x0080}, {0x221f, 0x0005}, {0x2205, 0x8000},
+ {0x2206, 0x0280}, {0x2206, 0x2bf7}, {0x2206, 0x00e0}, {0x2206, 0xfff7},
+ {0x2206, 0xa080}, {0x2206, 0x02ae}, {0x2206, 0xf602}, {0x2206, 0x804e},
+ {0x2206, 0x0201}, {0x2206, 0x5002}, {0x2206, 0x0163}, {0x2206, 0x0201},
+ {0x2206, 0x79e0}, {0x2206, 0x8b8c}, {0x2206, 0xe18b}, {0x2206, 0x8d1e},
+ {0x2206, 0x01e1}, {0x2206, 0x8b8e}, {0x2206, 0x1e01}, {0x2206, 0xa000},
+ {0x2206, 0xe4ae}, {0x2206, 0xd8bf}, {0x2206, 0x8b88}, {0x2206, 0xec00},
+ {0x2206, 0x19a9}, {0x2206, 0x8b90}, {0x2206, 0xf9ee}, {0x2206, 0xfff6},
+ {0x2206, 0x00ee}, {0x2206, 0xfff7}, {0x2206, 0xfce0}, {0x2206, 0xe140},
+ {0x2206, 0xe1e1}, {0x2206, 0x41f7}, {0x2206, 0x2ff6}, {0x2206, 0x28e4},
+ {0x2206, 0xe140}, {0x2206, 0xe5e1}, {0x2206, 0x4104}, {0x2206, 0xf8fa},
+ {0x2206, 0xef69}, {0x2206, 0xe08b}, {0x2206, 0x86ac}, {0x2206, 0x201a},
+ {0x2206, 0xbf80}, {0x2206, 0x77d0}, {0x2206, 0x6c02}, {0x2206, 0x2978},
+ {0x2206, 0xe0e0}, {0x2206, 0xe4e1}, {0x2206, 0xe0e5}, {0x2206, 0x5806},
+ {0x2206, 0x68c0}, {0x2206, 0xd1d2}, {0x2206, 0xe4e0}, {0x2206, 0xe4e5},
+ {0x2206, 0xe0e5}, {0x2206, 0xef96}, {0x2206, 0xfefc}, {0x2206, 0x0425},
+ {0x2206, 0x0807}, {0x2206, 0x2640}, {0x2206, 0x7227}, {0x2206, 0x267e},
+ {0x2206, 0x2804}, {0x2206, 0xb729}, {0x2206, 0x2576}, {0x2206, 0x2a68},
+ {0x2206, 0xe52b}, {0x2206, 0xad00}, {0x2206, 0x2cdb}, {0x2206, 0xf02d},
+ {0x2206, 0x67bb}, {0x2206, 0x2e7b}, {0x2206, 0x0f2f}, {0x2206, 0x7365},
+ {0x2206, 0x31ac}, {0x2206, 0xcc32}, {0x2206, 0x2300}, {0x2206, 0x332d},
+ {0x2206, 0x1734}, {0x2206, 0x7f52}, {0x2206, 0x3510}, {0x2206, 0x0036},
+ {0x2206, 0x0600}, {0x2206, 0x370c}, {0x2206, 0xc038}, {0x2206, 0x7fce},
+ {0x2206, 0x3ce5}, {0x2206, 0xf73d}, {0x2206, 0x3da4}, {0x2206, 0x6530},
+ {0x2206, 0x3e67}, {0x2206, 0x0053}, {0x2206, 0x69d2}, {0x2206, 0x0f6a},
+ {0x2206, 0x012c}, {0x2206, 0x6c2b}, {0x2206, 0x136e}, {0x2206, 0xe100},
+ {0x2206, 0x6f12}, {0x2206, 0xf771}, {0x2206, 0x006b}, {0x2206, 0x7306},
+ {0x2206, 0xeb74}, {0x2206, 0x94c7}, {0x2206, 0x7698}, {0x2206, 0x0a77},
+ {0x2206, 0x5000}, {0x2206, 0x788a}, {0x2206, 0x1579}, {0x2206, 0x7f6f},
+ {0x2206, 0x7a06}, {0x2206, 0xa600}, {0x2201, 0x0701}, {0x2200, 0x0405},
+ {0x221f, 0x0000}, {0x2200, 0x1340}, {0x221f, 0x0000}, {0x133f, 0x0010},
+ {0x133e, 0x0ffe}, {0x1203, 0xff00}, {0x1200, 0x7fc4}, {0x121d, 0x7D16},
+ {0x121e, 0x03e8}, {0x121f, 0x024e}, {0x1220, 0x0230}, {0x1221, 0x0244},
+ {0x1222, 0x0226}, {0x1223, 0x024e}, {0x1224, 0x0230}, {0x1225, 0x0244},
+ {0x1226, 0x0226}, {0x1227, 0x00c0}, {0x1228, 0x00b4}, {0x122f, 0x00c0},
+ {0x1230, 0x00b4}, {0x0208, 0x03e8}, {0x0209, 0x03e8}, {0x020a, 0x03e8},
+ {0x020b, 0x03e8}, {0x020c, 0x03e8}, {0x020d, 0x03e8}, {0x020e, 0x03e8},
+ {0x020f, 0x03e8}, {0x0210, 0x03e8}, {0x0211, 0x03e8}, {0x0212, 0x03e8},
+ {0x0213, 0x03e8}, {0x0214, 0x03e8}, {0x0215, 0x03e8}, {0x0216, 0x03e8},
+ {0x0217, 0x03e8}, {0x0900, 0x0000}, {0x0901, 0x0000}, {0x0902, 0x0000},
+ {0x0903, 0x0000}, {0x0865, 0x3210}, {0x087b, 0x0000}, {0x087c, 0xff00},
+ {0x087d, 0x0000}, {0x087e, 0x0000}, {0x0801, 0x0100}, {0x0802, 0x0100},
+ {0x0A20, 0x2040}, {0x0A21, 0x2040}, {0x0A22, 0x2040}, {0x0A23, 0x2040},
+ {0x0A24, 0x2040}, {0x0A28, 0x2040}, {0x0A29, 0x2040}, {0x20A0, 0x1940},
+ {0x20C0, 0x1940}, {0x20E0, 0x1940}, {0x130c, 0x0050},
+};
+
+static const struct rtl8367_initval rtl8367_initvals_2_1[] = {
+ {0x1b24, 0x0000}, {0x1b25, 0x0000}, {0x1b26, 0x0000}, {0x1b27, 0x0000},
+ {0x133f, 0x0030}, {0x133e, 0x000e}, {0x221f, 0x0007}, {0x221e, 0x0048},
+ {0x2219, 0x4012}, {0x221f, 0x0003}, {0x2201, 0x3554}, {0x2202, 0x63e8},
+ {0x2203, 0x99c2}, {0x2204, 0x0113}, {0x2205, 0x303e}, {0x220d, 0x0207},
+ {0x220e, 0xe100}, {0x221f, 0x0007}, {0x221e, 0x0040}, {0x2218, 0x0000},
+ {0x221f, 0x0007}, {0x221e, 0x002c}, {0x2218, 0x008b}, {0x221f, 0x0005},
+ {0x2205, 0xfff6}, {0x2206, 0x0080}, {0x221f, 0x0005}, {0x2205, 0x8000},
+ {0x2206, 0x0280}, {0x2206, 0x2bf7}, {0x2206, 0x00e0}, {0x2206, 0xfff7},
+ {0x2206, 0xa080}, {0x2206, 0x02ae}, {0x2206, 0xf602}, {0x2206, 0x804e},
+ {0x2206, 0x0201}, {0x2206, 0x5002}, {0x2206, 0x0163}, {0x2206, 0x0201},
+ {0x2206, 0x79e0}, {0x2206, 0x8b8c}, {0x2206, 0xe18b}, {0x2206, 0x8d1e},
+ {0x2206, 0x01e1}, {0x2206, 0x8b8e}, {0x2206, 0x1e01}, {0x2206, 0xa000},
+ {0x2206, 0xe4ae}, {0x2206, 0xd8bf}, {0x2206, 0x8b88}, {0x2206, 0xec00},
+ {0x2206, 0x19a9}, {0x2206, 0x8b90}, {0x2206, 0xf9ee}, {0x2206, 0xfff6},
+ {0x2206, 0x00ee}, {0x2206, 0xfff7}, {0x2206, 0xfce0}, {0x2206, 0xe140},
+ {0x2206, 0xe1e1}, {0x2206, 0x41f7}, {0x2206, 0x2ff6}, {0x2206, 0x28e4},
+ {0x2206, 0xe140}, {0x2206, 0xe5e1}, {0x2206, 0x4104}, {0x2206, 0xf8fa},
+ {0x2206, 0xef69}, {0x2206, 0xe08b}, {0x2206, 0x86ac}, {0x2206, 0x201a},
+ {0x2206, 0xbf80}, {0x2206, 0x77d0}, {0x2206, 0x6c02}, {0x2206, 0x2978},
+ {0x2206, 0xe0e0}, {0x2206, 0xe4e1}, {0x2206, 0xe0e5}, {0x2206, 0x5806},
+ {0x2206, 0x68c0}, {0x2206, 0xd1d2}, {0x2206, 0xe4e0}, {0x2206, 0xe4e5},
+ {0x2206, 0xe0e5}, {0x2206, 0xef96}, {0x2206, 0xfefc}, {0x2206, 0x0425},
+ {0x2206, 0x0807}, {0x2206, 0x2640}, {0x2206, 0x7227}, {0x2206, 0x267e},
+ {0x2206, 0x2804}, {0x2206, 0xb729}, {0x2206, 0x2576}, {0x2206, 0x2a68},
+ {0x2206, 0xe52b}, {0x2206, 0xad00}, {0x2206, 0x2cdb}, {0x2206, 0xf02d},
+ {0x2206, 0x67bb}, {0x2206, 0x2e7b}, {0x2206, 0x0f2f}, {0x2206, 0x7365},
+ {0x2206, 0x31ac}, {0x2206, 0xcc32}, {0x2206, 0x2300}, {0x2206, 0x332d},
+ {0x2206, 0x1734}, {0x2206, 0x7f52}, {0x2206, 0x3510}, {0x2206, 0x0036},
+ {0x2206, 0x0600}, {0x2206, 0x370c}, {0x2206, 0xc038}, {0x2206, 0x7fce},
+ {0x2206, 0x3ce5}, {0x2206, 0xf73d}, {0x2206, 0x3da4}, {0x2206, 0x6530},
+ {0x2206, 0x3e67}, {0x2206, 0x0053}, {0x2206, 0x69d2}, {0x2206, 0x0f6a},
+ {0x2206, 0x012c}, {0x2206, 0x6c2b}, {0x2206, 0x136e}, {0x2206, 0xe100},
+ {0x2206, 0x6f12}, {0x2206, 0xf771}, {0x2206, 0x006b}, {0x2206, 0x7306},
+ {0x2206, 0xeb74}, {0x2206, 0x94c7}, {0x2206, 0x7698}, {0x2206, 0x0a77},
+ {0x2206, 0x5000}, {0x2206, 0x788a}, {0x2206, 0x1579}, {0x2206, 0x7f6f},
+ {0x2206, 0x7a06}, {0x2206, 0xa600}, {0x2201, 0x0701}, {0x2200, 0x0405},
+ {0x221f, 0x0000}, {0x2200, 0x1340}, {0x221f, 0x0000}, {0x133f, 0x0010},
+ {0x133e, 0x0ffe}, {0x1203, 0xff00}, {0x1200, 0x7fc4}, {0x0900, 0x0000},
+ {0x0901, 0x0000}, {0x0902, 0x0000}, {0x0903, 0x0000}, {0x0865, 0x3210},
+ {0x087b, 0x0000}, {0x087c, 0xff00}, {0x087d, 0x0000}, {0x087e, 0x0000},
+ {0x0801, 0x0100}, {0x0802, 0x0100}, {0x0A20, 0x2040}, {0x0A21, 0x2040},
+ {0x0A22, 0x2040}, {0x0A23, 0x2040}, {0x0A24, 0x2040}, {0x0A25, 0x2040},
+ {0x0A26, 0x2040}, {0x0A27, 0x2040}, {0x0A28, 0x2040}, {0x0A29, 0x2040},
+ {0x130c, 0x0050},
+};
+
+static int rtl8367_write_initvals(struct rtl8366_smi *smi,
+ const struct rtl8367_initval *initvals,
+ int count)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < count; i++)
+ REG_WR(smi, initvals[i].reg, initvals[i].val);
+
+ return 0;
+}
+
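+/*
+ * Indirect access to the internal PHY registers: write the encoded PHY
+ * address to RTL8367_IA_ADDRESS_REG, issue a read or write command via
+ * RTL8367_IA_CTRL_REG, then poll RTL8367_IA_STATUS_PHY_BUSY until the
+ * operation completes. Read results are returned in
+ * RTL8367_IA_READ_DATA_REG.
+ */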
+static int rtl8367_read_phy_reg(struct rtl8366_smi *smi,
+ u32 phy_addr, u32 phy_reg, u32 *val)
+{
+ int timeout;
+ u32 data;
+ int err;
+
+ if (phy_addr > RTL8367_PHY_ADDR_MAX)
+ return -EINVAL;
+
+ if (phy_reg > RTL8367_PHY_REG_MAX)
+ return -EINVAL;
+
+ REG_RD(smi, RTL8367_IA_STATUS_REG, &data);
+ if (data & RTL8367_IA_STATUS_PHY_BUSY)
+ return -ETIMEDOUT;
+
+ /* prepare address */
+ REG_WR(smi, RTL8367_IA_ADDRESS_REG,
+ RTL8367_INTERNAL_PHY_REG(phy_addr, phy_reg));
+
+ /* send read command */
+ REG_WR(smi, RTL8367_IA_CTRL_REG,
+ RTL8367_IA_CTRL_CMD_MASK | RTL8367_IA_CTRL_RW_READ);
+
+ timeout = 5;
+ do {
+ REG_RD(smi, RTL8367_IA_STATUS_REG, &data);
+ if ((data & RTL8367_IA_STATUS_PHY_BUSY) == 0)
+ break;
+
+ if (!timeout--) {
+ dev_err(smi->parent, "phy read timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ udelay(1);
+ } while (1);
+
+ /* read data */
+ REG_RD(smi, RTL8367_IA_READ_DATA_REG, val);
+
+ dev_dbg(smi->parent, "phy_read: addr:%02x, reg:%02x, val:%04x\n",
+ phy_addr, phy_reg, *val);
+ return 0;
+}
+
+static int rtl8367_write_phy_reg(struct rtl8366_smi *smi,
+ u32 phy_addr, u32 phy_reg, u32 val)
+{
+ int timeout;
+ u32 data;
+ int err;
+
+ dev_dbg(smi->parent, "phy_write: addr:%02x, reg:%02x, val:%04x\n",
+ phy_addr, phy_reg, val);
+
+ if (phy_addr > RTL8367_PHY_ADDR_MAX)
+ return -EINVAL;
+
+ if (phy_reg > RTL8367_PHY_REG_MAX)
+ return -EINVAL;
+
+ REG_RD(smi, RTL8367_IA_STATUS_REG, &data);
+ if (data & RTL8367_IA_STATUS_PHY_BUSY)
+ return -ETIMEDOUT;
+
+ /* prepare data */
+ REG_WR(smi, RTL8367_IA_WRITE_DATA_REG, val);
+
+ /* prepare address */
+ REG_WR(smi, RTL8367_IA_ADDRESS_REG,
+ RTL8367_INTERNAL_PHY_REG(phy_addr, phy_reg));
+
+ /* send write command */
+ REG_WR(smi, RTL8367_IA_CTRL_REG,
+ RTL8367_IA_CTRL_CMD_MASK | RTL8367_IA_CTRL_RW_WRITE);
+
+ timeout = 5;
+ do {
+ REG_RD(smi, RTL8367_IA_STATUS_REG, &data);
+ if ((data & RTL8367_IA_STATUS_PHY_BUSY) == 0)
+ break;
+
+ if (!timeout--) {
+ dev_err(smi->parent, "phy write timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ udelay(1);
+ } while (1);
+
+ return 0;
+}
+
+static int rtl8367_init_regs0(struct rtl8366_smi *smi, unsigned mode)
+{
+ const struct rtl8367_initval *initvals;
+ int count;
+ int err;
+
+ switch (mode) {
+ case 0:
+ initvals = rtl8367_initvals_0_0;
+ count = ARRAY_SIZE(rtl8367_initvals_0_0);
+ break;
+
+ case 1:
+ case 2:
+ initvals = rtl8367_initvals_0_1;
+ count = ARRAY_SIZE(rtl8367_initvals_0_1);
+ break;
+
+ default:
+ dev_err(smi->parent, "%s: unknown mode %u\n", __func__, mode);
+ return -ENODEV;
+ }
+
+ err = rtl8367_write_initvals(smi, initvals, count);
+ if (err)
+ return err;
+
+ /* TODO: complete this */
+
+ return 0;
+}
+
+static int rtl8367_init_regs1(struct rtl8366_smi *smi, unsigned mode)
+{
+ const struct rtl8367_initval *initvals;
+ int count;
+
+ switch (mode) {
+ case 0:
+ initvals = rtl8367_initvals_1_0;
+ count = ARRAY_SIZE(rtl8367_initvals_1_0);
+ break;
+
+ case 1:
+ case 2:
+ initvals = rtl8367_initvals_1_1;
+ count = ARRAY_SIZE(rtl8367_initvals_1_1);
+ break;
+
+ default:
+ dev_err(smi->parent, "%s: unknown mode %u\n", __func__, mode);
+ return -ENODEV;
+ }
+
+ return rtl8367_write_initvals(smi, initvals, count);
+}
+
+static int rtl8367_init_regs2(struct rtl8366_smi *smi, unsigned mode)
+{
+ const struct rtl8367_initval *initvals;
+ int count;
+
+ switch (mode) {
+ case 0:
+ initvals = rtl8367_initvals_2_0;
+ count = ARRAY_SIZE(rtl8367_initvals_2_0);
+ break;
+
+ case 1:
+ case 2:
+ initvals = rtl8367_initvals_2_1;
+ count = ARRAY_SIZE(rtl8367_initvals_2_1);
+ break;
+
+ default:
+ dev_err(smi->parent, "%s: unknown mode %u\n", __func__, mode);
+ return -ENODEV;
+ }
+
+ return rtl8367_write_initvals(smi, initvals, count);
+}
+
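+/*
+ * Unlock the chip with the magic ID register, read the revision (rlvid)
+ * and bootstrap mode, then run the matching initialization table. For
+ * rlvid 1 an additional PHY register probe selects between the two
+ * remaining table sets.
+ */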
+static int rtl8367_init_regs(struct rtl8366_smi *smi)
+{
+ u32 data;
+ u32 rlvid;
+ u32 mode;
+ int err;
+
+ REG_WR(smi, RTL8367_RTL_MAGIC_ID_REG, RTL8367_RTL_MAGIC_ID_VAL);
+
+ REG_RD(smi, RTL8367_CHIP_VER_REG, &data);
+ rlvid = (data >> RTL8367_CHIP_VER_RLVID_SHIFT) &
+ RTL8367_CHIP_VER_RLVID_MASK;
+
+ REG_RD(smi, RTL8367_CHIP_MODE_REG, &data);
+ mode = data & RTL8367_CHIP_MODE_MASK;
+
+ switch (rlvid) {
+ case 0:
+ err = rtl8367_init_regs0(smi, mode);
+ break;
+
+ case 1:
+ err = rtl8367_write_phy_reg(smi, 0, 31, 5);
+ if (err)
+ break;
+
+ err = rtl8367_write_phy_reg(smi, 0, 5, 0x3ffe);
+ if (err)
+ break;
+
+ err = rtl8367_read_phy_reg(smi, 0, 6, &data);
+ if (err)
+ break;
+
+ if (data == 0x94eb) {
+ err = rtl8367_init_regs1(smi, mode);
+ } else if (data == 0x2104) {
+ err = rtl8367_init_regs2(smi, mode);
+ } else {
+ dev_err(smi->parent, "unknown phy data %04x\n", data);
+ return -ENODEV;
+ }
+
+ break;
+
+ default:
+ dev_err(smi->parent, "unknown rlvid %u\n", rlvid);
+ err = -ENODEV;
+ break;
+ }
+
+ return err;
+}
+
+static int rtl8367_reset_chip(struct rtl8366_smi *smi)
+{
+ int timeout = 10;
+ int err;
+ u32 data;
+
+ REG_WR(smi, RTL8367_CHIP_RESET_REG, RTL8367_CHIP_RESET_HW);
+ msleep(RTL8367_RESET_DELAY);
+
+ do {
+ REG_RD(smi, RTL8367_CHIP_RESET_REG, &data);
+ if (!(data & RTL8367_CHIP_RESET_HW))
+ break;
+
+ msleep(1);
+ } while (--timeout);
+
+ if (!timeout) {
+ dev_err(smi->parent, "chip reset timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int rtl8367_extif_set_mode(struct rtl8366_smi *smi, int id,
+ enum rtl8367_extif_mode mode)
+{
+ int err;
+
+ /* set port mode */
+ switch (mode) {
+ case RTL8367_EXTIF_MODE_RGMII:
+ case RTL8367_EXTIF_MODE_RGMII_33V:
+ REG_WR(smi, RTL8367_CHIP_DEBUG0_REG, 0x0367);
+ REG_WR(smi, RTL8367_CHIP_DEBUG1_REG, 0x7777);
+ break;
+
+ case RTL8367_EXTIF_MODE_TMII_MAC:
+ case RTL8367_EXTIF_MODE_TMII_PHY:
+ REG_RMW(smi, RTL8367_BYPASS_LINE_RATE_REG,
+ BIT((id + 1) % 2), BIT((id + 1) % 2));
+ break;
+
+ case RTL8367_EXTIF_MODE_GMII:
+ REG_RMW(smi, RTL8367_CHIP_DEBUG0_REG,
+ RTL8367_CHIP_DEBUG0_DUMMY0(id),
+ RTL8367_CHIP_DEBUG0_DUMMY0(id));
+ REG_RMW(smi, RTL8367_EXT_RGMXF_REG(id), BIT(6), BIT(6));
+ break;
+
+ case RTL8367_EXTIF_MODE_MII_MAC:
+ case RTL8367_EXTIF_MODE_MII_PHY:
+ case RTL8367_EXTIF_MODE_DISABLED:
+ REG_RMW(smi, RTL8367_BYPASS_LINE_RATE_REG,
+ BIT((id + 1) % 2), 0);
+ REG_RMW(smi, RTL8367_EXT_RGMXF_REG(id), BIT(6), 0);
+ break;
+
+ default:
+ dev_err(smi->parent,
+ "invalid mode for external interface %d\n", id);
+ return -EINVAL;
+ }
+
+ REG_RMW(smi, RTL8367_DIS_REG,
+ RTL8367_DIS_RGMII_MASK << RTL8367_DIS_RGMII_SHIFT(id),
+ mode << RTL8367_DIS_RGMII_SHIFT(id));
+
+ return 0;
+}
+
+static int rtl8367_extif_set_force(struct rtl8366_smi *smi, int id,
+ struct rtl8367_port_ability *pa)
+{
+ u32 mask;
+ u32 val;
+ int err;
+
+ mask = (RTL8367_DI_FORCE_MODE |
+ RTL8367_DI_FORCE_NWAY |
+ RTL8367_DI_FORCE_TXPAUSE |
+ RTL8367_DI_FORCE_RXPAUSE |
+ RTL8367_DI_FORCE_LINK |
+ RTL8367_DI_FORCE_DUPLEX |
+ RTL8367_DI_FORCE_SPEED_MASK);
+
+ val = pa->speed;
+ val |= pa->force_mode ? RTL8367_DI_FORCE_MODE : 0;
+ val |= pa->nway ? RTL8367_DI_FORCE_NWAY : 0;
+ val |= pa->txpause ? RTL8367_DI_FORCE_TXPAUSE : 0;
+ val |= pa->rxpause ? RTL8367_DI_FORCE_RXPAUSE : 0;
+ val |= pa->link ? RTL8367_DI_FORCE_LINK : 0;
+ val |= pa->duplex ? RTL8367_DI_FORCE_DUPLEX : 0;
+
+ REG_RMW(smi, RTL8367_DI_FORCE_REG(id), mask, val);
+
+ return 0;
+}
+
+static int rtl8367_extif_set_rgmii_delay(struct rtl8366_smi *smi, int id,
+ unsigned txdelay, unsigned rxdelay)
+{
+ u32 mask;
+ u32 val;
+ int err;
+
+ mask = (RTL8367_EXT_RGMXF_RXDELAY_MASK |
+ (RTL8367_EXT_RGMXF_TXDELAY_MASK <<
+ RTL8367_EXT_RGMXF_TXDELAY_SHIFT));
+
+ val = rxdelay;
+ val |= txdelay << RTL8367_EXT_RGMXF_TXDELAY_SHIFT;
+
+ REG_RMW(smi, RTL8367_EXT_RGMXF_REG(id), mask, val);
+
+ return 0;
+}
+
+static int rtl8367_extif_init(struct rtl8366_smi *smi, int id,
+ struct rtl8367_extif_config *cfg)
+{
+ enum rtl8367_extif_mode mode;
+ int err;
+
+ mode = (cfg) ? cfg->mode : RTL8367_EXTIF_MODE_DISABLED;
+
+ err = rtl8367_extif_set_mode(smi, id, mode);
+ if (err)
+ return err;
+
+ if (mode != RTL8367_EXTIF_MODE_DISABLED) {
+ err = rtl8367_extif_set_force(smi, id, &cfg->ability);
+ if (err)
+ return err;
+
+ err = rtl8367_extif_set_rgmii_delay(smi, id, cfg->txdelay,
+ cfg->rxdelay);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int rtl8367_led_group_set_ports(struct rtl8366_smi *smi,
+ unsigned int group, u16 port_mask)
+{
+ u32 reg;
+ u32 s;
+ int err;
+
+ port_mask &= RTL8367_PARA_LED_IO_EN_PMASK;
+ s = (group % 2) * 8;
+ reg = RTL8367_PARA_LED_IO_EN1_REG + (group / 2);
+
+ REG_RMW(smi, reg, (RTL8367_PARA_LED_IO_EN_PMASK << s), port_mask << s);
+
+ return 0;
+}
+
+static int rtl8367_led_group_set_mode(struct rtl8366_smi *smi,
+ unsigned int mode)
+{
+ u16 mask;
+ u16 set;
+ int err;
+
+ mode &= RTL8367_LED_CONFIG_DATA_M;
+
+ mask = (RTL8367_LED_CONFIG_DATA_M << RTL8367_LED_CONFIG_DATA_S) |
+ RTL8367_LED_CONFIG_SEL;
+ set = (mode << RTL8367_LED_CONFIG_DATA_S) | RTL8367_LED_CONFIG_SEL;
+
+ REG_RMW(smi, RTL8367_LED_CONFIG_REG, mask, set);
+
+ return 0;
+}
+
+static int rtl8367_led_group_set_config(struct rtl8366_smi *smi,
+ unsigned int led, unsigned int cfg)
+{
+ u16 mask;
+ u16 set;
+ int err;
+
+ mask = (RTL8367_LED_CONFIG_LED_CFG_M << (led * 4)) |
+ RTL8367_LED_CONFIG_SEL;
+ set = (cfg & RTL8367_LED_CONFIG_LED_CFG_M) << (led * 4);
+
+ REG_RMW(smi, RTL8367_LED_CONFIG_REG, mask, set);
+ return 0;
+}
+
+static int rtl8367_led_op_select_parallel(struct rtl8366_smi *smi)
+{
+ int err;
+
+ REG_WR(smi, RTL8367_LED_SYS_CONFIG_REG, 0x1472);
+ return 0;
+}
+
+static int rtl8367_led_blinkrate_set(struct rtl8366_smi *smi, unsigned int rate)
+{
+ u16 mask;
+ u16 set;
+ int err;
+
+ mask = RTL8367_LED_MODE_RATE_M << RTL8367_LED_MODE_RATE_S;
+ set = (rate & RTL8367_LED_MODE_RATE_M) << RTL8367_LED_MODE_RATE_S;
+ REG_RMW(smi, RTL8367_LED_MODE_REG, mask, set);
+
+ return 0;
+}
+
+static int rtl8367_setup(struct rtl8366_smi *smi)
+{
+ struct rtl8367_platform_data *pdata;
+ int err;
+ int i;
+
+ pdata = smi->parent->platform_data;
+
+ err = rtl8367_init_regs(smi);
+ if (err)
+ return err;
+
+ /* initialize external interfaces */
+ err = rtl8367_extif_init(smi, 0, pdata->extif0_cfg);
+ if (err)
+ return err;
+
+ err = rtl8367_extif_init(smi, 1, pdata->extif1_cfg);
+ if (err)
+ return err;
+
+ /* set maximum packet length to 1536 bytes */
+ REG_RMW(smi, RTL8367_SWC0_REG, RTL8367_SWC0_MAX_LENGTH_MASK,
+ RTL8367_SWC0_MAX_LENGTH_1536);
+
+ /*
+ * discard VLAN tagged packets if the port is not a member of
+ * the VLAN with which the packet is associated.
+ */
+ REG_WR(smi, RTL8367_VLAN_INGRESS_REG, RTL8367_PORTS_ALL);
+
+ /*
+ * Setup egress tag mode for each port.
+ */
+ for (i = 0; i < RTL8367_NUM_PORTS; i++)
+ REG_RMW(smi,
+ RTL8367_PORT_CFG_REG(i),
+ RTL8367_PORT_CFG_EGRESS_MODE_MASK <<
+ RTL8367_PORT_CFG_EGRESS_MODE_SHIFT,
+ RTL8367_PORT_CFG_EGRESS_MODE_ORIGINAL <<
+ RTL8367_PORT_CFG_EGRESS_MODE_SHIFT);
+
+ /* setup LEDs */
+ err = rtl8367_led_group_set_ports(smi, 0, RTL8367_PORTS_ALL);
+ if (err)
+ return err;
+
+ err = rtl8367_led_group_set_mode(smi, 0);
+ if (err)
+ return err;
+
+ err = rtl8367_led_op_select_parallel(smi);
+ if (err)
+ return err;
+
+ err = rtl8367_led_blinkrate_set(smi, 1);
+ if (err)
+ return err;
+
+ err = rtl8367_led_group_set_config(smi, 0, 2);
+ if (err)
+ return err;
+
+ return 0;
+}
+
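+/*
+ * MIB counters are read indirectly: write the counter address, check the
+ * busy/reset flags in the MIB control register, then fetch the value in
+ * 16-bit chunks from the counter registers, most significant word first.
+ */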
+static int rtl8367_get_mib_counter(struct rtl8366_smi *smi, int counter,
+ int port, unsigned long long *val)
+{
+ struct rtl8366_mib_counter *mib;
+ int offset;
+ int i;
+ int err;
+ u32 addr, data;
+ u64 mibvalue;
+
+ if (port > RTL8367_NUM_PORTS || counter >= RTL8367_MIB_COUNT)
+ return -EINVAL;
+
+ mib = &rtl8367_mib_counters[counter];
+ addr = RTL8367_MIB_COUNTER_PORT_OFFSET * port + mib->offset;
+
+ /*
+ * Write the counter address first; the ASIC then prepares the
+ * 64-bit counter value to be retrieved.
+ */
+ REG_WR(smi, RTL8367_MIB_ADDRESS_REG, addr >> 2);
+
+ /* read MIB control register */
+ REG_RD(smi, RTL8367_MIB_CTRL_REG(0), &data);
+
+ if (data & RTL8367_MIB_CTRL_BUSY_MASK)
+ return -EBUSY;
+
+ if (data & RTL8367_MIB_CTRL_RESET_MASK)
+ return -EIO;
+
+ if (mib->length == 4)
+ offset = 3;
+ else
+ offset = (mib->offset + 1) % 4;
+
+ mibvalue = 0;
+ for (i = 0; i < mib->length; i++) {
+ REG_RD(smi, RTL8367_MIB_COUNTER_REG(offset - i), &data);
+ mibvalue = (mibvalue << 16) | (data & 0xFFFF);
+ }
+
+ *val = mibvalue;
+ return 0;
+}
+
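+/*
+ * The 4K VLAN table is accessed through the table access (TA) registers:
+ * the VID goes into RTL8367_TA_ADDR_REG, a read or write command into
+ * RTL8367_TA_CTRL_REG, and the member/fid/untag fields are transferred
+ * via the RTL8367_TA_DATA_REG words.
+ */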
+static int rtl8367_get_vlan_4k(struct rtl8366_smi *smi, u32 vid,
+ struct rtl8366_vlan_4k *vlan4k)
+{
+ u32 data[RTL8367_TA_VLAN_DATA_SIZE];
+ int err;
+ int i;
+
+ memset(vlan4k, '\0', sizeof(struct rtl8366_vlan_4k));
+
+ if (vid >= RTL8367_NUM_VIDS)
+ return -EINVAL;
+
+ /* write VID */
+ REG_WR(smi, RTL8367_TA_ADDR_REG, vid);
+
+ /* write table access control word */
+ REG_WR(smi, RTL8367_TA_CTRL_REG, RTL8367_TA_CTRL_CVLAN_READ);
+
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ REG_RD(smi, RTL8367_TA_DATA_REG(i), &data[i]);
+
+ vlan4k->vid = vid;
+ vlan4k->member = (data[0] >> RTL8367_TA_VLAN_MEMBER_SHIFT) &
+ RTL8367_TA_VLAN_MEMBER_MASK;
+ vlan4k->fid = (data[1] >> RTL8367_TA_VLAN_FID_SHIFT) &
+ RTL8367_TA_VLAN_FID_MASK;
+ vlan4k->untag = (data[2] >> RTL8367_TA_VLAN_UNTAG1_SHIFT) &
+ RTL8367_TA_VLAN_UNTAG1_MASK;
+ vlan4k->untag |= ((data[3] >> RTL8367_TA_VLAN_UNTAG2_SHIFT) &
+ RTL8367_TA_VLAN_UNTAG2_MASK) << 2;
+
+ return 0;
+}
+
+static int rtl8367_set_vlan_4k(struct rtl8366_smi *smi,
+ const struct rtl8366_vlan_4k *vlan4k)
+{
+ u32 data[RTL8367_TA_VLAN_DATA_SIZE];
+ int err;
+ int i;
+
+ if (vlan4k->vid >= RTL8367_NUM_VIDS ||
+ vlan4k->member > RTL8367_TA_VLAN_MEMBER_MASK ||
+ vlan4k->untag > RTL8367_UNTAG_MASK ||
+ vlan4k->fid > RTL8367_FIDMAX)
+ return -EINVAL;
+
+ data[0] = (vlan4k->member & RTL8367_TA_VLAN_MEMBER_MASK) <<
+ RTL8367_TA_VLAN_MEMBER_SHIFT;
+ data[1] = (vlan4k->fid & RTL8367_TA_VLAN_FID_MASK) <<
+ RTL8367_TA_VLAN_FID_SHIFT;
+ data[2] = (vlan4k->untag & RTL8367_TA_VLAN_UNTAG1_MASK) <<
+ RTL8367_TA_VLAN_UNTAG1_SHIFT;
+ data[3] = ((vlan4k->untag >> 2) & RTL8367_TA_VLAN_UNTAG2_MASK) <<
+ RTL8367_TA_VLAN_UNTAG2_SHIFT;
+
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ REG_WR(smi, RTL8367_TA_DATA_REG(i), data[i]);
+
+ /* write VID */
+ REG_WR(smi, RTL8367_TA_ADDR_REG,
+ vlan4k->vid & RTL8367_TA_VLAN_VID_MASK);
+
+ /* write table access control word */
+ REG_WR(smi, RTL8367_TA_CTRL_REG, RTL8367_TA_CTRL_CVLAN_WRITE);
+
+ return 0;
+}
+
+static int rtl8367_get_vlan_mc(struct rtl8366_smi *smi, u32 index,
+ struct rtl8366_vlan_mc *vlanmc)
+{
+ u32 data[RTL8367_VLAN_MC_DATA_SIZE];
+ int err;
+ int i;
+
+ memset(vlanmc, '\0', sizeof(struct rtl8366_vlan_mc));
+
+ if (index >= RTL8367_NUM_VLANS)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ REG_RD(smi, RTL8367_VLAN_MC_BASE(index) + i, &data[i]);
+
+ vlanmc->member = (data[0] >> RTL8367_VLAN_MC_MEMBER_SHIFT) &
+ RTL8367_VLAN_MC_MEMBER_MASK;
+ vlanmc->fid = (data[1] >> RTL8367_VLAN_MC_FID_SHIFT) &
+ RTL8367_VLAN_MC_FID_MASK;
+ vlanmc->vid = (data[3] >> RTL8367_VLAN_MC_EVID_SHIFT) &
+ RTL8367_VLAN_MC_EVID_MASK;
+
+ return 0;
+}
+
+static int rtl8367_set_vlan_mc(struct rtl8366_smi *smi, u32 index,
+ const struct rtl8366_vlan_mc *vlanmc)
+{
+ u32 data[RTL8367_VLAN_MC_DATA_SIZE];
+ int err;
+ int i;
+
+ if (index >= RTL8367_NUM_VLANS ||
+ vlanmc->vid >= RTL8367_NUM_VIDS ||
+ vlanmc->priority > RTL8367_PRIORITYMAX ||
+ vlanmc->member > RTL8367_VLAN_MC_MEMBER_MASK ||
+ vlanmc->untag > RTL8367_UNTAG_MASK ||
+ vlanmc->fid > RTL8367_FIDMAX)
+ return -EINVAL;
+
+ data[0] = (vlanmc->member & RTL8367_VLAN_MC_MEMBER_MASK) <<
+ RTL8367_VLAN_MC_MEMBER_SHIFT;
+ data[1] = (vlanmc->fid & RTL8367_VLAN_MC_FID_MASK) <<
+ RTL8367_VLAN_MC_FID_SHIFT;
+ data[2] = 0;
+ data[3] = (vlanmc->vid & RTL8367_VLAN_MC_EVID_MASK) <<
+ RTL8367_VLAN_MC_EVID_SHIFT;
+
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ REG_WR(smi, RTL8367_VLAN_MC_BASE(index) + i, data[i]);
+
+ return 0;
+}
+
+static int rtl8367_get_mc_index(struct rtl8366_smi *smi, int port, int *val)
+{
+ u32 data;
+ int err;
+
+ if (port >= RTL8367_NUM_PORTS)
+ return -EINVAL;
+
+ REG_RD(smi, RTL8367_VLAN_PVID_CTRL_REG(port), &data);
+
+ *val = (data >> RTL8367_VLAN_PVID_CTRL_SHIFT(port)) &
+ RTL8367_VLAN_PVID_CTRL_MASK;
+
+ return 0;
+}
+
+static int rtl8367_set_mc_index(struct rtl8366_smi *smi, int port, int index)
+{
+ if (port >= RTL8367_NUM_PORTS || index >= RTL8367_NUM_VLANS)
+ return -EINVAL;
+
+ return rtl8366_smi_rmwr(smi, RTL8367_VLAN_PVID_CTRL_REG(port),
+ RTL8367_VLAN_PVID_CTRL_MASK <<
+ RTL8367_VLAN_PVID_CTRL_SHIFT(port),
+ (index & RTL8367_VLAN_PVID_CTRL_MASK) <<
+ RTL8367_VLAN_PVID_CTRL_SHIFT(port));
+}
+
+static int rtl8367_enable_vlan(struct rtl8366_smi *smi, int enable)
+{
+ return rtl8366_smi_rmwr(smi, RTL8367_VLAN_CTRL_REG,
+ RTL8367_VLAN_CTRL_ENABLE,
+ (enable) ? RTL8367_VLAN_CTRL_ENABLE : 0);
+}
+
+static int rtl8367_enable_vlan4k(struct rtl8366_smi *smi, int enable)
+{
+ return 0;
+}
+
+static int rtl8367_is_vlan_valid(struct rtl8366_smi *smi, unsigned vlan)
+{
+ unsigned max = RTL8367_NUM_VLANS;
+
+ if (smi->vlan4k_enabled)
+ max = RTL8367_NUM_VIDS - 1;
+
+ if (vlan == 0 || vlan >= max)
+ return 0;
+
+ return 1;
+}
+
+static int rtl8367_enable_port(struct rtl8366_smi *smi, int port, int enable)
+{
+ int err;
+
+ REG_WR(smi, RTL8367_PORT_ISOLATION_REG(port),
+ (enable) ? RTL8367_PORTS_ALL : 0);
+
+ return 0;
+}
+
+static int rtl8367_sw_reset_mibs(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+
+ return rtl8366_smi_rmwr(smi, RTL8367_MIB_CTRL_REG(0), 0,
+ RTL8367_MIB_CTRL_GLOBAL_RESET_MASK);
+}
+
+static int rtl8367_sw_get_port_link(struct switch_dev *dev,
+ int port,
+ struct switch_port_link *link)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data = 0;
+ u32 speed;
+
+ if (port >= RTL8367_NUM_PORTS)
+ return -EINVAL;
+
+ rtl8366_smi_read_reg(smi, RTL8367_PORT_STATUS_REG(port), &data);
+
+ link->link = !!(data & RTL8367_PORT_STATUS_LINK);
+ if (!link->link)
+ return 0;
+
+ link->duplex = !!(data & RTL8367_PORT_STATUS_DUPLEX);
+ link->rx_flow = !!(data & RTL8367_PORT_STATUS_RXPAUSE);
+ link->tx_flow = !!(data & RTL8367_PORT_STATUS_TXPAUSE);
+ link->aneg = !!(data & RTL8367_PORT_STATUS_NWAY);
+
+ speed = (data & RTL8367_PORT_STATUS_SPEED_MASK);
+ switch (speed) {
+ case 0:
+ link->speed = SWITCH_PORT_SPEED_10;
+ break;
+ case 1:
+ link->speed = SWITCH_PORT_SPEED_100;
+ break;
+ case 2:
+ link->speed = SWITCH_PORT_SPEED_1000;
+ break;
+ default:
+ link->speed = SWITCH_PORT_SPEED_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static int rtl8367_sw_get_max_length(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 data;
+
+ rtl8366_smi_read_reg(smi, RTL8367_SWC0_REG, &data);
+ val->value.i = (data & RTL8367_SWC0_MAX_LENGTH_MASK) >>
+ RTL8367_SWC0_MAX_LENGTH_SHIFT;
+
+ return 0;
+}
+
+static int rtl8367_sw_set_max_length(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ u32 max_len;
+
+ switch (val->value.i) {
+ case 0:
+ max_len = RTL8367_SWC0_MAX_LENGTH_1522;
+ break;
+ case 1:
+ max_len = RTL8367_SWC0_MAX_LENGTH_1536;
+ break;
+ case 2:
+ max_len = RTL8367_SWC0_MAX_LENGTH_1552;
+ break;
+ case 3:
+ max_len = RTL8367_SWC0_MAX_LENGTH_16000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rtl8366_smi_rmwr(smi, RTL8367_SWC0_REG,
+ RTL8367_SWC0_MAX_LENGTH_MASK, max_len);
+}
+
+static int rtl8367_sw_reset_port_mibs(struct switch_dev *dev,
+ const struct switch_attr *attr,
+ struct switch_val *val)
+{
+ struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
+ int port;
+
+ port = val->port_vlan;
+ if (port >= RTL8367_NUM_PORTS)
+ return -EINVAL;
+
+ return rtl8366_smi_rmwr(smi, RTL8367_MIB_CTRL_REG(port / 8), 0,
+ RTL8367_MIB_CTRL_PORT_RESET_MASK(port % 8));
+}
+
+static struct switch_attr rtl8367_globals[] = {
+ {
+ .type = SWITCH_TYPE_INT,
+ .name = "enable_vlan",
+ .description = "Enable VLAN mode",
+ .set = rtl8366_sw_set_vlan_enable,
+ .get = rtl8366_sw_get_vlan_enable,
+ .max = 1,
+ .ofs = 1
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "enable_vlan4k",
+ .description = "Enable VLAN 4K mode",
+ .set = rtl8366_sw_set_vlan_enable,
+ .get = rtl8366_sw_get_vlan_enable,
+ .max = 1,
+ .ofs = 2
+ }, {
+ .type = SWITCH_TYPE_NOVAL,
+ .name = "reset_mibs",
+ .description = "Reset all MIB counters",
+ .set = rtl8367_sw_reset_mibs,
+ }, {
+ .type = SWITCH_TYPE_INT,
+ .name = "max_length",
+ .description = "Get/Set the maximum length of valid packets "
+ "(0:1522, 1:1536, 2:1552, 3:16000)",
+ .set = rtl8367_sw_set_max_length,
+ .get = rtl8367_sw_get_max_length,
+ .max = 3,
+ }
+};
+
+static struct switch_attr rtl8367_port[] = {
+ {
+ .type = SWITCH_TYPE_NOVAL,
+ .name = "reset_mib",
+ .description = "Reset single port MIB counters",
+ .set = rtl8367_sw_reset_port_mibs,
+ }, {
+ .type = SWITCH_TYPE_STRING,
+ .name = "mib",
+ .description = "Get MIB counters for port",
+ .max = 33,
+ .set = NULL,
+ .get = rtl8366_sw_get_port_mib,
+ },
+};
+
+static struct switch_attr rtl8367_vlan[] = {
+ {
+ .type = SWITCH_TYPE_STRING,
+ .name = "info",
+ .description = "Get vlan information",
+ .max = 1,
+ .set = NULL,
+ .get = rtl8366_sw_get_vlan_info,
+ },
+};
+
+static const struct switch_dev_ops rtl8367_sw_ops = {
+ .attr_global = {
+ .attr = rtl8367_globals,
+ .n_attr = ARRAY_SIZE(rtl8367_globals),
+ },
+ .attr_port = {
+ .attr = rtl8367_port,
+ .n_attr = ARRAY_SIZE(rtl8367_port),
+ },
+ .attr_vlan = {
+ .attr = rtl8367_vlan,
+ .n_attr = ARRAY_SIZE(rtl8367_vlan),
+ },
+
+ .get_vlan_ports = rtl8366_sw_get_vlan_ports,
+ .set_vlan_ports = rtl8366_sw_set_vlan_ports,
+ .get_port_pvid = rtl8366_sw_get_port_pvid,
+ .set_port_pvid = rtl8366_sw_set_port_pvid,
+ .reset_switch = rtl8366_sw_reset_switch,
+ .get_port_link = rtl8367_sw_get_port_link,
+};
+
+static int rtl8367_switch_init(struct rtl8366_smi *smi)
+{
+ struct switch_dev *dev = &smi->sw_dev;
+ int err;
+
+ dev->name = "RTL8367";
+ dev->cpu_port = RTL8367_CPU_PORT_NUM;
+ dev->ports = RTL8367_NUM_PORTS;
+ dev->vlans = RTL8367_NUM_VIDS;
+ dev->ops = &rtl8367_sw_ops;
+ dev->alias = dev_name(smi->parent);
+
+ err = register_switch(dev, NULL);
+ if (err)
+ dev_err(smi->parent, "switch registration failed\n");
+
+ return err;
+}
+
+static void rtl8367_switch_cleanup(struct rtl8366_smi *smi)
+{
+ unregister_switch(&smi->sw_dev);
+}
+
+static int rtl8367_mii_read(struct mii_bus *bus, int addr, int reg)
+{
+ struct rtl8366_smi *smi = bus->priv;
+ u32 val = 0;
+ int err;
+
+ err = rtl8367_read_phy_reg(smi, addr, reg, &val);
+ if (err)
+ return 0xffff;
+
+ return val;
+}
+
+static int rtl8367_mii_write(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+ struct rtl8366_smi *smi = bus->priv;
+ u32 t;
+ int err;
+
+ err = rtl8367_write_phy_reg(smi, addr, reg, val);
+ if (err)
+ return err;
+
+ /* flush write */
+ (void) rtl8367_read_phy_reg(smi, addr, reg, &t);
+
+ return err;
+}
+
+static int rtl8367_detect(struct rtl8366_smi *smi)
+{
+ u32 rtl_no = 0;
+ u32 rtl_ver = 0;
+ char *chip_name;
+ int ret;
+
+ ret = rtl8366_smi_read_reg(smi, RTL8367_RTL_NO_REG, &rtl_no);
+ if (ret) {
+ dev_err(smi->parent, "unable to read chip number\n");
+ return ret;
+ }
+
+ switch (rtl_no) {
+ case RTL8367_RTL_NO_8367R:
+ chip_name = "8367R";
+ break;
+ case RTL8367_RTL_NO_8367M:
+ chip_name = "8367M";
+ break;
+ default:
+ dev_err(smi->parent, "unknown chip number (%04x)\n", rtl_no);
+ return -ENODEV;
+ }
+
+ ret = rtl8366_smi_read_reg(smi, RTL8367_RTL_VER_REG, &rtl_ver);
+ if (ret) {
+ dev_err(smi->parent, "unable to read chip version\n");
+ return ret;
+ }
+
+ dev_info(smi->parent, "RTL%s ver. %u chip found\n",
+ chip_name, rtl_ver & RTL8367_RTL_VER_MASK);
+
+ return 0;
+}
+
+static struct rtl8366_smi_ops rtl8367_smi_ops = {
+ .detect = rtl8367_detect,
+ .reset_chip = rtl8367_reset_chip,
+ .setup = rtl8367_setup,
+
+ .mii_read = rtl8367_mii_read,
+ .mii_write = rtl8367_mii_write,
+
+ .get_vlan_mc = rtl8367_get_vlan_mc,
+ .set_vlan_mc = rtl8367_set_vlan_mc,
+ .get_vlan_4k = rtl8367_get_vlan_4k,
+ .set_vlan_4k = rtl8367_set_vlan_4k,
+ .get_mc_index = rtl8367_get_mc_index,
+ .set_mc_index = rtl8367_set_mc_index,
+ .get_mib_counter = rtl8367_get_mib_counter,
+ .is_vlan_valid = rtl8367_is_vlan_valid,
+ .enable_vlan = rtl8367_enable_vlan,
+ .enable_vlan4k = rtl8367_enable_vlan4k,
+ .enable_port = rtl8367_enable_port,
+};
+
+static int __devinit rtl8367_probe(struct platform_device *pdev)
+{
+ struct rtl8367_platform_data *pdata;
+ struct rtl8366_smi *smi;
+ int err;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data specified\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ smi = rtl8366_smi_alloc(&pdev->dev);
+ if (!smi) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ smi->gpio_sda = pdata->gpio_sda;
+ smi->gpio_sck = pdata->gpio_sck;
+ smi->hw_reset = pdata->hw_reset;
+
+ smi->clk_delay = 1500;
+ smi->cmd_read = 0xb9;
+ smi->cmd_write = 0xb8;
+ smi->ops = &rtl8367_smi_ops;
+ smi->cpu_port = RTL8367_CPU_PORT_NUM;
+ smi->num_ports = RTL8367_NUM_PORTS;
+ smi->num_vlan_mc = RTL8367_NUM_VLANS;
+ smi->mib_counters = rtl8367_mib_counters;
+ smi->num_mib_counters = ARRAY_SIZE(rtl8367_mib_counters);
+
+ err = rtl8366_smi_init(smi);
+ if (err)
+ goto err_free_smi;
+
+ platform_set_drvdata(pdev, smi);
+
+ err = rtl8367_switch_init(smi);
+ if (err)
+ goto err_clear_drvdata;
+
+ return 0;
+
+ err_clear_drvdata:
+ platform_set_drvdata(pdev, NULL);
+ rtl8366_smi_cleanup(smi);
+ err_free_smi:
+ kfree(smi);
+ err_out:
+ return err;
+}
+
+static int __devexit rtl8367_remove(struct platform_device *pdev)
+{
+ struct rtl8366_smi *smi = platform_get_drvdata(pdev);
+
+ if (smi) {
+ rtl8367_switch_cleanup(smi);
+ platform_set_drvdata(pdev, NULL);
+ rtl8366_smi_cleanup(smi);
+ kfree(smi);
+ }
+
+ return 0;
+}
+
+static void rtl8367_shutdown(struct platform_device *pdev)
+{
+ struct rtl8366_smi *smi = platform_get_drvdata(pdev);
+
+ if (smi)
+ rtl8367_reset_chip(smi);
+}
+
+static struct platform_driver rtl8367_driver = {
+ .driver = {
+ .name = RTL8367_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = rtl8367_probe,
+ .remove = __devexit_p(rtl8367_remove),
+ .shutdown = rtl8367_shutdown,
+};
+
+static int __init rtl8367_module_init(void)
+{
+ return platform_driver_register(&rtl8367_driver);
+}
+module_init(rtl8367_module_init);
+
+static void __exit rtl8367_module_exit(void)
+{
+ platform_driver_unregister(&rtl8367_driver);
+}
+module_exit(rtl8367_module_exit);
+
+MODULE_DESCRIPTION(RTL8367_DRIVER_DESC);
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" RTL8367_DRIVER_NAME);
diff --git a/target/linux/generic/files/drivers/net/phy/swconfig.c b/target/linux/generic/files/drivers/net/phy/swconfig.c
new file mode 100644
index 000000000..e772c9448
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/swconfig.c
@@ -0,0 +1,1043 @@
+/*
+ * swconfig.c: Switch configuration API
+ *
+ * Copyright (C) 2008 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/capability.h>
+#include <linux/skbuff.h>
+#include <linux/switch.h>
+
+//#define DEBUG 1
+#ifdef DEBUG
+#define DPRINTF(format, ...) printk("%s: " format, __func__, ##__VA_ARGS__)
+#else
+#define DPRINTF(...) do {} while(0)
+#endif
+
+#define SWCONFIG_DEVNAME "switch%d"
+
+#include "swconfig_leds.c"
+
+MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
+MODULE_LICENSE("GPL");
+
+static int swdev_id = 0;
+static struct list_head swdevs;
+static DEFINE_SPINLOCK(swdevs_lock);
+struct swconfig_callback;
+
+struct swconfig_callback
+{
+ struct sk_buff *msg;
+ struct genlmsghdr *hdr;
+ struct genl_info *info;
+ int cmd;
+
+ /* callback for filling in the message data */
+ int (*fill)(struct swconfig_callback *cb, void *arg);
+
+ /* callback for closing the message before sending it */
+ int (*close)(struct swconfig_callback *cb, void *arg);
+
+ struct nlattr *nest[4];
+ int args[4];
+};
+
+/* defaults */
+
+static int
+swconfig_get_vlan_ports(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ int ret;
+ if (val->port_vlan >= dev->vlans)
+ return -EINVAL;
+
+ if (!dev->ops->get_vlan_ports)
+ return -EOPNOTSUPP;
+
+ ret = dev->ops->get_vlan_ports(dev, val);
+ return ret;
+}
+
+static int
+swconfig_set_vlan_ports(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct switch_port *ports = val->value.ports;
+ const struct switch_dev_ops *ops = dev->ops;
+ int i;
+
+ if (val->port_vlan >= dev->vlans)
+ return -EINVAL;
+
+ /* validate ports */
+ if (val->len > dev->ports)
+ return -EINVAL;
+
+ if (!ops->set_vlan_ports)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < val->len; i++) {
+ if (ports[i].id >= dev->ports)
+ return -EINVAL;
+
+ if (ops->set_port_pvid &&
+ !(ports[i].flags & (1 << SWITCH_PORT_FLAG_TAGGED)))
+ ops->set_port_pvid(dev, ports[i].id, val->port_vlan);
+ }
+
+ return ops->set_vlan_ports(dev, val);
+}
+
+static int
+swconfig_set_pvid(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ if (val->port_vlan >= dev->ports)
+ return -EINVAL;
+
+ if (!dev->ops->set_port_pvid)
+ return -EOPNOTSUPP;
+
+ return dev->ops->set_port_pvid(dev, val->port_vlan, val->value.i);
+}
+
+static int
+swconfig_get_pvid(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ if (val->port_vlan >= dev->ports)
+ return -EINVAL;
+
+ if (!dev->ops->get_port_pvid)
+ return -EOPNOTSUPP;
+
+ return dev->ops->get_port_pvid(dev, val->port_vlan, &val->value.i);
+}
+
+static const char *
+swconfig_speed_str(enum switch_port_speed speed)
+{
+ switch (speed) {
+ case SWITCH_PORT_SPEED_10:
+ return "10baseT";
+ case SWITCH_PORT_SPEED_100:
+ return "100baseT";
+ case SWITCH_PORT_SPEED_1000:
+ return "1000baseT";
+ default:
+ break;
+ }
+
+ return "unknown";
+}
+
+static int
+swconfig_get_link(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ struct switch_port_link link;
+ int len;
+ int ret;
+
+ if (val->port_vlan >= dev->ports)
+ return -EINVAL;
+
+ if (!dev->ops->get_port_link)
+ return -EOPNOTSUPP;
+
+ memset(&link, 0, sizeof(link));
+ ret = dev->ops->get_port_link(dev, val->port_vlan, &link);
+ if (ret)
+ return ret;
+
+ memset(dev->buf, 0, sizeof(dev->buf));
+
+ if (link.link)
+ len = snprintf(dev->buf, sizeof(dev->buf),
+ "port:%d link:up speed:%s %s-duplex %s%s%s",
+ val->port_vlan,
+ swconfig_speed_str(link.speed),
+ link.duplex ? "full" : "half",
+ link.tx_flow ? "txflow " : "",
+ link.rx_flow ? "rxflow " : "",
+ link.aneg ? "auto" : "");
+ else
+ len = snprintf(dev->buf, sizeof(dev->buf), "port:%d link:down",
+ val->port_vlan);
+
+ val->value.s = dev->buf;
+ val->len = len;
+
+ return 0;
+}
+
+static int
+swconfig_apply_config(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ /* don't complain if not supported by the switch driver */
+ if (!dev->ops->apply_config)
+ return 0;
+
+ return dev->ops->apply_config(dev);
+}
+
+static int
+swconfig_reset_switch(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
+{
+ /* don't complain if not supported by the switch driver */
+ if (!dev->ops->reset_switch)
+ return 0;
+
+ return dev->ops->reset_switch(dev);
+}
+
+enum global_defaults {
+ GLOBAL_APPLY,
+ GLOBAL_RESET,
+};
+
+enum vlan_defaults {
+ VLAN_PORTS,
+};
+
+enum port_defaults {
+ PORT_PVID,
+ PORT_LINK,
+};
+
+static struct switch_attr default_global[] = {
+ [GLOBAL_APPLY] = {
+ .type = SWITCH_TYPE_NOVAL,
+ .name = "apply",
+ .description = "Activate changes in the hardware",
+ .set = swconfig_apply_config,
+ },
+ [GLOBAL_RESET] = {
+ .type = SWITCH_TYPE_NOVAL,
+ .name = "reset",
+ .description = "Reset the switch",
+ .set = swconfig_reset_switch,
+ }
+};
+
+static struct switch_attr default_port[] = {
+ [PORT_PVID] = {
+ .type = SWITCH_TYPE_INT,
+ .name = "pvid",
+ .description = "Primary VLAN ID",
+ .set = swconfig_set_pvid,
+ .get = swconfig_get_pvid,
+ },
+ [PORT_LINK] = {
+ .type = SWITCH_TYPE_STRING,
+ .name = "link",
+ .description = "Get port link information",
+ .set = NULL,
+ .get = swconfig_get_link,
+ }
+};
+
+static struct switch_attr default_vlan[] = {
+ [VLAN_PORTS] = {
+ .type = SWITCH_TYPE_PORTS,
+ .name = "ports",
+ .description = "VLAN port mapping",
+ .set = swconfig_set_vlan_ports,
+ .get = swconfig_get_vlan_ports,
+ },
+};
+
+static const struct switch_attr *
+swconfig_find_attr_by_name(const struct switch_attrlist *alist, const char *name)
+{
+ int i;
+
+ for (i = 0; i < alist->n_attr; i++)
+ if (strcmp(name, alist->attr[i].name) == 0)
+ return &alist->attr[i];
+
+ return NULL;
+}
+
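+/*
+ * Generic fallback attributes: the default_global/default_vlan/default_port
+ * tables are enabled per device in the def_* bitmasks when the driver
+ * implements the matching ops, and are addressed by user space at
+ * SWITCH_ATTR_DEFAULTS_OFFSET and above (see swconfig_lookup_attr).
+ */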
+static void swconfig_defaults_init(struct switch_dev *dev)
+{
+ const struct switch_dev_ops *ops = dev->ops;
+
+ dev->def_global = 0;
+ dev->def_vlan = 0;
+ dev->def_port = 0;
+
+ if (ops->get_vlan_ports || ops->set_vlan_ports)
+ set_bit(VLAN_PORTS, &dev->def_vlan);
+
+ if (ops->get_port_pvid || ops->set_port_pvid)
+ set_bit(PORT_PVID, &dev->def_port);
+
+ if (ops->get_port_link &&
+ !swconfig_find_attr_by_name(&ops->attr_port, "link"))
+ set_bit(PORT_LINK, &dev->def_port);
+
+ /* always present, can be no-op */
+ set_bit(GLOBAL_APPLY, &dev->def_global);
+ set_bit(GLOBAL_RESET, &dev->def_global);
+}
+
+static struct genl_family switch_fam = {
+ .id = GENL_ID_GENERATE,
+ .name = "switch",
+ .hdrsize = 0,
+ .version = 1,
+ .maxattr = SWITCH_ATTR_MAX,
+};
+
+static const struct nla_policy switch_policy[SWITCH_ATTR_MAX+1] = {
+ [SWITCH_ATTR_ID] = { .type = NLA_U32 },
+ [SWITCH_ATTR_OP_ID] = { .type = NLA_U32 },
+ [SWITCH_ATTR_OP_PORT] = { .type = NLA_U32 },
+ [SWITCH_ATTR_OP_VLAN] = { .type = NLA_U32 },
+ [SWITCH_ATTR_OP_VALUE_INT] = { .type = NLA_U32 },
+ [SWITCH_ATTR_OP_VALUE_STR] = { .type = NLA_NUL_STRING },
+ [SWITCH_ATTR_OP_VALUE_PORTS] = { .type = NLA_NESTED },
+ [SWITCH_ATTR_TYPE] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy port_policy[SWITCH_PORT_ATTR_MAX+1] = {
+ [SWITCH_PORT_ID] = { .type = NLA_U32 },
+ [SWITCH_PORT_FLAG_TAGGED] = { .type = NLA_FLAG },
+};
+
+static inline void
+swconfig_lock(void)
+{
+ spin_lock(&swdevs_lock);
+}
+
+static inline void
+swconfig_unlock(void)
+{
+ spin_unlock(&swdevs_lock);
+}
+
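+/*
+ * Look up a switch device by the numeric id passed in the netlink request.
+ * The global list is walked under swdevs_lock and the device's sw_mutex is
+ * taken before returning; callers release it with swconfig_put_dev().
+ */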
+static struct switch_dev *
+swconfig_get_dev(struct genl_info *info)
+{
+ struct switch_dev *dev = NULL;
+ struct switch_dev *p;
+ int id;
+
+ if (!info->attrs[SWITCH_ATTR_ID])
+ goto done;
+
+ id = nla_get_u32(info->attrs[SWITCH_ATTR_ID]);
+ swconfig_lock();
+ list_for_each_entry(p, &swdevs, dev_list) {
+ if (id != p->id)
+ continue;
+
+ dev = p;
+ break;
+ }
+ if (dev)
+ mutex_lock(&dev->sw_mutex);
+ else
+ DPRINTF("device %d not found\n", id);
+ swconfig_unlock();
+done:
+ return dev;
+}
+
+static inline void
+swconfig_put_dev(struct switch_dev *dev)
+{
+ mutex_unlock(&dev->sw_mutex);
+}
+
+static int
+swconfig_dump_attr(struct swconfig_callback *cb, void *arg)
+{
+ struct switch_attr *op = arg;
+ struct genl_info *info = cb->info;
+ struct sk_buff *msg = cb->msg;
+ int id = cb->args[0];
+ void *hdr;
+
+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, &switch_fam,
+ NLM_F_MULTI, SWITCH_CMD_NEW_ATTR);
+ if (IS_ERR(hdr))
+ return -1;
+
+ NLA_PUT_U32(msg, SWITCH_ATTR_OP_ID, id);
+ NLA_PUT_U32(msg, SWITCH_ATTR_OP_TYPE, op->type);
+ NLA_PUT_STRING(msg, SWITCH_ATTR_OP_NAME, op->name);
+ if (op->description)
+ NLA_PUT_STRING(msg, SWITCH_ATTR_OP_DESCRIPTION,
+ op->description);
+
+ return genlmsg_end(msg, hdr);
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/* spread multipart messages across multiple message buffers */
+static int
+swconfig_send_multipart(struct swconfig_callback *cb, void *arg)
+{
+ struct genl_info *info = cb->info;
+ int restart = 0;
+ int err;
+
+ do {
+ if (!cb->msg) {
+ cb->msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (cb->msg == NULL)
+ goto error;
+ }
+
+ if (!(cb->fill(cb, arg) < 0))
+ break;
+
+ /* fill failed, check if this was already the second attempt */
+ if (restart)
+ goto error;
+
+ /* try again in a new message, send the current one */
+ restart = 1;
+ if (cb->close) {
+ if (cb->close(cb, arg) < 0)
+ goto error;
+ }
+ err = genlmsg_reply(cb->msg, info);
+ cb->msg = NULL;
+ if (err < 0)
+ goto error;
+
+ } while (restart);
+
+ return 0;
+
+error:
+ if (cb->msg)
+ nlmsg_free(cb->msg);
+ return -1;
+}
+
+static int
+swconfig_list_attrs(struct sk_buff *skb, struct genl_info *info)
+{
+ struct genlmsghdr *hdr = nlmsg_data(info->nlhdr);
+ const struct switch_attrlist *alist;
+ struct switch_dev *dev;
+ struct swconfig_callback cb;
+ int err = -EINVAL;
+ int i;
+
+ /* defaults */
+ struct switch_attr *def_list;
+ unsigned long *def_active;
+ int n_def;
+
+ dev = swconfig_get_dev(info);
+ if (!dev)
+ return -EINVAL;
+
+ switch(hdr->cmd) {
+ case SWITCH_CMD_LIST_GLOBAL:
+ alist = &dev->ops->attr_global;
+ def_list = default_global;
+ def_active = &dev->def_global;
+ n_def = ARRAY_SIZE(default_global);
+ break;
+ case SWITCH_CMD_LIST_VLAN:
+ alist = &dev->ops->attr_vlan;
+ def_list = default_vlan;
+ def_active = &dev->def_vlan;
+ n_def = ARRAY_SIZE(default_vlan);
+ break;
+ case SWITCH_CMD_LIST_PORT:
+ alist = &dev->ops->attr_port;
+ def_list = default_port;
+ def_active = &dev->def_port;
+ n_def = ARRAY_SIZE(default_port);
+ break;
+ default:
+ WARN_ON(1);
+ goto out;
+ }
+
+ memset(&cb, 0, sizeof(cb));
+ cb.info = info;
+ cb.fill = swconfig_dump_attr;
+ for (i = 0; i < alist->n_attr; i++) {
+ if (alist->attr[i].disabled)
+ continue;
+ cb.args[0] = i;
+ err = swconfig_send_multipart(&cb, (void *) &alist->attr[i]);
+ if (err < 0)
+ goto error;
+ }
+
+ /* defaults */
+ for (i = 0; i < n_def; i++) {
+ if (!test_bit(i, def_active))
+ continue;
+ cb.args[0] = SWITCH_ATTR_DEFAULTS_OFFSET + i;
+ err = swconfig_send_multipart(&cb, (void *) &def_list[i]);
+ if (err < 0)
+ goto error;
+ }
+ swconfig_put_dev(dev);
+
+ if (!cb.msg)
+ return 0;
+
+ return genlmsg_reply(cb.msg, info);
+
+error:
+ if (cb.msg)
+ nlmsg_free(cb.msg);
+out:
+ swconfig_put_dev(dev);
+ return err;
+}
+
+static const struct switch_attr *
+swconfig_lookup_attr(struct switch_dev *dev, struct genl_info *info,
+ struct switch_val *val)
+{
+ struct genlmsghdr *hdr = nlmsg_data(info->nlhdr);
+ const struct switch_attrlist *alist;
+ const struct switch_attr *attr = NULL;
+ int attr_id;
+
+ /* defaults */
+ struct switch_attr *def_list;
+ unsigned long *def_active;
+ int n_def;
+
+ if (!info->attrs[SWITCH_ATTR_OP_ID])
+ goto done;
+
+ switch(hdr->cmd) {
+ case SWITCH_CMD_SET_GLOBAL:
+ case SWITCH_CMD_GET_GLOBAL:
+ alist = &dev->ops->attr_global;
+ def_list = default_global;
+ def_active = &dev->def_global;
+ n_def = ARRAY_SIZE(default_global);
+ break;
+ case SWITCH_CMD_SET_VLAN:
+ case SWITCH_CMD_GET_VLAN:
+ alist = &dev->ops->attr_vlan;
+ def_list = default_vlan;
+ def_active = &dev->def_vlan;
+ n_def = ARRAY_SIZE(default_vlan);
+ if (!info->attrs[SWITCH_ATTR_OP_VLAN])
+ goto done;
+ val->port_vlan = nla_get_u32(info->attrs[SWITCH_ATTR_OP_VLAN]);
+ if (val->port_vlan >= dev->vlans)
+ goto done;
+ break;
+ case SWITCH_CMD_SET_PORT:
+ case SWITCH_CMD_GET_PORT:
+ alist = &dev->ops->attr_port;
+ def_list = default_port;
+ def_active = &dev->def_port;
+ n_def = ARRAY_SIZE(default_port);
+ if (!info->attrs[SWITCH_ATTR_OP_PORT])
+ goto done;
+ val->port_vlan = nla_get_u32(info->attrs[SWITCH_ATTR_OP_PORT]);
+ if (val->port_vlan >= dev->ports)
+ goto done;
+ break;
+ default:
+ WARN_ON(1);
+ goto done;
+ }
+
+ if (!alist)
+ goto done;
+
+ attr_id = nla_get_u32(info->attrs[SWITCH_ATTR_OP_ID]);
+ if (attr_id >= SWITCH_ATTR_DEFAULTS_OFFSET) {
+ attr_id -= SWITCH_ATTR_DEFAULTS_OFFSET;
+ if (attr_id >= n_def)
+ goto done;
+ if (!test_bit(attr_id, def_active))
+ goto done;
+ attr = &def_list[attr_id];
+ } else {
+ if (attr_id >= alist->n_attr)
+ goto done;
+ attr = &alist->attr[attr_id];
+ }
+
+ if (attr->disabled)
+ attr = NULL;
+
+done:
+ if (!attr)
+ DPRINTF("attribute lookup failed\n");
+ val->attr = attr;
+ return attr;
+}
+
+static int
+swconfig_parse_ports(struct sk_buff *msg, struct nlattr *head,
+ struct switch_val *val, int max)
+{
+ struct nlattr *nla;
+ int rem;
+
+ val->len = 0;
+ nla_for_each_nested(nla, head, rem) {
+ struct nlattr *tb[SWITCH_PORT_ATTR_MAX+1];
+ struct switch_port *port = &val->value.ports[val->len];
+
+ if (val->len >= max)
+ return -EINVAL;
+
+ if (nla_parse_nested(tb, SWITCH_PORT_ATTR_MAX, nla,
+ port_policy))
+ return -EINVAL;
+
+ if (!tb[SWITCH_PORT_ID])
+ return -EINVAL;
+
+ port->id = nla_get_u32(tb[SWITCH_PORT_ID]);
+ if (tb[SWITCH_PORT_FLAG_TAGGED])
+ port->flags |= (1 << SWITCH_PORT_FLAG_TAGGED);
+ val->len++;
+ }
+
+ return 0;
+}
+
+static int
+swconfig_set_attr(struct sk_buff *skb, struct genl_info *info)
+{
+ const struct switch_attr *attr;
+ struct switch_dev *dev;
+ struct switch_val val;
+ int err = -EINVAL;
+
+ dev = swconfig_get_dev(info);
+ if (!dev)
+ return -EINVAL;
+
+ memset(&val, 0, sizeof(val));
+ attr = swconfig_lookup_attr(dev, info, &val);
+ if (!attr || !attr->set)
+ goto error;
+
+ val.attr = attr;
+ switch(attr->type) {
+ case SWITCH_TYPE_NOVAL:
+ break;
+ case SWITCH_TYPE_INT:
+ if (!info->attrs[SWITCH_ATTR_OP_VALUE_INT])
+ goto error;
+ val.value.i =
+ nla_get_u32(info->attrs[SWITCH_ATTR_OP_VALUE_INT]);
+ break;
+ case SWITCH_TYPE_STRING:
+ if (!info->attrs[SWITCH_ATTR_OP_VALUE_STR])
+ goto error;
+ val.value.s =
+ nla_data(info->attrs[SWITCH_ATTR_OP_VALUE_STR]);
+ break;
+ case SWITCH_TYPE_PORTS:
+ val.value.ports = dev->portbuf;
+ memset(dev->portbuf, 0,
+ sizeof(struct switch_port) * dev->ports);
+
+ /* TODO: implement multipart? */
+ if (info->attrs[SWITCH_ATTR_OP_VALUE_PORTS]) {
+ err = swconfig_parse_ports(skb,
+ info->attrs[SWITCH_ATTR_OP_VALUE_PORTS], &val, dev->ports);
+ if (err < 0)
+ goto error;
+ } else {
+ val.len = 0;
+ err = 0;
+ }
+ break;
+ default:
+ goto error;
+ }
+
+ err = attr->set(dev, attr, &val);
+error:
+ swconfig_put_dev(dev);
+ return err;
+}
+
+static int
+swconfig_close_portlist(struct swconfig_callback *cb, void *arg)
+{
+ if (cb->nest[0])
+ nla_nest_end(cb->msg, cb->nest[0]);
+ return 0;
+}
+
+static int
+swconfig_send_port(struct swconfig_callback *cb, void *arg)
+{
+ const struct switch_port *port = arg;
+ struct nlattr *p = NULL;
+
+ if (!cb->nest[0]) {
+ cb->nest[0] = nla_nest_start(cb->msg, cb->cmd);
+ if (!cb->nest[0])
+ return -1;
+ }
+
+ p = nla_nest_start(cb->msg, SWITCH_ATTR_PORT);
+ if (!p)
+ goto error;
+
+ NLA_PUT_U32(cb->msg, SWITCH_PORT_ID, port->id);
+ if (port->flags & (1 << SWITCH_PORT_FLAG_TAGGED))
+ NLA_PUT_FLAG(cb->msg, SWITCH_PORT_FLAG_TAGGED);
+
+ nla_nest_end(cb->msg, p);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(cb->msg, p);
+error:
+ nla_nest_cancel(cb->msg, cb->nest[0]);
+ return -1;
+}
+
+static int
+swconfig_send_ports(struct sk_buff **msg, struct genl_info *info, int attr,
+ const struct switch_val *val)
+{
+ struct swconfig_callback cb;
+ int err = 0;
+ int i;
+
+ if (!val->value.ports)
+ return -EINVAL;
+
+ memset(&cb, 0, sizeof(cb));
+ cb.cmd = attr;
+ cb.msg = *msg;
+ cb.info = info;
+ cb.fill = swconfig_send_port;
+ cb.close = swconfig_close_portlist;
+
+ cb.nest[0] = nla_nest_start(cb.msg, cb.cmd);
+ for (i = 0; i < val->len; i++) {
+ err = swconfig_send_multipart(&cb, &val->value.ports[i]);
+ if (err)
+ goto done;
+ }
+ err = val->len;
+ swconfig_close_portlist(&cb, NULL);
+ *msg = cb.msg;
+
+done:
+ return err;
+}
+
+static int
+swconfig_get_attr(struct sk_buff *skb, struct genl_info *info)
+{
+ struct genlmsghdr *hdr = nlmsg_data(info->nlhdr);
+ const struct switch_attr *attr;
+ struct switch_dev *dev;
+ struct sk_buff *msg = NULL;
+ struct switch_val val;
+ int err = -EINVAL;
+ int cmd = hdr->cmd;
+
+ dev = swconfig_get_dev(info);
+ if (!dev)
+ return -EINVAL;
+
+ memset(&val, 0, sizeof(val));
+ attr = swconfig_lookup_attr(dev, info, &val);
+ if (!attr || !attr->get)
+ goto error;
+
+ if (attr->type == SWITCH_TYPE_PORTS) {
+ val.value.ports = dev->portbuf;
+ memset(dev->portbuf, 0,
+ sizeof(struct switch_port) * dev->ports);
+ }
+
+ err = attr->get(dev, attr, &val);
+ if (err)
+ goto error;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ goto error;
+
+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, &switch_fam,
+ 0, cmd);
+ if (IS_ERR(hdr))
+ goto nla_put_failure;
+
+ switch(attr->type) {
+ case SWITCH_TYPE_INT:
+ NLA_PUT_U32(msg, SWITCH_ATTR_OP_VALUE_INT, val.value.i);
+ break;
+ case SWITCH_TYPE_STRING:
+ NLA_PUT_STRING(msg, SWITCH_ATTR_OP_VALUE_STR, val.value.s);
+ break;
+ case SWITCH_TYPE_PORTS:
+ err = swconfig_send_ports(&msg, info,
+ SWITCH_ATTR_OP_VALUE_PORTS, &val);
+ if (err < 0)
+ goto nla_put_failure;
+ break;
+ default:
+ DPRINTF("invalid type in attribute\n");
+ err = -EINVAL;
+ goto error;
+ }
+ err = genlmsg_end(msg, hdr);
+ if (err < 0)
+ goto nla_put_failure;
+
+ swconfig_put_dev(dev);
+ return genlmsg_reply(msg, info);
+
+nla_put_failure:
+ if (msg)
+ nlmsg_free(msg);
+error:
+ swconfig_put_dev(dev);
+ if (!err)
+ err = -ENOMEM;
+ return err;
+}
+
+static int
+swconfig_send_switch(struct sk_buff *msg, u32 pid, u32 seq, int flags,
+ const struct switch_dev *dev)
+{
+ void *hdr;
+
+ hdr = genlmsg_put(msg, pid, seq, &switch_fam, flags,
+ SWITCH_CMD_NEW_ATTR);
+ if (IS_ERR(hdr))
+ return -1;
+
+ NLA_PUT_U32(msg, SWITCH_ATTR_ID, dev->id);
+ NLA_PUT_STRING(msg, SWITCH_ATTR_DEV_NAME, dev->devname);
+ NLA_PUT_STRING(msg, SWITCH_ATTR_ALIAS, dev->alias);
+ NLA_PUT_STRING(msg, SWITCH_ATTR_NAME, dev->name);
+ NLA_PUT_U32(msg, SWITCH_ATTR_VLANS, dev->vlans);
+ NLA_PUT_U32(msg, SWITCH_ATTR_PORTS, dev->ports);
+ NLA_PUT_U32(msg, SWITCH_ATTR_CPU_PORT, dev->cpu_port);
+
+ return genlmsg_end(msg, hdr);
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static int swconfig_dump_switches(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct switch_dev *dev;
+ int start = cb->args[0];
+ int idx = 0;
+
+ swconfig_lock();
+ list_for_each_entry(dev, &swdevs, dev_list) {
+ if (++idx <= start)
+ continue;
+ if (swconfig_send_switch(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ dev) < 0)
+ break;
+ }
+ swconfig_unlock();
+ cb->args[0] = idx;
+
+ return skb->len;
+}
+
+static int
+swconfig_done(struct netlink_callback *cb)
+{
+ return 0;
+}
+
+static struct genl_ops swconfig_ops[] = {
+ {
+ .cmd = SWITCH_CMD_LIST_GLOBAL,
+ .doit = swconfig_list_attrs,
+ .policy = switch_policy,
+ },
+ {
+ .cmd = SWITCH_CMD_LIST_VLAN,
+ .doit = swconfig_list_attrs,
+ .policy = switch_policy,
+ },
+ {
+ .cmd = SWITCH_CMD_LIST_PORT,
+ .doit = swconfig_list_attrs,
+ .policy = switch_policy,
+ },
+ {
+ .cmd = SWITCH_CMD_GET_GLOBAL,
+ .doit = swconfig_get_attr,
+ .policy = switch_policy,
+ },
+ {
+ .cmd = SWITCH_CMD_GET_VLAN,
+ .doit = swconfig_get_attr,
+ .policy = switch_policy,
+ },
+ {
+ .cmd = SWITCH_CMD_GET_PORT,
+ .doit = swconfig_get_attr,
+ .policy = switch_policy,
+ },
+ {
+ .cmd = SWITCH_CMD_SET_GLOBAL,
+ .doit = swconfig_set_attr,
+ .policy = switch_policy,
+ },
+ {
+ .cmd = SWITCH_CMD_SET_VLAN,
+ .doit = swconfig_set_attr,
+ .policy = switch_policy,
+ },
+ {
+ .cmd = SWITCH_CMD_SET_PORT,
+ .doit = swconfig_set_attr,
+ .policy = switch_policy,
+ },
+ {
+ .cmd = SWITCH_CMD_GET_SWITCH,
+ .dumpit = swconfig_dump_switches,
+ .policy = switch_policy,
+ .done = swconfig_done,
+ }
+};
+
+int
+register_switch(struct switch_dev *dev, struct net_device *netdev)
+{
+ struct switch_dev *sdev;
+ const int max_switches = 8 * sizeof(unsigned long);
+ unsigned long in_use = 0;
+ int err;
+ int i;
+
+ INIT_LIST_HEAD(&dev->dev_list);
+ if (netdev) {
+ dev->netdev = netdev;
+ if (!dev->alias)
+ dev->alias = netdev->name;
+ }
+ BUG_ON(!dev->alias);
+
+ if (dev->ports > 0) {
+ dev->portbuf = kzalloc(sizeof(struct switch_port) * dev->ports,
+ GFP_KERNEL);
+ if (!dev->portbuf)
+ return -ENOMEM;
+ }
+ swconfig_defaults_init(dev);
+ mutex_init(&dev->sw_mutex);
+ swconfig_lock();
+ dev->id = ++swdev_id;
+
+ list_for_each_entry(sdev, &swdevs, dev_list) {
+ if (!sscanf(sdev->devname, SWCONFIG_DEVNAME, &i))
+ continue;
+ if (i < 0 || i > max_switches)
+ continue;
+
+ set_bit(i, &in_use);
+ }
+ i = find_first_zero_bit(&in_use, max_switches);
+
+ if (i == max_switches) {
+ swconfig_unlock();
+ return -ENFILE;
+ }
+
+ /* fill device name */
+ snprintf(dev->devname, IFNAMSIZ, SWCONFIG_DEVNAME, i);
+
+ list_add(&dev->dev_list, &swdevs);
+ swconfig_unlock();
+
+ err = swconfig_create_led_trigger(dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(register_switch);
+
+void
+unregister_switch(struct switch_dev *dev)
+{
+ swconfig_destroy_led_trigger(dev);
+ kfree(dev->portbuf);
+ mutex_lock(&dev->sw_mutex);
+ swconfig_lock();
+ list_del(&dev->dev_list);
+ swconfig_unlock();
+ mutex_unlock(&dev->sw_mutex);
+}
+EXPORT_SYMBOL_GPL(unregister_switch);
+
+
+static int __init
+swconfig_init(void)
+{
+ int i, err;
+
+ INIT_LIST_HEAD(&swdevs);
+ err = genl_register_family(&switch_fam);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(swconfig_ops); i++) {
+ err = genl_register_ops(&switch_fam, &swconfig_ops[i]);
+ if (err)
+ goto unregister;
+ }
+
+ return 0;
+
+unregister:
+ genl_unregister_family(&switch_fam);
+ return err;
+}
+
+static void __exit
+swconfig_exit(void)
+{
+ genl_unregister_family(&switch_fam);
+}
+
+module_init(swconfig_init);
+module_exit(swconfig_exit);
+
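For orientation, the following is a minimal sketch (not part of this patch) of how an Ethernet switch driver might hook into the API registered above. The field names follow the usage visible in swconfig.c (attr_global, n_attr, register_switch, and so on); the exact layout of struct switch_dev_ops lives in swconfig.h, so treat the structure names and values here as assumptions for illustration only.

	/* Illustrative only -- a hypothetical driver, not part of this patch. */
	static int example_get_enable_vlan(struct switch_dev *dev,
					   const struct switch_attr *attr,
					   struct switch_val *val)
	{
		val->value.i = 1;		/* pretend VLAN mode is always on */
		return 0;
	}

	static struct switch_attr example_globals[] = {
		{
			.type = SWITCH_TYPE_INT,
			.name = "enable_vlan",
			.description = "Enable VLAN mode",
			.get = example_get_enable_vlan,
		},
	};

	static const struct switch_dev_ops example_ops = {
		.attr_global = {
			.attr = example_globals,
			.n_attr = ARRAY_SIZE(example_globals),
		},
	};

	static struct switch_dev example_switch = {
		.name = "example-switch",
		.ports = 6,
		.vlans = 16,
		.cpu_port = 5,
		.ops = &example_ops,
	};

	/* typically called from the ethernet/PHY driver's probe path;
	 * dev->alias defaults to netdev->name inside register_switch() */
	static int example_probe(struct net_device *netdev)
	{
		return register_switch(&example_switch, netdev);
	}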
diff --git a/target/linux/generic/files/drivers/net/phy/swconfig_leds.c b/target/linux/generic/files/drivers/net/phy/swconfig_leds.c
new file mode 100644
index 000000000..6f54cc15b
--- /dev/null
+++ b/target/linux/generic/files/drivers/net/phy/swconfig_leds.c
@@ -0,0 +1,354 @@
+/*
+ * swconfig_leds.c: LED trigger support for the switch configuration API
+ *
+ * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+#ifdef CONFIG_SWCONFIG_LEDS
+
+#include <linux/leds.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+
+#define SWCONFIG_LED_TIMER_INTERVAL (HZ / 10)
+#define SWCONFIG_LED_NUM_PORTS 32
+
+struct switch_led_trigger {
+ struct led_trigger trig;
+ struct switch_dev *swdev;
+
+ struct delayed_work sw_led_work;
+ u32 port_mask;
+ u32 port_link;
+ unsigned long port_traffic[SWCONFIG_LED_NUM_PORTS];
+};
+
+struct swconfig_trig_data {
+ struct led_classdev *led_cdev;
+ struct switch_dev *swdev;
+
+ rwlock_t lock;
+ u32 port_mask;
+
+ bool prev_link;
+ unsigned long prev_traffic;
+ enum led_brightness prev_brightness;
+};
+
+static void
+swconfig_trig_set_brightness(struct swconfig_trig_data *trig_data,
+ enum led_brightness brightness)
+{
+ led_brightness_set(trig_data->led_cdev, brightness);
+ trig_data->prev_brightness = brightness;
+}
+
+static void
+swconfig_trig_update_port_mask(struct led_trigger *trigger)
+{
+ struct list_head *entry;
+ struct switch_led_trigger *sw_trig;
+ u32 port_mask;
+
+ if (!trigger)
+ return;
+
+ sw_trig = (void *) trigger;
+
+ port_mask = 0;
+ read_lock(&trigger->leddev_list_lock);
+ list_for_each(entry, &trigger->led_cdevs) {
+ struct led_classdev *led_cdev;
+ struct swconfig_trig_data *trig_data;
+
+ led_cdev = list_entry(entry, struct led_classdev, trig_list);
+ trig_data = led_cdev->trigger_data;
+ if (trig_data) {
+ read_lock(&trig_data->lock);
+ port_mask |= trig_data->port_mask;
+ read_unlock(&trig_data->lock);
+ }
+ }
+ read_unlock(&trigger->leddev_list_lock);
+
+ sw_trig->port_mask = port_mask;
+
+ if (port_mask)
+ schedule_delayed_work(&sw_trig->sw_led_work,
+ SWCONFIG_LED_TIMER_INTERVAL);
+ else
+ cancel_delayed_work_sync(&sw_trig->sw_led_work);
+}
+
+static ssize_t
+swconfig_trig_port_mask_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct swconfig_trig_data *trig_data = led_cdev->trigger_data;
+ unsigned long port_mask;
+ ssize_t ret = -EINVAL;
+ char *after;
+ size_t count;
+
+ port_mask = simple_strtoul(buf, &after, 16);
+ count = after - buf;
+
+ if (*after && isspace(*after))
+ count++;
+
+ if (count == size) {
+ bool changed;
+
+ write_lock(&trig_data->lock);
+
+ changed = (trig_data->port_mask != port_mask);
+ if (changed) {
+ trig_data->port_mask = port_mask;
+ if (port_mask == 0)
+ swconfig_trig_set_brightness(trig_data, LED_OFF);
+ }
+
+ write_unlock(&trig_data->lock);
+
+ if (changed)
+ swconfig_trig_update_port_mask(led_cdev->trigger);
+
+ ret = count;
+ }
+
+ return ret;
+}
+
+static ssize_t
+swconfig_trig_port_mask_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct swconfig_trig_data *trig_data = led_cdev->trigger_data;
+
+ read_lock(&trig_data->lock);
+ sprintf(buf, "%#x\n", trig_data->port_mask);
+ read_unlock(&trig_data->lock);
+
+ return strlen(buf) + 1;
+}
+
+static DEVICE_ATTR(port_mask, 0644, swconfig_trig_port_mask_show,
+ swconfig_trig_port_mask_store);
+
+static void
+swconfig_trig_activate(struct led_classdev *led_cdev)
+{
+ struct switch_led_trigger *sw_trig;
+ struct swconfig_trig_data *trig_data;
+ int err;
+
+ if (led_cdev->trigger->activate != swconfig_trig_activate)
+ return;
+
+ trig_data = kzalloc(sizeof(struct swconfig_trig_data), GFP_KERNEL);
+ if (!trig_data)
+ return;
+
+ sw_trig = (void *) led_cdev->trigger;
+
+ rwlock_init(&trig_data->lock);
+ trig_data->led_cdev = led_cdev;
+ trig_data->swdev = sw_trig->swdev;
+ led_cdev->trigger_data = trig_data;
+
+ err = device_create_file(led_cdev->dev, &dev_attr_port_mask);
+ if (err)
+ goto err_free;
+
+ return;
+
+err_free:
+ led_cdev->trigger_data = NULL;
+ kfree(trig_data);
+}
+
+static void
+swconfig_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct swconfig_trig_data *trig_data;
+
+ swconfig_trig_update_port_mask(led_cdev->trigger);
+
+ trig_data = (void *) led_cdev->trigger_data;
+ if (trig_data) {
+ device_remove_file(led_cdev->dev, &dev_attr_port_mask);
+ kfree(trig_data);
+ }
+}
+
+static void
+swconfig_trig_led_event(struct switch_led_trigger *sw_trig,
+ struct led_classdev *led_cdev)
+{
+ struct swconfig_trig_data *trig_data;
+ u32 port_mask;
+ bool link;
+
+ trig_data = led_cdev->trigger_data;
+ if (!trig_data)
+ return;
+
+ read_lock(&trig_data->lock);
+ port_mask = trig_data->port_mask;
+ read_unlock(&trig_data->lock);
+
+ link = !!(sw_trig->port_link & port_mask);
+ if (!link) {
+ if (link != trig_data->prev_link)
+ led_brightness_set(trig_data->led_cdev, LED_OFF);
+ } else {
+ unsigned long traffic;
+ int i;
+
+ traffic = 0;
+ for (i = 0; i < SWCONFIG_LED_NUM_PORTS; i++) {
+ if (port_mask & (1 << i))
+ traffic += sw_trig->port_traffic[i];
+ }
+
+ if (trig_data->prev_brightness != LED_FULL)
+ swconfig_trig_set_brightness(trig_data, LED_FULL);
+ else if (traffic != trig_data->prev_traffic)
+ swconfig_trig_set_brightness(trig_data, LED_OFF);
+
+ trig_data->prev_traffic = traffic;
+ }
+
+ trig_data->prev_link = link;
+}
+
+static void
+swconfig_trig_update_leds(struct switch_led_trigger *sw_trig)
+{
+ struct list_head *entry;
+ struct led_trigger *trigger;
+
+ trigger = &sw_trig->trig;
+ read_lock(&trigger->leddev_list_lock);
+ list_for_each(entry, &trigger->led_cdevs) {
+ struct led_classdev *led_cdev;
+
+ led_cdev = list_entry(entry, struct led_classdev, trig_list);
+ swconfig_trig_led_event(sw_trig, led_cdev);
+ }
+ read_unlock(&trigger->leddev_list_lock);
+}
+
+static void
+swconfig_led_work_func(struct work_struct *work)
+{
+ struct switch_led_trigger *sw_trig;
+ struct switch_dev *swdev;
+ u32 port_mask;
+ u32 link;
+ int i;
+
+ sw_trig = container_of(work, struct switch_led_trigger,
+ sw_led_work.work);
+
+ port_mask = sw_trig->port_mask;
+ swdev = sw_trig->swdev;
+
+ link = 0;
+ for (i = 0; i < SWCONFIG_LED_NUM_PORTS; i++) {
+ u32 port_bit;
+
+ port_bit = BIT(i);
+ if ((port_mask & port_bit) == 0)
+ continue;
+
+ if (swdev->ops->get_port_link) {
+ struct switch_port_link port_link;
+
+ memset(&port_link, '\0', sizeof(port_link));
+ swdev->ops->get_port_link(swdev, i, &port_link);
+
+ if (port_link.link)
+ link |= port_bit;
+ }
+
+ if (swdev->ops->get_port_stats) {
+ struct switch_port_stats port_stats;
+
+ memset(&port_stats, '\0', sizeof(port_stats));
+ swdev->ops->get_port_stats(swdev, i, &port_stats);
+ sw_trig->port_traffic[i] = port_stats.tx_bytes +
+ port_stats.rx_bytes;
+ }
+ }
+
+ sw_trig->port_link = link;
+
+ swconfig_trig_update_leds(sw_trig);
+
+ schedule_delayed_work(&sw_trig->sw_led_work,
+ SWCONFIG_LED_TIMER_INTERVAL);
+}
+
+static int
+swconfig_create_led_trigger(struct switch_dev *swdev)
+{
+ struct switch_led_trigger *sw_trig;
+ int err;
+
+ if (!swdev->ops->get_port_link)
+ return 0;
+
+ sw_trig = kzalloc(sizeof(struct switch_led_trigger), GFP_KERNEL);
+ if (!sw_trig)
+ return -ENOMEM;
+
+ sw_trig->swdev = swdev;
+ sw_trig->trig.name = swdev->devname;
+ sw_trig->trig.activate = swconfig_trig_activate;
+ sw_trig->trig.deactivate = swconfig_trig_deactivate;
+
+ INIT_DELAYED_WORK(&sw_trig->sw_led_work, swconfig_led_work_func);
+
+ err = led_trigger_register(&sw_trig->trig);
+ if (err)
+ goto err_free;
+
+ swdev->led_trigger = sw_trig;
+
+ return 0;
+
+err_free:
+ kfree(sw_trig);
+ return err;
+}
+
+static void
+swconfig_destroy_led_trigger(struct switch_dev *swdev)
+{
+ struct switch_led_trigger *sw_trig;
+
+ sw_trig = swdev->led_trigger;
+ if (sw_trig) {
+ cancel_delayed_work_sync(&sw_trig->sw_led_work);
+ led_trigger_unregister(&sw_trig->trig);
+ kfree(sw_trig);
+ }
+}
+
+#else /* CONFIG_SWCONFIG_LEDS */
+static inline int
+swconfig_create_led_trigger(struct switch_dev *swdev) { return 0; }
+
+static inline void
+swconfig_destroy_led_trigger(struct switch_dev *swdev) { }
+#endif /* CONFIG_SWCONFIG_LEDS */
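Once the trigger is registered, an LED can be bound to switch ports entirely from user space through the port_mask attribute created above, which is parsed as a hex bitmask where bit i selects port i. The sketch below is not part of this patch; the LED name "example:green:lan" and the trigger name "switch0" (the switch devname) are assumptions.

	/* User-space sketch: attach an LED to switch ports 0 and 1. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f;

		/* select the trigger first (named after the switch devname) */
		f = fopen("/sys/class/leds/example:green:lan/trigger", "w");
		if (!f)
			return 1;
		fprintf(f, "switch0\n");
		fclose(f);

		/* then pick which ports the LED should follow: bit i = port i */
		f = fopen("/sys/class/leds/example:green:lan/port_mask", "w");
		if (!f)
			return 1;
		fprintf(f, "0x3\n");
		fclose(f);
		return 0;
	}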
diff --git a/target/linux/generic/files/drivers/pwm/Kconfig b/target/linux/generic/files/drivers/pwm/Kconfig
new file mode 100644
index 000000000..70f4442ea
--- /dev/null
+++ b/target/linux/generic/files/drivers/pwm/Kconfig
@@ -0,0 +1,20 @@
+#
+# PWM infrastructure and devices
+#
+
+menuconfig GENERIC_PWM
+ tristate "PWM Support"
+ depends on SYSFS
+ help
+ This enables PWM support through the generic PWM library.
+ If unsure, say N.
+
+if GENERIC_PWM
+
+config GPIO_PWM
+ tristate "PWM emulation using GPIO"
+ help
+ This option enables a single-channel PWM device using
+ a kernel interval timer and a GPIO pin. If unsure, say N.
+
+endif
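To give a sense of how the GPIO_PWM option is consumed, here is a hedged board-file sketch (not part of this patch). It assumes that struct gpio_pwm_platform_data, declared in linux/pwm/pwm.h, carries at least a .gpio member, which is the only field the probe function in gpio-pwm.c below dereferences; the GPIO number is made up.

	#include <linux/platform_device.h>
	#include <linux/pwm/pwm.h>

	static struct gpio_pwm_platform_data example_pwm_pdata = {
		.gpio = 17,			/* hypothetical GPIO number */
	};

	static struct platform_device example_pwm_device = {
		.name = "gpio_pwm",		/* must match the driver name */
		.id = 0,
		.dev = {
			.platform_data = &example_pwm_pdata,
		},
	};

	/* registered from board init code:
	 *	platform_device_register(&example_pwm_device);
	 */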
diff --git a/target/linux/generic/files/drivers/pwm/Makefile b/target/linux/generic/files/drivers/pwm/Makefile
new file mode 100644
index 000000000..3a0e99674
--- /dev/null
+++ b/target/linux/generic/files/drivers/pwm/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for pwm devices
+#
+obj-$(CONFIG_GENERIC_PWM) := pwm.o
+obj-$(CONFIG_GPIO_PWM) += gpio-pwm.o
diff --git a/target/linux/generic/files/drivers/pwm/gpio-pwm.c b/target/linux/generic/files/drivers/pwm/gpio-pwm.c
new file mode 100644
index 000000000..dff5d1d62
--- /dev/null
+++ b/target/linux/generic/files/drivers/pwm/gpio-pwm.c
@@ -0,0 +1,298 @@
+/*
+ * drivers/pwm/gpio-pwm.c
+ *
+ * Models a single-channel PWM device using a timer and a GPIO pin.
+ *
+ * Copyright (C) 2010 Bill Gatliff <bgat@billgatliff.com>
+ *
+ * This program is free software; you may redistribute and/or modify
+ * it under the terms of the GNU General Public License Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/pwm/pwm.h>
+
+struct gpio_pwm {
+ struct pwm_device pwm;
+ struct hrtimer timer;
+ struct work_struct work;
+ pwm_callback_t callback;
+ int gpio;
+ unsigned long polarity : 1;
+ unsigned long active : 1;
+};
+
+static inline struct gpio_pwm *to_gpio_pwm(const struct pwm_channel *p)
+{
+ return container_of(p->pwm, struct gpio_pwm, pwm);
+}
+
+static void
+gpio_pwm_work (struct work_struct *work)
+{
+ struct gpio_pwm *gp = container_of(work, struct gpio_pwm, work);
+
+ if (gp->active)
+ gpio_direction_output(gp->gpio, gp->polarity ? 1 : 0);
+ else
+ gpio_direction_output(gp->gpio, gp->polarity ? 0 : 1);
+}
+
+static enum hrtimer_restart
+gpio_pwm_timeout(struct hrtimer *t)
+{
+ struct gpio_pwm *gp = container_of(t, struct gpio_pwm, timer);
+ ktime_t tnew;
+
+ if (unlikely(gp->pwm.channels[0].duty_ticks == 0))
+ gp->active = 0;
+ else if (unlikely(gp->pwm.channels[0].duty_ticks
+ == gp->pwm.channels[0].period_ticks))
+ gp->active = 1;
+ else
+ gp->active ^= 1;
+
+ if (gpio_cansleep(gp->gpio))
+ schedule_work(&gp->work);
+ else
+ gpio_pwm_work(&gp->work);
+
+ if (!gp->active && gp->pwm.channels[0].callback)
+ gp->pwm.channels[0].callback(&gp->pwm.channels[0]);
+
+ if (unlikely(!gp->active &&
+ (gp->pwm.channels[0].flags & BIT(FLAG_STOP)))) {
+ clear_bit(FLAG_STOP, &gp->pwm.channels[0].flags);
+ complete_all(&gp->pwm.channels[0].complete);
+ return HRTIMER_NORESTART;
+ }
+
+ if (gp->active)
+ tnew = ktime_set(0, gp->pwm.channels[0].duty_ticks);
+ else
+ tnew = ktime_set(0, gp->pwm.channels[0].period_ticks
+ - gp->pwm.channels[0].duty_ticks);
+ hrtimer_start(&gp->timer, tnew, HRTIMER_MODE_REL);
+
+ return HRTIMER_NORESTART;
+}
+
+static void gpio_pwm_start(struct pwm_channel *p)
+{
+ struct gpio_pwm *gp = to_gpio_pwm(p);
+
+ gp->active = 0;
+ gpio_pwm_timeout(&gp->timer);
+}
+
+static int
+gpio_pwm_config_nosleep(struct pwm_channel *p,
+ struct pwm_channel_config *c)
+{
+ struct gpio_pwm *gp = to_gpio_pwm(p);
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+
+ switch (c->config_mask) {
+
+ case PWM_CONFIG_DUTY_TICKS:
+ p->duty_ticks = c->duty_ticks;
+ break;
+
+ case PWM_CONFIG_START:
+ if (!hrtimer_active(&gp->timer)) {
+ gpio_pwm_start(p);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_unlock_irqrestore(&p->lock, flags);
+ return ret;
+}
+
+static int
+gpio_pwm_stop_sync(struct pwm_channel *p)
+{
+ struct gpio_pwm *gp = to_gpio_pwm(p);
+ int ret;
+ int was_on = hrtimer_active(&gp->timer);
+
+ if (was_on) {
+ do {
+ init_completion(&p->complete);
+ set_bit(FLAG_STOP, &p->flags);
+ ret = wait_for_completion_interruptible(&p->complete);
+ if (ret)
+ return ret;
+ } while (p->flags & BIT(FLAG_STOP));
+ }
+
+ return was_on;
+}
+
+static int
+gpio_pwm_config(struct pwm_channel *p,
+ struct pwm_channel_config *c)
+{
+ struct gpio_pwm *gp = to_gpio_pwm(p);
+ int was_on = 0;
+
+ if (p->pwm->config_nosleep) {
+ if (!p->pwm->config_nosleep(p, c))
+ return 0;
+ }
+
+ might_sleep();
+
+ was_on = gpio_pwm_stop_sync(p);
+ if (was_on < 0)
+ return was_on;
+
+ if (c->config_mask & PWM_CONFIG_PERIOD_TICKS)
+ p->period_ticks = c->period_ticks;
+
+ if (c->config_mask & PWM_CONFIG_DUTY_TICKS)
+ p->duty_ticks = c->duty_ticks;
+
+ if (c->config_mask & PWM_CONFIG_POLARITY) {
+ gp->polarity = c->polarity ? 1 : 0;
+ p->active_high = gp->polarity;
+ }
+
+ if ((c->config_mask & PWM_CONFIG_START)
+ || (was_on && !(c->config_mask & PWM_CONFIG_STOP)))
+ gpio_pwm_start(p);
+
+ return 0;
+}
+
+static int
+gpio_pwm_set_callback(struct pwm_channel *p,
+ pwm_callback_t callback)
+{
+ struct gpio_pwm *gp = to_gpio_pwm(p);
+ gp->callback = callback;
+ return 0;
+}
+
+static int
+gpio_pwm_request(struct pwm_channel *p)
+{
+ p->tick_hz = 1000000000UL;
+ return 0;
+}
+
+static int __devinit
+gpio_pwm_probe(struct platform_device *pdev)
+{
+ struct gpio_pwm *gp;
+ struct gpio_pwm_platform_data *gpd = pdev->dev.platform_data;
+ int ret = 0;
+
+ /* TODO: create configfs entries, so users can assign GPIOs to
+ * PWMs at runtime instead of creating a platform_device
+ * specification and rebuilding their kernel */
+
+ if (!gpd || gpio_request(gpd->gpio, dev_name(&pdev->dev)))
+ return -EINVAL;
+
+ gp = kzalloc(sizeof(*gp), GFP_KERNEL);
+ if (!gp) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ platform_set_drvdata(pdev, gp);
+
+ gp->pwm.dev = &pdev->dev;
+ gp->pwm.bus_id = dev_name(&pdev->dev);
+ gp->pwm.nchan = 1;
+ gp->gpio = gpd->gpio;
+
+ INIT_WORK(&gp->work, gpio_pwm_work);
+
+ hrtimer_init(&gp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ gp->timer.function = gpio_pwm_timeout;
+
+ gp->pwm.owner = THIS_MODULE;
+ gp->pwm.config_nosleep = gpio_pwm_config_nosleep;
+ gp->pwm.config = gpio_pwm_config;
+ gp->pwm.request = gpio_pwm_request;
+ gp->pwm.set_callback = gpio_pwm_set_callback;
+
+ ret = pwm_register(&gp->pwm);
+ if (ret)
+ goto err_pwm_register;
+
+ return 0;
+
+err_pwm_register:
+ platform_set_drvdata(pdev, 0);
+ kfree(gp);
+err_alloc:
+ return ret;
+}
+
+static int __devexit
+gpio_pwm_remove(struct platform_device *pdev)
+{
+ struct gpio_pwm *gp = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pwm_unregister(&gp->pwm);
+ hrtimer_cancel(&gp->timer);
+ cancel_work_sync(&gp->work);
+ platform_set_drvdata(pdev, 0);
+ kfree(gp);
+
+ return 0;
+}
+
+static struct platform_driver gpio_pwm_driver = {
+ .driver = {
+ .name = "gpio_pwm",
+ .owner = THIS_MODULE,
+ },
+ .probe = gpio_pwm_probe,
+ .remove = __devexit_p(gpio_pwm_remove),
+};
+
+static int __init gpio_pwm_init(void)
+{
+ return platform_driver_register(&gpio_pwm_driver);
+}
+module_init(gpio_pwm_init);
+
+static void __exit gpio_pwm_exit(void)
+{
+ platform_driver_unregister(&gpio_pwm_driver);
+}
+module_exit(gpio_pwm_exit);
+
+MODULE_AUTHOR("Bill Gatliff <bgat@billgatliff.com>");
+MODULE_DESCRIPTION("PWM output using GPIO and a high-resolution timer");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio_pwm");
diff --git a/target/linux/generic/files/drivers/pwm/pwm.c b/target/linux/generic/files/drivers/pwm/pwm.c
new file mode 100644
index 000000000..c1596e9e7
--- /dev/null
+++ b/target/linux/generic/files/drivers/pwm/pwm.c
@@ -0,0 +1,643 @@
+/*
+ * drivers/pwm/pwm.c
+ *
+ * Copyright (C) 2010 Bill Gatliff <bgat@billgatliff.com>
+ *
+ * This program is free software; you may redistribute and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/slab.h> /*kcalloc, kfree since 2.6.34 */
+#include <linux/pwm/pwm.h>
+
+static int __pwm_create_sysfs(struct pwm_device *pwm);
+
+static const char *REQUEST_SYSFS = "sysfs";
+static LIST_HEAD(pwm_device_list);
+static DEFINE_MUTEX(device_list_mutex);
+static struct class pwm_class;
+static struct workqueue_struct *pwm_handler_workqueue;
+
+int pwm_register(struct pwm_device *pwm)
+{
+ struct pwm_channel *p;
+ int wchan;
+ int ret;
+
+ spin_lock_init(&pwm->list_lock);
+
+ p = kcalloc(pwm->nchan, sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ for (wchan = 0; wchan < pwm->nchan; wchan++) {
+ spin_lock_init(&p[wchan].lock);
+ init_completion(&p[wchan].complete);
+ p[wchan].chan = wchan;
+ p[wchan].pwm = pwm;
+ }
+
+ pwm->channels = p;
+
+ mutex_lock(&device_list_mutex);
+
+ list_add_tail(&pwm->list, &pwm_device_list);
+ ret = __pwm_create_sysfs(pwm);
+ if (ret) {
+ mutex_unlock(&device_list_mutex);
+ goto err_create_sysfs;
+ }
+
+ mutex_unlock(&device_list_mutex);
+
+ dev_info(pwm->dev, "%d channel%s\n", pwm->nchan,
+ pwm->nchan > 1 ? "s" : "");
+ return 0;
+
+err_create_sysfs:
+ kfree(p);
+
+ return ret;
+}
+EXPORT_SYMBOL(pwm_register);
+
+static int __match_device(struct device *dev, void *data)
+{
+ return dev_get_drvdata(dev) == data;
+}
+
+int pwm_unregister(struct pwm_device *pwm)
+{
+ int wchan;
+ struct device *dev;
+
+ mutex_lock(&device_list_mutex);
+
+ for (wchan = 0; wchan < pwm->nchan; wchan++) {
+ if (pwm->channels[wchan].flags & BIT(FLAG_REQUESTED)) {
+ mutex_unlock(&device_list_mutex);
+ return -EBUSY;
+ }
+ }
+
+ for (wchan = 0; wchan < pwm->nchan; wchan++) {
+ dev = class_find_device(&pwm_class, NULL,
+ &pwm->channels[wchan],
+ __match_device);
+ if (dev) {
+ put_device(dev);
+ device_unregister(dev);
+ }
+ }
+
+ kfree(pwm->channels);
+ list_del(&pwm->list);
+ mutex_unlock(&device_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(pwm_unregister);
+
+static struct pwm_device *
+__pwm_find_device(const char *bus_id)
+{
+ struct pwm_device *p;
+
+ list_for_each_entry(p, &pwm_device_list, list) {
+ if (!strcmp(bus_id, p->bus_id))
+ return p;
+ }
+ return NULL;
+}
+
+static int
+__pwm_request_channel(struct pwm_channel *p,
+ const char *requester)
+{
+ int ret;
+
+ if (test_and_set_bit(FLAG_REQUESTED, &p->flags))
+ return -EBUSY;
+
+ if (p->pwm->request) {
+ ret = p->pwm->request(p);
+ if (ret) {
+ clear_bit(FLAG_REQUESTED, &p->flags);
+ return ret;
+ }
+ }
+
+ p->requester = requester;
+ if (!strcmp(requester, REQUEST_SYSFS))
+ p->pid = current->pid;
+
+ return 0;
+}
+
+struct pwm_channel *
+pwm_request(const char *bus_id,
+ int chan,
+ const char *requester)
+{
+ struct pwm_device *p;
+ int ret;
+
+ mutex_lock(&device_list_mutex);
+
+ p = __pwm_find_device(bus_id);
+ if (!p || chan >= p->nchan)
+ goto err_no_device;
+
+ if (!try_module_get(p->owner))
+ goto err_module_get_failed;
+
+ ret = __pwm_request_channel(&p->channels[chan], requester);
+ if (ret)
+ goto err_request_failed;
+
+ mutex_unlock(&device_list_mutex);
+ return &p->channels[chan];
+
+err_request_failed:
+ module_put(p->owner);
+err_module_get_failed:
+err_no_device:
+ mutex_unlock(&device_list_mutex);
+ return NULL;
+}
+EXPORT_SYMBOL(pwm_request);
+
+void pwm_free(struct pwm_channel *p)
+{
+ mutex_lock(&device_list_mutex);
+
+ if (!test_and_clear_bit(FLAG_REQUESTED, &p->flags))
+ goto done;
+
+ pwm_stop(p);
+ pwm_unsynchronize(p, NULL);
+ pwm_set_handler(p, NULL, NULL);
+
+ if (p->pwm->free)
+ p->pwm->free(p);
+ module_put(p->pwm->owner);
+done:
+ mutex_unlock(&device_list_mutex);
+}
+EXPORT_SYMBOL(pwm_free);
+
+unsigned long pwm_ns_to_ticks(struct pwm_channel *p,
+ unsigned long nsecs)
+{
+ unsigned long long ticks;
+
+ ticks = nsecs;
+ ticks *= p->tick_hz;
+ do_div(ticks, 1000000000);
+ return ticks;
+}
+EXPORT_SYMBOL(pwm_ns_to_ticks);
+
+unsigned long pwm_ticks_to_ns(struct pwm_channel *p,
+ unsigned long ticks)
+{
+ unsigned long long ns;
+
+ if (!p->tick_hz)
+ return 0;
+
+ ns = ticks;
+ ns *= 1000000000UL;
+ do_div(ns, p->tick_hz);
+ return ns;
+}
+EXPORT_SYMBOL(pwm_ticks_to_ns);
+
+static void
+pwm_config_ns_to_ticks(struct pwm_channel *p,
+ struct pwm_channel_config *c)
+{
+ if (c->config_mask & PWM_CONFIG_PERIOD_NS) {
+ c->period_ticks = pwm_ns_to_ticks(p, c->period_ns);
+ c->config_mask &= ~PWM_CONFIG_PERIOD_NS;
+ c->config_mask |= PWM_CONFIG_PERIOD_TICKS;
+ }
+
+ if (c->config_mask & PWM_CONFIG_DUTY_NS) {
+ c->duty_ticks = pwm_ns_to_ticks(p, c->duty_ns);
+ c->config_mask &= ~PWM_CONFIG_DUTY_NS;
+ c->config_mask |= PWM_CONFIG_DUTY_TICKS;
+ }
+}
+
+static void
+pwm_config_percent_to_ticks(struct pwm_channel *p,
+ struct pwm_channel_config *c)
+{
+ if (c->config_mask & PWM_CONFIG_DUTY_PERCENT) {
+ if (c->config_mask & PWM_CONFIG_PERIOD_TICKS)
+ c->duty_ticks = c->period_ticks;
+ else
+ c->duty_ticks = p->period_ticks;
+
+ c->duty_ticks *= c->duty_percent;
+ c->duty_ticks /= 100;
+ c->config_mask &= ~PWM_CONFIG_DUTY_PERCENT;
+ c->config_mask |= PWM_CONFIG_DUTY_TICKS;
+ }
+}
+
+int pwm_config_nosleep(struct pwm_channel *p,
+ struct pwm_channel_config *c)
+{
+ if (!p->pwm->config_nosleep)
+ return -EINVAL;
+
+ pwm_config_ns_to_ticks(p, c);
+ pwm_config_percent_to_ticks(p, c);
+
+ return p->pwm->config_nosleep(p, c);
+}
+EXPORT_SYMBOL(pwm_config_nosleep);
+
+int pwm_config(struct pwm_channel *p,
+ struct pwm_channel_config *c)
+{
+ int ret = 0;
+
+ if (unlikely(!p->pwm->config))
+ return -EINVAL;
+
+ pwm_config_ns_to_ticks(p, c);
+ pwm_config_percent_to_ticks(p, c);
+
+ switch (c->config_mask & (PWM_CONFIG_PERIOD_TICKS
+ | PWM_CONFIG_DUTY_TICKS)) {
+ case PWM_CONFIG_PERIOD_TICKS:
+ if (p->duty_ticks > c->period_ticks) {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ case PWM_CONFIG_DUTY_TICKS:
+ if (p->period_ticks < c->duty_ticks) {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ case PWM_CONFIG_DUTY_TICKS | PWM_CONFIG_PERIOD_TICKS:
+ if (c->duty_ticks > c->period_ticks) {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ default:
+ break;
+ }
+
+err:
+ dev_dbg(p->pwm->dev, "%s: config_mask %d period_ticks %lu duty_ticks %lu"
+ " polarity %d duty_ns %lu period_ns %lu duty_percent %d\n",
+ __func__, c->config_mask, c->period_ticks, c->duty_ticks,
+ c->polarity, c->duty_ns, c->period_ns, c->duty_percent);
+
+ if (ret)
+ return ret;
+ return p->pwm->config(p, c);
+}
+EXPORT_SYMBOL(pwm_config);
+
+int pwm_set_period_ns(struct pwm_channel *p,
+ unsigned long period_ns)
+{
+ struct pwm_channel_config c = {
+ .config_mask = PWM_CONFIG_PERIOD_TICKS,
+ .period_ticks = pwm_ns_to_ticks(p, period_ns),
+ };
+
+ return pwm_config(p, &c);
+}
+EXPORT_SYMBOL(pwm_set_period_ns);
+
+unsigned long pwm_get_period_ns(struct pwm_channel *p)
+{
+ return pwm_ticks_to_ns(p, p->period_ticks);
+}
+EXPORT_SYMBOL(pwm_get_period_ns);
+
+int pwm_set_duty_ns(struct pwm_channel *p,
+ unsigned long duty_ns)
+{
+ struct pwm_channel_config c = {
+ .config_mask = PWM_CONFIG_DUTY_TICKS,
+ .duty_ticks = pwm_ns_to_ticks(p, duty_ns),
+ };
+ return pwm_config(p, &c);
+}
+EXPORT_SYMBOL(pwm_set_duty_ns);
+
+unsigned long pwm_get_duty_ns(struct pwm_channel *p)
+{
+ return pwm_ticks_to_ns(p, p->duty_ticks);
+}
+EXPORT_SYMBOL(pwm_get_duty_ns);
+
+int pwm_set_duty_percent(struct pwm_channel *p,
+ int percent)
+{
+ struct pwm_channel_config c = {
+ .config_mask = PWM_CONFIG_DUTY_PERCENT,
+ .duty_percent = percent,
+ };
+ return pwm_config(p, &c);
+}
+EXPORT_SYMBOL(pwm_set_duty_percent);
+
+int pwm_set_polarity(struct pwm_channel *p,
+ int active_high)
+{
+ struct pwm_channel_config c = {
+ .config_mask = PWM_CONFIG_POLARITY,
+ .polarity = active_high,
+ };
+ return pwm_config(p, &c);
+}
+EXPORT_SYMBOL(pwm_set_polarity);
+
+int pwm_start(struct pwm_channel *p)
+{
+ struct pwm_channel_config c = {
+ .config_mask = PWM_CONFIG_START,
+ };
+ return pwm_config(p, &c);
+}
+EXPORT_SYMBOL(pwm_start);
+
+int pwm_stop(struct pwm_channel *p)
+{
+ struct pwm_channel_config c = {
+ .config_mask = PWM_CONFIG_STOP,
+ };
+ return pwm_config(p, &c);
+}
+EXPORT_SYMBOL(pwm_stop);
+
+int pwm_synchronize(struct pwm_channel *p,
+ struct pwm_channel *to_p)
+{
+ if (p->pwm != to_p->pwm) {
+ /* TODO: support cross-device synchronization */
+ return -EINVAL;
+ }
+
+ if (!p->pwm->synchronize)
+ return -EINVAL;
+
+ return p->pwm->synchronize(p, to_p);
+}
+EXPORT_SYMBOL(pwm_synchronize);
+
+int pwm_unsynchronize(struct pwm_channel *p,
+ struct pwm_channel *from_p)
+{
+ if (from_p && (p->pwm != from_p->pwm)) {
+ /* TODO: support cross-device synchronization */
+ return -EINVAL;
+ }
+
+ if (!p->pwm->unsynchronize)
+ return -EINVAL;
+
+ return p->pwm->unsynchronize(p, from_p);
+}
+EXPORT_SYMBOL(pwm_unsynchronize);
+
+static void pwm_handler(struct work_struct *w)
+{
+ struct pwm_channel *p = container_of(w, struct pwm_channel,
+ handler_work);
+ if (p->handler && p->handler(p, p->handler_data))
+ pwm_stop(p);
+}
+
+static void __pwm_callback(struct pwm_channel *p)
+{
+ queue_work(pwm_handler_workqueue, &p->handler_work);
+ dev_dbg(p->pwm->dev, "handler %p scheduled with data %p\n",
+ p->handler, p->handler_data);
+}
+
+int pwm_set_handler(struct pwm_channel *p,
+ pwm_handler_t handler,
+ void *data)
+{
+ if (p->pwm->set_callback) {
+ p->handler_data = data;
+ p->handler = handler;
+ INIT_WORK(&p->handler_work, pwm_handler);
+ return p->pwm->set_callback(p, handler ? __pwm_callback : NULL);
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL(pwm_set_handler);
+
+static ssize_t pwm_run_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct pwm_channel *p = dev_get_drvdata(dev);
+ if (sysfs_streq(buf, "1"))
+ pwm_start(p);
+ else if (sysfs_streq(buf, "0"))
+ pwm_stop(p);
+ return len;
+}
+static DEVICE_ATTR(run, 0200, NULL, pwm_run_store);
+
+static ssize_t pwm_duty_ns_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pwm_channel *p = dev_get_drvdata(dev);
+ return sprintf(buf, "%lu\n", pwm_get_duty_ns(p));
+}
+
+static ssize_t pwm_duty_ns_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long duty_ns;
+ struct pwm_channel *p = dev_get_drvdata(dev);
+
+ if (1 == sscanf(buf, "%lu", &duty_ns))
+ pwm_set_duty_ns(p, duty_ns);
+ return len;
+}
+static DEVICE_ATTR(duty_ns, 0644, pwm_duty_ns_show, pwm_duty_ns_store);
+
+static ssize_t pwm_period_ns_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pwm_channel *p = dev_get_drvdata(dev);
+ return sprintf(buf, "%lu\n", pwm_get_period_ns(p));
+}
+
+static ssize_t pwm_period_ns_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long period_ns;
+ struct pwm_channel *p = dev_get_drvdata(dev);
+
+ if (1 == sscanf(buf, "%lu", &period_ns))
+ pwm_set_period_ns(p, period_ns);
+ return len;
+}
+static DEVICE_ATTR(period_ns, 0644, pwm_period_ns_show, pwm_period_ns_store);
+
+static ssize_t pwm_polarity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pwm_channel *p = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", p->active_high ? 1 : 0);
+}
+
+static ssize_t pwm_polarity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int polarity;
+ struct pwm_channel *p = dev_get_drvdata(dev);
+
+ if (1 == sscanf(buf, "%d", &polarity))
+ pwm_set_polarity(p, polarity);
+ return len;
+}
+static DEVICE_ATTR(polarity, 0644, pwm_polarity_show, pwm_polarity_store);
+
+static ssize_t pwm_request_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pwm_channel *p = dev_get_drvdata(dev);
+ mutex_lock(&device_list_mutex);
+ __pwm_request_channel(p, REQUEST_SYSFS);
+ mutex_unlock(&device_list_mutex);
+
+ if (p->pid)
+ return sprintf(buf, "%s %d\n", p->requester, p->pid);
+ else
+ return sprintf(buf, "%s\n", p->requester);
+}
+
+static ssize_t pwm_request_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct pwm_channel *p = dev_get_drvdata(dev);
+ pwm_free(p);
+ return len;
+}
+static DEVICE_ATTR(request, 0644, pwm_request_show, pwm_request_store);
+
+static const struct attribute *pwm_attrs[] =
+{
+ &dev_attr_run.attr,
+ &dev_attr_polarity.attr,
+ &dev_attr_duty_ns.attr,
+ &dev_attr_period_ns.attr,
+ &dev_attr_request.attr,
+ NULL,
+};
+
+static const struct attribute_group pwm_device_attr_group = {
+ .attrs = (struct attribute **)pwm_attrs,
+};
+
+static int __pwm_create_sysfs(struct pwm_device *pwm)
+{
+ int ret = 0;
+ struct device *dev;
+ int wchan;
+
+ for (wchan = 0; wchan < pwm->nchan; wchan++) {
+ dev = device_create(&pwm_class, pwm->dev, MKDEV(0, 0),
+ pwm->channels + wchan,
+ "%s:%d", pwm->bus_id, wchan);
+ if (!dev)
+ goto err_dev_create;
+ ret = sysfs_create_group(&dev->kobj, &pwm_device_attr_group);
+ if (ret)
+ goto err_dev_create;
+ }
+
+ return ret;
+
+err_dev_create:
+ for (wchan = 0; wchan < pwm->nchan; wchan++) {
+ dev = class_find_device(&pwm_class, NULL,
+ &pwm->channels[wchan],
+ __match_device);
+ if (dev) {
+ put_device(dev);
+ device_unregister(dev);
+ }
+ }
+
+ return ret;
+}
+
+static struct class_attribute pwm_class_attrs[] = {
+ __ATTR_NULL,
+};
+
+static struct class pwm_class = {
+ .name = "pwm",
+ .owner = THIS_MODULE,
+
+ .class_attrs = pwm_class_attrs,
+};
+
+static int __init pwm_init(void)
+{
+ int ret;
+
+ /* TODO: how to deal with devices that register very early? */
+ pr_err("%s\n", __func__);
+ ret = class_register(&pwm_class);
+ if (ret < 0)
+ return ret;
+
+ pwm_handler_workqueue = create_workqueue("pwmd");
+
+ return 0;
+}
+postcore_initcall(pwm_init);
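As a usage illustration (not part of this patch), an in-kernel consumer built purely on the helpers exported above might look like the sketch below. The bus_id "gpio_pwm.0" is an assumption matching the platform-device naming used by the GPIO driver earlier in this patch; any registered provider's bus_id would do.

	#include <linux/errno.h>
	#include <linux/pwm/pwm.h>

	static struct pwm_channel *ch;

	static int example_pwm_setup(void)
	{
		ch = pwm_request("gpio_pwm.0", 0, "example-consumer");
		if (!ch)
			return -ENODEV;

		pwm_set_period_ns(ch, 1000000);	/* 1 kHz */
		pwm_set_duty_percent(ch, 25);	/* 25% duty cycle */
		return pwm_start(ch);
	}

	static void example_pwm_teardown(void)
	{
		pwm_stop(ch);
		pwm_free(ch);	/* also clears the handler and sync state */
	}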
diff --git a/target/linux/generic/files/fs/yaffs2/Kconfig b/target/linux/generic/files/fs/yaffs2/Kconfig
new file mode 100644
index 000000000..7b6f836cd
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/Kconfig
@@ -0,0 +1,175 @@
+#
+# YAFFS file system configurations
+#
+
+config YAFFS_FS
+ tristate "YAFFS2 file system support"
+ default n
+ depends on MTD
+ select YAFFS_YAFFS1
+ select YAFFS_YAFFS2
+ help
+ YAFFS2, or Yet Another Flash Filing System, is a filing system
+ optimised for NAND Flash chips.
+
+ To compile the YAFFS2 file system support as a module, choose M
+ here: the module will be called yaffs2.
+
+ If unsure, say N.
+
+ Further information on YAFFS2 is available at
+ <http://www.aleph1.co.uk/yaffs/>.
+
+config YAFFS_YAFFS1
+ bool "512 byte / page devices"
+ depends on YAFFS_FS
+ default y
+ help
+ Enable YAFFS1 support -- yaffs for 512 byte / page devices
+
+ Not needed for 2K-page devices.
+
+ If unsure, say Y.
+
+config YAFFS_9BYTE_TAGS
+ bool "Use older-style on-NAND data format with pageStatus byte"
+ depends on YAFFS_YAFFS1
+ default n
+ help
+
+ The older-style on-NAND data format has a "pageStatus" byte to
+ record chunk/page state. This byte is zeroed when the page is
+ discarded.
+ Choose this option if you have existing on-NAND data using this
+ format that you need to continue to support. New data written
+ also uses the older-style format. Note: Use of this option
+ generally requires that MTD's oob layout be adjusted to use the
+ older-style format. See notes on tags formats and MTD versions.
+
+ If unsure, say N.
+
+config YAFFS_DOES_ECC
+ bool "Lets Yaffs do its own ECC"
+ depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS
+ default n
+ help
+ This enables Yaffs to use its own ECC functions instead of using
+ the ones from the generic MTD-NAND driver.
+
+ If unsure, say N.
+
+config YAFFS_ECC_WRONG_ORDER
+ bool "Use the same ecc byte order as Steven Hill's nand_ecc.c"
+ depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS
+ default n
+ help
+ This makes yaffs_ecc.c use the same ecc byte order as Steven
+ Hill's nand_ecc.c. If not set, then you get the same ecc byte
+ order as SmartMedia.
+
+ If unsure, say N.
+
+config YAFFS_YAFFS2
+ bool "2048 byte (or larger) / page devices"
+ depends on YAFFS_FS
+ default y
+ help
+ Enable YAFFS2 support -- yaffs for >= 2K bytes per page devices
+
+ If unsure, say Y.
+
+config YAFFS_AUTO_YAFFS2
+ bool "Autoselect yaffs2 format"
+ depends on YAFFS_YAFFS2
+ default y
+ help
+ Without this, you need to explicitly use yaffs2 as the file
+ system type. With this, you can say "yaffs" and yaffs or yaffs2
+ will be used depending on the device page size (yaffs on
+ 512-byte page devices, yaffs2 on 2K page devices).
+
+ If unsure, say Y.
+
+config YAFFS_DISABLE_LAZY_LOAD
+ bool "Disable lazy loading"
+ depends on YAFFS_YAFFS2
+ default n
+ help
+ "Lazy loading" defers loading file details until they are
+ required. This saves mount time, but makes the first look-up
+ a bit longer.
+
+ Lazy loading will only happen if enabled by this option being 'n'
+ and if the appropriate tags are available, else yaffs2 will
+ automatically fall back to immediate loading and do the right
+ thing.
+
+ Lazy loading will be required by checkpointing.
+
+ Setting this to 'y' will disable lazy loading.
+
+ If unsure, say N.
+
+config YAFFS_CHECKPOINT_RESERVED_BLOCKS
+ int "Reserved blocks for checkpointing"
+ depends on YAFFS_YAFFS2
+ default 10
+ help
+ Give the number of blocks to reserve for checkpointing.
+ Checkpointing saves the state at unmount, so that mounting is
+ much faster because a full scan of the flash to regenerate this
+ state is not needed. These blocks are reserved per partition, so
+ if you have very small partitions the default (10) may be too
+ costly for you. You can set this value to 0, but that does not
+ disable checkpointing entirely; there simply will be no specially
+ reserved blocks, and checkpointing will use whatever free space
+ is available on the filesystem.
+
+ If unsure, leave this at the default (10), but don't be surprised
+ if roughly 2MB is always in use on your large page device
+ partition (10 x 2k pagesize). When using small partitions, or
+ when space is very tight, you probably want to set this to zero.
+
+config YAFFS_DISABLE_WIDE_TNODES
+ bool "Turn off wide tnodes"
+ depends on YAFFS_FS
+ default n
+ help
+ Wide tnodes are only used for NAND arrays >=32MB for 512-byte
+ page devices and >=128MB for 2k page devices. They use slightly
+ more RAM but are faster since they eliminate chunk group
+ searching.
+
+ Setting this to 'y' will force tnode width to 16 bits and save
+ memory but make large arrays slower.
+
+ If unsure, say N.
+
+config YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+ bool "Force chunk erase check"
+ depends on YAFFS_FS
+ default n
+ help
+ Normally YAFFS only checks chunks before writing until an erased
+ chunk is found. This helps to detect any partially written
+ chunks that might have been left behind by a power loss.
+
+ Enabling this forces on the test that chunks are erased in flash
+ before writing to them. This takes more time but is potentially
+ a bit more secure.
+
+ We suggest setting this to Y during development, while ironing
+ out driver issues etc., and setting it to N if you want faster
+ writing.
+
+ If unsure, say Y.
+
+config YAFFS_SHORT_NAMES_IN_RAM
+ bool "Cache short names in RAM"
+ depends on YAFFS_FS
+ default y
+ help
+ If this config is set, then short names are stored with the
+ yaffs_Object. This costs an extra 16 bytes of RAM per object,
+ but makes look-ups faster.
+
+ If unsure, say Y.
diff --git a/target/linux/generic/files/fs/yaffs2/Makefile b/target/linux/generic/files/fs/yaffs2/Makefile
new file mode 100644
index 000000000..73f46583f
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the linux YAFFS filesystem routines.
+#
+
+obj-$(CONFIG_YAFFS_FS) += yaffs.o
+
+yaffs-y := yaffs_ecc.o yaffs_fs.o yaffs_guts.o yaffs_checkptrw.o
+yaffs-y += yaffs_packedtags2.o yaffs_nand.o yaffs_qsort.o
+yaffs-y += yaffs_tagscompat.o yaffs_tagsvalidity.o
+yaffs-y += yaffs_mtdif1.o yaffs_packedtags1.o
+yaffs-y += yaffs_mtdif.o yaffs_mtdif2.o
diff --git a/target/linux/generic/files/fs/yaffs2/devextras.h b/target/linux/generic/files/fs/yaffs2/devextras.h
new file mode 100644
index 000000000..fcf2690a5
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/devextras.h
@@ -0,0 +1,264 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * This file just holds extra declarations used during development.
+ * Most of these are from kernel includes placed here so we can use them in
+ * applications.
+ *
+ */
+
+#ifndef __EXTRAS_H__
+#define __EXTRAS_H__
+
+#if defined WIN32
+#define __inline__ __inline
+#define new newHack
+#endif
+
+#if !(defined __KERNEL__) || (defined WIN32)
+
+/* User space defines */
+
+typedef unsigned char __u8;
+typedef unsigned short __u16;
+typedef unsigned __u32;
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+#define prefetch(x) 1
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+#define INIT_LIST_HEAD(ptr) do { \
+ (ptr)->next = (ptr); (ptr)->prev = (ptr); \
+} while (0)
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static __inline__ void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static __inline__ void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static __inline__ void list_add_tail(struct list_head *new,
+ struct list_head *head)
+{
+ __list_add(new, head->prev, head);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static __inline__ void __list_del(struct list_head *prev,
+ struct list_head *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+static __inline__ void list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+}
+
+/**
+ * list_del_init - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ */
+static __inline__ void list_del_init(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ INIT_LIST_HEAD(entry);
+}
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static __inline__ int list_empty(struct list_head *head)
+{
+ return head->next == head;
+}
+
+/**
+ * list_splice - join two lists
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static __inline__ void list_splice(struct list_head *list,
+ struct list_head *head)
+{
+ struct list_head *first = list->next;
+
+ if (first != list) {
+ struct list_head *last = list->prev;
+ struct list_head *at = head->next;
+
+ first->prev = head;
+ head->next = first;
+
+ last->next = at;
+ at->prev = last;
+ }
+}
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+ ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
+
+/**
+ * list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ */
+#define list_for_each(pos, head) \
+ for (pos = (head)->next, prefetch(pos->next); pos != (head); \
+ pos = pos->next, prefetch(pos->next))
+
+/**
+ * list_for_each_safe - iterate over a list safe against removal
+ * of list entry
+ * @pos: the &struct list_head to use as a loop counter.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ */
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+
+/*
+ * File types
+ */
+#define DT_UNKNOWN 0
+#define DT_FIFO 1
+#define DT_CHR 2
+#define DT_DIR 4
+#define DT_BLK 6
+#define DT_REG 8
+#define DT_LNK 10
+#define DT_SOCK 12
+#define DT_WHT 14
+
+#ifndef WIN32
+#include <sys/stat.h>
+#endif
+
+/*
+ * Attribute flags. These should be or-ed together to figure out what
+ * has been changed!
+ */
+#define ATTR_MODE 1
+#define ATTR_UID 2
+#define ATTR_GID 4
+#define ATTR_SIZE 8
+#define ATTR_ATIME 16
+#define ATTR_MTIME 32
+#define ATTR_CTIME 64
+#define ATTR_ATIME_SET 128
+#define ATTR_MTIME_SET 256
+#define ATTR_FORCE 512 /* Not a change, but a change it */
+#define ATTR_ATTR_FLAG 1024
+
+struct iattr {
+ unsigned int ia_valid;
+ unsigned ia_mode;
+ unsigned ia_uid;
+ unsigned ia_gid;
+ unsigned ia_size;
+ unsigned ia_atime;
+ unsigned ia_mtime;
+ unsigned ia_ctime;
+ unsigned int ia_attr_flags;
+};
+
+#define KERN_DEBUG
+
+#else
+
+#ifndef WIN32
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#endif
+
+#endif
+
+#if defined WIN32
+#undef new
+#endif
+
+#endif
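The user-space list fallbacks above mirror the kernel's list.h; a short usage sketch (not part of this patch, all names hypothetical) ties LIST_HEAD, list_for_each and list_entry together:

	/* Assumes the user-space branch of the header above is in effect. */
	struct item {
		int value;
		struct list_head link;
	};

	static LIST_HEAD(items);

	static int sum_items(void)
	{
		struct list_head *pos;
		int sum = 0;

		list_for_each(pos, &items) {
			struct item *it = list_entry(pos, struct item, link);
			sum += it->value;
		}
		return sum;
	}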
diff --git a/target/linux/generic/files/fs/yaffs2/moduleconfig.h b/target/linux/generic/files/fs/yaffs2/moduleconfig.h
new file mode 100644
index 000000000..016391ca5
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/moduleconfig.h
@@ -0,0 +1,65 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Martin Fouts <Martin.Fouts@palmsource.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_CONFIG_H__
+#define __YAFFS_CONFIG_H__
+
+#ifdef YAFFS_OUT_OF_TREE
+
+/* DO NOT UNSET THESE THREE. YAFFS2 will not compile if you do. */
+#define CONFIG_YAFFS_FS
+#define CONFIG_YAFFS_YAFFS1
+#define CONFIG_YAFFS_YAFFS2
+
+/* These options are independent of each other. Select those that matter. */
+
+/* Default: Not selected */
+/* Meaning: Yaffs does its own ECC, rather than using MTD ECC */
+//#define CONFIG_YAFFS_DOES_ECC
+
+/* Default: Not selected */
+/* Meaning: ECC byte order is 'wrong'. Only meaningful if */
+/* CONFIG_YAFFS_DOES_ECC is set */
+//#define CONFIG_YAFFS_ECC_WRONG_ORDER
+
+/* Default: Selected */
+/* Meaning: Disables testing whether chunks are erased before writing to them */
+#define CONFIG_YAFFS_DISABLE_CHUNK_ERASED_CHECK
+
+/* Default: Selected */
+/* Meaning: Cache short names, taking more RAM, but faster look-ups */
+#define CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+
+/* Default: 10 */
+/* Meaning: set the count of blocks to reserve for checkpointing */
+#define CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS 10
+
+/*
+Older-style on-NAND data format has a "pageStatus" byte to record
+chunk/page state. This byte is zeroed when the page is discarded.
+Choose this option if you have existing on-NAND data in this format
+that you need to continue to support. New data written also uses the
+older-style format.
+Note: Use of this option generally requires that MTD's oob layout be
+adjusted to use the older-style format. See notes on tags formats and
+MTD versions.
+*/
+/* Default: Not selected */
+/* Meaning: Use older-style on-NAND data format with pageStatus byte */
+#define CONFIG_YAFFS_9BYTE_TAGS
+
+#endif /* YAFFS_OUT_OF_TREE */
+
+#endif /* __YAFFS_CONFIG_H__ */
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_checkptrw.c b/target/linux/generic/files/fs/yaffs2/yaffs_checkptrw.c
new file mode 100644
index 000000000..933a33fb8
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_checkptrw.c
@@ -0,0 +1,404 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+const char *yaffs_checkptrw_c_version =
+ "$Id: yaffs_checkptrw.c,v 1.14 2007-05-15 20:07:40 charles Exp $";
+
+
+#include "yaffs_checkptrw.h"
+
+
+static int yaffs_CheckpointSpaceOk(yaffs_Device *dev)
+{
+
+ int blocksAvailable = dev->nErasedBlocks - dev->nReservedBlocks;
+
+ T(YAFFS_TRACE_CHECKPOINT,
+ (TSTR("checkpt blocks available = %d" TENDSTR),
+ blocksAvailable));
+
+
+ return (blocksAvailable <= 0) ? 0 : 1;
+}
+
+
+static int yaffs_CheckpointErase(yaffs_Device *dev)
+{
+
+ int i;
+
+
+ if(!dev->eraseBlockInNAND)
+ return 0;
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("checking blocks %d to %d"TENDSTR),
+ dev->internalStartBlock,dev->internalEndBlock));
+
+ for(i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,i);
+ if(bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT){
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("erasing checkpt block %d"TENDSTR),i));
+ if(dev->eraseBlockInNAND(dev,i- dev->blockOffset /* realign */)){
+ bi->blockState = YAFFS_BLOCK_STATE_EMPTY;
+ dev->nErasedBlocks++;
+ dev->nFreeChunks += dev->nChunksPerBlock;
+ }
+ else {
+ dev->markNANDBlockBad(dev,i);
+ bi->blockState = YAFFS_BLOCK_STATE_DEAD;
+ }
+ }
+ }
+
+ dev->blocksInCheckpoint = 0;
+
+ return 1;
+}
+
+
+static void yaffs_CheckpointFindNextErasedBlock(yaffs_Device *dev)
+{
+ int i;
+ int blocksAvailable = dev->nErasedBlocks - dev->nReservedBlocks;
+ T(YAFFS_TRACE_CHECKPOINT,
+ (TSTR("allocating checkpt block: erased %d reserved %d avail %d next %d "TENDSTR),
+ dev->nErasedBlocks,dev->nReservedBlocks,blocksAvailable,dev->checkpointNextBlock));
+
+ if(dev->checkpointNextBlock >= 0 &&
+ dev->checkpointNextBlock <= dev->internalEndBlock &&
+ blocksAvailable > 0){
+
+ for(i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++){
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,i);
+ if(bi->blockState == YAFFS_BLOCK_STATE_EMPTY){
+ dev->checkpointNextBlock = i + 1;
+ dev->checkpointCurrentBlock = i;
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("allocating checkpt block %d"TENDSTR),i));
+ return;
+ }
+ }
+ }
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("out of checkpt blocks"TENDSTR)));
+
+ dev->checkpointNextBlock = -1;
+ dev->checkpointCurrentBlock = -1;
+}
+
+static void yaffs_CheckpointFindNextCheckpointBlock(yaffs_Device *dev)
+{
+ int i;
+ yaffs_ExtendedTags tags;
+
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("find next checkpt block: start: blocks %d next %d" TENDSTR),
+ dev->blocksInCheckpoint, dev->checkpointNextBlock));
+
+ if(dev->blocksInCheckpoint < dev->checkpointMaxBlocks)
+ for(i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++){
+ int chunk = i * dev->nChunksPerBlock;
+ int realignedChunk = chunk - dev->chunkOffset;
+
+ dev->readChunkWithTagsFromNAND(dev,realignedChunk,NULL,&tags);
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("find next checkpt block: search: block %d oid %d seq %d eccr %d" TENDSTR),
+ i, tags.objectId,tags.sequenceNumber,tags.eccResult));
+
+ if(tags.sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA){
+ /* Right kind of block */
+ dev->checkpointNextBlock = tags.objectId;
+ dev->checkpointCurrentBlock = i;
+ dev->checkpointBlockList[dev->blocksInCheckpoint] = i;
+ dev->blocksInCheckpoint++;
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("found checkpt block %d"TENDSTR),i));
+ return;
+ }
+ }
+
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("found no more checkpt blocks"TENDSTR)));
+
+ dev->checkpointNextBlock = -1;
+ dev->checkpointCurrentBlock = -1;
+}
+
+
+int yaffs_CheckpointOpen(yaffs_Device *dev, int forWriting)
+{
+
+ /* Got the functions we need? */
+ if (!dev->writeChunkWithTagsToNAND ||
+ !dev->readChunkWithTagsFromNAND ||
+ !dev->eraseBlockInNAND ||
+ !dev->markNANDBlockBad)
+ return 0;
+
+ if(forWriting && !yaffs_CheckpointSpaceOk(dev))
+ return 0;
+
+ if(!dev->checkpointBuffer)
+ dev->checkpointBuffer = YMALLOC_DMA(dev->nDataBytesPerChunk);
+ if(!dev->checkpointBuffer)
+ return 0;
+
+
+ dev->checkpointPageSequence = 0;
+
+ dev->checkpointOpenForWrite = forWriting;
+
+ dev->checkpointByteCount = 0;
+ dev->checkpointSum = 0;
+ dev->checkpointXor = 0;
+ dev->checkpointCurrentBlock = -1;
+ dev->checkpointCurrentChunk = -1;
+ dev->checkpointNextBlock = dev->internalStartBlock;
+
+ /* Erase all the blocks in the checkpoint area */
+ if(forWriting){
+ memset(dev->checkpointBuffer,0,dev->nDataBytesPerChunk);
+ dev->checkpointByteOffset = 0;
+ return yaffs_CheckpointErase(dev);
+
+
+ } else {
+ int i;
+ /* Set to a value that will kick off a read */
+ dev->checkpointByteOffset = dev->nDataBytesPerChunk;
+ /* A checkpoint block list of 1 checkpoint block per 16 blocks is (hopefully)
+ * going to be way more than we need */
+ dev->blocksInCheckpoint = 0;
+ dev->checkpointMaxBlocks = (dev->internalEndBlock - dev->internalStartBlock)/16 + 2;
+ dev->checkpointBlockList = YMALLOC(sizeof(int) * dev->checkpointMaxBlocks);
+ for(i = 0; i < dev->checkpointMaxBlocks; i++)
+ dev->checkpointBlockList[i] = -1;
+ }
+
+ return 1;
+}
+
+int yaffs_GetCheckpointSum(yaffs_Device *dev, __u32 *sum)
+{
+ __u32 compositeSum;
+ compositeSum = (dev->checkpointSum << 8) | (dev->checkpointXor & 0xFF);
+ *sum = compositeSum;
+ return 1;
+}
+
+static int yaffs_CheckpointFlushBuffer(yaffs_Device *dev)
+{
+
+ int chunk;
+ int realignedChunk;
+
+ yaffs_ExtendedTags tags;
+
+ if(dev->checkpointCurrentBlock < 0){
+ yaffs_CheckpointFindNextErasedBlock(dev);
+ dev->checkpointCurrentChunk = 0;
+ }
+
+ if(dev->checkpointCurrentBlock < 0)
+ return 0;
+
+ tags.chunkDeleted = 0;
+ tags.objectId = dev->checkpointNextBlock; /* Hint to next place to look */
+ tags.chunkId = dev->checkpointPageSequence + 1;
+ tags.sequenceNumber = YAFFS_SEQUENCE_CHECKPOINT_DATA;
+ tags.byteCount = dev->nDataBytesPerChunk;
+ if(dev->checkpointCurrentChunk == 0){
+ /* First chunk we write for the block? Set block state to
+ checkpoint */
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,dev->checkpointCurrentBlock);
+ bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT;
+ dev->blocksInCheckpoint++;
+ }
+
+ chunk = dev->checkpointCurrentBlock * dev->nChunksPerBlock + dev->checkpointCurrentChunk;
+
+
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("checkpoint wite buffer nand %d(%d:%d) objid %d chId %d" TENDSTR),
+ chunk, dev->checkpointCurrentBlock, dev->checkpointCurrentChunk,tags.objectId,tags.chunkId));
+
+ realignedChunk = chunk - dev->chunkOffset;
+
+ dev->writeChunkWithTagsToNAND(dev,realignedChunk,dev->checkpointBuffer,&tags);
+ dev->checkpointByteOffset = 0;
+ dev->checkpointPageSequence++;
+ dev->checkpointCurrentChunk++;
+ if(dev->checkpointCurrentChunk >= dev->nChunksPerBlock){
+ dev->checkpointCurrentChunk = 0;
+ dev->checkpointCurrentBlock = -1;
+ }
+ memset(dev->checkpointBuffer,0,dev->nDataBytesPerChunk);
+
+ return 1;
+}
+
+
+int yaffs_CheckpointWrite(yaffs_Device *dev,const void *data, int nBytes)
+{
+ int i=0;
+ int ok = 1;
+
+
+ __u8 * dataBytes = (__u8 *)data;
+
+
+
+ if(!dev->checkpointBuffer)
+ return 0;
+
+ if(!dev->checkpointOpenForWrite)
+ return -1;
+
+ while(i < nBytes && ok) {
+
+
+
+ dev->checkpointBuffer[dev->checkpointByteOffset] = *dataBytes ;
+ dev->checkpointSum += *dataBytes;
+ dev->checkpointXor ^= *dataBytes;
+
+ dev->checkpointByteOffset++;
+ i++;
+ dataBytes++;
+ dev->checkpointByteCount++;
+
+
+ if(dev->checkpointByteOffset < 0 ||
+ dev->checkpointByteOffset >= dev->nDataBytesPerChunk)
+ ok = yaffs_CheckpointFlushBuffer(dev);
+
+ }
+
+ return i;
+}
+
+int yaffs_CheckpointRead(yaffs_Device *dev, void *data, int nBytes)
+{
+ int i=0;
+ int ok = 1;
+ yaffs_ExtendedTags tags;
+
+
+ int chunk;
+ int realignedChunk;
+
+ __u8 *dataBytes = (__u8 *)data;
+
+ if(!dev->checkpointBuffer)
+ return 0;
+
+ if(dev->checkpointOpenForWrite)
+ return -1;
+
+ while(i < nBytes && ok) {
+
+
+ if(dev->checkpointByteOffset < 0 ||
+ dev->checkpointByteOffset >= dev->nDataBytesPerChunk) {
+
+ if(dev->checkpointCurrentBlock < 0){
+ yaffs_CheckpointFindNextCheckpointBlock(dev);
+ dev->checkpointCurrentChunk = 0;
+ }
+
+ if(dev->checkpointCurrentBlock < 0)
+ ok = 0;
+ else {
+
+ chunk = dev->checkpointCurrentBlock * dev->nChunksPerBlock +
+ dev->checkpointCurrentChunk;
+
+ realignedChunk = chunk - dev->chunkOffset;
+
+ /* read in the next chunk */
+ /* printf("read checkpoint page %d\n",dev->checkpointPage); */
+ dev->readChunkWithTagsFromNAND(dev, realignedChunk,
+ dev->checkpointBuffer,
+ &tags);
+
+ if(tags.chunkId != (dev->checkpointPageSequence + 1) ||
+ tags.sequenceNumber != YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ ok = 0;
+
+ dev->checkpointByteOffset = 0;
+ dev->checkpointPageSequence++;
+ dev->checkpointCurrentChunk++;
+
+ if(dev->checkpointCurrentChunk >= dev->nChunksPerBlock)
+ dev->checkpointCurrentBlock = -1;
+ }
+ }
+
+ if(ok){
+ *dataBytes = dev->checkpointBuffer[dev->checkpointByteOffset];
+ dev->checkpointSum += *dataBytes;
+ dev->checkpointXor ^= *dataBytes;
+ dev->checkpointByteOffset++;
+ i++;
+ dataBytes++;
+ dev->checkpointByteCount++;
+ }
+ }
+
+ return i;
+}
+
+int yaffs_CheckpointClose(yaffs_Device *dev)
+{
+
+ if(dev->checkpointOpenForWrite){
+ if(dev->checkpointByteOffset != 0)
+ yaffs_CheckpointFlushBuffer(dev);
+ } else {
+ int i;
+ for(i = 0; i < dev->blocksInCheckpoint && dev->checkpointBlockList[i] >= 0; i++){
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,dev->checkpointBlockList[i]);
+ if(bi->blockState == YAFFS_BLOCK_STATE_EMPTY)
+ bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT;
+ else {
+ // Todo this looks odd...
+ }
+ }
+ YFREE(dev->checkpointBlockList);
+ dev->checkpointBlockList = NULL;
+ }
+
+ dev->nFreeChunks -= dev->blocksInCheckpoint * dev->nChunksPerBlock;
+ dev->nErasedBlocks -= dev->blocksInCheckpoint;
+
+
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("checkpoint byte count %d" TENDSTR),
+ dev->checkpointByteCount));
+
+ if(dev->checkpointBuffer){
+ /* free the buffer */
+ YFREE(dev->checkpointBuffer);
+ dev->checkpointBuffer = NULL;
+ return 1;
+ }
+ else
+ return 0;
+
+}
+
+int yaffs_CheckpointInvalidateStream(yaffs_Device *dev)
+{
+ /* Erase the checkpoint blocks to invalidate the checkpoint */
+
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("checkpoint invalidate"TENDSTR)));
+
+ if(!yaffs_CheckpointSpaceOk(dev))
+ return 0;
+
+ return yaffs_CheckpointErase(dev);
+}
+
+
+
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_checkptrw.h b/target/linux/generic/files/fs/yaffs2/yaffs_checkptrw.h
new file mode 100644
index 000000000..d3ff17405
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_checkptrw.h
@@ -0,0 +1,35 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_CHECKPTRW_H__
+#define __YAFFS_CHECKPTRW_H__
+
+#include "yaffs_guts.h"
+
+int yaffs_CheckpointOpen(yaffs_Device *dev, int forWriting);
+
+int yaffs_CheckpointWrite(yaffs_Device *dev,const void *data, int nBytes);
+
+int yaffs_CheckpointRead(yaffs_Device *dev,void *data, int nBytes);
+
+int yaffs_GetCheckpointSum(yaffs_Device *dev, __u32 *sum);
+
+int yaffs_CheckpointClose(yaffs_Device *dev);
+
+int yaffs_CheckpointInvalidateStream(yaffs_Device *dev);
+
+
+#endif
+
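
To make the checkpoint stream interface above concrete, here is a minimal sketch of a writer in the style of its callers in yaffs_guts.c. It is illustrative only: example_save_blob is a hypothetical name, and dev must already be a fully initialised yaffs_Device with its NAND access functions and geometry set up.

#include "yaffs_checkptrw.h"

/* Push an opaque blob through the checkpoint stream; returns 1 on
 * success, 0 on failure, mirroring the 1/0 convention used above. */
static int example_save_blob(yaffs_Device *dev, const void *blob,
                             int nBytes, __u32 *sumOut)
{
        if (!yaffs_CheckpointOpen(dev, 1))      /* 1 == open for writing */
                return 0;

        if (yaffs_CheckpointWrite(dev, blob, nBytes) != nBytes) {
                yaffs_CheckpointClose(dev);
                return 0;
        }

        /* Composite checksum: (byte sum << 8) | (running xor & 0xFF). */
        yaffs_GetCheckpointSum(dev, sumOut);

        return yaffs_CheckpointClose(dev);
}

Reading back is symmetrical: open with forWriting == 0, call yaffs_CheckpointRead() until the expected number of bytes has been returned, then compare the recomputed sum.
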
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_ecc.c b/target/linux/generic/files/fs/yaffs2/yaffs_ecc.c
new file mode 100644
index 000000000..e2860393d
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_ecc.c
@@ -0,0 +1,331 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
+ * blocks are used on a 512-byte NAND page.
+ *
+ */
+
+/* Table generated by gen-ecc.c
+ * Using a table means we do not have to calculate p1..p4 and p1'..p4'
+ * for each byte of data. These are instead provided in a table in bits7..2.
+ * Bit 0 of each entry indicates whether the entry has an odd or even parity, and therefore
+ * this byte's influence on the line parity.
+ */
+
+const char *yaffs_ecc_c_version =
+ "$Id: yaffs_ecc.c,v 1.9 2007-02-14 01:09:06 wookey Exp $";
+
+#include "yportenv.h"
+
+#include "yaffs_ecc.h"
+
+static const unsigned char column_parity_table[] = {
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+};
+
+/* Count the bits in an unsigned char or a U32 */
+
+static int yaffs_CountBits(unsigned char x)
+{
+ int r = 0;
+ while (x) {
+ if (x & 1)
+ r++;
+ x >>= 1;
+ }
+ return r;
+}
+
+static int yaffs_CountBits32(unsigned x)
+{
+ int r = 0;
+ while (x) {
+ if (x & 1)
+ r++;
+ x >>= 1;
+ }
+ return r;
+}
+
+/* Calculate the ECC for a 256-byte block of data */
+void yaffs_ECCCalculate(const unsigned char *data, unsigned char *ecc)
+{
+ unsigned int i;
+
+ unsigned char col_parity = 0;
+ unsigned char line_parity = 0;
+ unsigned char line_parity_prime = 0;
+ unsigned char t;
+ unsigned char b;
+
+ for (i = 0; i < 256; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) // odd number of bits in the byte
+ {
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+
+ }
+
+ ecc[2] = (~col_parity) | 0x03;
+
+ t = 0;
+ if (line_parity & 0x80)
+ t |= 0x80;
+ if (line_parity_prime & 0x80)
+ t |= 0x40;
+ if (line_parity & 0x40)
+ t |= 0x20;
+ if (line_parity_prime & 0x40)
+ t |= 0x10;
+ if (line_parity & 0x20)
+ t |= 0x08;
+ if (line_parity_prime & 0x20)
+ t |= 0x04;
+ if (line_parity & 0x10)
+ t |= 0x02;
+ if (line_parity_prime & 0x10)
+ t |= 0x01;
+ ecc[1] = ~t;
+
+ t = 0;
+ if (line_parity & 0x08)
+ t |= 0x80;
+ if (line_parity_prime & 0x08)
+ t |= 0x40;
+ if (line_parity & 0x04)
+ t |= 0x20;
+ if (line_parity_prime & 0x04)
+ t |= 0x10;
+ if (line_parity & 0x02)
+ t |= 0x08;
+ if (line_parity_prime & 0x02)
+ t |= 0x04;
+ if (line_parity & 0x01)
+ t |= 0x02;
+ if (line_parity_prime & 0x01)
+ t |= 0x01;
+ ecc[0] = ~t;
+
+#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
+ // Swap the bytes into the wrong order
+ t = ecc[0];
+ ecc[0] = ecc[1];
+ ecc[1] = t;
+#endif
+}
+
+
+/* Correct the ECC on a 256 byte block of data */
+
+int yaffs_ECCCorrect(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc)
+{
+ unsigned char d0, d1, d2; /* deltas */
+
+ d0 = read_ecc[0] ^ test_ecc[0];
+ d1 = read_ecc[1] ^ test_ecc[1];
+ d2 = read_ecc[2] ^ test_ecc[2];
+
+ if ((d0 | d1 | d2) == 0)
+ return 0; /* no error */
+
+ if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
+ ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
+ ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
+ /* Single bit (recoverable) error in data */
+
+ unsigned byte;
+ unsigned bit;
+
+#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
+ // swap the bytes to correct for the wrong order
+ unsigned char t;
+
+ t = d0;
+ d0 = d1;
+ d1 = t;
+#endif
+
+ bit = byte = 0;
+
+ if (d1 & 0x80)
+ byte |= 0x80;
+ if (d1 & 0x20)
+ byte |= 0x40;
+ if (d1 & 0x08)
+ byte |= 0x20;
+ if (d1 & 0x02)
+ byte |= 0x10;
+ if (d0 & 0x80)
+ byte |= 0x08;
+ if (d0 & 0x20)
+ byte |= 0x04;
+ if (d0 & 0x08)
+ byte |= 0x02;
+ if (d0 & 0x02)
+ byte |= 0x01;
+
+ if (d2 & 0x80)
+ bit |= 0x04;
+ if (d2 & 0x20)
+ bit |= 0x02;
+ if (d2 & 0x08)
+ bit |= 0x01;
+
+ data[byte] ^= (1 << bit);
+
+ return 1; /* Corrected the error */
+ }
+
+ if ((yaffs_CountBits(d0) +
+ yaffs_CountBits(d1) +
+ yaffs_CountBits(d2)) == 1) {
+ /* Recoverable error in ecc */
+
+ read_ecc[0] = test_ecc[0];
+ read_ecc[1] = test_ecc[1];
+ read_ecc[2] = test_ecc[2];
+
+ return 1; /* Corrected the error */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+
+}
+
+
+/*
+ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
+ */
+void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes,
+ yaffs_ECCOther * eccOther)
+{
+ unsigned int i;
+
+ unsigned char col_parity = 0;
+ unsigned line_parity = 0;
+ unsigned line_parity_prime = 0;
+ unsigned char b;
+
+ for (i = 0; i < nBytes; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) {
+ /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+
+ }
+
+ eccOther->colParity = (col_parity >> 2) & 0x3f;
+ eccOther->lineParity = line_parity;
+ eccOther->lineParityPrime = line_parity_prime;
+}
+
+int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes,
+ yaffs_ECCOther * read_ecc,
+ const yaffs_ECCOther * test_ecc)
+{
+ unsigned char cDelta; /* column parity delta */
+ unsigned lDelta; /* line parity delta */
+ unsigned lDeltaPrime; /* line parity prime delta */
+ unsigned bit;
+
+ cDelta = read_ecc->colParity ^ test_ecc->colParity;
+ lDelta = read_ecc->lineParity ^ test_ecc->lineParity;
+ lDeltaPrime = read_ecc->lineParityPrime ^ test_ecc->lineParityPrime;
+
+ if ((cDelta | lDelta | lDeltaPrime) == 0)
+ return 0; /* no error */
+
+ if (lDelta == ~lDeltaPrime &&
+ (((cDelta ^ (cDelta >> 1)) & 0x15) == 0x15))
+ {
+ /* Single bit (recoverable) error in data */
+
+ bit = 0;
+
+ if (cDelta & 0x20)
+ bit |= 0x04;
+ if (cDelta & 0x08)
+ bit |= 0x02;
+ if (cDelta & 0x02)
+ bit |= 0x01;
+
+ if(lDelta >= nBytes)
+ return -1;
+
+ data[lDelta] ^= (1 << bit);
+
+ return 1; /* corrected */
+ }
+
+ if ((yaffs_CountBits32(lDelta) + yaffs_CountBits32(lDeltaPrime) +
+ yaffs_CountBits(cDelta)) == 1) {
+ /* Recoverable error in ecc */
+
+ *read_ecc = *test_ecc;
+ return 1; /* corrected */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+
+}
+
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_ecc.h b/target/linux/generic/files/fs/yaffs2/yaffs_ecc.h
new file mode 100644
index 000000000..79bc3d117
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_ecc.h
@@ -0,0 +1,44 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+ /*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
+ * blocks are used on a 512-byte NAND page.
+ *
+ */
+
+#ifndef __YAFFS_ECC_H__
+#define __YAFFS_ECC_H__
+
+typedef struct {
+ unsigned char colParity;
+ unsigned lineParity;
+ unsigned lineParityPrime;
+} yaffs_ECCOther;
+
+void yaffs_ECCCalculate(const unsigned char *data, unsigned char *ecc);
+int yaffs_ECCCorrect(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc);
+
+void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes,
+ yaffs_ECCOther * ecc);
+int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes,
+ yaffs_ECCOther * read_ecc,
+ const yaffs_ECCOther * test_ecc);
+#endif
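
As a concrete illustration of the interface above, the following sketch protects one 256-byte block, simulates a single-bit flip, and lets yaffs_ECCCorrect() repair it. It is illustrative only (example_ecc_roundtrip is a hypothetical name); the return-value convention (0 = clean, 1 = corrected, -1 = unrecoverable) comes from yaffs_ecc.c above.

#include <string.h>
#include "yaffs_ecc.h"

static int example_ecc_roundtrip(void)
{
        unsigned char data[256];
        unsigned char ecc_on_nand[3];   /* ECC stored with the data at write time */
        unsigned char ecc_of_read[3];   /* ECC recomputed from the data read back */

        memset(data, 0xA5, sizeof(data));
        yaffs_ECCCalculate(data, ecc_on_nand);

        data[17] ^= 0x04;               /* simulate one flipped bit in byte 17 */
        yaffs_ECCCalculate(data, ecc_of_read);

        /* read_ecc is what was stored on NAND, test_ecc is what the data
         * now computes to; a single-bit data error is fixed in place. */
        return yaffs_ECCCorrect(data, ecc_on_nand, ecc_of_read);
}

For the 512-byte pages mentioned in the header comment, the same calls are simply made twice, once per 256-byte half.
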
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_fs.c b/target/linux/generic/files/fs/yaffs2/yaffs_fs.c
new file mode 100644
index 000000000..67001b10e
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_fs.c
@@ -0,0 +1,2299 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ * Acknowledgements:
+ * Luc van OostenRyck for numerous patches.
+ * Nick Bane for numerous patches.
+ * Nick Bane for 2.5/2.6 integration.
+ * Andras Toth for mknod rdev issue.
+ * Michael Fischer for finding the problem with inode inconsistency.
+ * Some code bodily lifted from JFFS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ *
+ * This is the file system front-end to YAFFS that hooks it up to
+ * the VFS.
+ *
+ * Special notes:
+ * >> 2.4: sb->u.generic_sbp points to the yaffs_Device associated with
+ * this superblock
+ * >> 2.6: sb->s_fs_info points to the yaffs_Device associated with this
+ * superblock
+ * >> inode->u.generic_ip points to the associated yaffs_Object.
+ */
+
+const char *yaffs_fs_c_version =
+ "$Id: yaffs_fs.c,v 1.63 2007-09-19 20:35:40 imcd Exp $";
+extern const char *yaffs_guts_c_version;
+
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+#include <linux/config.h>
+#endif
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/smp_lock.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+
+#include <linux/statfs.h> /* Added NCB 15-8-2003 */
+#include <asm/statfs.h>
+#define UnlockPage(p) unlock_page(p)
+#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+
+/* FIXME: use sb->s_id instead ? */
+#define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf)
+
+#else
+
+#include <linux/locks.h>
+#define BDEVNAME_SIZE 0
+#define yaffs_devname(sb, buf) kdevname(sb->s_dev)
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
+#define __user
+#endif
+
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+#define WRITE_SIZE_STR "writesize"
+#define WRITE_SIZE(mtd) (mtd)->writesize
+#else
+#define WRITE_SIZE_STR "oobblock"
+#define WRITE_SIZE(mtd) (mtd)->oobblock
+#endif
+
+#include <asm/uaccess.h>
+
+#include "yportenv.h"
+#include "yaffs_guts.h"
+
+#include <linux/mtd/mtd.h>
+#include "yaffs_mtdif.h"
+#include "yaffs_mtdif1.h"
+#include "yaffs_mtdif2.h"
+
+unsigned int yaffs_traceMask = YAFFS_TRACE_BAD_BLOCKS;
+unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
+
+/* Module Parameters */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+module_param(yaffs_traceMask,uint,0644);
+module_param(yaffs_wr_attempts,uint,0644);
+#else
+MODULE_PARM(yaffs_traceMask,"i");
+MODULE_PARM(yaffs_wr_attempts,"i");
+#endif
+
+/*#define T(x) printk x */
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18))
+#define yaffs_InodeToObjectLV(iptr) (iptr)->i_private
+#else
+#define yaffs_InodeToObjectLV(iptr) (iptr)->u.generic_ip
+#endif
+
+#define yaffs_InodeToObject(iptr) ((yaffs_Object *)(yaffs_InodeToObjectLV(iptr)))
+#define yaffs_DentryToObject(dptr) yaffs_InodeToObject((dptr)->d_inode)
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+#define yaffs_SuperToDevice(sb) ((yaffs_Device *)sb->s_fs_info)
+#else
+#define yaffs_SuperToDevice(sb) ((yaffs_Device *)sb->u.generic_sbp)
+#endif
+
+static void yaffs_put_super(struct super_block *sb);
+
+static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+ loff_t * pos);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+static int yaffs_file_flush(struct file *file, fl_owner_t id);
+#else
+static int yaffs_file_flush(struct file *file);
+#endif
+
+static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+ int datasync);
+
+static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *n);
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+ struct nameidata *n);
+#else
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode);
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry);
+#endif
+static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry);
+static int yaffs_unlink(struct inode *dir, struct dentry *dentry);
+static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+ const char *symname);
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+ dev_t dev);
+#else
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+ int dev);
+#endif
+static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+static int yaffs_setattr(struct dentry *dentry, struct iattr *attr);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+static int yaffs_sync_fs(struct super_block *sb, int wait);
+static void yaffs_write_super(struct super_block *sb);
+#else
+static int yaffs_sync_fs(struct super_block *sb);
+static int yaffs_write_super(struct super_block *sb);
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf);
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf);
+#else
+static int yaffs_statfs(struct super_block *sb, struct statfs *buf);
+#endif
+static void yaffs_read_inode(struct inode *inode);
+
+static void yaffs_put_inode(struct inode *inode);
+static void yaffs_delete_inode(struct inode *);
+static void yaffs_clear_inode(struct inode *);
+
+static int yaffs_readpage(struct file *file, struct page *page);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+static int yaffs_writepage(struct page *page, struct writeback_control *wbc);
+#else
+static int yaffs_writepage(struct page *page);
+#endif
+static int yaffs_prepare_write(struct file *f, struct page *pg,
+ unsigned offset, unsigned to);
+static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
+ unsigned to);
+
+static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
+ int buflen);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
+static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
+#else
+static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
+#endif
+
+static struct address_space_operations yaffs_file_address_operations = {
+ .readpage = yaffs_readpage,
+ .writepage = yaffs_writepage,
+ .prepare_write = yaffs_prepare_write,
+ .commit_write = yaffs_commit_write,
+};
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))
+static struct file_operations yaffs_file_operations = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+ .aio_write = generic_file_aio_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+ .splice_read = generic_file_splice_read,
+ .splice_write = generic_file_splice_write,
+};
+
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18))
+
+static struct file_operations yaffs_file_operations = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+ .aio_write = generic_file_aio_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+ .sendfile = generic_file_sendfile,
+};
+
+#else
+
+static struct file_operations yaffs_file_operations = {
+ .read = generic_file_read,
+ .write = generic_file_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+ .sendfile = generic_file_sendfile,
+#endif
+};
+#endif
+
+static struct inode_operations yaffs_file_inode_operations = {
+ .setattr = yaffs_setattr,
+};
+
+static struct inode_operations yaffs_symlink_inode_operations = {
+ .readlink = yaffs_readlink,
+ .follow_link = yaffs_follow_link,
+ .setattr = yaffs_setattr,
+};
+
+static struct inode_operations yaffs_dir_inode_operations = {
+ .create = yaffs_create,
+ .lookup = yaffs_lookup,
+ .link = yaffs_link,
+ .unlink = yaffs_unlink,
+ .symlink = yaffs_symlink,
+ .mkdir = yaffs_mkdir,
+ .rmdir = yaffs_unlink,
+ .mknod = yaffs_mknod,
+ .rename = yaffs_rename,
+ .setattr = yaffs_setattr,
+};
+
+static struct file_operations yaffs_dir_operations = {
+ .read = generic_read_dir,
+ .readdir = yaffs_readdir,
+ .fsync = yaffs_sync_object,
+};
+
+static struct super_operations yaffs_super_ops = {
+ .statfs = yaffs_statfs,
+ .read_inode = yaffs_read_inode,
+ .put_inode = yaffs_put_inode,
+ .put_super = yaffs_put_super,
+ .delete_inode = yaffs_delete_inode,
+ .clear_inode = yaffs_clear_inode,
+ .sync_fs = yaffs_sync_fs,
+ .write_super = yaffs_write_super,
+};
+
+static void yaffs_GrossLock(yaffs_Device * dev)
+{
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs locking\n"));
+
+ down(&dev->grossLock);
+}
+
+static void yaffs_GrossUnlock(yaffs_Device * dev)
+{
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs unlocking\n"));
+ up(&dev->grossLock);
+
+}
+
+static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
+ int buflen)
+{
+ unsigned char *alias;
+ int ret;
+
+ yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
+
+ yaffs_GrossLock(dev);
+
+ alias = yaffs_GetSymlinkAlias(yaffs_DentryToObject(dentry));
+
+ yaffs_GrossUnlock(dev);
+
+ if (!alias)
+ return -ENOMEM;
+
+ ret = vfs_readlink(dentry, buffer, buflen, alias);
+ kfree(alias);
+ return ret;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
+static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+#else
+static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+#endif
+{
+ unsigned char *alias;
+ int ret;
+ yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
+
+ yaffs_GrossLock(dev);
+
+ alias = yaffs_GetSymlinkAlias(yaffs_DentryToObject(dentry));
+
+ yaffs_GrossUnlock(dev);
+
+ if (!alias)
+ {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = vfs_follow_link(nd, alias);
+ kfree(alias);
+out:
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
+ return ERR_PTR (ret);
+#else
+ return ret;
+#endif
+}
+
+struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+ yaffs_Object * obj);
+
+/*
+ * Lookup is used to find objects in the fs
+ */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+ struct nameidata *n)
+#else
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
+#endif
+{
+ yaffs_Object *obj;
+ struct inode *inode = NULL; /* NCB 2.5/2.6 needs NULL here */
+
+ yaffs_Device *dev = yaffs_InodeToObject(dir)->myDev;
+
+ yaffs_GrossLock(dev);
+
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_lookup for %d:%s\n",
+ yaffs_InodeToObject(dir)->objectId, dentry->d_name.name));
+
+ obj =
+ yaffs_FindObjectByName(yaffs_InodeToObject(dir),
+ dentry->d_name.name);
+
+ obj = yaffs_GetEquivalentObject(obj); /* in case it was a hardlink */
+
+ /* Can't hold gross lock when calling yaffs_get_inode() */
+ yaffs_GrossUnlock(dev);
+
+ if (obj) {
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_lookup found %d\n", obj->objectId));
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+
+ if (inode) {
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_loookup dentry \n"));
+/* #if 0 asserted by NCB for 2.5/6 compatibility - falls through to
+ * d_add even if NULL inode */
+#if 0
+ /*dget(dentry); // try to solve directory bug */
+ d_add(dentry, inode);
+
+ /* return dentry; */
+ return NULL;
+#endif
+ }
+
+ } else {
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_lookup not found\n"));
+
+ }
+
+/* added NCB for 2.5/6 compatibility - forces add even if inode is
+ * NULL which creates dentry hash */
+ d_add(dentry, inode);
+
+ return NULL;
+ /* return (ERR_PTR(-EIO)); */
+
+}
+
+/* For now, put_inode is just for debugging.
+ * Put inode is called when the inode **structure** is put.
+ */
+static void yaffs_put_inode(struct inode *inode)
+{
+ T(YAFFS_TRACE_OS,
+ ("yaffs_put_inode: ino %d, count %d\n", (int)inode->i_ino,
+ atomic_read(&inode->i_count)));
+
+}
+
+/* clear is called to tell the fs to release any per-inode data it holds */
+static void yaffs_clear_inode(struct inode *inode)
+{
+ yaffs_Object *obj;
+ yaffs_Device *dev;
+
+ obj = yaffs_InodeToObject(inode);
+
+ T(YAFFS_TRACE_OS,
+ ("yaffs_clear_inode: ino %d, count %d %s\n", (int)inode->i_ino,
+ atomic_read(&inode->i_count),
+ obj ? "object exists" : "null object"));
+
+ if (obj) {
+ dev = obj->myDev;
+ yaffs_GrossLock(dev);
+
+ /* Clear the association between the inode and
+ * the yaffs_Object.
+ */
+ obj->myInode = NULL;
+ yaffs_InodeToObjectLV(inode) = NULL;
+
+ /* If the object freeing was deferred, then the real
+ * free happens now.
+ * This should fix the inode inconsistency problem.
+ */
+
+ yaffs_HandleDeferedFree(obj);
+
+ yaffs_GrossUnlock(dev);
+ }
+
+}
+
+/* delete is called when the link count is zero and the inode
+ * is put (i.e. nobody wants to know about it anymore, time to
+ * delete the file).
+ * NB Must call clear_inode()
+ */
+static void yaffs_delete_inode(struct inode *inode)
+{
+ yaffs_Object *obj = yaffs_InodeToObject(inode);
+ yaffs_Device *dev;
+
+ T(YAFFS_TRACE_OS,
+ ("yaffs_delete_inode: ino %d, count %d %s\n", (int)inode->i_ino,
+ atomic_read(&inode->i_count),
+ obj ? "object exists" : "null object"));
+
+ if (obj) {
+ dev = obj->myDev;
+ yaffs_GrossLock(dev);
+ yaffs_DeleteFile(obj);
+ yaffs_GrossUnlock(dev);
+ }
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
+ truncate_inode_pages (&inode->i_data, 0);
+#endif
+ clear_inode(inode);
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+static int yaffs_file_flush(struct file *file, fl_owner_t id)
+#else
+static int yaffs_file_flush(struct file *file)
+#endif
+{
+ yaffs_Object *obj = yaffs_DentryToObject(file->f_dentry);
+
+ yaffs_Device *dev = obj->myDev;
+
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_file_flush object %d (%s)\n", obj->objectId,
+ obj->dirty ? "dirty" : "clean"));
+
+ yaffs_GrossLock(dev);
+
+ yaffs_FlushFile(obj, 1);
+
+ yaffs_GrossUnlock(dev);
+
+ return 0;
+}
+
+static int yaffs_readpage_nolock(struct file *f, struct page *pg)
+{
+ /* Lifted from jffs2 */
+
+ yaffs_Object *obj;
+ unsigned char *pg_buf;
+ int ret;
+
+ yaffs_Device *dev;
+
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage at %08x, size %08x\n",
+ (unsigned)(pg->index << PAGE_CACHE_SHIFT),
+ (unsigned)PAGE_CACHE_SIZE));
+
+ obj = yaffs_DentryToObject(f->f_dentry);
+
+ dev = obj->myDev;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+ BUG_ON(!PageLocked(pg));
+#else
+ if (!PageLocked(pg))
+ PAGE_BUG(pg);
+#endif
+
+ pg_buf = kmap(pg);
+ /* FIXME: Can kmap fail? */
+
+ yaffs_GrossLock(dev);
+
+ ret =
+ yaffs_ReadDataFromFile(obj, pg_buf, pg->index << PAGE_CACHE_SHIFT,
+ PAGE_CACHE_SIZE);
+
+ yaffs_GrossUnlock(dev);
+
+ if (ret >= 0)
+ ret = 0;
+
+ if (ret) {
+ ClearPageUptodate(pg);
+ SetPageError(pg);
+ } else {
+ SetPageUptodate(pg);
+ ClearPageError(pg);
+ }
+
+ flush_dcache_page(pg);
+ kunmap(pg);
+
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage done\n"));
+ return ret;
+}
+
+static int yaffs_readpage_unlock(struct file *f, struct page *pg)
+{
+ int ret = yaffs_readpage_nolock(f, pg);
+ UnlockPage(pg);
+ return ret;
+}
+
+static int yaffs_readpage(struct file *f, struct page *pg)
+{
+ return yaffs_readpage_unlock(f, pg);
+}
+
+/* writepage inspired by/stolen from smbfs */
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
+#else
+static int yaffs_writepage(struct page *page)
+#endif
+{
+ struct address_space *mapping = page->mapping;
+ loff_t offset = (loff_t) page->index << PAGE_CACHE_SHIFT;
+ struct inode *inode;
+ unsigned long end_index;
+ char *buffer;
+ yaffs_Object *obj;
+ int nWritten = 0;
+ unsigned nBytes;
+
+ if (!mapping)
+ BUG();
+ inode = mapping->host;
+ if (!inode)
+ BUG();
+
+ if (offset > inode->i_size) {
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG
+ "yaffs_writepage at %08x, inode size = %08x!!!\n",
+ (unsigned)(page->index << PAGE_CACHE_SHIFT),
+ (unsigned)inode->i_size));
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG " -> don't care!!\n"));
+ unlock_page(page);
+ return 0;
+ }
+
+ end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+
+ /* easy case */
+ if (page->index < end_index) {
+ nBytes = PAGE_CACHE_SIZE;
+ } else {
+ nBytes = inode->i_size & (PAGE_CACHE_SIZE - 1);
+ }
+
+ get_page(page);
+
+ buffer = kmap(page);
+
+ obj = yaffs_InodeToObject(inode);
+ yaffs_GrossLock(obj->myDev);
+
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_writepage at %08x, size %08x\n",
+ (unsigned)(page->index << PAGE_CACHE_SHIFT), nBytes));
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "writepag0: obj = %05x, ino = %05x\n",
+ (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
+
+ nWritten =
+ yaffs_WriteDataToFile(obj, buffer, page->index << PAGE_CACHE_SHIFT,
+ nBytes, 0);
+
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "writepag1: obj = %05x, ino = %05x\n",
+ (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
+
+ yaffs_GrossUnlock(obj->myDev);
+
+ kunmap(page);
+ SetPageUptodate(page);
+ UnlockPage(page);
+ put_page(page);
+
+ return (nWritten == nBytes) ? 0 : -ENOSPC;
+}
+
+static int yaffs_prepare_write(struct file *f, struct page *pg,
+ unsigned offset, unsigned to)
+{
+
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_prepair_write\n"));
+ if (!Page_Uptodate(pg) && (offset || to < PAGE_CACHE_SIZE))
+ return yaffs_readpage_nolock(f, pg);
+
+ return 0;
+
+}
+
+static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
+ unsigned to)
+{
+
+ void *addr = page_address(pg) + offset;
+ loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
+ int nBytes = to - offset;
+ int nWritten;
+
+ unsigned spos = pos;
+ unsigned saddr = (unsigned)addr;
+
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_commit_write addr %x pos %x nBytes %d\n", saddr,
+ spos, nBytes));
+
+ nWritten = yaffs_file_write(f, addr, nBytes, &pos);
+
+ if (nWritten != nBytes) {
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG
+ "yaffs_commit_write not same size nWritten %d nBytes %d\n",
+ nWritten, nBytes));
+ SetPageError(pg);
+ ClearPageUptodate(pg);
+ } else {
+ SetPageUptodate(pg);
+ }
+
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_commit_write returning %d\n",
+ nWritten == nBytes ? 0 : nWritten));
+
+ return nWritten == nBytes ? 0 : nWritten;
+
+}
+
+static void yaffs_FillInodeFromObject(struct inode *inode, yaffs_Object * obj)
+{
+ if (inode && obj) {
+
+
+ /* Check mode against the variant type and attempt to repair if broken. */
+ __u32 mode = obj->yst_mode;
+ switch( obj->variantType ){
+ case YAFFS_OBJECT_TYPE_FILE :
+ if( ! S_ISREG(mode) ){
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFREG;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK :
+ if( ! S_ISLNK(mode) ){
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFLNK;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY :
+ if( ! S_ISDIR(mode) ){
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFDIR;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN :
+ case YAFFS_OBJECT_TYPE_HARDLINK :
+ case YAFFS_OBJECT_TYPE_SPECIAL :
+ default:
+ /* TODO? */
+ break;
+ }
+
+ inode->i_ino = obj->objectId;
+ inode->i_mode = obj->yst_mode;
+ inode->i_uid = obj->yst_uid;
+ inode->i_gid = obj->yst_gid;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+ inode->i_blksize = inode->i_sb->s_blocksize;
+#endif
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+
+ inode->i_rdev = old_decode_dev(obj->yst_rdev);
+ inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
+ inode->i_atime.tv_nsec = 0;
+ inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
+ inode->i_mtime.tv_nsec = 0;
+ inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
+ inode->i_ctime.tv_nsec = 0;
+#else
+ inode->i_rdev = obj->yst_rdev;
+ inode->i_atime = obj->yst_atime;
+ inode->i_mtime = obj->yst_mtime;
+ inode->i_ctime = obj->yst_ctime;
+#endif
+ inode->i_size = yaffs_GetObjectFileLength(obj);
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+
+ inode->i_nlink = yaffs_GetObjectLinkCount(obj);
+
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG
+ "yaffs_FillInode mode %x uid %d gid %d size %d count %d\n",
+ inode->i_mode, inode->i_uid, inode->i_gid,
+ (int)inode->i_size, atomic_read(&inode->i_count)));
+
+ switch (obj->yst_mode & S_IFMT) {
+ default: /* fifo, device or socket */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+ init_special_inode(inode, obj->yst_mode,
+ old_decode_dev(obj->yst_rdev));
+#else
+ init_special_inode(inode, obj->yst_mode,
+ (dev_t) (obj->yst_rdev));
+#endif
+ break;
+ case S_IFREG: /* file */
+ inode->i_op = &yaffs_file_inode_operations;
+ inode->i_fop = &yaffs_file_operations;
+ inode->i_mapping->a_ops =
+ &yaffs_file_address_operations;
+ break;
+ case S_IFDIR: /* directory */
+ inode->i_op = &yaffs_dir_inode_operations;
+ inode->i_fop = &yaffs_dir_operations;
+ break;
+ case S_IFLNK: /* symlink */
+ inode->i_op = &yaffs_symlink_inode_operations;
+ break;
+ }
+
+ yaffs_InodeToObjectLV(inode) = obj;
+
+ obj->myInode = inode;
+
+ } else {
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_FileInode invalid parameters\n"));
+ }
+
+}
+
+struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+ yaffs_Object * obj)
+{
+ struct inode *inode;
+
+ if (!sb) {
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_get_inode for NULL super_block!!\n"));
+ return NULL;
+
+ }
+
+ if (!obj) {
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_get_inode for NULL object!!\n"));
+ return NULL;
+
+ }
+
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_get_inode for object %d\n", obj->objectId));
+
+ inode = iget(sb, obj->objectId);
+
+ /* NB Side effect: iget calls back to yaffs_read_inode(). */
+ /* iget also increments the inode's i_count */
+ /* NB You can't be holding grossLock or deadlock will happen! */
+
+ return inode;
+}
+
+static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+ loff_t * pos)
+{
+ yaffs_Object *obj;
+ int nWritten, ipos;
+ struct inode *inode;
+ yaffs_Device *dev;
+
+ obj = yaffs_DentryToObject(f->f_dentry);
+
+ dev = obj->myDev;
+
+ yaffs_GrossLock(dev);
+
+ inode = f->f_dentry->d_inode;
+
+ if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND) {
+ ipos = inode->i_size;
+ } else {
+ ipos = *pos;
+ }
+
+ if (!obj) {
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_file_write: hey obj is null!\n"));
+ } else {
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG
+ "yaffs_file_write about to write writing %d bytes"
+ "to object %d at %d\n",
+ n, obj->objectId, ipos));
+ }
+
+ nWritten = yaffs_WriteDataToFile(obj, buf, ipos, n, 0);
+
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_file_write writing %d bytes, %d written at %d\n",
+ n, nWritten, ipos));
+ if (nWritten > 0) {
+ ipos += nWritten;
+ *pos = ipos;
+ if (ipos > inode->i_size) {
+ inode->i_size = ipos;
+ inode->i_blocks = (ipos + 511) >> 9;
+
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG
+ "yaffs_file_write size updated to %d bytes, "
+ "%d blocks\n",
+ ipos, (int)(inode->i_blocks)));
+ }
+
+ }
+ yaffs_GrossUnlock(dev);
+ return nWritten == 0 ? -ENOSPC : nWritten;
+}
+
+static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
+{
+ yaffs_Object *obj;
+ yaffs_Device *dev;
+ struct inode *inode = f->f_dentry->d_inode;
+ unsigned long offset, curoffs;
+ struct list_head *i;
+ yaffs_Object *l;
+
+ char name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ obj = yaffs_DentryToObject(f->f_dentry);
+ dev = obj->myDev;
+
+ yaffs_GrossLock(dev);
+
+ offset = f->f_pos;
+
+ T(YAFFS_TRACE_OS, ("yaffs_readdir: starting at %d\n", (int)offset));
+
+ if (offset == 0) {
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_readdir: entry . ino %d \n",
+ (int)inode->i_ino));
+ if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR)
+ < 0) {
+ goto out;
+ }
+ offset++;
+ f->f_pos++;
+ }
+ if (offset == 1) {
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_readdir: entry .. ino %d \n",
+ (int)f->f_dentry->d_parent->d_inode->i_ino));
+ if (filldir
+ (dirent, "..", 2, offset,
+ f->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) {
+ goto out;
+ }
+ offset++;
+ f->f_pos++;
+ }
+
+ curoffs = 1;
+
+ /* If the directory has changed since the open or last call to
+ readdir, rewind to after the 2 canned entries. */
+
+ if (f->f_version != inode->i_version) {
+ offset = 2;
+ f->f_pos = offset;
+ f->f_version = inode->i_version;
+ }
+
+ list_for_each(i, &obj->variant.directoryVariant.children) {
+ curoffs++;
+ if (curoffs >= offset) {
+ l = list_entry(i, yaffs_Object, siblings);
+
+ yaffs_GetObjectName(l, name,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_readdir: %s inode %d\n", name,
+ yaffs_GetObjectInode(l)));
+
+ if (filldir(dirent,
+ name,
+ strlen(name),
+ offset,
+ yaffs_GetObjectInode(l),
+ yaffs_GetObjectType(l))
+ < 0) {
+ goto up_and_out;
+ }
+
+ offset++;
+ f->f_pos++;
+ }
+ }
+
+ up_and_out:
+ out:
+
+ yaffs_GrossUnlock(dev);
+
+ return 0;
+}
+
+/*
+ * File creation. Allocate an inode, and we're done..
+ */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+ dev_t rdev)
+#else
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+ int rdev)
+#endif
+{
+ struct inode *inode;
+
+ yaffs_Object *obj = NULL;
+ yaffs_Device *dev;
+
+ yaffs_Object *parent = yaffs_InodeToObject(dir);
+
+ int error = -ENOSPC;
+ uid_t uid = current->fsuid;
+ gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
+
+ if((dir->i_mode & S_ISGID) && S_ISDIR(mode))
+ mode |= S_ISGID;
+
+ if (parent) {
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_mknod: parent object %d type %d\n",
+ parent->objectId, parent->variantType));
+ } else {
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_mknod: could not get parent object\n"));
+ return -EPERM;
+ }
+
+ T(YAFFS_TRACE_OS, ("yaffs_mknod: making oject for %s, "
+ "mode %x dev %x\n",
+ dentry->d_name.name, mode, rdev));
+
+ dev = parent->myDev;
+
+ yaffs_GrossLock(dev);
+
+ switch (mode & S_IFMT) {
+ default:
+ /* Special (socket, fifo, device...) */
+ T(YAFFS_TRACE_OS, (KERN_DEBUG
+ "yaffs_mknod: making special\n"));
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+ obj =
+ yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
+ gid, old_encode_dev(rdev));
+#else
+ obj =
+ yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
+ gid, rdev);
+#endif
+ break;
+ case S_IFREG: /* file */
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_mknod: making file\n"));
+ obj =
+ yaffs_MknodFile(parent, dentry->d_name.name, mode, uid,
+ gid);
+ break;
+ case S_IFDIR: /* directory */
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_mknod: making directory\n"));
+ obj =
+ yaffs_MknodDirectory(parent, dentry->d_name.name, mode,
+ uid, gid);
+ break;
+ case S_IFLNK: /* symlink */
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_mknod: making file\n"));
+ obj = NULL; /* Do we ever get here? */
+ break;
+ }
+
+ /* Can not call yaffs_get_inode() with gross lock held */
+ yaffs_GrossUnlock(dev);
+
+ if (obj) {
+ inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
+ d_instantiate(dentry, inode);
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_mknod created object %d count = %d\n",
+ obj->objectId, atomic_read(&inode->i_count)));
+ error = 0;
+ } else {
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_mknod failed making object\n"));
+ error = -ENOMEM;
+ }
+
+ return error;
+}
+
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+ int retVal;
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_mkdir\n"));
+ retVal = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
+#if 0
+ /* attempt to fix dir bug - didn't work */
+ if (!retVal) {
+ dget(dentry);
+ }
+#endif
+ return retVal;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *n)
+#else
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
+#endif
+{
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_create\n"));
+ return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
+}
+
+static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ int retVal;
+
+ yaffs_Device *dev;
+
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_unlink %d:%s\n", (int)(dir->i_ino),
+ dentry->d_name.name));
+
+ dev = yaffs_InodeToObject(dir)->myDev;
+
+ yaffs_GrossLock(dev);
+
+ retVal = yaffs_Unlink(yaffs_InodeToObject(dir), dentry->d_name.name);
+
+ if (retVal == YAFFS_OK) {
+ dentry->d_inode->i_nlink--;
+ dir->i_version++;
+ yaffs_GrossUnlock(dev);
+ mark_inode_dirty(dentry->d_inode);
+ return 0;
+ }
+ yaffs_GrossUnlock(dev);
+ return -ENOTEMPTY;
+}
+
+/*
+ * Create a link...
+ */
+static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry)
+{
+ struct inode *inode = old_dentry->d_inode;
+ yaffs_Object *obj = NULL;
+ yaffs_Object *link = NULL;
+ yaffs_Device *dev;
+
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_link\n"));
+
+ obj = yaffs_InodeToObject(inode);
+ dev = obj->myDev;
+
+ yaffs_GrossLock(dev);
+
+ if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
+ {
+ link =
+ yaffs_Link(yaffs_InodeToObject(dir), dentry->d_name.name,
+ obj);
+ }
+
+ if (link) {
+ old_dentry->d_inode->i_nlink = yaffs_GetObjectLinkCount(obj);
+ d_instantiate(dentry, old_dentry->d_inode);
+ atomic_inc(&old_dentry->d_inode->i_count);
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_link link count %d i_count %d\n",
+ old_dentry->d_inode->i_nlink,
+ atomic_read(&old_dentry->d_inode->i_count)));
+
+ }
+
+ yaffs_GrossUnlock(dev);
+
+ if (link) {
+
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+ const char *symname)
+{
+ yaffs_Object *obj;
+ yaffs_Device *dev;
+ uid_t uid = current->fsuid;
+ gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
+
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_symlink\n"));
+
+ dev = yaffs_InodeToObject(dir)->myDev;
+ yaffs_GrossLock(dev);
+ obj = yaffs_MknodSymLink(yaffs_InodeToObject(dir), dentry->d_name.name,
+ S_IFLNK | S_IRWXUGO, uid, gid, symname);
+ yaffs_GrossUnlock(dev);
+
+ if (obj) {
+
+ struct inode *inode;
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+ d_instantiate(dentry, inode);
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "symlink created OK\n"));
+ return 0;
+ } else {
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "symlink not created\n"));
+
+ }
+
+ return -ENOMEM;
+}
+
+static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+ int datasync)
+{
+
+ yaffs_Object *obj;
+ yaffs_Device *dev;
+
+ obj = yaffs_DentryToObject(dentry);
+
+ dev = obj->myDev;
+
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_sync_object\n"));
+ yaffs_GrossLock(dev);
+ yaffs_FlushFile(obj, 1);
+ yaffs_GrossUnlock(dev);
+ return 0;
+}
+
+/*
+ * The VFS layer already does all the dentry stuff for rename.
+ *
+ * NB: POSIX says you can rename an object over an old object of the same name
+ */
+static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ yaffs_Device *dev;
+ int retVal = YAFFS_FAIL;
+ yaffs_Object *target;
+
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_rename\n"));
+ dev = yaffs_InodeToObject(old_dir)->myDev;
+
+ yaffs_GrossLock(dev);
+
+ /* Check if the target is an existing directory that is not empty. */
+ target =
+ yaffs_FindObjectByName(yaffs_InodeToObject(new_dir),
+ new_dentry->d_name.name);
+
+
+
+ if (target &&
+ target->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
+ !list_empty(&target->variant.directoryVariant.children)) {
+
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "target is non-empty dir\n"));
+
+ retVal = YAFFS_FAIL;
+ } else {
+
+ /* Now does unlinking internally using shadowing mechanism */
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "calling yaffs_RenameObject\n"));
+
+ retVal =
+ yaffs_RenameObject(yaffs_InodeToObject(old_dir),
+ old_dentry->d_name.name,
+ yaffs_InodeToObject(new_dir),
+ new_dentry->d_name.name);
+
+ }
+ yaffs_GrossUnlock(dev);
+
+ if (retVal == YAFFS_OK) {
+ if(target) {
+ new_dentry->d_inode->i_nlink--;
+ mark_inode_dirty(new_dentry->d_inode);
+ }
+
+ return 0;
+ } else {
+ return -ENOTEMPTY;
+ }
+
+}
+
+static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ int error;
+ yaffs_Device *dev;
+
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_setattr of object %d\n",
+ yaffs_InodeToObject(inode)->objectId));
+
+ if ((error = inode_change_ok(inode, attr)) == 0) {
+
+ dev = yaffs_InodeToObject(inode)->myDev;
+ yaffs_GrossLock(dev);
+ if (yaffs_SetAttributes(yaffs_InodeToObject(inode), attr) ==
+ YAFFS_OK) {
+ error = 0;
+ } else {
+ error = -EPERM;
+ }
+ yaffs_GrossUnlock(dev);
+ if (!error)
+ error = inode_setattr(inode, attr);
+ }
+ return error;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
+ struct super_block *sb = dentry->d_sb;
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+ yaffs_Device *dev = yaffs_SuperToDevice(sb);
+#else
+static int yaffs_statfs(struct super_block *sb, struct statfs *buf)
+{
+ yaffs_Device *dev = yaffs_SuperToDevice(sb);
+#endif
+
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_statfs\n"));
+
+ yaffs_GrossLock(dev);
+
+ buf->f_type = YAFFS_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+ buf->f_namelen = 255;
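+ /*
+ * The counts below are reported in units of the superblock's block
+ * size. When that block size is larger than a NAND chunk the chunk
+ * totals are scaled down, otherwise they are scaled up; for example,
+ * 512-byte chunks with a 4096-byte sb->s_blocksize divide the totals
+ * by 8. (Illustrative figures only; the real sizes depend on the MTD
+ * device and PAGE_CACHE_SIZE.)
+ */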
+ if (sb->s_blocksize > dev->nDataBytesPerChunk) {
+
+ buf->f_blocks =
+ (dev->endBlock - dev->startBlock +
+ 1) * dev->nChunksPerBlock / (sb->s_blocksize /
+ dev->nDataBytesPerChunk);
+ buf->f_bfree =
+ yaffs_GetNumberOfFreeChunks(dev) / (sb->s_blocksize /
+ dev->nDataBytesPerChunk);
+ } else {
+
+ buf->f_blocks =
+ (dev->endBlock - dev->startBlock +
+ 1) * dev->nChunksPerBlock * (dev->nDataBytesPerChunk /
+ sb->s_blocksize);
+ buf->f_bfree =
+ yaffs_GetNumberOfFreeChunks(dev) * (dev->nDataBytesPerChunk /
+ sb->s_blocksize);
+ }
+ buf->f_files = 0;
+ buf->f_ffree = 0;
+ buf->f_bavail = buf->f_bfree;
+
+ yaffs_GrossUnlock(dev);
+ return 0;
+}
+
+
+/**
+static int yaffs_do_sync_fs(struct super_block *sb)
+{
+
+ yaffs_Device *dev = yaffs_SuperToDevice(sb);
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_do_sync_fs\n"));
+
+ if(sb->s_dirt) {
+ yaffs_GrossLock(dev);
+
+ if(dev)
+ yaffs_CheckpointSave(dev);
+
+ yaffs_GrossUnlock(dev);
+
+ sb->s_dirt = 0;
+ }
+ return 0;
+}
+**/
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+static void yaffs_write_super(struct super_block *sb)
+#else
+static int yaffs_write_super(struct super_block *sb)
+#endif
+{
+
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_write_super\n"));
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
+ return 0; /* yaffs_do_sync_fs(sb);*/
+#endif
+}
+
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+static int yaffs_sync_fs(struct super_block *sb, int wait)
+#else
+static int yaffs_sync_fs(struct super_block *sb)
+#endif
+{
+
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_sync_fs\n"));
+
+ return 0; /* yaffs_do_sync_fs(sb);*/
+
+}
+
+
+static void yaffs_read_inode(struct inode *inode)
+{
+ /* NB This is called as a side effect of other functions, but
+ * we had to release the lock to prevent deadlocks, so
+ * need to lock again.
+ */
+
+ yaffs_Object *obj;
+ yaffs_Device *dev = yaffs_SuperToDevice(inode->i_sb);
+
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_read_inode for %d\n", (int)inode->i_ino));
+
+ yaffs_GrossLock(dev);
+
+ obj = yaffs_FindObjectByNumber(dev, inode->i_ino);
+
+ yaffs_FillInodeFromObject(inode, obj);
+
+ yaffs_GrossUnlock(dev);
+}
+
+static LIST_HEAD(yaffs_dev_list);
+
+#if 0 // not used
+static int yaffs_remount_fs(struct super_block *sb, int *flags, char *data)
+{
+ yaffs_Device *dev = yaffs_SuperToDevice(sb);
+
+ if( *flags & MS_RDONLY ) {
+ struct mtd_info *mtd = yaffs_SuperToDevice(sb)->genericDevice;
+
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_remount_fs: %s: RO\n", dev->name ));
+
+ yaffs_GrossLock(dev);
+
+ yaffs_FlushEntireDeviceCache(dev);
+
+ yaffs_CheckpointSave(dev);
+
+ if (mtd->sync)
+ mtd->sync(mtd);
+
+ yaffs_GrossUnlock(dev);
+ }
+ else {
+ T(YAFFS_TRACE_OS,
+ (KERN_DEBUG "yaffs_remount_fs: %s: RW\n", dev->name ));
+ }
+
+ return 0;
+}
+#endif
+
+static void yaffs_put_super(struct super_block *sb)
+{
+ yaffs_Device *dev = yaffs_SuperToDevice(sb);
+
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_put_super\n"));
+
+ yaffs_GrossLock(dev);
+
+ yaffs_FlushEntireDeviceCache(dev);
+
+ yaffs_CheckpointSave(dev);
+
+ if (dev->putSuperFunc) {
+ dev->putSuperFunc(sb);
+ }
+
+ yaffs_Deinitialise(dev);
+
+ yaffs_GrossUnlock(dev);
+
+ /* we assume this is protected by lock_kernel() in mount/umount */
+ list_del(&dev->devList);
+
+ if(dev->spareBuffer){
+ YFREE(dev->spareBuffer);
+ dev->spareBuffer = NULL;
+ }
+
+ kfree(dev);
+}
+
+
+static void yaffs_MTDPutSuper(struct super_block *sb)
+{
+
+ struct mtd_info *mtd = yaffs_SuperToDevice(sb)->genericDevice;
+
+ if (mtd->sync) {
+ mtd->sync(mtd);
+ }
+
+ put_mtd_device(mtd);
+}
+
+
+static void yaffs_MarkSuperBlockDirty(void *vsb)
+{
+ struct super_block *sb = (struct super_block *)vsb;
+
+ T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_MarkSuperBlockDirty() sb = %p\n",sb));
+// if(sb)
+// sb->s_dirt = 1;
+}
+
+typedef struct {
+ int inband_tags;
+ int skip_checkpoint_read;
+ int skip_checkpoint_write;
+ int no_cache;
+} yaffs_options;
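+/*
+ * The fields of yaffs_options above correspond to the option strings
+ * accepted by yaffs_parse_options() below: "inband-tags", "no-cache",
+ * "no-checkpoint-read", "no-checkpoint-write" and "no-checkpoint".
+ * A mount line such as "mount -t yaffs2 -o no-checkpoint,inband-tags ..."
+ * is only an illustration of how they might be supplied.
+ */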
+
+#define MAX_OPT_LEN 20
+static int yaffs_parse_options(yaffs_options *options, const char *options_str)
+{
+ char cur_opt[MAX_OPT_LEN+1];
+ int p;
+ int error = 0;
+
+ /* Parse through the options, which are given as a comma separated list */
+
+ while(options_str && *options_str && !error){
+ memset(cur_opt,0,MAX_OPT_LEN+1);
+ p = 0;
+
+ while(*options_str && *options_str != ','){
+ if(p < MAX_OPT_LEN){
+ cur_opt[p] = *options_str;
+ p++;
+ }
+ options_str++;
+ }
+
+ if(!strcmp(cur_opt,"inband-tags"))
+ options->inband_tags = 1;
+ else if(!strcmp(cur_opt,"no-cache"))
+ options->no_cache = 1;
+ else if(!strcmp(cur_opt,"no-checkpoint-read"))
+ options->skip_checkpoint_read = 1;
+ else if(!strcmp(cur_opt,"no-checkpoint-write"))
+ options->skip_checkpoint_write = 1;
+ else if(!strcmp(cur_opt,"no-checkpoint")){
+ options->skip_checkpoint_read = 1;
+ options->skip_checkpoint_write = 1;
+ } else {
+ printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",cur_opt);
+ error = 1;
+ }
+
+ }
+
+ return error;
+}
+
+static struct super_block *yaffs_internal_read_super(int yaffsVersion,
+ struct super_block *sb,
+ void *data, int silent)
+{
+ int nBlocks;
+ struct inode *inode = NULL;
+ struct dentry *root;
+ yaffs_Device *dev = 0;
+ char devname_buf[BDEVNAME_SIZE + 1];
+ struct mtd_info *mtd;
+ int err;
+ char *data_str = (char *)data;
+
+ yaffs_options options;
+
+ sb->s_magic = YAFFS_MAGIC;
+ sb->s_op = &yaffs_super_ops;
+
+ if (!sb)
+ printk(KERN_INFO "yaffs: sb is NULL\n");
+ else if (!sb->s_dev)
+ printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
+ else if (!yaffs_devname(sb, devname_buf))
+ printk(KERN_INFO "yaffs: devname is NULL\n");
+ else
+ printk(KERN_INFO "yaffs: dev is %d name is \"%s\"\n",
+ sb->s_dev,
+ yaffs_devname(sb, devname_buf));
+
+ if(!data_str)
+ data_str = "";
+
+ printk(KERN_INFO "yaffs: passed flags \"%s\"\n",data_str);
+
+ memset(&options,0,sizeof(options));
+
+ if(yaffs_parse_options(&options,data_str)){
+ /* Option parsing failed */
+ return NULL;
+ }
+
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ T(YAFFS_TRACE_OS, ("yaffs_read_super: Using yaffs%d\n", yaffsVersion));
+ T(YAFFS_TRACE_OS,
+ ("yaffs_read_super: block size %d\n", (int)(sb->s_blocksize)));
+
+#ifdef CONFIG_YAFFS_DISABLE_WRITE_VERIFY
+ T(YAFFS_TRACE_OS,
+ ("yaffs: Write verification disabled. All guarantees "
+ "null and void\n"));
+#endif
+
+ T(YAFFS_TRACE_ALWAYS, ("yaffs: Attempting MTD mount on %u.%u, "
+ "\"%s\"\n",
+ MAJOR(sb->s_dev), MINOR(sb->s_dev),
+ yaffs_devname(sb, devname_buf)));
+
+ /* Check it's an mtd device..... */
+ if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR) {
+ return NULL; /* This isn't an mtd device */
+ }
+ /* Get the device */
+ mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
+ if (!mtd) {
+ T(YAFFS_TRACE_ALWAYS,
+ ("yaffs: MTD device #%u doesn't appear to exist\n",
+ MINOR(sb->s_dev)));
+ return NULL;
+ }
+ /* Check it's NAND */
+ if (mtd->type != MTD_NANDFLASH) {
+ T(YAFFS_TRACE_ALWAYS,
+ ("yaffs: MTD device is not NAND it's type %d\n", mtd->type));
+ return NULL;
+ }
+
+ T(YAFFS_TRACE_OS, (" erase %p\n", mtd->erase));
+ T(YAFFS_TRACE_OS, (" read %p\n", mtd->read));
+ T(YAFFS_TRACE_OS, (" write %p\n", mtd->write));
+ T(YAFFS_TRACE_OS, (" readoob %p\n", mtd->read_oob));
+ T(YAFFS_TRACE_OS, (" writeoob %p\n", mtd->write_oob));
+ T(YAFFS_TRACE_OS, (" block_isbad %p\n", mtd->block_isbad));
+ T(YAFFS_TRACE_OS, (" block_markbad %p\n", mtd->block_markbad));
+ T(YAFFS_TRACE_OS, (" %s %d\n", WRITE_SIZE_STR, WRITE_SIZE(mtd)));
+ T(YAFFS_TRACE_OS, (" oobsize %d\n", mtd->oobsize));
+ T(YAFFS_TRACE_OS, (" erasesize %d\n", mtd->erasesize));
+ T(YAFFS_TRACE_OS, (" size %d\n", mtd->size));
+
+#ifdef CONFIG_YAFFS_AUTO_YAFFS2
+
+ if (yaffsVersion == 1 &&
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+ mtd->writesize >= 2048) {
+#else
+ mtd->oobblock >= 2048) {
+#endif
+ T(YAFFS_TRACE_ALWAYS,("yaffs: auto selecting yaffs2\n"));
+ yaffsVersion = 2;
+ }
+
+ /* Added NCB 26/5/2006 for completeness */
+ if (yaffsVersion == 2 &&
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+ mtd->writesize == 512) {
+#else
+ mtd->oobblock == 512) {
+#endif
+ T(YAFFS_TRACE_ALWAYS,("yaffs: auto selecting yaffs1\n"));
+ yaffsVersion = 1;
+ }
+
+#endif
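+ /*
+ * When CONFIG_YAFFS_AUTO_YAFFS2 is set, yaffsVersion now reflects the
+ * page size rather than the filesystem name used to mount: pages of
+ * 2048 bytes or more select yaffs2, 512-byte pages fall back to yaffs1.
+ */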
+
+ if (yaffsVersion == 2) {
+ /* Check for version 2 style functions */
+ if (!mtd->erase ||
+ !mtd->block_isbad ||
+ !mtd->block_markbad ||
+ !mtd->read ||
+ !mtd->write ||
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+ !mtd->read_oob || !mtd->write_oob) {
+#else
+ !mtd->write_ecc ||
+ !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
+#endif
+ T(YAFFS_TRACE_ALWAYS,
+ ("yaffs: MTD device does not support required "
+ "functions\n"));;
+ return NULL;
+ }
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+ if (mtd->writesize < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+#else
+ if (mtd->oobblock < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+#endif
+ mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) {
+ T(YAFFS_TRACE_ALWAYS,
+ ("yaffs: MTD device does not have the "
+ "right page sizes\n"));
+ return NULL;
+ }
+ } else {
+ /* Check for V1 style functions */
+ if (!mtd->erase ||
+ !mtd->read ||
+ !mtd->write ||
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+ !mtd->read_oob || !mtd->write_oob) {
+#else
+ !mtd->write_ecc ||
+ !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
+#endif
+ T(YAFFS_TRACE_ALWAYS,
+ ("yaffs: MTD device does not support required "
+ "functions\n"));;
+ return NULL;
+ }
+
+ if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
+ mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
+ T(YAFFS_TRACE_ALWAYS,
+ ("yaffs: MTD device does not support have the "
+ "right page sizes\n"));
+ return NULL;
+ }
+ }
+
+ /* OK, so if we got here, we have an MTD that's NAND and looks
+ * like it has the right capabilities
+ * Set the yaffs_Device up for mtd
+ */
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+ sb->s_fs_info = dev = kmalloc(sizeof(yaffs_Device), GFP_KERNEL);
+#else
+ sb->u.generic_sbp = dev = kmalloc(sizeof(yaffs_Device), GFP_KERNEL);
+#endif
+ if (!dev) {
+ /* Could not allocate the device structure */
+ T(YAFFS_TRACE_ALWAYS,
+ ("yaffs_read_super: Failed trying to allocate "
+ "yaffs_Device. \n"));
+ return NULL;
+ }
+
+ memset(dev, 0, sizeof(yaffs_Device));
+ dev->genericDevice = mtd;
+ dev->name = mtd->name;
+
+ /* Set up the memory size parameters.... */
+
+ nBlocks = mtd->size / (YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK);
+ dev->startBlock = 0;
+ dev->endBlock = nBlocks - 1;
+ dev->nChunksPerBlock = YAFFS_CHUNKS_PER_BLOCK;
+ dev->nDataBytesPerChunk = YAFFS_BYTES_PER_CHUNK;
+ dev->nReservedBlocks = 5;
+ dev->nShortOpCaches = (options.no_cache) ? 0 : 10;
+
+ /* ... and the functions. */
+ if (yaffsVersion == 2) {
+ dev->writeChunkWithTagsToNAND =
+ nandmtd2_WriteChunkWithTagsToNAND;
+ dev->readChunkWithTagsFromNAND =
+ nandmtd2_ReadChunkWithTagsFromNAND;
+ dev->markNANDBlockBad = nandmtd2_MarkNANDBlockBad;
+ dev->queryNANDBlock = nandmtd2_QueryNANDBlock;
+ dev->spareBuffer = YMALLOC(mtd->oobsize);
+ dev->isYaffs2 = 1;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+ dev->nDataBytesPerChunk = mtd->writesize;
+ dev->nChunksPerBlock = mtd->erasesize / mtd->writesize;
+#else
+ dev->nDataBytesPerChunk = mtd->oobblock;
+ dev->nChunksPerBlock = mtd->erasesize / mtd->oobblock;
+#endif
+ nBlocks = mtd->size / mtd->erasesize;
+
+ dev->nCheckpointReservedBlocks = CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS;
+ dev->startBlock = 0;
+ dev->endBlock = nBlocks - 1;
+ } else {
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+ /* use the MTD interface in yaffs_mtdif1.c */
+ dev->writeChunkWithTagsToNAND =
+ nandmtd1_WriteChunkWithTagsToNAND;
+ dev->readChunkWithTagsFromNAND =
+ nandmtd1_ReadChunkWithTagsFromNAND;
+ dev->markNANDBlockBad = nandmtd1_MarkNANDBlockBad;
+ dev->queryNANDBlock = nandmtd1_QueryNANDBlock;
+#else
+ dev->writeChunkToNAND = nandmtd_WriteChunkToNAND;
+ dev->readChunkFromNAND = nandmtd_ReadChunkFromNAND;
+#endif
+ dev->isYaffs2 = 0;
+ }
+ /* ... and common functions */
+ dev->eraseBlockInNAND = nandmtd_EraseBlockInNAND;
+ dev->initialiseNAND = nandmtd_InitialiseNAND;
+
+ dev->putSuperFunc = yaffs_MTDPutSuper;
+
+ dev->superBlock = (void *)sb;
+ dev->markSuperBlockDirty = yaffs_MarkSuperBlockDirty;
+
+
+#ifndef CONFIG_YAFFS_DOES_ECC
+ dev->useNANDECC = 1;
+#endif
+
+#ifdef CONFIG_YAFFS_DISABLE_WIDE_TNODES
+ dev->wideTnodesDisabled = 1;
+#endif
+
+ dev->skipCheckpointRead = options.skip_checkpoint_read;
+ dev->skipCheckpointWrite = options.skip_checkpoint_write;
+
+ /* we assume this is protected by lock_kernel() in mount/umount */
+ list_add_tail(&dev->devList, &yaffs_dev_list);
+
+ init_MUTEX(&dev->grossLock);
+
+ yaffs_GrossLock(dev);
+
+ err = yaffs_GutsInitialise(dev);
+
+ T(YAFFS_TRACE_OS,
+ ("yaffs_read_super: guts initialised %s\n",
+ (err == YAFFS_OK) ? "OK" : "FAILED"));
+
+ /* Release lock before yaffs_get_inode() */
+ yaffs_GrossUnlock(dev);
+
+ /* Create root inode */
+ if (err == YAFFS_OK)
+ inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0,
+ yaffs_Root(dev));
+
+ if (!inode)
+ return NULL;
+
+ inode->i_op = &yaffs_dir_inode_operations;
+ inode->i_fop = &yaffs_dir_operations;
+
+ T(YAFFS_TRACE_OS, ("yaffs_read_super: got root inode\n"));
+
+ root = d_alloc_root(inode);
+
+ T(YAFFS_TRACE_OS, ("yaffs_read_super: d_alloc_root done\n"));
+
+ if (!root) {
+ iput(inode);
+ return NULL;
+ }
+ sb->s_root = root;
+
+ T(YAFFS_TRACE_OS, ("yaffs_read_super: done\n"));
+ return sb;
+}
+
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+static int yaffs_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
+{
+
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs_internal_read_super_mtd, mnt);
+}
+#else
+static struct super_block *yaffs_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data)
+{
+
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs_internal_read_super_mtd);
+}
+#endif
+
+static struct file_system_type yaffs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs",
+ .get_sb = yaffs_read_super,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+#else
+static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(1, sb, data, silent);
+}
+
+static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
+ FS_REQUIRES_DEV);
+#endif
+
+
+#ifdef CONFIG_YAFFS_YAFFS2
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+static int yaffs2_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
+{
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs2_internal_read_super_mtd, mnt);
+}
+#else
+static struct super_block *yaffs2_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data)
+{
+
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs2_internal_read_super_mtd);
+}
+#endif
+
+static struct file_system_type yaffs2_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs2",
+ .get_sb = yaffs2_read_super,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+#else
+static struct super_block *yaffs2_read_super(struct super_block *sb,
+ void *data, int silent)
+{
+ return yaffs_internal_read_super(2, sb, data, silent);
+}
+
+static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
+ FS_REQUIRES_DEV);
+#endif
+
+#endif /* CONFIG_YAFFS_YAFFS2 */
+
+static struct proc_dir_entry *my_proc_entry;
+
+static char *yaffs_dump_dev(char *buf, yaffs_Device * dev)
+{
+ buf += sprintf(buf, "startBlock......... %d\n", dev->startBlock);
+ buf += sprintf(buf, "endBlock........... %d\n", dev->endBlock);
+ buf += sprintf(buf, "nDataBytesPerChunk. %d\n", dev->nDataBytesPerChunk);
+ buf += sprintf(buf, "chunkGroupBits..... %d\n", dev->chunkGroupBits);
+ buf += sprintf(buf, "chunkGroupSize..... %d\n", dev->chunkGroupSize);
+ buf += sprintf(buf, "nErasedBlocks...... %d\n", dev->nErasedBlocks);
+ buf += sprintf(buf, "nReservedBlocks.... %d\n", dev->nReservedBlocks);
+ buf += sprintf(buf, "nCheckptResBlocks.. %d\n", dev->nCheckpointReservedBlocks);
+ buf += sprintf(buf, "blocksInCheckpoint. %d\n", dev->blocksInCheckpoint);
+ buf += sprintf(buf, "nTnodesCreated..... %d\n", dev->nTnodesCreated);
+ buf += sprintf(buf, "nFreeTnodes........ %d\n", dev->nFreeTnodes);
+ buf += sprintf(buf, "nObjectsCreated.... %d\n", dev->nObjectsCreated);
+ buf += sprintf(buf, "nFreeObjects....... %d\n", dev->nFreeObjects);
+ buf += sprintf(buf, "nFreeChunks........ %d\n", dev->nFreeChunks);
+ buf += sprintf(buf, "nPageWrites........ %d\n", dev->nPageWrites);
+ buf += sprintf(buf, "nPageReads......... %d\n", dev->nPageReads);
+ buf += sprintf(buf, "nBlockErasures..... %d\n", dev->nBlockErasures);
+ buf += sprintf(buf, "nGCCopies.......... %d\n", dev->nGCCopies);
+ buf +=
+ sprintf(buf, "garbageCollections. %d\n", dev->garbageCollections);
+ buf +=
+ sprintf(buf, "passiveGCs......... %d\n",
+ dev->passiveGarbageCollections);
+ buf += sprintf(buf, "nRetriedWrites..... %d\n", dev->nRetriedWrites);
+ buf += sprintf(buf, "nShortOpCaches..... %d\n", dev->nShortOpCaches);
+ buf += sprintf(buf, "nRetireBlocks...... %d\n", dev->nRetiredBlocks);
+ buf += sprintf(buf, "eccFixed........... %d\n", dev->eccFixed);
+ buf += sprintf(buf, "eccUnfixed......... %d\n", dev->eccUnfixed);
+ buf += sprintf(buf, "tagsEccFixed....... %d\n", dev->tagsEccFixed);
+ buf += sprintf(buf, "tagsEccUnfixed..... %d\n", dev->tagsEccUnfixed);
+ buf += sprintf(buf, "cacheHits.......... %d\n", dev->cacheHits);
+ buf += sprintf(buf, "nDeletedFiles...... %d\n", dev->nDeletedFiles);
+ buf += sprintf(buf, "nUnlinkedFiles..... %d\n", dev->nUnlinkedFiles);
+ buf +=
+ sprintf(buf, "nBackgroudDeletions %d\n", dev->nBackgroundDeletions);
+ buf += sprintf(buf, "useNANDECC......... %d\n", dev->useNANDECC);
+ buf += sprintf(buf, "isYaffs2........... %d\n", dev->isYaffs2);
+
+ return buf;
+}
+
+static int yaffs_proc_read(char *page,
+ char **start,
+ off_t offset, int count, int *eof, void *data)
+{
+ struct list_head *item;
+ char *buf = page;
+ int step = offset;
+ int n = 0;
+
+ /* Get proc_file_read() to step 'offset' by one on each successive call.
+ * We use 'offset' (*ppos) to indicate where we are in devList.
+ * This also assumes the user has posted a read buffer large
+ * enough to hold the complete output; but that's life in /proc.
+ */
+
+ *(int *)start = 1;
+
+ /* Print header first */
+ if (step == 0) {
+ buf += sprintf(buf, "YAFFS built:" __DATE__ " " __TIME__
+ "\n%s\n%s\n", yaffs_fs_c_version,
+ yaffs_guts_c_version);
+ }
+
+ /* hold lock_kernel while traversing yaffs_dev_list */
+ lock_kernel();
+
+ /* Locate and print the Nth entry. Order N-squared but N is small. */
+ list_for_each(item, &yaffs_dev_list) {
+ yaffs_Device *dev = list_entry(item, yaffs_Device, devList);
+ if (n < step) {
+ n++;
+ continue;
+ }
+ buf += sprintf(buf, "\nDevice %d \"%s\"\n", n, dev->name);
+ buf = yaffs_dump_dev(buf, dev);
+ break;
+ }
+ unlock_kernel();
+
+ return buf - page < count ? buf - page : count;
+}
+
+/**
+ * Set the verbosity of the warnings and error messages.
+ *
+ * Note that the names can only be a..z or _ with the current code.
+ */
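+/*
+ * Going by the parser below, the mask is changed by writing to
+ * /proc/yaffs, e.g.
+ *	echo "+os -gc" > /proc/yaffs
+ * where '+' sets bits, '-' clears bits, '=' replaces the whole mask,
+ * and a bare name or number also sets bits. Numeric values such as
+ * "=0x0" are accepted too, and YAFFS_TRACE_ALWAYS is always re-added.
+ * (The echo line is only an example.)
+ */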
+
+static struct {
+ char *mask_name;
+ unsigned mask_bitfield;
+} mask_flags[] = {
+ {"allocate", YAFFS_TRACE_ALLOCATE},
+ {"always", YAFFS_TRACE_ALWAYS},
+ {"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
+ {"buffers", YAFFS_TRACE_BUFFERS},
+ {"bug", YAFFS_TRACE_BUG},
+ {"checkpt", YAFFS_TRACE_CHECKPOINT},
+ {"deletion", YAFFS_TRACE_DELETION},
+ {"erase", YAFFS_TRACE_ERASE},
+ {"error", YAFFS_TRACE_ERROR},
+ {"gc_detail", YAFFS_TRACE_GC_DETAIL},
+ {"gc", YAFFS_TRACE_GC},
+ {"mtd", YAFFS_TRACE_MTD},
+ {"nandaccess", YAFFS_TRACE_NANDACCESS},
+ {"os", YAFFS_TRACE_OS},
+ {"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
+ {"scan", YAFFS_TRACE_SCAN},
+ {"tracing", YAFFS_TRACE_TRACING},
+
+ {"verify", YAFFS_TRACE_VERIFY},
+ {"verify_nand", YAFFS_TRACE_VERIFY_NAND},
+ {"verify_full", YAFFS_TRACE_VERIFY_FULL},
+ {"verify_all", YAFFS_TRACE_VERIFY_ALL},
+
+ {"write", YAFFS_TRACE_WRITE},
+ {"all", 0xffffffff},
+ {"none", 0},
+ {NULL, 0},
+};
+
+#define MAX_MASK_NAME_LENGTH 40
+static int yaffs_proc_write(struct file *file, const char *buf,
+ unsigned long count, void *data)
+{
+ unsigned rg = 0, mask_bitfield;
+ char *end;
+ char *mask_name;
+ const char *x;
+ char substring[MAX_MASK_NAME_LENGTH+1];
+ int i;
+ int done = 0;
+ int add, len = 0;
+ int pos = 0;
+
+ rg = yaffs_traceMask;
+
+ while (!done && (pos < count)) {
+ done = 1;
+ while ((pos < count) && isspace(buf[pos])) {
+ pos++;
+ }
+
+ switch (buf[pos]) {
+ case '+':
+ case '-':
+ case '=':
+ add = buf[pos];
+ pos++;
+ break;
+
+ default:
+ add = ' ';
+ break;
+ }
+ mask_name = NULL;
+
+ mask_bitfield = simple_strtoul(buf + pos, &end, 0);
+ if (end > buf + pos) {
+ mask_name = "numeral";
+ len = end - (buf + pos);
+ pos += len;
+ done = 0;
+ } else {
+ for(x = buf + pos, i = 0;
+ (*x == '_' || (*x >='a' && *x <= 'z')) &&
+ i <MAX_MASK_NAME_LENGTH; x++, i++, pos++)
+ substring[i] = *x;
+ substring[i] = '\0';
+
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+ if(strcmp(substring,mask_flags[i].mask_name) == 0){
+ mask_name = mask_flags[i].mask_name;
+ mask_bitfield = mask_flags[i].mask_bitfield;
+ done = 0;
+ break;
+ }
+ }
+ }
+
+ if (mask_name != NULL) {
+ done = 0;
+ switch(add) {
+ case '-':
+ rg &= ~mask_bitfield;
+ break;
+ case '+':
+ rg |= mask_bitfield;
+ break;
+ case '=':
+ rg = mask_bitfield;
+ break;
+ default:
+ rg |= mask_bitfield;
+ break;
+ }
+ }
+ }
+
+ yaffs_traceMask = rg | YAFFS_TRACE_ALWAYS;
+
+ printk("new trace = 0x%08X\n",yaffs_traceMask);
+
+ if (rg & YAFFS_TRACE_ALWAYS) {
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+ char flag;
+ flag = ((rg & mask_flags[i].mask_bitfield) == mask_flags[i].mask_bitfield) ? '+' : '-';
+ printk("%c%s\n", flag, mask_flags[i].mask_name);
+ }
+ }
+
+ return count;
+}
+
+/* Stuff to handle installation of file systems */
+struct file_system_to_install {
+ struct file_system_type *fst;
+ int installed;
+};
+
+static struct file_system_to_install fs_to_install[] = {
+//#ifdef CONFIG_YAFFS_YAFFS1
+ {&yaffs_fs_type, 0},
+//#endif
+//#ifdef CONFIG_YAFFS_YAFFS2
+ {&yaffs2_fs_type, 0},
+//#endif
+ {NULL, 0}
+};
+
+static int __init init_yaffs_fs(void)
+{
+ int error = 0;
+ struct file_system_to_install *fsinst;
+
+ T(YAFFS_TRACE_ALWAYS,
+ ("yaffs " __DATE__ " " __TIME__ " Installing. \n"));
+
+ /* Install the proc_fs entry */
+ my_proc_entry = create_proc_entry("yaffs",
+ S_IRUGO | S_IFREG,
+ &proc_root);
+
+ if (my_proc_entry) {
+ my_proc_entry->write_proc = yaffs_proc_write;
+ my_proc_entry->read_proc = yaffs_proc_read;
+ my_proc_entry->data = NULL;
+ } else {
+ return -ENOMEM;
+ }
+
+ /* Now add the file system entries */
+
+ fsinst = fs_to_install;
+
+ while (fsinst->fst && !error) {
+ error = register_filesystem(fsinst->fst);
+ if (!error) {
+ fsinst->installed = 1;
+ }
+ fsinst++;
+ }
+
+ /* Any errors? uninstall */
+ if (error) {
+ fsinst = fs_to_install;
+
+ while (fsinst->fst) {
+ if (fsinst->installed) {
+ unregister_filesystem(fsinst->fst);
+ fsinst->installed = 0;
+ }
+ fsinst++;
+ }
+ }
+
+ return error;
+}
+
+static void __exit exit_yaffs_fs(void)
+{
+
+ struct file_system_to_install *fsinst;
+
+ T(YAFFS_TRACE_ALWAYS, ("yaffs " __DATE__ " " __TIME__
+ " removing. \n"));
+
+ remove_proc_entry("yaffs", &proc_root);
+
+ fsinst = fs_to_install;
+
+ while (fsinst->fst) {
+ if (fsinst->installed) {
+ unregister_filesystem(fsinst->fst);
+ fsinst->installed = 0;
+ }
+ fsinst++;
+ }
+
+}
+
+module_init(init_yaffs_fs)
+module_exit(exit_yaffs_fs)
+
+MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
+MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2006");
+MODULE_LICENSE("GPL");
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_guts.c b/target/linux/generic/files/fs/yaffs2/yaffs_guts.c
new file mode 100644
index 000000000..2ab814691
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_guts.c
@@ -0,0 +1,7469 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+const char *yaffs_guts_c_version =
+ "$Id: yaffs_guts.c,v 1.49 2007-05-15 20:07:40 charles Exp $";
+
+#include "yportenv.h"
+
+#include "yaffsinterface.h"
+#include "yaffs_guts.h"
+#include "yaffs_tagsvalidity.h"
+
+#include "yaffs_tagscompat.h"
+#ifndef CONFIG_YAFFS_USE_OWN_SORT
+#include "yaffs_qsort.h"
+#endif
+#include "yaffs_nand.h"
+
+#include "yaffs_checkptrw.h"
+
+#include "yaffs_nand.h"
+#include "yaffs_packedtags2.h"
+
+
+#ifdef CONFIG_YAFFS_WINCE
+void yfsd_LockYAFFS(BOOL fsLockOnly);
+void yfsd_UnlockYAFFS(BOOL fsLockOnly);
+#endif
+
+#define YAFFS_PASSIVE_GC_CHUNKS 2
+
+#include "yaffs_ecc.h"
+
+
+/* Robustification (if it ever comes about...) */
+static void yaffs_RetireBlock(yaffs_Device * dev, int blockInNAND);
+static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND, int erasedOk);
+static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
+ const __u8 * data,
+ const yaffs_ExtendedTags * tags);
+static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
+ const yaffs_ExtendedTags * tags);
+
+/* Other local prototypes */
+static int yaffs_UnlinkObject( yaffs_Object *obj);
+static int yaffs_ObjectHasCachedWriteData(yaffs_Object *obj);
+
+static void yaffs_HardlinkFixup(yaffs_Device *dev, yaffs_Object *hardList);
+
+static int yaffs_WriteNewChunkWithTagsToNAND(yaffs_Device * dev,
+ const __u8 * buffer,
+ yaffs_ExtendedTags * tags,
+ int useReserve);
+static int yaffs_PutChunkIntoFile(yaffs_Object * in, int chunkInInode,
+ int chunkInNAND, int inScan);
+
+static yaffs_Object *yaffs_CreateNewObject(yaffs_Device * dev, int number,
+ yaffs_ObjectType type);
+static void yaffs_AddObjectToDirectory(yaffs_Object * directory,
+ yaffs_Object * obj);
+static int yaffs_UpdateObjectHeader(yaffs_Object * in, const YCHAR * name,
+ int force, int isShrink, int shadows);
+static void yaffs_RemoveObjectFromDirectory(yaffs_Object * obj);
+static int yaffs_CheckStructures(void);
+static int yaffs_DeleteWorker(yaffs_Object * in, yaffs_Tnode * tn, __u32 level,
+ int chunkOffset, int *limit);
+static int yaffs_DoGenericObjectDeletion(yaffs_Object * in);
+
+static yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device * dev, int blockNo);
+
+static __u8 *yaffs_GetTempBuffer(yaffs_Device * dev, int lineNo);
+static void yaffs_ReleaseTempBuffer(yaffs_Device * dev, __u8 * buffer,
+ int lineNo);
+
+static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
+ int chunkInNAND);
+
+static int yaffs_UnlinkWorker(yaffs_Object * obj);
+static void yaffs_DestroyObject(yaffs_Object * obj);
+
+static int yaffs_TagsMatch(const yaffs_ExtendedTags * tags, int objectId,
+ int chunkInObject);
+
+loff_t yaffs_GetFileSize(yaffs_Object * obj);
+
+static int yaffs_AllocateChunk(yaffs_Device * dev, int useReserve, yaffs_BlockInfo **blockUsedPtr);
+
+static void yaffs_VerifyFreeChunks(yaffs_Device * dev);
+
+static void yaffs_CheckObjectDetailsLoaded(yaffs_Object *in);
+
+#ifdef YAFFS_PARANOID
+static int yaffs_CheckFileSanity(yaffs_Object * in);
+#else
+#define yaffs_CheckFileSanity(in)
+#endif
+
+static void yaffs_InvalidateWholeChunkCache(yaffs_Object * in);
+static void yaffs_InvalidateChunkCache(yaffs_Object * object, int chunkId);
+
+static void yaffs_InvalidateCheckpoint(yaffs_Device *dev);
+
+static int yaffs_FindChunkInFile(yaffs_Object * in, int chunkInInode,
+ yaffs_ExtendedTags * tags);
+
+static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos);
+static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device * dev,
+ yaffs_FileStructure * fStruct,
+ __u32 chunkId);
+
+
+/* Function to calculate chunk and offset */
+
+static void yaffs_AddrToChunk(yaffs_Device *dev, loff_t addr, __u32 *chunk, __u32 *offset)
+{
+ if(dev->chunkShift){
+ /* Easy-peasy power of 2 case */
+ *chunk = (__u32)(addr >> dev->chunkShift);
+ *offset = (__u32)(addr & dev->chunkMask);
+ }
+ else if(dev->crumbsPerChunk)
+ {
+ /* Case where we're using "crumbs" */
+ *offset = (__u32)(addr & dev->crumbMask);
+ addr >>= dev->crumbShift;
+ *chunk = ((__u32)addr)/dev->crumbsPerChunk;
+ *offset += ((addr - (*chunk * dev->crumbsPerChunk)) << dev->crumbShift);
+ }
+ else
+ YBUG();
+}
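+/*
+ * The common case above is a power-of-two chunk size, where the address
+ * splits directly into chunk = addr >> chunkShift with the masked low
+ * bits as the offset. The "crumbs" path exists for chunk sizes that are
+ * not a power of two: the address is first reduced to power-of-two
+ * sized crumbs and the crumb count is then divided by crumbsPerChunk.
+ */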
+
+/* Function to return the number of shifts for a power of 2 greater than or equal
+ * to the given number
+ * Note we don't try to cater for all possible numbers and this does not have to
+ * be hellishly efficient.
+ */
+
+static __u32 ShiftsGE(__u32 x)
+{
+ int extraBits;
+ int nShifts;
+
+ nShifts = extraBits = 0;
+
+ while(x>1){
+ if(x & 1) extraBits++;
+ x>>=1;
+ nShifts++;
+ }
+
+ if(extraBits)
+ nShifts++;
+
+ return nShifts;
+}
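+/* For example ShiftsGE(512) == 9 and ShiftsGE(1000) == 10, since the
+ * smallest powers of two not below those values are 2^9 and 2^10.
+ */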
+
+/* Function to return the number of shifts to get a 1 in bit 0
+ */
+
+static __u32 ShiftDiv(__u32 x)
+{
+ int nShifts;
+
+ nShifts = 0;
+
+ if(!x) return 0;
+
+ while( !(x&1)){
+ x>>=1;
+ nShifts++;
+ }
+
+ return nShifts;
+}
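+/* ShiftDiv() is effectively a count of trailing zero bits, so for a
+ * power of two it returns log2 of the value: ShiftDiv(2048) == 11,
+ * ShiftDiv(12) == 2, and ShiftDiv(0) is defined as 0 above.
+ */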
+
+
+
+/*
+ * Temporary buffer manipulations.
+ */
+
+static int yaffs_InitialiseTempBuffers(yaffs_Device *dev)
+{
+ int i;
+ __u8 *buf = (__u8 *)1;
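+ /* buf starts as a non-NULL dummy so the loop below runs at least
+ * once; if any YMALLOC_DMA() call fails it becomes NULL, the loop
+ * stops early and YAFFS_FAIL is returned. */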
+
+ memset(dev->tempBuffer,0,sizeof(dev->tempBuffer));
+
+ for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
+ dev->tempBuffer[i].line = 0; /* not in use */
+ dev->tempBuffer[i].buffer = buf =
+ YMALLOC_DMA(dev->nDataBytesPerChunk);
+ }
+
+ return buf ? YAFFS_OK : YAFFS_FAIL;
+
+}
+
+static __u8 *yaffs_GetTempBuffer(yaffs_Device * dev, int lineNo)
+{
+ int i, j;
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->tempBuffer[i].line == 0) {
+ dev->tempBuffer[i].line = lineNo;
+ if ((i + 1) > dev->maxTemp) {
+ dev->maxTemp = i + 1;
+ for (j = 0; j <= i; j++)
+ dev->tempBuffer[j].maxLine =
+ dev->tempBuffer[j].line;
+ }
+
+ return dev->tempBuffer[i].buffer;
+ }
+ }
+
+ T(YAFFS_TRACE_BUFFERS,
+ (TSTR("Out of temp buffers at line %d, other held by lines:"),
+ lineNo));
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ T(YAFFS_TRACE_BUFFERS, (TSTR(" %d "), dev->tempBuffer[i].line));
+ }
+ T(YAFFS_TRACE_BUFFERS, (TSTR(" " TENDSTR)));
+
+ /*
+ * If we got here then we have to allocate an unmanaged one.
+ * This is not good.
+ */
+
+ dev->unmanagedTempAllocations++;
+ return YMALLOC(dev->nDataBytesPerChunk);
+
+}
+
+static void yaffs_ReleaseTempBuffer(yaffs_Device * dev, __u8 * buffer,
+ int lineNo)
+{
+ int i;
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->tempBuffer[i].buffer == buffer) {
+ dev->tempBuffer[i].line = 0;
+ return;
+ }
+ }
+
+ if (buffer) {
+ /* assume it is an unmanaged one. */
+ T(YAFFS_TRACE_BUFFERS,
+ (TSTR("Releasing unmanaged temp buffer in line %d" TENDSTR),
+ lineNo));
+ YFREE(buffer);
+ dev->unmanagedTempDeallocations++;
+ }
+
+}
+
+/*
+ * Determine if we have a managed buffer.
+ */
+int yaffs_IsManagedTempBuffer(yaffs_Device * dev, const __u8 * buffer)
+{
+ int i;
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->tempBuffer[i].buffer == buffer)
+ return 1;
+
+ }
+
+ for (i = 0; i < dev->nShortOpCaches; i++) {
+ if( dev->srCache[i].data == buffer )
+ return 1;
+
+ }
+
+ if (buffer == dev->checkpointBuffer)
+ return 1;
+
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs: unmaged buffer detected.\n" TENDSTR)));
+ return 0;
+}
+
+
+
+/*
+ * Chunk bitmap manipulations
+ */
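+/*
+ * Each block owns chunkBitmapStride bytes within dev->chunkBits, one
+ * bit per chunk in the block: a set bit means the chunk is in use
+ * (live). Chunk n of a block maps to byte n/8, bit n%8 of that block's
+ * slice, as the Set/Clear/Check helpers below show.
+ */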
+
+static Y_INLINE __u8 *yaffs_BlockBits(yaffs_Device * dev, int blk)
+{
+ if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("**>> yaffs: BlockBits block %d is not valid" TENDSTR),
+ blk));
+ YBUG();
+ }
+ return dev->chunkBits +
+ (dev->chunkBitmapStride * (blk - dev->internalStartBlock));
+}
+
+static Y_INLINE void yaffs_VerifyChunkBitId(yaffs_Device *dev, int blk, int chunk)
+{
+ if(blk < dev->internalStartBlock || blk > dev->internalEndBlock ||
+ chunk < 0 || chunk >= dev->nChunksPerBlock) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("**>> yaffs: Chunk Id (%d:%d) invalid"TENDSTR),blk,chunk));
+ YBUG();
+ }
+}
+
+static Y_INLINE void yaffs_ClearChunkBits(yaffs_Device * dev, int blk)
+{
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+
+ memset(blkBits, 0, dev->chunkBitmapStride);
+}
+
+static Y_INLINE void yaffs_ClearChunkBit(yaffs_Device * dev, int blk, int chunk)
+{
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+
+ yaffs_VerifyChunkBitId(dev,blk,chunk);
+
+ blkBits[chunk / 8] &= ~(1 << (chunk & 7));
+}
+
+static Y_INLINE void yaffs_SetChunkBit(yaffs_Device * dev, int blk, int chunk)
+{
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+
+ yaffs_VerifyChunkBitId(dev,blk,chunk);
+
+ blkBits[chunk / 8] |= (1 << (chunk & 7));
+}
+
+static Y_INLINE int yaffs_CheckChunkBit(yaffs_Device * dev, int blk, int chunk)
+{
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+ yaffs_VerifyChunkBitId(dev,blk,chunk);
+
+ return (blkBits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
+}
+
+static Y_INLINE int yaffs_StillSomeChunkBits(yaffs_Device * dev, int blk)
+{
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+ int i;
+ for (i = 0; i < dev->chunkBitmapStride; i++) {
+ if (*blkBits)
+ return 1;
+ blkBits++;
+ }
+ return 0;
+}
+
+static int yaffs_CountChunkBits(yaffs_Device * dev, int blk)
+{
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+ int i;
+ int n = 0;
+ for (i = 0; i < dev->chunkBitmapStride; i++) {
+ __u8 x = *blkBits;
+ while(x){
+ if(x & 1)
+ n++;
+ x >>=1;
+ }
+
+ blkBits++;
+ }
+ return n;
+}
+
+/*
+ * Verification code
+ */
+
+static int yaffs_SkipVerification(yaffs_Device *dev)
+{
+ return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_SkipFullVerification(yaffs_Device *dev)
+{
+ return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_SkipNANDVerification(yaffs_Device *dev)
+{
+ return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_NAND));
+}
+
+static const char * blockStateName[] = {
+"Unknown",
+"Needs scanning",
+"Scanning",
+"Empty",
+"Allocating",
+"Full",
+"Dirty",
+"Checkpoint",
+"Collecting",
+"Dead"
+};
+
+static void yaffs_VerifyBlock(yaffs_Device *dev,yaffs_BlockInfo *bi,int n)
+{
+ int actuallyUsed;
+ int inUse;
+
+ if(yaffs_SkipVerification(dev))
+ return;
+
+ /* Report illegal runtime states */
+ if(bi->blockState <0 || bi->blockState >= YAFFS_NUMBER_OF_BLOCK_STATES)
+ T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has undefined state %d"TENDSTR),n,bi->blockState));
+
+ switch(bi->blockState){
+ case YAFFS_BLOCK_STATE_UNKNOWN:
+ case YAFFS_BLOCK_STATE_SCANNING:
+ case YAFFS_BLOCK_STATE_NEEDS_SCANNING:
+ T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has bad run-state %s"TENDSTR),
+ n,blockStateName[bi->blockState]));
+ }
+
+ /* Check pages in use and soft deletions are legal */
+
+ actuallyUsed = bi->pagesInUse - bi->softDeletions;
+
+ if(bi->pagesInUse < 0 || bi->pagesInUse > dev->nChunksPerBlock ||
+ bi->softDeletions < 0 || bi->softDeletions > dev->nChunksPerBlock ||
+ actuallyUsed < 0 || actuallyUsed > dev->nChunksPerBlock)
+ T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has illegal values pagesInUsed %d softDeletions %d"TENDSTR),
+ n,bi->pagesInUse,bi->softDeletions));
+
+
+ /* Check chunk bitmap legal */
+ inUse = yaffs_CountChunkBits(dev,n);
+ if(inUse != bi->pagesInUse)
+ T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has inconsistent values pagesInUse %d counted chunk bits %d"TENDSTR),
+ n,bi->pagesInUse,inUse));
+
+ /* Check that the sequence number is valid.
+ * Ten million is legal, but is very unlikely
+ */
+ if(dev->isYaffs2 &&
+ (bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING || bi->blockState == YAFFS_BLOCK_STATE_FULL) &&
+ (bi->sequenceNumber < YAFFS_LOWEST_SEQUENCE_NUMBER || bi->sequenceNumber > 10000000 ))
+ T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has suspect sequence number of %d"TENDSTR),
+ n,bi->sequenceNumber));
+
+}
+
+static void yaffs_VerifyCollectedBlock(yaffs_Device *dev,yaffs_BlockInfo *bi,int n)
+{
+ yaffs_VerifyBlock(dev,bi,n);
+
+ /* After collection the block should be in the erased state */
+ /* TODO: This will need to change if we do partial gc */
+
+ if(bi->blockState != YAFFS_BLOCK_STATE_EMPTY){
+ T(YAFFS_TRACE_ERROR,(TSTR("Block %d is in state %d after gc, should be erased"TENDSTR),
+ n,bi->blockState));
+ }
+}
+
+static void yaffs_VerifyBlocks(yaffs_Device *dev)
+{
+ int i;
+ int nBlocksPerState[YAFFS_NUMBER_OF_BLOCK_STATES];
+ int nIllegalBlockStates = 0;
+
+
+ if(yaffs_SkipVerification(dev))
+ return;
+
+ memset(nBlocksPerState,0,sizeof(nBlocksPerState));
+
+
+ for(i = dev->internalStartBlock; i <= dev->internalEndBlock; i++){
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,i);
+ yaffs_VerifyBlock(dev,bi,i);
+
+ if(bi->blockState >=0 && bi->blockState < YAFFS_NUMBER_OF_BLOCK_STATES)
+ nBlocksPerState[bi->blockState]++;
+ else
+ nIllegalBlockStates++;
+
+ }
+
+ T(YAFFS_TRACE_VERIFY,(TSTR(""TENDSTR)));
+ T(YAFFS_TRACE_VERIFY,(TSTR("Block summary"TENDSTR)));
+
+ T(YAFFS_TRACE_VERIFY,(TSTR("%d blocks have illegal states"TENDSTR),nIllegalBlockStates));
+ if(nBlocksPerState[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
+ T(YAFFS_TRACE_VERIFY,(TSTR("Too many allocating blocks"TENDSTR)));
+
+ for(i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("%s %d blocks"TENDSTR),
+ blockStateName[i],nBlocksPerState[i]));
+
+ if(dev->blocksInCheckpoint != nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT])
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Checkpoint block count wrong dev %d count %d"TENDSTR),
+ dev->blocksInCheckpoint, nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT]));
+
+ if(dev->nErasedBlocks != nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY])
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Erased block count wrong dev %d count %d"TENDSTR),
+ dev->nErasedBlocks, nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY]));
+
+ if(nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING] > 1)
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Too many collecting blocks %d (max is 1)"TENDSTR),
+ nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING]));
+
+ T(YAFFS_TRACE_VERIFY,(TSTR(""TENDSTR)));
+
+}
+
+/*
+ * Verify the object header. oh must be valid, but obj and tags may be NULL in which
+ * case those tests will not be performed.
+ */
+static void yaffs_VerifyObjectHeader(yaffs_Object *obj, yaffs_ObjectHeader *oh, yaffs_ExtendedTags *tags, int parentCheck)
+{
+ if(yaffs_SkipVerification(obj->myDev))
+ return;
+
+ if(!(tags && obj && oh)){
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Verifying object header tags %x obj %x oh %x"TENDSTR),
+ (__u32)tags,(__u32)obj,(__u32)oh));
+ return;
+ }
+
+ if(oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
+ oh->type > YAFFS_OBJECT_TYPE_MAX)
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Obj %d header type is illegal value 0x%x"TENDSTR),
+ tags->objectId, oh->type));
+
+ if(tags->objectId != obj->objectId)
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Obj %d header mismatch objectId %d"TENDSTR),
+ tags->objectId, obj->objectId));
+
+
+ /*
+ * Check that the object's parent ids match if parentCheck requested.
+ *
+ * Tests do not apply to the root object.
+ */
+
+ if(parentCheck && tags->objectId > 1 && !obj->parent)
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Obj %d header mismatch parentId %d obj->parent is NULL"TENDSTR),
+ tags->objectId, oh->parentObjectId));
+
+
+ if(parentCheck && obj->parent &&
+ oh->parentObjectId != obj->parent->objectId &&
+ (oh->parentObjectId != YAFFS_OBJECTID_UNLINKED ||
+ obj->parent->objectId != YAFFS_OBJECTID_DELETED))
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Obj %d header mismatch parentId %d parentObjectId %d"TENDSTR),
+ tags->objectId, oh->parentObjectId, obj->parent->objectId));
+
+
+ if(tags->objectId > 1 && oh->name[0] == 0) /* Null name */
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Obj %d header name is NULL"TENDSTR),
+ obj->objectId));
+
+ if(tags->objectId > 1 && ((__u8)(oh->name[0])) == 0xff) /* Trashed name */
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Obj %d header name is 0xFF"TENDSTR),
+ obj->objectId));
+}
+
+
+
+static int yaffs_VerifyTnodeWorker(yaffs_Object * obj, yaffs_Tnode * tn,
+ __u32 level, int chunkOffset)
+{
+ int i;
+ yaffs_Device *dev = obj->myDev;
+ int ok = 1;
+ int nTnodeBytes = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+
+ if (tn) {
+ if (level > 0) {
+
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++){
+ if (tn->internal[i]) {
+ ok = yaffs_VerifyTnodeWorker(obj,
+ tn->internal[i],
+ level - 1,
+ (chunkOffset<<YAFFS_TNODES_INTERNAL_BITS) + i);
+ }
+ }
+ } else if (level == 0) {
+ int i;
+ yaffs_ExtendedTags tags;
+ __u32 objectId = obj->objectId;
+
+ chunkOffset <<= YAFFS_TNODES_LEVEL0_BITS;
+
+ for(i = 0; i < YAFFS_NTNODES_LEVEL0; i++){
+ __u32 theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
+
+ if(theChunk > 0){
+ /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),tags.objectId,tags.chunkId,theChunk)); */
+ yaffs_ReadChunkWithTagsFromNAND(dev,theChunk,NULL, &tags);
+ if(tags.objectId != objectId || tags.chunkId != chunkOffset){
+ T(~0,(TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
+ objectId, chunkOffset, theChunk,
+ tags.objectId, tags.chunkId));
+ }
+ }
+ chunkOffset++;
+ }
+ }
+ }
+
+ return ok;
+
+}
+
+
+static void yaffs_VerifyFile(yaffs_Object *obj)
+{
+ int requiredTallness;
+ int actualTallness;
+ __u32 lastChunk;
+ __u32 x;
+ __u32 i;
+ int ok;
+ yaffs_Device *dev;
+ yaffs_ExtendedTags tags;
+ yaffs_Tnode *tn;
+ __u32 objectId;
+
+ if(obj && yaffs_SkipVerification(obj->myDev))
+ return;
+
+ dev = obj->myDev;
+ objectId = obj->objectId;
+
+ /* Check file size is consistent with tnode depth */
+ lastChunk = obj->variant.fileVariant.fileSize / dev->nDataBytesPerChunk + 1;
+ x = lastChunk >> YAFFS_TNODES_LEVEL0_BITS;
+ requiredTallness = 0;
+ while (x> 0) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ requiredTallness++;
+ }
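+ /* The loop above derives the required tree height: each extra level
+ * widens the addressable chunk range by YAFFS_TNODES_INTERNAL_BITS
+ * bits beyond what a single level-0 tnode can cover. */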
+
+ actualTallness = obj->variant.fileVariant.topLevel;
+
+ if(requiredTallness > actualTallness )
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Obj %d had tnode tallness %d, needs to be %d"TENDSTR),
+ obj->objectId,actualTallness, requiredTallness));
+
+
+ /* Check that the chunks in the tnode tree are all correct.
+ * We do this by scanning through the tnode tree and
+ * checking the tags for every chunk match.
+ */
+
+ if(yaffs_SkipNANDVerification(dev))
+ return;
+
+ for(i = 1; i <= lastChunk; i++){
+ tn = yaffs_FindLevel0Tnode(dev, &obj->variant.fileVariant,i);
+
+ if (tn) {
+ __u32 theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
+ if(theChunk > 0){
+ /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),objectId,i,theChunk)); */
+ yaffs_ReadChunkWithTagsFromNAND(dev,theChunk,NULL, &tags);
+ if(tags.objectId != objectId || tags.chunkId != i){
+ T(~0,(TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
+ objectId, i, theChunk,
+ tags.objectId, tags.chunkId));
+ }
+ }
+ }
+
+ }
+
+}
+
+static void yaffs_VerifyDirectory(yaffs_Object *obj)
+{
+ if(obj && yaffs_SkipVerification(obj->myDev))
+ return;
+
+}
+
+static void yaffs_VerifyHardLink(yaffs_Object *obj)
+{
+ if(obj && yaffs_SkipVerification(obj->myDev))
+ return;
+
+ /* Verify sane equivalent object */
+}
+
+static void yaffs_VerifySymlink(yaffs_Object *obj)
+{
+ if(obj && yaffs_SkipVerification(obj->myDev))
+ return;
+
+ /* Verify symlink string */
+}
+
+static void yaffs_VerifySpecial(yaffs_Object *obj)
+{
+ if(obj && yaffs_SkipVerification(obj->myDev))
+ return;
+}
+
+static void yaffs_VerifyObject(yaffs_Object *obj)
+{
+ yaffs_Device *dev;
+
+ __u32 chunkMin;
+ __u32 chunkMax;
+
+ __u32 chunkIdOk;
+ __u32 chunkIsLive;
+
+ if(!obj)
+ return;
+
+ dev = obj->myDev;
+
+ if(yaffs_SkipVerification(dev))
+ return;
+
+ /* Check sane object header chunk */
+
+ chunkMin = dev->internalStartBlock * dev->nChunksPerBlock;
+ chunkMax = (dev->internalEndBlock+1) * dev->nChunksPerBlock - 1;
+
+ chunkIdOk = (obj->chunkId >= chunkMin && obj->chunkId <= chunkMax);
+ chunkIsLive = chunkIdOk &&
+ yaffs_CheckChunkBit(dev,
+ obj->chunkId / dev->nChunksPerBlock,
+ obj->chunkId % dev->nChunksPerBlock);
+ if(!obj->fake &&
+ (!chunkIdOk || !chunkIsLive)) {
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Obj %d has chunkId %d %s %s"TENDSTR),
+ obj->objectId,obj->chunkId,
+ chunkIdOk ? "" : ",out of range",
+ chunkIsLive || !chunkIdOk ? "" : ",marked as deleted"));
+ }
+
+ if(chunkIdOk && chunkIsLive &&!yaffs_SkipNANDVerification(dev)) {
+ yaffs_ExtendedTags tags;
+ yaffs_ObjectHeader *oh;
+ __u8 *buffer = yaffs_GetTempBuffer(dev,__LINE__);
+
+ oh = (yaffs_ObjectHeader *)buffer;
+
+ yaffs_ReadChunkWithTagsFromNAND(dev, obj->chunkId,buffer, &tags);
+
+ yaffs_VerifyObjectHeader(obj,oh,&tags,1);
+
+ yaffs_ReleaseTempBuffer(dev,buffer,__LINE__);
+ }
+
+ /* Verify it has a parent */
+ if(obj && !obj->fake &&
+ (!obj->parent || obj->parent->myDev != dev)){
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Obj %d has parent pointer %p which does not look like an object"TENDSTR),
+ obj->objectId,obj->parent));
+ }
+
+ /* Verify parent is a directory */
+ if(obj->parent && obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY){
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Obj %d's parent is not a directory (type %d)"TENDSTR),
+ obj->objectId,obj->parent->variantType));
+ }
+
+ switch(obj->variantType){
+ case YAFFS_OBJECT_TYPE_FILE:
+ yaffs_VerifyFile(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ yaffs_VerifySymlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ yaffs_VerifyDirectory(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ yaffs_VerifyHardLink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ yaffs_VerifySpecial(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ default:
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Obj %d has illegaltype %d"TENDSTR),
+ obj->objectId,obj->variantType));
+ break;
+ }
+
+
+}
+
+static void yaffs_VerifyObjects(yaffs_Device *dev)
+{
+ yaffs_Object *obj;
+ int i;
+ struct list_head *lh;
+
+ if(yaffs_SkipVerification(dev))
+ return;
+
+ /* Iterate through the objects in each hash entry */
+
+ for(i = 0; i < YAFFS_NOBJECT_BUCKETS; i++){
+ list_for_each(lh, &dev->objectBucket[i].list) {
+ if (lh) {
+ obj = list_entry(lh, yaffs_Object, hashLink);
+ yaffs_VerifyObject(obj);
+ }
+ }
+ }
+
+}
+
+
+/*
+ * Simple hash function. Needs to have a reasonable spread
+ */
+
+static Y_INLINE int yaffs_HashFunction(int n)
+{
+ n = abs(n);
+ return (n % YAFFS_NOBJECT_BUCKETS);
+}
+
+/*
+ * Access functions to useful fake objects
+ */
+
+yaffs_Object *yaffs_Root(yaffs_Device * dev)
+{
+ return dev->rootDir;
+}
+
+yaffs_Object *yaffs_LostNFound(yaffs_Device * dev)
+{
+ return dev->lostNFoundDir;
+}
+
+
+/*
+ * Erased NAND checking functions
+ */
+
+int yaffs_CheckFF(__u8 * buffer, int nBytes)
+{
+ /* Horrible, slow implementation */
+ while (nBytes--) {
+ if (*buffer != 0xFF)
+ return 0;
+ buffer++;
+ }
+ return 1;
+}
+
+static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
+ int chunkInNAND)
+{
+
+ int retval = YAFFS_OK;
+ __u8 *data = yaffs_GetTempBuffer(dev, __LINE__);
+ yaffs_ExtendedTags tags;
+ int result;
+
+ result = yaffs_ReadChunkWithTagsFromNAND(dev, chunkInNAND, data, &tags);
+
+ if(tags.eccResult > YAFFS_ECC_RESULT_NO_ERROR)
+ retval = YAFFS_FAIL;
+
+
+ if (!yaffs_CheckFF(data, dev->nDataBytesPerChunk) || tags.chunkUsed) {
+ T(YAFFS_TRACE_NANDACCESS,
+ (TSTR("Chunk %d not erased" TENDSTR), chunkInNAND));
+ retval = YAFFS_FAIL;
+ }
+
+ yaffs_ReleaseTempBuffer(dev, data, __LINE__);
+
+ return retval;
+
+}
+
+
+static int yaffs_WriteNewChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev,
+ const __u8 * data,
+ yaffs_ExtendedTags * tags,
+ int useReserve)
+{
+ int attempts = 0;
+ int writeOk = 0;
+ int chunk;
+
+ yaffs_InvalidateCheckpoint(dev);
+
+ do {
+ yaffs_BlockInfo *bi = 0;
+ int erasedOk = 0;
+
+ chunk = yaffs_AllocateChunk(dev, useReserve, &bi);
+ if (chunk < 0) {
+ /* no space */
+ break;
+ }
+
+ /* First check this chunk is erased, if it needs
+ * checking. The checking policy (unless forced
+ * always on) is as follows:
+ *
+ * Check the first page we try to write in a block.
+ * If the check passes then we don't need to check any
+ * more. If the check fails, we check again...
+ * If the block has been erased, we don't need to check.
+ *
+ * However, if the block has been prioritised for gc,
+ * then we think there might be something odd about
+ * this block and stop using it.
+ *
+ * Rationale: We should only ever see chunks that have
+ * not been erased if there was a partially written
+ * chunk due to power loss. This checking policy should
+ * catch that case with very few checks and thus save a
+ * lot of checks that are most likely not needed.
+ */
+ if (bi->gcPrioritise) {
+ yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
+ /* try another chunk */
+ continue;
+ }
+
+ /* let's give it a try */
+ attempts++;
+
+#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+ bi->skipErasedCheck = 0;
+#endif
+ if (!bi->skipErasedCheck) {
+ erasedOk = yaffs_CheckChunkErased(dev, chunk);
+ if (erasedOk != YAFFS_OK) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR ("**>> yaffs chunk %d was not erased"
+ TENDSTR), chunk));
+
+ /* try another chunk */
+ continue;
+ }
+ bi->skipErasedCheck = 1;
+ }
+
+ writeOk = yaffs_WriteChunkWithTagsToNAND(dev, chunk,
+ data, tags);
+ if (writeOk != YAFFS_OK) {
+ yaffs_HandleWriteChunkError(dev, chunk, erasedOk);
+ /* try another chunk */
+ continue;
+ }
+
+ /* Copy the data into the robustification buffer */
+ yaffs_HandleWriteChunkOk(dev, chunk, data, tags);
+
+ } while (writeOk != YAFFS_OK && attempts < yaffs_wr_attempts);
+
+ if (attempts > 1) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("**>> yaffs write required %d attempts" TENDSTR),
+ attempts));
+
+ dev->nRetriedWrites += (attempts - 1);
+ }
+
+ return chunk;
+}
+
+/*
+ * Block retiring for handling a broken block.
+ */
+
+static void yaffs_RetireBlock(yaffs_Device * dev, int blockInNAND)
+{
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
+
+ yaffs_InvalidateCheckpoint(dev);
+
+ yaffs_MarkBlockBad(dev, blockInNAND);
+
+ bi->blockState = YAFFS_BLOCK_STATE_DEAD;
+ bi->gcPrioritise = 0;
+ bi->needsRetiring = 0;
+
+ dev->nRetiredBlocks++;
+}
+
+/*
+ * Functions for robustification TODO
+ *
+ */
+
+static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
+ const __u8 * data,
+ const yaffs_ExtendedTags * tags)
+{
+}
+
+static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
+ const yaffs_ExtendedTags * tags)
+{
+}
+
+void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi)
+{
+ if(!bi->gcPrioritise){
+ bi->gcPrioritise = 1;
+ dev->hasPendingPrioritisedGCs = 1;
+ bi->chunkErrorStrikes ++;
+
+ if(bi->chunkErrorStrikes > 3){
+ bi->needsRetiring = 1; /* Too many strikes, so retire this block */
+ T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Block struck out" TENDSTR)));
+
+ }
+
+ }
+}
+
+static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND, int erasedOk)
+{
+
+ int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
+
+ yaffs_HandleChunkError(dev,bi);
+
+
+ if(erasedOk ) {
+ /* Was an actual write failure, so mark the block for retirement */
+ bi->needsRetiring = 1;
+ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ (TSTR("**>> Block %d needs retiring" TENDSTR), blockInNAND));
+
+
+ }
+
+ /* Delete the chunk */
+ yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
+}
+
+
+/*---------------- Name handling functions ------------*/
+
+static __u16 yaffs_CalcNameSum(const YCHAR * name)
+{
+ __u16 sum = 0;
+ __u16 i = 1;
+
+ YUCHAR *bname = (YUCHAR *) name;
+ if (bname) {
+ while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH/2))) {
+
+#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+ sum += yaffs_toupper(*bname) * i;
+#else
+ sum += (*bname) * i;
+#endif
+ i++;
+ bname++;
+ }
+ }
+ return sum;
+}
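+/*
+ * The sum is a cheap position-weighted checksum of the name, stored in
+ * the object by yaffs_SetObjectName() below. The intent is that name
+ * searches can reject most non-matching objects by comparing sums
+ * before falling back to a full string compare.
+ */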
+
+static void yaffs_SetObjectName(yaffs_Object * obj, const YCHAR * name)
+{
+#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+ if (name && yaffs_strlen(name) <= YAFFS_SHORT_NAME_LENGTH) {
+ yaffs_strcpy(obj->shortName, name);
+ } else {
+ obj->shortName[0] = _Y('\0');
+ }
+#endif
+ obj->sum = yaffs_CalcNameSum(name);
+}
+
+/*-------------------- TNODES -------------------
+
+ * List of spare tnodes
+ * The list is hooked together using the first pointer
+ * in the tnode.
+ */
+
+/* yaffs_CreateTnodes creates a bunch more tnodes and
+ * adds them to the tnode free list.
+ * Don't use this function directly
+ */
+
+static int yaffs_CreateTnodes(yaffs_Device * dev, int nTnodes)
+{
+ int i;
+ int tnodeSize;
+ yaffs_Tnode *newTnodes;
+ __u8 *mem;
+ yaffs_Tnode *curr;
+ yaffs_Tnode *next;
+ yaffs_TnodeList *tnl;
+
+ if (nTnodes < 1)
+ return YAFFS_OK;
+
+ /* Calculate the tnode size in bytes for variable width tnode support.
+ * Must be a multiple of 32-bits */
+ tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+
+ /* make these things */
+
+ newTnodes = YMALLOC(nTnodes * tnodeSize);
+ mem = (__u8 *)newTnodes;
+
+ if (!newTnodes) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("yaffs: Could not allocate Tnodes" TENDSTR)));
+ return YAFFS_FAIL;
+ }
+
+ /* Hook them into the free list */
+#if 0
+ for (i = 0; i < nTnodes - 1; i++) {
+ newTnodes[i].internal[0] = &newTnodes[i + 1];
+#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+ newTnodes[i].internal[YAFFS_NTNODES_INTERNAL] = (void *)1;
+#endif
+ }
+
+ newTnodes[nTnodes - 1].internal[0] = dev->freeTnodes;
+#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+ newTnodes[nTnodes - 1].internal[YAFFS_NTNODES_INTERNAL] = (void *)1;
+#endif
+ dev->freeTnodes = newTnodes;
+#else
+ /* New hookup for wide tnodes */
+ for(i = 0; i < nTnodes -1; i++) {
+ curr = (yaffs_Tnode *) &mem[i * tnodeSize];
+ next = (yaffs_Tnode *) &mem[(i+1) * tnodeSize];
+ curr->internal[0] = next;
+ }
+
+ curr = (yaffs_Tnode *) &mem[(nTnodes - 1) * tnodeSize];
+ curr->internal[0] = dev->freeTnodes;
+ dev->freeTnodes = (yaffs_Tnode *)mem;
+
+#endif
+
+
+ dev->nFreeTnodes += nTnodes;
+ dev->nTnodesCreated += nTnodes;
+
+ /* Now add this bunch of tnodes to a list for freeing up.
+ * NB If we can't add this to the management list it isn't fatal
+ * but it just means we can't free this bunch of tnodes later.
+ */
+
+ tnl = YMALLOC(sizeof(yaffs_TnodeList));
+ if (!tnl) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("yaffs: Could not add tnodes to management list" TENDSTR)));
+ return YAFFS_FAIL;
+
+ } else {
+ tnl->tnodes = newTnodes;
+ tnl->next = dev->allocatedTnodeList;
+ dev->allocatedTnodeList = tnl;
+ }
+
+ T(YAFFS_TRACE_ALLOCATE, (TSTR("yaffs: Tnodes added" TENDSTR)));
+
+ return YAFFS_OK;
+}
+
+/* GetTnode gets us a clean tnode. Tries to allocate more if we run out */
+
+static yaffs_Tnode *yaffs_GetTnodeRaw(yaffs_Device * dev)
+{
+ yaffs_Tnode *tn = NULL;
+
+ /* If there are none left make more */
+ if (!dev->freeTnodes) {
+ yaffs_CreateTnodes(dev, YAFFS_ALLOCATION_NTNODES);
+ }
+
+ if (dev->freeTnodes) {
+ tn = dev->freeTnodes;
+#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+ if (tn->internal[YAFFS_NTNODES_INTERNAL] != (void *)1) {
+ /* Hoosterman, this thing looks like it isn't in the list */
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs: Tnode list bug 1" TENDSTR)));
+ }
+#endif
+ dev->freeTnodes = dev->freeTnodes->internal[0];
+ dev->nFreeTnodes--;
+ }
+
+ return tn;
+}
+
+static yaffs_Tnode *yaffs_GetTnode(yaffs_Device * dev)
+{
+ yaffs_Tnode *tn = yaffs_GetTnodeRaw(dev);
+
+ if(tn)
+ memset(tn, 0, (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
+
+ return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+static void yaffs_FreeTnode(yaffs_Device * dev, yaffs_Tnode * tn)
+{
+ if (tn) {
+#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+ if (tn->internal[YAFFS_NTNODES_INTERNAL] != 0) {
+ /* Hoosterman, this thing looks like it is already in the list */
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs: Tnode list bug 2" TENDSTR)));
+ }
+ tn->internal[YAFFS_NTNODES_INTERNAL] = (void *)1;
+#endif
+ tn->internal[0] = dev->freeTnodes;
+ dev->freeTnodes = tn;
+ dev->nFreeTnodes++;
+ }
+}
+
+static void yaffs_DeinitialiseTnodes(yaffs_Device * dev)
+{
+ /* Free the list of allocated tnodes */
+ yaffs_TnodeList *tmp;
+
+ while (dev->allocatedTnodeList) {
+ tmp = dev->allocatedTnodeList->next;
+
+ YFREE(dev->allocatedTnodeList->tnodes);
+ YFREE(dev->allocatedTnodeList);
+ dev->allocatedTnodeList = tmp;
+
+ }
+
+ dev->freeTnodes = NULL;
+ dev->nFreeTnodes = 0;
+}
+
+static void yaffs_InitialiseTnodes(yaffs_Device * dev)
+{
+ dev->allocatedTnodeList = NULL;
+ dev->freeTnodes = NULL;
+ dev->nFreeTnodes = 0;
+ dev->nTnodesCreated = 0;
+
+}
+
+
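+/* yaffs_PutLevel0Tnode stores val (a chunk number) in entry pos of a level 0 tnode.
+ * The value is right-shifted by chunkGroupBits before being packed.
+ * A level 0 tnode is a packed array of tnodeWidth-bit fields, so an entry may
+ * straddle a 32-bit word boundary; the second if() handles the spill into the
+ * next word. e.g. with an 18-bit tnode width, entry 3 occupies map bits 54..71,
+ * i.e. bits 22..31 of word 1 plus bits 0..7 of word 2.
+ */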
+void yaffs_PutLevel0Tnode(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos, unsigned val)
+{
+ __u32 *map = (__u32 *)tn;
+ __u32 bitInMap;
+ __u32 bitInWord;
+ __u32 wordInMap;
+ __u32 mask;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+ val >>= dev->chunkGroupBits;
+
+ bitInMap = pos * dev->tnodeWidth;
+ wordInMap = bitInMap /32;
+ bitInWord = bitInMap & (32 -1);
+
+ mask = dev->tnodeMask << bitInWord;
+
+ map[wordInMap] &= ~mask;
+ map[wordInMap] |= (mask & (val << bitInWord));
+
+ if(dev->tnodeWidth > (32-bitInWord)) {
+ bitInWord = (32 - bitInWord);
+ wordInMap++;
+ mask = dev->tnodeMask >> (/*dev->tnodeWidth -*/ bitInWord);
+ map[wordInMap] &= ~mask;
+ map[wordInMap] |= (mask & (val >> bitInWord));
+ }
+}
+
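+/* yaffs_GetChunkGroupBase is the inverse of yaffs_PutLevel0Tnode: it extracts the
+ * packed tnodeWidth-bit entry at pos and shifts it back up by chunkGroupBits to
+ * give the first chunk of the chunk group (0 means nothing is mapped here).
+ */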
+static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos)
+{
+ __u32 *map = (__u32 *)tn;
+ __u32 bitInMap;
+ __u32 bitInWord;
+ __u32 wordInMap;
+ __u32 val;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+ bitInMap = pos * dev->tnodeWidth;
+ wordInMap = bitInMap /32;
+ bitInWord = bitInMap & (32 -1);
+
+ val = map[wordInMap] >> bitInWord;
+
+ if(dev->tnodeWidth > (32-bitInWord)) {
+ bitInWord = (32 - bitInWord);
+ wordInMap++;
+ val |= (map[wordInMap] << bitInWord);
+ }
+
+ val &= dev->tnodeMask;
+ val <<= dev->chunkGroupBits;
+
+ return val;
+}
+
+/* ------------------- End of individual tnode manipulation -----------------*/
+
+/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
+ * The look-up tree is represented by the top tnode and the number of levels
+ * (topLevel) in the tree. topLevel 0 means only the level 0 tnode is in the tree.
+ */
+
+/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
+static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device * dev,
+ yaffs_FileStructure * fStruct,
+ __u32 chunkId)
+{
+
+ yaffs_Tnode *tn = fStruct->top;
+ __u32 i;
+ int requiredTallness;
+ int level = fStruct->topLevel;
+
+ /* Check sane level and chunk Id */
+ if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL) {
+ return NULL;
+ }
+
+ if (chunkId > YAFFS_MAX_CHUNK_ID) {
+ return NULL;
+ }
+
+ /* First check we're tall enough (i.e. enough topLevel) */
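+ /* e.g. with the usual 4 level-0 bits and 3 internal bits, chunkIds 0..15 fit in
+ * a height-0 tree, 16..127 need height 1, 128..1023 need height 2, and so on. */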
+
+ i = chunkId >> YAFFS_TNODES_LEVEL0_BITS;
+ requiredTallness = 0;
+ while (i) {
+ i >>= YAFFS_TNODES_INTERNAL_BITS;
+ requiredTallness++;
+ }
+
+ if (requiredTallness > fStruct->topLevel) {
+ /* Not tall enough, so we can't find it, return NULL. */
+ return NULL;
+ }
+
+ /* Traverse down to level 0 */
+ while (level > 0 && tn) {
+ tn = tn->internal[(chunkId >>
+ (YAFFS_TNODES_LEVEL0_BITS +
+ (level - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK];
+ level--;
+
+ }
+
+ return tn;
+}
+
+/* AddOrFindLevel0Tnode finds the level 0 tnode if it exists, otherwise first expands the tree.
+ * This happens in two steps:
+ * 1. If the tree isn't tall enough, then make it taller.
+ * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
+ *
+ * Used when modifying the tree.
+ *
+ * If the tn argument is NULL, then a fresh tnode will be added; otherwise the specified tn will
+ * be plugged into the tree.
+ */
+
+static yaffs_Tnode *yaffs_AddOrFindLevel0Tnode(yaffs_Device * dev,
+ yaffs_FileStructure * fStruct,
+ __u32 chunkId,
+ yaffs_Tnode *passedTn)
+{
+
+ int requiredTallness;
+ int i;
+ int l;
+ yaffs_Tnode *tn;
+
+ __u32 x;
+
+
+ /* Check sane level and page Id */
+ if (fStruct->topLevel < 0 || fStruct->topLevel > YAFFS_TNODES_MAX_LEVEL) {
+ return NULL;
+ }
+
+ if (chunkId > YAFFS_MAX_CHUNK_ID) {
+ return NULL;
+ }
+
+ /* First check we're tall enough (i.e. enough topLevel) */
+
+ x = chunkId >> YAFFS_TNODES_LEVEL0_BITS;
+ requiredTallness = 0;
+ while (x) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ requiredTallness++;
+ }
+
+
+ if (requiredTallness > fStruct->topLevel) {
+ /* Not tall enough, so we have to make the tree taller */
+ for (i = fStruct->topLevel; i < requiredTallness; i++) {
+
+ tn = yaffs_GetTnode(dev);
+
+ if (tn) {
+ tn->internal[0] = fStruct->top;
+ fStruct->top = tn;
+ } else {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("yaffs: no more tnodes" TENDSTR)));
+ }
+ }
+
+ fStruct->topLevel = requiredTallness;
+ }
+
+ /* Traverse down to level 0, adding anything we need */
+
+ l = fStruct->topLevel;
+ tn = fStruct->top;
+
+ if(l > 0) {
+ while (l > 0 && tn) {
+ x = (chunkId >>
+ ( YAFFS_TNODES_LEVEL0_BITS +
+ (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK;
+
+
+ if((l>1) && !tn->internal[x]){
+ /* Add missing non-level-zero tnode */
+ tn->internal[x] = yaffs_GetTnode(dev);
+
+ } else if(l == 1) {
+ /* Looking from level 1 at level 0 */
+ if (passedTn) {
+ /* If we already have one, then release it.*/
+ if(tn->internal[x])
+ yaffs_FreeTnode(dev,tn->internal[x]);
+ tn->internal[x] = passedTn;
+
+ } else if(!tn->internal[x]) {
+ /* Don't have one, none passed in */
+ tn->internal[x] = yaffs_GetTnode(dev);
+ }
+ }
+
+ tn = tn->internal[x];
+ l--;
+ }
+ } else {
+ /* top is level 0 */
+ if(passedTn) {
+ memcpy(tn,passedTn,(dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
+ yaffs_FreeTnode(dev,passedTn);
+ }
+ }
+
+ return tn;
+}
+
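+/* yaffs_FindChunkInGroup scans the (up to chunkGroupSize) chunks starting at theChunk
+ * for one whose tags match objectId/chunkInInode. Returns the matching chunk,
+ * or -1 if none is found.
+ */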
+static int yaffs_FindChunkInGroup(yaffs_Device * dev, int theChunk,
+ yaffs_ExtendedTags * tags, int objectId,
+ int chunkInInode)
+{
+ int j;
+
+ for (j = 0; theChunk && j < dev->chunkGroupSize; j++) {
+ if (yaffs_CheckChunkBit
+ (dev, theChunk / dev->nChunksPerBlock,
+ theChunk % dev->nChunksPerBlock)) {
+ yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL,
+ tags);
+ if (yaffs_TagsMatch(tags, objectId, chunkInInode)) {
+ /* found it; */
+ return theChunk;
+
+ }
+ }
+ theChunk++;
+ }
+ return -1;
+}
+
+
+/* DeleteWorker scans backwards through the tnode tree and deletes all the
+ * chunks and tnodes in the file
+ * Returns 1 if the tree was deleted.
+ * Returns 0 if it stopped early due to hitting the limit and the delete is incomplete.
+ */
+
+static int yaffs_DeleteWorker(yaffs_Object * in, yaffs_Tnode * tn, __u32 level,
+ int chunkOffset, int *limit)
+{
+ int i;
+ int chunkInInode;
+ int theChunk;
+ yaffs_ExtendedTags tags;
+ int foundChunk;
+ yaffs_Device *dev = in->myDev;
+
+ int allDone = 1;
+
+ if (tn) {
+ if (level > 0) {
+
+ for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
+ i--) {
+ if (tn->internal[i]) {
+ if (limit && (*limit) < 0) {
+ allDone = 0;
+ } else {
+ allDone =
+ yaffs_DeleteWorker(in, tn->internal[i],
+ level - 1,
+ (chunkOffset << YAFFS_TNODES_INTERNAL_BITS) + i,
+ limit);
+ }
+ if (allDone) {
+ yaffs_FreeTnode(dev, tn->internal[i]);
+ tn->internal[i] = NULL;
+ }
+ }
+
+ }
+ return (allDone) ? 1 : 0;
+ } else if (level == 0) {
+ int hitLimit = 0;
+
+ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0 && !hitLimit;
+ i--) {
+ theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
+ if (theChunk) {
+
+ chunkInInode =
+ (chunkOffset <<
+ YAFFS_TNODES_LEVEL0_BITS) + i;
+
+ foundChunk =
+ yaffs_FindChunkInGroup(dev,
+ theChunk,
+ &tags,
+ in->objectId,
+ chunkInInode);
+
+ if (foundChunk > 0) {
+ yaffs_DeleteChunk(dev,
+ foundChunk, 1,
+ __LINE__);
+ in->nDataChunks--;
+ if (limit) {
+ *limit = *limit - 1;
+ if (*limit <= 0) {
+ hitLimit = 1;
+ }
+ }
+
+ }
+
+ yaffs_PutLevel0Tnode(dev,tn,i,0);
+ }
+
+ }
+ return (i < 0) ? 1 : 0;
+
+ }
+
+ }
+
+ return 1;
+
+}
+
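+/* yaffs_SoftDeleteChunk just bumps the soft-deletion count on the chunk's block
+ * (and counts the chunk as free space); the chunk itself stays in NAND until the
+ * block is garbage collected.
+ */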
+static void yaffs_SoftDeleteChunk(yaffs_Device * dev, int chunk)
+{
+
+ yaffs_BlockInfo *theBlock;
+
+ T(YAFFS_TRACE_DELETION, (TSTR("soft delete chunk %d" TENDSTR), chunk));
+
+ theBlock = yaffs_GetBlockInfo(dev, chunk / dev->nChunksPerBlock);
+ if (theBlock) {
+ theBlock->softDeletions++;
+ dev->nFreeChunks++;
+ }
+}
+
+/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all the chunks in the file.
+ * All soft deleting does is increment the block's soft-delete count and pull the chunk out
+ * of the tnode.
+ * Thus, essentially this is the same as DeleteWorker except that the chunks are soft deleted.
+ */
+
+static int yaffs_SoftDeleteWorker(yaffs_Object * in, yaffs_Tnode * tn,
+ __u32 level, int chunkOffset)
+{
+ int i;
+ int theChunk;
+ int allDone = 1;
+ yaffs_Device *dev = in->myDev;
+
+ if (tn) {
+ if (level > 0) {
+
+ for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
+ i--) {
+ if (tn->internal[i]) {
+ allDone =
+ yaffs_SoftDeleteWorker(in, tn->internal[i],
+ level - 1,
+ (chunkOffset << YAFFS_TNODES_INTERNAL_BITS) + i);
+ if (allDone) {
+ yaffs_FreeTnode(dev, tn->internal[i]);
+ tn->internal[i] = NULL;
+ } else {
+ /* Hoosterman... how could this happen? */
+ }
+ }
+ }
+ return (allDone) ? 1 : 0;
+ } else if (level == 0) {
+
+ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
+ theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
+ if (theChunk) {
+ /* Note this does not find the real chunk, only the chunk group.
+ * We make an assumption that a chunk group is not larger than
+ * a block.
+ */
+ yaffs_SoftDeleteChunk(dev, theChunk);
+ yaffs_PutLevel0Tnode(dev,tn,i,0);
+ }
+
+ }
+ return 1;
+
+ }
+
+ }
+
+ return 1;
+
+}
+
+static void yaffs_SoftDeleteFile(yaffs_Object * obj)
+{
+ if (obj->deleted &&
+ obj->variantType == YAFFS_OBJECT_TYPE_FILE && !obj->softDeleted) {
+ if (obj->nDataChunks <= 0) {
+ /* Empty file with no duplicate object headers, just delete it immediately */
+ yaffs_FreeTnode(obj->myDev,
+ obj->variant.fileVariant.top);
+ obj->variant.fileVariant.top = NULL;
+ T(YAFFS_TRACE_TRACING,
+ (TSTR("yaffs: Deleting empty file %d" TENDSTR),
+ obj->objectId));
+ yaffs_DoGenericObjectDeletion(obj);
+ } else {
+ yaffs_SoftDeleteWorker(obj,
+ obj->variant.fileVariant.top,
+ obj->variant.fileVariant.
+ topLevel, 0);
+ obj->softDeleted = 1;
+ }
+ }
+}
+
+/* Pruning removes any part of the file structure tree that is beyond the
+ * bounds of the file (i.e. that does not point to chunks).
+ *
+ * A file should only get pruned when its size is reduced.
+ *
+ * Before pruning, the chunks must be pulled from the tree and the
+ * level 0 tnode entries must be zeroed out.
+ * Could also use this for file deletion, but that's probably better handled
+ * by a special case.
+ */
+
+static yaffs_Tnode *yaffs_PruneWorker(yaffs_Device * dev, yaffs_Tnode * tn,
+ __u32 level, int del0)
+{
+ int i;
+ int hasData;
+
+ if (tn) {
+ hasData = 0;
+
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
+ if (tn->internal[i] && level > 0) {
+ tn->internal[i] =
+ yaffs_PruneWorker(dev, tn->internal[i],
+ level - 1,
+ (i == 0) ? del0 : 1);
+ }
+
+ if (tn->internal[i]) {
+ hasData++;
+ }
+ }
+
+ if (hasData == 0 && del0) {
+ /* Free and return NULL */
+
+ yaffs_FreeTnode(dev, tn);
+ tn = NULL;
+ }
+
+ }
+
+ return tn;
+
+}
+
+static int yaffs_PruneFileStructure(yaffs_Device * dev,
+ yaffs_FileStructure * fStruct)
+{
+ int i;
+ int hasData;
+ int done = 0;
+ yaffs_Tnode *tn;
+
+ if (fStruct->topLevel > 0) {
+ fStruct->top =
+ yaffs_PruneWorker(dev, fStruct->top, fStruct->topLevel, 0);
+
+ /* Now we have a tree with all the empty branches pruned to NULL, but the
+ * height is the same as it was.
+ * Let's see if we can trim internal tnodes to shorten the tree.
+ * We can do this if only the 0th element in the tnode is in use
+ * (i.e. all the non-zeroth entries are NULL).
+ */
+
+ while (fStruct->topLevel && !done) {
+ tn = fStruct->top;
+
+ hasData = 0;
+ for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
+ if (tn->internal[i]) {
+ hasData++;
+ }
+ }
+
+ if (!hasData) {
+ fStruct->top = tn->internal[0];
+ fStruct->topLevel--;
+ yaffs_FreeTnode(dev, tn);
+ } else {
+ done = 1;
+ }
+ }
+ }
+
+ return YAFFS_OK;
+}
+
+/*-------------------- End of File Structure functions.-------------------*/
+
+/* yaffs_CreateFreeObjects creates a bunch more objects and
+ * adds them to the object free list.
+ */
+static int yaffs_CreateFreeObjects(yaffs_Device * dev, int nObjects)
+{
+ int i;
+ yaffs_Object *newObjects;
+ yaffs_ObjectList *list;
+
+ if (nObjects < 1)
+ return YAFFS_OK;
+
+ /* Allocate the objects and the list header used to track them */
+ newObjects = YMALLOC(nObjects * sizeof(yaffs_Object));
+ list = YMALLOC(sizeof(yaffs_ObjectList));
+
+ if (!newObjects || !list) {
+ if(newObjects)
+ YFREE(newObjects);
+ if(list)
+ YFREE(list);
+ T(YAFFS_TRACE_ALLOCATE,
+ (TSTR("yaffs: Could not allocate more objects" TENDSTR)));
+ return YAFFS_FAIL;
+ }
+
+ /* Hook them into the free list */
+ for (i = 0; i < nObjects - 1; i++) {
+ newObjects[i].siblings.next =
+ (struct list_head *)(&newObjects[i + 1]);
+ }
+
+ newObjects[nObjects - 1].siblings.next = (void *)dev->freeObjects;
+ dev->freeObjects = newObjects;
+ dev->nFreeObjects += nObjects;
+ dev->nObjectsCreated += nObjects;
+
+ /* Now add this bunch of Objects to a list for freeing up. */
+
+ list->objects = newObjects;
+ list->next = dev->allocatedObjectList;
+ dev->allocatedObjectList = list;
+
+ return YAFFS_OK;
+}
+
+
+/* AllocateEmptyObject gets us a clean Object. Tries to allocate more if we run out. */
+static yaffs_Object *yaffs_AllocateEmptyObject(yaffs_Device * dev)
+{
+ yaffs_Object *tn = NULL;
+
+ /* If there are none left make more */
+ if (!dev->freeObjects) {
+ yaffs_CreateFreeObjects(dev, YAFFS_ALLOCATION_NOBJECTS);
+ }
+
+ if (dev->freeObjects) {
+ tn = dev->freeObjects;
+ dev->freeObjects =
+ (yaffs_Object *) (dev->freeObjects->siblings.next);
+ dev->nFreeObjects--;
+
+ /* Now sweeten it up... */
+
+ memset(tn, 0, sizeof(yaffs_Object));
+ tn->myDev = dev;
+ tn->chunkId = -1;
+ tn->variantType = YAFFS_OBJECT_TYPE_UNKNOWN;
+ INIT_LIST_HEAD(&(tn->hardLinks));
+ INIT_LIST_HEAD(&(tn->hashLink));
+ INIT_LIST_HEAD(&tn->siblings);
+
+ /* Add it to the lost and found directory.
+ * NB Can't put root or lostNFound in lostNFound so
+ * check if lostNFound exists first
+ */
+ if (dev->lostNFoundDir) {
+ yaffs_AddObjectToDirectory(dev->lostNFoundDir, tn);
+ }
+ }
+
+ return tn;
+}
+
+static yaffs_Object *yaffs_CreateFakeDirectory(yaffs_Device * dev, int number,
+ __u32 mode)
+{
+
+ yaffs_Object *obj =
+ yaffs_CreateNewObject(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
+ if (obj) {
+ obj->fake = 1; /* it is fake so it has no NAND presence... */
+ obj->renameAllowed = 0; /* ... and we're not allowed to rename it... */
+ obj->unlinkAllowed = 0; /* ... or unlink it */
+ obj->deleted = 0;
+ obj->unlinked = 0;
+ obj->yst_mode = mode;
+ obj->myDev = dev;
+ obj->chunkId = 0; /* Not a valid chunk. */
+ }
+
+ return obj;
+
+}
+
+static void yaffs_UnhashObject(yaffs_Object * tn)
+{
+ int bucket;
+ yaffs_Device *dev = tn->myDev;
+
+ /* If it is still linked into the bucket list, free from the list */
+ if (!list_empty(&tn->hashLink)) {
+ list_del_init(&tn->hashLink);
+ bucket = yaffs_HashFunction(tn->objectId);
+ dev->objectBucket[bucket].count--;
+ }
+
+}
+
+/* FreeObject frees up a Object and puts it back on the free list */
+static void yaffs_FreeObject(yaffs_Object * tn)
+{
+
+ yaffs_Device *dev = tn->myDev;
+
+#ifdef __KERNEL__
+ if (tn->myInode) {
+ /* We're still hooked up to a cached inode.
+ * Don't delete now, but mark for later deletion
+ */
+ tn->deferedFree = 1;
+ return;
+ }
+#endif
+
+ yaffs_UnhashObject(tn);
+
+ /* Link into the free list. */
+ tn->siblings.next = (struct list_head *)(dev->freeObjects);
+ dev->freeObjects = tn;
+ dev->nFreeObjects++;
+}
+
+#ifdef __KERNEL__
+
+void yaffs_HandleDeferedFree(yaffs_Object * obj)
+{
+ if (obj->deferedFree) {
+ yaffs_FreeObject(obj);
+ }
+}
+
+#endif
+
+static void yaffs_DeinitialiseObjects(yaffs_Device * dev)
+{
+ /* Free the list of allocated Objects */
+
+ yaffs_ObjectList *tmp;
+
+ while (dev->allocatedObjectList) {
+ tmp = dev->allocatedObjectList->next;
+ YFREE(dev->allocatedObjectList->objects);
+ YFREE(dev->allocatedObjectList);
+
+ dev->allocatedObjectList = tmp;
+ }
+
+ dev->freeObjects = NULL;
+ dev->nFreeObjects = 0;
+}
+
+static void yaffs_InitialiseObjects(yaffs_Device * dev)
+{
+ int i;
+
+ dev->allocatedObjectList = NULL;
+ dev->freeObjects = NULL;
+ dev->nFreeObjects = 0;
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ INIT_LIST_HEAD(&dev->objectBucket[i].list);
+ dev->objectBucket[i].count = 0;
+ }
+
+}
+
+static int yaffs_FindNiceObjectBucket(yaffs_Device * dev)
+{
+ static int x = 0;
+ int i;
+ int l = 999;
+ int lowest = 999999;
+
+ /* First let's see if we can find one that's empty. */
+
+ for (i = 0; i < 10 && lowest > 0; i++) {
+ x++;
+ x %= YAFFS_NOBJECT_BUCKETS;
+ if (dev->objectBucket[x].count < lowest) {
+ lowest = dev->objectBucket[x].count;
+ l = x;
+ }
+
+ }
+
+ /* If we didn't find an empty list, then try
+ * looking a bit further for a short one
+ */
+
+ for (i = 0; i < 10 && lowest > 3; i++) {
+ x++;
+ x %= YAFFS_NOBJECT_BUCKETS;
+ if (dev->objectBucket[x].count < lowest) {
+ lowest = dev->objectBucket[x].count;
+ l = x;
+ }
+
+ }
+
+ return l;
+}
+
+static int yaffs_CreateNewObjectNumber(yaffs_Device * dev)
+{
+ int bucket = yaffs_FindNiceObjectBucket(dev);
+
+ /* Now find an object value that has not already been taken
+ * by scanning the list.
+ */
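+ /* Candidates are bucket + YAFFS_NOBJECT_BUCKETS, bucket + 2 * YAFFS_NOBJECT_BUCKETS, ...
+ * so (given that the hash is objectId % YAFFS_NOBJECT_BUCKETS) each candidate hashes
+ * straight back into the bucket we just picked.
+ */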
+
+ int found = 0;
+ struct list_head *i;
+
+ __u32 n = (__u32) bucket;
+
+ /* yaffs_CheckObjectHashSanity(); */
+
+ while (!found) {
+ found = 1;
+ n += YAFFS_NOBJECT_BUCKETS;
+ if (1 || dev->objectBucket[bucket].count > 0) {
+ list_for_each(i, &dev->objectBucket[bucket].list) {
+ /* If there is already one in the list */
+ if (i
+ && list_entry(i, yaffs_Object,
+ hashLink)->objectId == n) {
+ found = 0;
+ }
+ }
+ }
+ }
+
+
+ return n;
+}
+
+static void yaffs_HashObject(yaffs_Object * in)
+{
+ int bucket = yaffs_HashFunction(in->objectId);
+ yaffs_Device *dev = in->myDev;
+
+ list_add(&in->hashLink, &dev->objectBucket[bucket].list);
+ dev->objectBucket[bucket].count++;
+
+}
+
+yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device * dev, __u32 number)
+{
+ int bucket = yaffs_HashFunction(number);
+ struct list_head *i;
+ yaffs_Object *in;
+
+ list_for_each(i, &dev->objectBucket[bucket].list) {
+ /* Check if it is in the list */
+ if (i) {
+ in = list_entry(i, yaffs_Object, hashLink);
+ if (in->objectId == number) {
+#ifdef __KERNEL__
+ /* Don't tell the VFS about this one if it is marked for deferred freeing */
+ if (in->deferedFree)
+ return NULL;
+#endif
+
+ return in;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+yaffs_Object *yaffs_CreateNewObject(yaffs_Device * dev, int number,
+ yaffs_ObjectType type)
+{
+
+ yaffs_Object *theObject;
+ yaffs_Tnode *tn;
+
+ if (number < 0) {
+ number = yaffs_CreateNewObjectNumber(dev);
+ }
+
+ theObject = yaffs_AllocateEmptyObject(dev);
+ if(!theObject)
+ return NULL;
+
+ if(type == YAFFS_OBJECT_TYPE_FILE){
+ tn = yaffs_GetTnode(dev);
+ if(!tn){
+ yaffs_FreeObject(theObject);
+ return NULL;
+ }
+ }
+
+
+
+ if (theObject) {
+ theObject->fake = 0;
+ theObject->renameAllowed = 1;
+ theObject->unlinkAllowed = 1;
+ theObject->objectId = number;
+ yaffs_HashObject(theObject);
+ theObject->variantType = type;
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_WinFileTimeNow(theObject->win_atime);
+ theObject->win_ctime[0] = theObject->win_mtime[0] =
+ theObject->win_atime[0];
+ theObject->win_ctime[1] = theObject->win_mtime[1] =
+ theObject->win_atime[1];
+
+#else
+
+ theObject->yst_atime = theObject->yst_mtime =
+ theObject->yst_ctime = Y_CURRENT_TIME;
+#endif
+ switch (type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ theObject->variant.fileVariant.fileSize = 0;
+ theObject->variant.fileVariant.scannedFileSize = 0;
+ theObject->variant.fileVariant.shrinkSize = 0xFFFFFFFF; /* max __u32 */
+ theObject->variant.fileVariant.topLevel = 0;
+ theObject->variant.fileVariant.top = tn;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ INIT_LIST_HEAD(&theObject->variant.directoryVariant.
+ children);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* No action required */
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* TODO: this should not happen */
+ break;
+ }
+ }
+
+ return theObject;
+}
+
+static yaffs_Object *yaffs_FindOrCreateObjectByNumber(yaffs_Device * dev,
+ int number,
+ yaffs_ObjectType type)
+{
+ yaffs_Object *theObject = NULL;
+
+ if (number > 0) {
+ theObject = yaffs_FindObjectByNumber(dev, number);
+ }
+
+ if (!theObject) {
+ theObject = yaffs_CreateNewObject(dev, number, type);
+ }
+
+ return theObject;
+
+}
+
+
+static YCHAR *yaffs_CloneString(const YCHAR * str)
+{
+ YCHAR *newStr = NULL;
+
+ if (str && *str) {
+ newStr = YMALLOC((yaffs_strlen(str) + 1) * sizeof(YCHAR));
+ if(newStr)
+ yaffs_strcpy(newStr, str);
+ }
+
+ return newStr;
+
+}
+
+/*
+ * Mknod (create) a new object.
+ * equivalentObject only has meaning for a hard link;
+ * aliasString only has meaning for a symlink;
+ * rdev only has meaning for devices (a subset of special objects).
+ */
+
+static yaffs_Object *yaffs_MknodObject(yaffs_ObjectType type,
+ yaffs_Object * parent,
+ const YCHAR * name,
+ __u32 mode,
+ __u32 uid,
+ __u32 gid,
+ yaffs_Object * equivalentObject,
+ const YCHAR * aliasString, __u32 rdev)
+{
+ yaffs_Object *in;
+ YCHAR *str;
+
+ yaffs_Device *dev = parent->myDev;
+
+ /* Check if the entry exists. If it does then fail the call since we don't want a dup.*/
+ if (yaffs_FindObjectByName(parent, name)) {
+ return NULL;
+ }
+
+ in = yaffs_CreateNewObject(dev, -1, type);
+
+ if(type == YAFFS_OBJECT_TYPE_SYMLINK){
+ str = yaffs_CloneString(aliasString);
+ if(!str){
+ yaffs_FreeObject(in);
+ return NULL;
+ }
+ }
+
+
+
+ if (in) {
+ in->chunkId = -1;
+ in->valid = 1;
+ in->variantType = type;
+
+ in->yst_mode = mode;
+
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_WinFileTimeNow(in->win_atime);
+ in->win_ctime[0] = in->win_mtime[0] = in->win_atime[0];
+ in->win_ctime[1] = in->win_mtime[1] = in->win_atime[1];
+
+#else
+ in->yst_atime = in->yst_mtime = in->yst_ctime = Y_CURRENT_TIME;
+
+ in->yst_rdev = rdev;
+ in->yst_uid = uid;
+ in->yst_gid = gid;
+#endif
+ in->nDataChunks = 0;
+
+ yaffs_SetObjectName(in, name);
+ in->dirty = 1;
+
+ yaffs_AddObjectToDirectory(parent, in);
+
+ in->myDev = parent->myDev;
+
+ switch (type) {
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symLinkVariant.alias = str;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.hardLinkVariant.equivalentObject =
+ equivalentObject;
+ in->variant.hardLinkVariant.equivalentObjectId =
+ equivalentObject->objectId;
+ list_add(&in->hardLinks, &equivalentObject->hardLinks);
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* do nothing */
+ break;
+ }
+
+ if (yaffs_UpdateObjectHeader(in, name, 0, 0, 0) < 0) {
+ /* Could not create the object header, fail the creation */
+ yaffs_DestroyObject(in);
+ in = NULL;
+ }
+
+ }
+
+ return in;
+}
+
+yaffs_Object *yaffs_MknodFile(yaffs_Object * parent, const YCHAR * name,
+ __u32 mode, __u32 uid, __u32 gid)
+{
+ return yaffs_MknodObject(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
+ uid, gid, NULL, NULL, 0);
+}
+
+yaffs_Object *yaffs_MknodDirectory(yaffs_Object * parent, const YCHAR * name,
+ __u32 mode, __u32 uid, __u32 gid)
+{
+ return yaffs_MknodObject(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
+ mode, uid, gid, NULL, NULL, 0);
+}
+
+yaffs_Object *yaffs_MknodSpecial(yaffs_Object * parent, const YCHAR * name,
+ __u32 mode, __u32 uid, __u32 gid, __u32 rdev)
+{
+ return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
+ uid, gid, NULL, NULL, rdev);
+}
+
+yaffs_Object *yaffs_MknodSymLink(yaffs_Object * parent, const YCHAR * name,
+ __u32 mode, __u32 uid, __u32 gid,
+ const YCHAR * alias)
+{
+ return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
+ uid, gid, NULL, alias, 0);
+}
+
+/* yaffs_Link returns the equivalent object (or NULL on failure). */
+yaffs_Object *yaffs_Link(yaffs_Object * parent, const YCHAR * name,
+ yaffs_Object * equivalentObject)
+{
+ /* Get the real object in case we were fed a hard link as an equivalent object */
+ equivalentObject = yaffs_GetEquivalentObject(equivalentObject);
+
+ if (yaffs_MknodObject
+ (YAFFS_OBJECT_TYPE_HARDLINK, parent, name, 0, 0, 0,
+ equivalentObject, NULL, 0)) {
+ return equivalentObject;
+ } else {
+ return NULL;
+ }
+
+}
+
+static int yaffs_ChangeObjectName(yaffs_Object * obj, yaffs_Object * newDir,
+ const YCHAR * newName, int force, int shadows)
+{
+ int unlinkOp;
+ int deleteOp;
+
+ yaffs_Object *existingTarget;
+
+ if (newDir == NULL) {
+ newDir = obj->parent; /* use the old directory */
+ }
+
+ if (newDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("tragendy: yaffs_ChangeObjectName: newDir is not a directory"
+ TENDSTR)));
+ YBUG();
+ }
+
+ /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
+ if (obj->myDev->isYaffs2) {
+ unlinkOp = (newDir == obj->myDev->unlinkedDir);
+ } else {
+ unlinkOp = (newDir == obj->myDev->unlinkedDir
+ && obj->variantType == YAFFS_OBJECT_TYPE_FILE);
+ }
+
+ deleteOp = (newDir == obj->myDev->deletedDir);
+
+ existingTarget = yaffs_FindObjectByName(newDir, newName);
+
+ /* If the object is a file going into the unlinked directory,
+ * then it is OK to just stuff it in since duplicate names are allowed.
+ * Otherwise only proceed if the new name does not exist and we're putting
+ * it into a directory.
+ */
+ if ((unlinkOp ||
+ deleteOp ||
+ force ||
+ (shadows > 0) ||
+ !existingTarget) &&
+ newDir->variantType == YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_SetObjectName(obj, newName);
+ obj->dirty = 1;
+
+ yaffs_AddObjectToDirectory(newDir, obj);
+
+ if (unlinkOp)
+ obj->unlinked = 1;
+
+ /* If it is a deletion then we mark it as a shrink for gc purposes. */
+ if (yaffs_UpdateObjectHeader(obj, newName, 0, deleteOp, shadows)>= 0)
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+}
+
+int yaffs_RenameObject(yaffs_Object * oldDir, const YCHAR * oldName,
+ yaffs_Object * newDir, const YCHAR * newName)
+{
+ yaffs_Object *obj;
+ yaffs_Object *existingTarget;
+ int force = 0;
+
+#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+ /* Special case for case insensitive systems (e.g. WinCE).
+ * While look-up is case insensitive, the name isn't.
+ * Therefore we might want to change x.txt to X.txt
+ */
+ if (oldDir == newDir && yaffs_strcmp(oldName, newName) == 0) {
+ force = 1;
+ }
+#endif
+
+ obj = yaffs_FindObjectByName(oldDir, oldName);
+ if (!obj)
+ return YAFFS_FAIL; /* no such object, nothing to rename */
+
+ /* Check if the new name is too long. */
+ if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK &&
+ yaffs_strlen(newName) > YAFFS_MAX_ALIAS_LENGTH)
+ /* ENAMETOOLONG */
+ return YAFFS_FAIL;
+ else if (obj->variantType != YAFFS_OBJECT_TYPE_SYMLINK &&
+ yaffs_strlen(newName) > YAFFS_MAX_NAME_LENGTH)
+ /* ENAMETOOLONG */
+ return YAFFS_FAIL;
+
+ if (obj && obj->renameAllowed) {
+
+ /* Now do the handling for an existing target, if there is one */
+
+ existingTarget = yaffs_FindObjectByName(newDir, newName);
+ if (existingTarget &&
+ existingTarget->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
+ !list_empty(&existingTarget->variant.directoryVariant.children)) {
+ /* There is a target that is a non-empty directory, so we fail */
+ return YAFFS_FAIL; /* EEXIST or ENOTEMPTY */
+ } else if (existingTarget && existingTarget != obj) {
+ /* Nuke the target first, using shadowing,
+ * but only if it isn't the same object
+ */
+ yaffs_ChangeObjectName(obj, newDir, newName, force,
+ existingTarget->objectId);
+ yaffs_UnlinkObject(existingTarget);
+ }
+
+ return yaffs_ChangeObjectName(obj, newDir, newName, 1, 0);
+ }
+ return YAFFS_FAIL;
+}
+
+/*------------------------- Block Management and Page Allocation ----------------*/
+
+static int yaffs_InitialiseBlocks(yaffs_Device * dev)
+{
+ int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
+
+ dev->blockInfo = NULL;
+ dev->chunkBits = NULL;
+
+ dev->allocationBlock = -1; /* force it to get a new one */
+
+ /* If the first allocation strategy fails, try the alternate one */
+ dev->blockInfo = YMALLOC(nBlocks * sizeof(yaffs_BlockInfo));
+ if(!dev->blockInfo){
+ dev->blockInfo = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockInfo));
+ dev->blockInfoAlt = 1;
+ }
+ else
+ dev->blockInfoAlt = 0;
+
+ if(dev->blockInfo){
+
+ /* Set up dynamic blockinfo stuff. */
+ dev->chunkBitmapStride = (dev->nChunksPerBlock + 7) / 8; /* round up bytes */
+ dev->chunkBits = YMALLOC(dev->chunkBitmapStride * nBlocks);
+ if(!dev->chunkBits){
+ dev->chunkBits = YMALLOC_ALT(dev->chunkBitmapStride * nBlocks);
+ dev->chunkBitsAlt = 1;
+ }
+ else
+ dev->chunkBitsAlt = 0;
+ }
+
+ if (dev->blockInfo && dev->chunkBits) {
+ memset(dev->blockInfo, 0, nBlocks * sizeof(yaffs_BlockInfo));
+ memset(dev->chunkBits, 0, dev->chunkBitmapStride * nBlocks);
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+
+}
+
+static void yaffs_DeinitialiseBlocks(yaffs_Device * dev)
+{
+ if(dev->blockInfoAlt && dev->blockInfo)
+ YFREE_ALT(dev->blockInfo);
+ else if(dev->blockInfo)
+ YFREE(dev->blockInfo);
+
+ dev->blockInfoAlt = 0;
+
+ dev->blockInfo = NULL;
+
+ if(dev->chunkBitsAlt && dev->chunkBits)
+ YFREE_ALT(dev->chunkBits);
+ else if(dev->chunkBits)
+ YFREE(dev->chunkBits);
+ dev->chunkBitsAlt = 0;
+ dev->chunkBits = NULL;
+}
+
+static int yaffs_BlockNotDisqualifiedFromGC(yaffs_Device * dev,
+ yaffs_BlockInfo * bi)
+{
+ int i;
+ __u32 seq;
+ yaffs_BlockInfo *b;
+
+ if (!dev->isYaffs2)
+ return 1; /* disqualification only applies to yaffs2. */
+
+ if (!bi->hasShrinkHeader)
+ return 1; /* can gc */
+
+ /* Find the oldest dirty sequence number if we don't know it and save it
+ * so we don't have to keep recomputing it.
+ */
+ if (!dev->oldestDirtySequence) {
+ seq = dev->sequenceNumber;
+
+ for (i = dev->internalStartBlock; i <= dev->internalEndBlock;
+ i++) {
+ b = yaffs_GetBlockInfo(dev, i);
+ if (b->blockState == YAFFS_BLOCK_STATE_FULL &&
+ (b->pagesInUse - b->softDeletions) <
+ dev->nChunksPerBlock && b->sequenceNumber < seq) {
+ seq = b->sequenceNumber;
+ }
+ }
+ dev->oldestDirtySequence = seq;
+ }
+
+ /* Can't do gc of this block if there are any blocks older than this one that have
+ * discarded pages.
+ */
+ return (bi->sequenceNumber <= dev->oldestDirtySequence);
+
+}
+
+/* yaffs_FindBlockForGarbageCollection is used to select the dirtiest block (or close enough)
+ * for garbage collection.
+ */
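+/* A non-aggressive (leisurely) pass only looks at a slice of the array (1/16th of
+ * the blocks, capped at 200) and only accepts blocks with no more than
+ * YAFFS_PASSIVE_GC_CHUNKS live pages; an aggressive pass scans the whole array and
+ * accepts any full block with at least one free or soft-deleted page.
+ */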
+
+static int yaffs_FindBlockForGarbageCollection(yaffs_Device * dev,
+ int aggressive)
+{
+
+ int b = dev->currentDirtyChecker;
+
+ int i;
+ int iterations;
+ int dirtiest = -1;
+ int pagesInUse = 0;
+ int prioritised=0;
+ yaffs_BlockInfo *bi;
+ int pendingPrioritisedExist = 0;
+
+ /* First let's see if we need to grab a prioritised block */
+ if(dev->hasPendingPrioritisedGCs){
+ for(i = dev->internalStartBlock; i < dev->internalEndBlock && !prioritised; i++){
+
+ bi = yaffs_GetBlockInfo(dev, i);
+ //yaffs_VerifyBlock(dev,bi,i);
+
+ if(bi->gcPrioritise) {
+ pendingPrioritisedExist = 1;
+ if(bi->blockState == YAFFS_BLOCK_STATE_FULL &&
+ yaffs_BlockNotDisqualifiedFromGC(dev, bi)){
+ pagesInUse = (bi->pagesInUse - bi->softDeletions);
+ dirtiest = i;
+ prioritised = 1;
+ aggressive = 1; /* Fool the non-aggressive skip logic below */
+ }
+ }
+ }
+
+ if(!pendingPrioritisedExist) /* None found, so we can clear this */
+ dev->hasPendingPrioritisedGCs = 0;
+ }
+
+ /* If we're doing aggressive GC then we are happy to take a less-dirty block, and
+ * search harder.
+ * Otherwise (we're doing a leisurely gc) we only bother to do this if the
+ * block has only a few pages in use.
+ */
+
+ dev->nonAggressiveSkip--;
+
+ if (!aggressive && (dev->nonAggressiveSkip > 0)) {
+ return -1;
+ }
+
+ if(!prioritised)
+ pagesInUse =
+ (aggressive) ? dev->nChunksPerBlock : YAFFS_PASSIVE_GC_CHUNKS + 1;
+
+ if (aggressive) {
+ iterations =
+ dev->internalEndBlock - dev->internalStartBlock + 1;
+ } else {
+ iterations =
+ dev->internalEndBlock - dev->internalStartBlock + 1;
+ iterations = iterations / 16;
+ if (iterations > 200) {
+ iterations = 200;
+ }
+ }
+
+ for (i = 0; i <= iterations && pagesInUse > 0 && !prioritised; i++) {
+ b++;
+ if (b < dev->internalStartBlock || b > dev->internalEndBlock) {
+ b = dev->internalStartBlock;
+ }
+
+ if (b < dev->internalStartBlock || b > dev->internalEndBlock) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("**>> Block %d is not valid" TENDSTR), b));
+ YBUG();
+ }
+
+ bi = yaffs_GetBlockInfo(dev, b);
+
+#if 0
+ if (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ dirtiest = b;
+ pagesInUse = 0;
+ }
+ else
+#endif
+
+ if (bi->blockState == YAFFS_BLOCK_STATE_FULL &&
+ (bi->pagesInUse - bi->softDeletions) < pagesInUse &&
+ yaffs_BlockNotDisqualifiedFromGC(dev, bi)) {
+ dirtiest = b;
+ pagesInUse = (bi->pagesInUse - bi->softDeletions);
+ }
+ }
+
+ dev->currentDirtyChecker = b;
+
+ if (dirtiest > 0) {
+ T(YAFFS_TRACE_GC,
+ (TSTR("GC Selected block %d with %d free, prioritised:%d" TENDSTR), dirtiest,
+ dev->nChunksPerBlock - pagesInUse,prioritised));
+ }
+
+ dev->oldestDirtySequence = 0;
+
+ if (dirtiest > 0) {
+ dev->nonAggressiveSkip = 4;
+ }
+
+ return dirtiest;
+}
+
+static void yaffs_BlockBecameDirty(yaffs_Device * dev, int blockNo)
+{
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockNo);
+
+ int erasedOk = 0;
+
+ /* If the block is still healthy erase it and mark as clean.
+ * If the block has had a data failure, then retire it.
+ */
+
+ T(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
+ (TSTR("yaffs_BlockBecameDirty block %d state %d %s"TENDSTR),
+ blockNo, bi->blockState, (bi->needsRetiring) ? "needs retiring" : ""));
+
+ bi->blockState = YAFFS_BLOCK_STATE_DIRTY;
+
+ if (!bi->needsRetiring) {
+ yaffs_InvalidateCheckpoint(dev);
+ erasedOk = yaffs_EraseBlockInNAND(dev, blockNo);
+ if (!erasedOk) {
+ dev->nErasureFailures++;
+ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ (TSTR("**>> Erasure failed %d" TENDSTR), blockNo));
+ }
+ }
+
+ if (erasedOk &&
+ ((yaffs_traceMask & YAFFS_TRACE_ERASE) || !yaffs_SkipVerification(dev))) {
+ int i;
+ for (i = 0; i < dev->nChunksPerBlock; i++) {
+ if (!yaffs_CheckChunkErased
+ (dev, blockNo * dev->nChunksPerBlock + i)) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ (">>Block %d erasure supposedly OK, but chunk %d not erased"
+ TENDSTR), blockNo, i));
+ }
+ }
+ }
+
+ if (erasedOk) {
+ /* Clean it up... */
+ bi->blockState = YAFFS_BLOCK_STATE_EMPTY;
+ dev->nErasedBlocks++;
+ bi->pagesInUse = 0;
+ bi->softDeletions = 0;
+ bi->hasShrinkHeader = 0;
+ bi->skipErasedCheck = 1; /* This is clean, so no need to check */
+ bi->gcPrioritise = 0;
+ yaffs_ClearChunkBits(dev, blockNo);
+
+ T(YAFFS_TRACE_ERASE,
+ (TSTR("Erased block %d" TENDSTR), blockNo));
+ } else {
+ dev->nFreeChunks -= dev->nChunksPerBlock; /* We lost a block of free space */
+
+ yaffs_RetireBlock(dev, blockNo);
+ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ (TSTR("**>> Block %d retired" TENDSTR), blockNo));
+ }
+}
+
+static int yaffs_FindBlockForAllocation(yaffs_Device * dev)
+{
+ int i;
+
+ yaffs_BlockInfo *bi;
+
+ if (dev->nErasedBlocks < 1) {
+ /* Hoosterman we've got a problem.
+ * Can't get space to gc
+ */
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("yaffs tragedy: no more eraased blocks" TENDSTR)));
+
+ return -1;
+ }
+
+ /* Find an empty block. */
+
+ for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
+ dev->allocationBlockFinder++;
+ if (dev->allocationBlockFinder < dev->internalStartBlock
+ || dev->allocationBlockFinder > dev->internalEndBlock) {
+ dev->allocationBlockFinder = dev->internalStartBlock;
+ }
+
+ bi = yaffs_GetBlockInfo(dev, dev->allocationBlockFinder);
+
+ if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY) {
+ bi->blockState = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->sequenceNumber++;
+ bi->sequenceNumber = dev->sequenceNumber;
+ dev->nErasedBlocks--;
+ T(YAFFS_TRACE_ALLOCATE,
+ (TSTR("Allocated block %d, seq %d, %d left" TENDSTR),
+ dev->allocationBlockFinder, dev->sequenceNumber,
+ dev->nErasedBlocks));
+ return dev->allocationBlockFinder;
+ }
+ }
+
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("yaffs tragedy: no more eraased blocks, but there should have been %d"
+ TENDSTR), dev->nErasedBlocks));
+
+ return -1;
+}
+
+
+/* Check if there's space to allocate...
+ * Thinks.... do we need to make this the same as yaffs_GetFreeChunks()?
+ */
+static int yaffs_CheckSpaceForAllocation(yaffs_Device * dev)
+{
+ int reservedChunks;
+ int reservedBlocks = dev->nReservedBlocks;
+ int checkpointBlocks;
+
+ checkpointBlocks = dev->nCheckpointReservedBlocks - dev->blocksInCheckpoint;
+ if(checkpointBlocks < 0)
+ checkpointBlocks = 0;
+
+ reservedChunks = ((reservedBlocks + checkpointBlocks) * dev->nChunksPerBlock);
+
+ return (dev->nFreeChunks > reservedChunks);
+}
+
+static int yaffs_AllocateChunk(yaffs_Device * dev, int useReserve, yaffs_BlockInfo **blockUsedPtr)
+{
+ int retVal;
+ yaffs_BlockInfo *bi;
+
+ if (dev->allocationBlock < 0) {
+ /* Get next block to allocate off */
+ dev->allocationBlock = yaffs_FindBlockForAllocation(dev);
+ dev->allocationPage = 0;
+ }
+
+ if (!useReserve && !yaffs_CheckSpaceForAllocation(dev)) {
+ /* Not enough space to allocate unless we're allowed to use the reserve. */
+ return -1;
+ }
+
+ if (dev->nErasedBlocks < dev->nReservedBlocks
+ && dev->allocationPage == 0) {
+ T(YAFFS_TRACE_ALLOCATE, (TSTR("Allocating reserve" TENDSTR)));
+ }
+
+ /* Next page please.... */
+ if (dev->allocationBlock >= 0) {
+ bi = yaffs_GetBlockInfo(dev, dev->allocationBlock);
+
+ retVal = (dev->allocationBlock * dev->nChunksPerBlock) +
+ dev->allocationPage;
+ bi->pagesInUse++;
+ yaffs_SetChunkBit(dev, dev->allocationBlock,
+ dev->allocationPage);
+
+ dev->allocationPage++;
+
+ dev->nFreeChunks--;
+
+ /* If the block is full set the state to full */
+ if (dev->allocationPage >= dev->nChunksPerBlock) {
+ bi->blockState = YAFFS_BLOCK_STATE_FULL;
+ dev->allocationBlock = -1;
+ }
+
+ if(blockUsedPtr)
+ *blockUsedPtr = bi;
+
+ return retVal;
+ }
+
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" TENDSTR)));
+
+ return -1;
+}
+
+static int yaffs_GetErasedChunks(yaffs_Device * dev)
+{
+ int n;
+
+ n = dev->nErasedBlocks * dev->nChunksPerBlock;
+
+ if (dev->allocationBlock > 0) {
+ n += (dev->nChunksPerBlock - dev->allocationPage);
+ }
+
+ return n;
+
+}
+
+static int yaffs_GarbageCollectBlock(yaffs_Device * dev, int block)
+{
+ int oldChunk;
+ int newChunk;
+ int chunkInBlock;
+ int markNAND;
+ int retVal = YAFFS_OK;
+ int cleanups = 0;
+ int i;
+ int isCheckpointBlock;
+ int matchingChunk;
+
+ int chunksBefore = yaffs_GetErasedChunks(dev);
+ int chunksAfter;
+
+ yaffs_ExtendedTags tags;
+
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, block);
+
+ yaffs_Object *object;
+
+ isCheckpointBlock = (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT);
+
+ bi->blockState = YAFFS_BLOCK_STATE_COLLECTING;
+
+ T(YAFFS_TRACE_TRACING,
+ (TSTR("Collecting block %d, in use %d, shrink %d, " TENDSTR), block,
+ bi->pagesInUse, bi->hasShrinkHeader));
+
+ /*yaffs_VerifyFreeChunks(dev); */
+
+ bi->hasShrinkHeader = 0; /* clear the flag so that the block can erase */
+
+ /* Take off the number of soft deleted entries because
+ * they're going to get really deleted during GC.
+ */
+ dev->nFreeChunks -= bi->softDeletions;
+
+ dev->isDoingGC = 1;
+
+ if (isCheckpointBlock ||
+ !yaffs_StillSomeChunkBits(dev, block)) {
+ T(YAFFS_TRACE_TRACING,
+ (TSTR
+ ("Collecting block %d that has no chunks in use" TENDSTR),
+ block));
+ yaffs_BlockBecameDirty(dev, block);
+ } else {
+
+ __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
+
+ yaffs_VerifyBlock(dev,bi,block);
+
+ for (chunkInBlock = 0, oldChunk = block * dev->nChunksPerBlock;
+ chunkInBlock < dev->nChunksPerBlock
+ && yaffs_StillSomeChunkBits(dev, block);
+ chunkInBlock++, oldChunk++) {
+ if (yaffs_CheckChunkBit(dev, block, chunkInBlock)) {
+
+ /* This page is in use and might need to be copied off */
+
+ markNAND = 1;
+
+ yaffs_InitialiseTags(&tags);
+
+ yaffs_ReadChunkWithTagsFromNAND(dev, oldChunk,
+ buffer, &tags);
+
+ object =
+ yaffs_FindObjectByNumber(dev,
+ tags.objectId);
+
+ T(YAFFS_TRACE_GC_DETAIL,
+ (TSTR
+ ("Collecting page %d, %d %d %d " TENDSTR),
+ chunkInBlock, tags.objectId, tags.chunkId,
+ tags.byteCount));
+
+ if(object && !yaffs_SkipVerification(dev)){
+ if(tags.chunkId == 0)
+ matchingChunk = object->chunkId;
+ else if(object->softDeleted)
+ matchingChunk = oldChunk; /* Defeat the test */
+ else
+ matchingChunk = yaffs_FindChunkInFile(object,tags.chunkId,NULL);
+
+ if(oldChunk != matchingChunk)
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("gc: page in gc mismatch: %d %d %d %d"TENDSTR),
+ oldChunk,matchingChunk,tags.objectId, tags.chunkId));
+
+ }
+
+ if (!object) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("page %d in gc has no object: %d %d %d "
+ TENDSTR), oldChunk,
+ tags.objectId, tags.chunkId, tags.byteCount));
+ }
+
+ if (object && object->deleted
+ && tags.chunkId != 0) {
+ /* Data chunk in a deleted file, throw it away.
+ * It's a soft-deleted data chunk,
+ * so no need to copy it; just forget about it and
+ * fix up the object.
+ */
+
+ object->nDataChunks--;
+
+ if (object->nDataChunks <= 0) {
+ /* remember to clean up the object */
+ dev->gcCleanupList[cleanups] =
+ tags.objectId;
+ cleanups++;
+ }
+ markNAND = 0;
+ } else if (0
+ /* Todo object && object->deleted && object->nDataChunks == 0 */
+ ) {
+ /* Deleted object header with no data chunks.
+ * Can be discarded and the file deleted.
+ */
+ object->chunkId = 0;
+ yaffs_FreeTnode(object->myDev,
+ object->variant.
+ fileVariant.top);
+ object->variant.fileVariant.top = NULL;
+ yaffs_DoGenericObjectDeletion(object);
+
+ } else if (object) {
+ /* It's either a data chunk in a live file or
+ * an ObjectHeader, so we're interested in it.
+ * NB Need to keep the ObjectHeaders of deleted files
+ * until the whole file has been deleted off
+ */
+ tags.serialNumber++;
+
+ dev->nGCCopies++;
+
+ if (tags.chunkId == 0) {
+ /* It is an object header.
+ * We need to nuke the shrinkHeader flag first:
+ * its work is done, and if it is left in place
+ * it will mess up scanning.
+ * Also, clear out any shadowing stuff.
+ */
+
+ yaffs_ObjectHeader *oh;
+ oh = (yaffs_ObjectHeader *)buffer;
+ oh->isShrink = 0;
+ oh->shadowsObject = -1;
+ tags.extraShadows = 0;
+ tags.extraIsShrinkHeader = 0;
+
+ yaffs_VerifyObjectHeader(object,oh,&tags,1);
+ }
+
+ newChunk =
+ yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &tags, 1);
+
+ if (newChunk < 0) {
+ retVal = YAFFS_FAIL;
+ } else {
+
+ /* Ok, now fix up the Tnodes etc. */
+
+ if (tags.chunkId == 0) {
+ /* It's a header */
+ object->chunkId = newChunk;
+ object->serial = tags.serialNumber;
+ } else {
+ /* It's a data chunk */
+ yaffs_PutChunkIntoFile
+ (object,
+ tags.chunkId,
+ newChunk, 0);
+ }
+ }
+ }
+
+ yaffs_DeleteChunk(dev, oldChunk, markNAND, __LINE__);
+
+ }
+ }
+
+ yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
+
+
+ /* Do any required cleanups */
+ for (i = 0; i < cleanups; i++) {
+ /* Time to delete the file too */
+ object =
+ yaffs_FindObjectByNumber(dev,
+ dev->gcCleanupList[i]);
+ if (object) {
+ yaffs_FreeTnode(dev,
+ object->variant.fileVariant.
+ top);
+ object->variant.fileVariant.top = NULL;
+ T(YAFFS_TRACE_GC,
+ (TSTR
+ ("yaffs: About to finally delete object %d"
+ TENDSTR), object->objectId));
+ yaffs_DoGenericObjectDeletion(object);
+ object->myDev->nDeletedFiles--;
+ }
+
+ }
+
+ }
+
+ yaffs_VerifyCollectedBlock(dev,bi,block);
+
+ if (chunksBefore >= (chunksAfter = yaffs_GetErasedChunks(dev))) {
+ T(YAFFS_TRACE_GC,
+ (TSTR
+ ("gc did not increase free chunks before %d after %d"
+ TENDSTR), chunksBefore, chunksAfter));
+ }
+
+ dev->isDoingGC = 0;
+
+ return YAFFS_OK;
+}
+
+/* New garbage collector
+ * If we're very low on erased blocks then we do aggressive garbage collection
+ * otherwise we do "leasurely" garbage collection.
+ * Aggressive gc looks further (whole array) and will accept less dirty blocks.
+ * Passive gc only inspects smaller areas and will only accept more dirty blocks.
+ *
+ * The idea is to help clear out space in a more spread-out manner.
+ * Dunno if it really does anything useful.
+ */
+static int yaffs_CheckGarbageCollection(yaffs_Device * dev)
+{
+ int block;
+ int aggressive;
+ int gcOk = YAFFS_OK;
+ int maxTries = 0;
+
+ int checkpointBlockAdjust;
+
+ if (dev->isDoingGC) {
+ /* Bail out so we don't get recursive gc */
+ return YAFFS_OK;
+ }
+
+ /* This loop should pass the first time.
+ * We'll only see looping here if the erase of the collected block fails.
+ */
+
+ do {
+ maxTries++;
+
+ checkpointBlockAdjust = (dev->nCheckpointReservedBlocks - dev->blocksInCheckpoint);
+ if(checkpointBlockAdjust < 0)
+ checkpointBlockAdjust = 0;
+
+ if (dev->nErasedBlocks < (dev->nReservedBlocks + checkpointBlockAdjust + 2)) {
+ /* We need a block soon...*/
+ aggressive = 1;
+ } else {
+ /* We're in no hurry */
+ aggressive = 0;
+ }
+
+ block = yaffs_FindBlockForGarbageCollection(dev, aggressive);
+
+ if (block > 0) {
+ dev->garbageCollections++;
+ if (!aggressive) {
+ dev->passiveGarbageCollections++;
+ }
+
+ T(YAFFS_TRACE_GC,
+ (TSTR
+ ("yaffs: GC erasedBlocks %d aggressive %d" TENDSTR),
+ dev->nErasedBlocks, aggressive));
+
+ gcOk = yaffs_GarbageCollectBlock(dev, block);
+ }
+
+ if (dev->nErasedBlocks < (dev->nReservedBlocks) && block > 0) {
+ T(YAFFS_TRACE_GC,
+ (TSTR
+ ("yaffs: GC !!!no reclaim!!! erasedBlocks %d after try %d block %d"
+ TENDSTR), dev->nErasedBlocks, maxTries, block));
+ }
+ } while ((dev->nErasedBlocks < dev->nReservedBlocks) && (block > 0)
+ && (maxTries < 2));
+
+ return aggressive ? gcOk : YAFFS_OK;
+}
+
+/*------------------------- TAGS --------------------------------*/
+
+static int yaffs_TagsMatch(const yaffs_ExtendedTags * tags, int objectId,
+ int chunkInObject)
+{
+ return (tags->chunkId == chunkInObject &&
+ tags->objectId == objectId && !tags->chunkDeleted) ? 1 : 0;
+
+}
+
+
+/*-------------------- Data file manipulation -----------------*/
+
+static int yaffs_FindChunkInFile(yaffs_Object * in, int chunkInInode,
+ yaffs_ExtendedTags * tags)
+{
+ /* Get the tnode, then look up the level 0 entry for the chunk */
+ yaffs_Tnode *tn;
+ int theChunk = -1;
+ yaffs_ExtendedTags localTags;
+ int retVal = -1;
+
+ yaffs_Device *dev = in->myDev;
+
+ if (!tags) {
+ /* Passed a NULL, so use our own tags space */
+ tags = &localTags;
+ }
+
+ tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode);
+
+ if (tn) {
+ theChunk = yaffs_GetChunkGroupBase(dev,tn,chunkInInode);
+
+ retVal =
+ yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId,
+ chunkInInode);
+ }
+ return retVal;
+}
+
+static int yaffs_FindAndDeleteChunkInFile(yaffs_Object * in, int chunkInInode,
+ yaffs_ExtendedTags * tags)
+{
+ /* Get the tnode, then look up the level 0 entry for the chunk */
+ yaffs_Tnode *tn;
+ int theChunk = -1;
+ yaffs_ExtendedTags localTags;
+
+ yaffs_Device *dev = in->myDev;
+ int retVal = -1;
+
+ if (!tags) {
+ /* Passed a NULL, so use our own tags space */
+ tags = &localTags;
+ }
+
+ tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode);
+
+ if (tn) {
+
+ theChunk = yaffs_GetChunkGroupBase(dev,tn,chunkInInode);
+
+ retVal =
+ yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId,
+ chunkInInode);
+
+ /* Delete the entry in the filestructure (if found) */
+ if (retVal != -1) {
+ yaffs_PutLevel0Tnode(dev,tn,chunkInInode,0);
+ }
+ } else {
+ /*T(("No level 0 found for %d\n", chunkInInode)); */
+ }
+
+ if (retVal == -1) {
+ /* T(("Could not find %d to delete\n",chunkInInode)); */
+ }
+ return retVal;
+}
+
+#ifdef YAFFS_PARANOID
+
+static int yaffs_CheckFileSanity(yaffs_Object * in)
+{
+ int chunk;
+ int nChunks;
+ int fSize;
+ int failed = 0;
+ int objId;
+ yaffs_Tnode *tn;
+ yaffs_Tags localTags;
+ yaffs_Tags *tags = &localTags;
+ int theChunk;
+ int chunkDeleted;
+
+ if (in->variantType != YAFFS_OBJECT_TYPE_FILE) {
+ /* T(("Object not a file\n")); */
+ return YAFFS_FAIL;
+ }
+
+ objId = in->objectId;
+ fSize = in->variant.fileVariant.fileSize;
+ nChunks =
+ (fSize + in->myDev->nDataBytesPerChunk - 1) / in->myDev->nDataBytesPerChunk;
+
+ for (chunk = 1; chunk <= nChunks; chunk++) {
+ tn = yaffs_FindLevel0Tnode(in->myDev, &in->variant.fileVariant,
+ chunk);
+
+ if (tn) {
+
+ theChunk = yaffs_GetChunkGroupBase(dev,tn,chunk);
+
+ if (yaffs_CheckChunkBits
+ (dev, theChunk / dev->nChunksPerBlock,
+ theChunk % dev->nChunksPerBlock)) {
+
+ yaffs_ReadChunkTagsFromNAND(in->myDev, theChunk,
+ tags,
+ &chunkDeleted);
+ if (yaffs_TagsMatch
+ (tags, in->objectId, chunk, chunkDeleted)) {
+ /* found it; */
+
+ }
+ } else {
+
+ failed = 1;
+ }
+
+ } else {
+ /* T(("No level 0 found for %d\n", chunk)); */
+ }
+ }
+
+ return failed ? YAFFS_FAIL : YAFFS_OK;
+}
+
+#endif
+
+static int yaffs_PutChunkIntoFile(yaffs_Object * in, int chunkInInode,
+ int chunkInNAND, int inScan)
+{
+ /* NB inScan is zero unless scanning.
+ * For forward scanning, inScan is > 0;
+ * for backward scanning inScan is < 0
+ */
+
+ yaffs_Tnode *tn;
+ yaffs_Device *dev = in->myDev;
+ int existingChunk;
+ yaffs_ExtendedTags existingTags;
+ yaffs_ExtendedTags newTags;
+ unsigned existingSerial, newSerial;
+
+ if (in->variantType != YAFFS_OBJECT_TYPE_FILE) {
+ /* Just ignore an attempt at putting a chunk into a non-file during scanning
+ * If it is not during Scanning then something went wrong!
+ */
+ if (!inScan) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("yaffs tragedy:attempt to put data chunk into a non-file"
+ TENDSTR)));
+ YBUG();
+ }
+
+ yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
+ return YAFFS_OK;
+ }
+
+ tn = yaffs_AddOrFindLevel0Tnode(dev,
+ &in->variant.fileVariant,
+ chunkInInode,
+ NULL);
+ if (!tn) {
+ return YAFFS_FAIL;
+ }
+
+ existingChunk = yaffs_GetChunkGroupBase(dev,tn,chunkInInode);
+
+ if (inScan != 0) {
+ /* If we're scanning then we need to test for duplicates
+ * NB This does not need to be efficient since it should only ever
+ * happen when the power fails during a write, then only one
+ * chunk should ever be affected.
+ *
+ * Correction for YAFFS2: This could happen quite a lot and we need to think about efficiency! TODO
+ * Update: For backward scanning we don't need to re-read tags so this is quite cheap.
+ */
+
+ if (existingChunk != 0) {
+ /* NB Right now existingChunk will not be the real chunkId if the device >= 32MB,
+ * so we have to do a FindChunkInFile to get the real chunk id.
+ *
+ * We have a duplicate now we need to decide which one to use:
+ *
+ * Backwards scanning YAFFS2: The old one is what we use, dump the new one.
+ * Forward scanning YAFFS2: The new one is what we use, dump the old one.
+ * YAFFS1: Get both sets of tags and compare serial numbers.
+ */
+
+ if (inScan > 0) {
+ /* Only do this for forward scanning */
+ yaffs_ReadChunkWithTagsFromNAND(dev,
+ chunkInNAND,
+ NULL, &newTags);
+
+ /* Do a proper find */
+ existingChunk =
+ yaffs_FindChunkInFile(in, chunkInInode,
+ &existingTags);
+ }
+
+ if (existingChunk <= 0) {
+ /*Hoosterman - how did this happen? */
+
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("yaffs tragedy: existing chunk < 0 in scan"
+ TENDSTR)));
+
+ }
+
+ /* NB The deleted flags should be false, otherwise the chunks will
+ * not be loaded during a scan
+ */
+
+ newSerial = newTags.serialNumber;
+ existingSerial = existingTags.serialNumber;
+
+ if ((inScan > 0) &&
+ (in->myDev->isYaffs2 ||
+ existingChunk <= 0 ||
+ ((existingSerial + 1) & 3) == newSerial)) {
+ /* Forward scanning.
+ * Use new
+ * Delete the old one and drop through to update the tnode
+ */
+ yaffs_DeleteChunk(dev, existingChunk, 1,
+ __LINE__);
+ } else {
+ /* Backward scanning or we want to use the existing one
+ * Use existing.
+ * Delete the new one and return early so that the tnode isn't changed
+ */
+ yaffs_DeleteChunk(dev, chunkInNAND, 1,
+ __LINE__);
+ return YAFFS_OK;
+ }
+ }
+
+ }
+
+ if (existingChunk == 0) {
+ in->nDataChunks++;
+ }
+
+ yaffs_PutLevel0Tnode(dev,tn,chunkInInode,chunkInNAND);
+
+ return YAFFS_OK;
+}
+
+static int yaffs_ReadChunkDataFromObject(yaffs_Object * in, int chunkInInode,
+ __u8 * buffer)
+{
+ int chunkInNAND = yaffs_FindChunkInFile(in, chunkInInode, NULL);
+
+ if (chunkInNAND >= 0) {
+ return yaffs_ReadChunkWithTagsFromNAND(in->myDev, chunkInNAND,
+ buffer,NULL);
+ } else {
+ T(YAFFS_TRACE_NANDACCESS,
+ (TSTR("Chunk %d not found zero instead" TENDSTR),
+ chunkInNAND));
+ /* get sane (zero) data if you read a hole */
+ memset(buffer, 0, in->myDev->nDataBytesPerChunk);
+ return 0;
+ }
+
+}
+
+void yaffs_DeleteChunk(yaffs_Device * dev, int chunkId, int markNAND, int lyn)
+{
+ int block;
+ int page;
+ yaffs_ExtendedTags tags;
+ yaffs_BlockInfo *bi;
+
+ if (chunkId <= 0)
+ return;
+
+
+ dev->nDeletions++;
+ block = chunkId / dev->nChunksPerBlock;
+ page = chunkId % dev->nChunksPerBlock;
+
+
+ if(!yaffs_CheckChunkBit(dev,block,page))
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Deleting invalid chunk %d"TENDSTR),
+ chunkId));
+
+ bi = yaffs_GetBlockInfo(dev, block);
+
+ T(YAFFS_TRACE_DELETION,
+ (TSTR("line %d delete of chunk %d" TENDSTR), lyn, chunkId));
+
+ if (markNAND &&
+ bi->blockState != YAFFS_BLOCK_STATE_COLLECTING && !dev->isYaffs2) {
+
+ yaffs_InitialiseTags(&tags);
+
+ tags.chunkDeleted = 1;
+
+ yaffs_WriteChunkWithTagsToNAND(dev, chunkId, NULL, &tags);
+ yaffs_HandleUpdateChunk(dev, chunkId, &tags);
+ } else {
+ dev->nUnmarkedDeletions++;
+ }
+
+ /* Pull out of the management area.
+ * If the whole block became dirty, this will kick off an erasure.
+ */
+ if (bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING ||
+ bi->blockState == YAFFS_BLOCK_STATE_FULL ||
+ bi->blockState == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+ bi->blockState == YAFFS_BLOCK_STATE_COLLECTING) {
+ dev->nFreeChunks++;
+
+ yaffs_ClearChunkBit(dev, block, page);
+
+ bi->pagesInUse--;
+
+ if (bi->pagesInUse == 0 &&
+ !bi->hasShrinkHeader &&
+ bi->blockState != YAFFS_BLOCK_STATE_ALLOCATING &&
+ bi->blockState != YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+ yaffs_BlockBecameDirty(dev, block);
+ }
+
+ } else {
+ /* T(("Bad news deleting chunk %d\n",chunkId)); */
+ }
+
+}
+
+static int yaffs_WriteChunkDataToObject(yaffs_Object * in, int chunkInInode,
+ const __u8 * buffer, int nBytes,
+ int useReserve)
+{
+ /* Find the old chunk (we need it to get the serial number),
+ * write the new one and patch it into the tree,
+ * then invalidate the old tags.
+ */
+
+ int prevChunkId;
+ yaffs_ExtendedTags prevTags;
+
+ int newChunkId;
+ yaffs_ExtendedTags newTags;
+
+ yaffs_Device *dev = in->myDev;
+
+ yaffs_CheckGarbageCollection(dev);
+
+ /* Get the previous chunk at this location in the file if it exists */
+ prevChunkId = yaffs_FindChunkInFile(in, chunkInInode, &prevTags);
+
+ /* Set up new tags */
+ yaffs_InitialiseTags(&newTags);
+
+ newTags.chunkId = chunkInInode;
+ newTags.objectId = in->objectId;
+ newTags.serialNumber =
+ (prevChunkId >= 0) ? prevTags.serialNumber + 1 : 1;
+ newTags.byteCount = nBytes;
+
+ newChunkId =
+ yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &newTags,
+ useReserve);
+
+ if (newChunkId >= 0) {
+ yaffs_PutChunkIntoFile(in, chunkInInode, newChunkId, 0);
+
+ if (prevChunkId >= 0) {
+ yaffs_DeleteChunk(dev, prevChunkId, 1, __LINE__);
+
+ }
+
+ yaffs_CheckFileSanity(in);
+ }
+ return newChunkId;
+
+}
+
+/* UpdateObjectHeader updates the header on NAND for an object.
+ * If name is not NULL, then that new name is used.
+ */
+int yaffs_UpdateObjectHeader(yaffs_Object * in, const YCHAR * name, int force,
+ int isShrink, int shadows)
+{
+
+ yaffs_BlockInfo *bi;
+
+ yaffs_Device *dev = in->myDev;
+
+ int prevChunkId;
+ int retVal = 0;
+ int result = 0;
+
+ int newChunkId;
+ yaffs_ExtendedTags newTags;
+ yaffs_ExtendedTags oldTags;
+
+ __u8 *buffer = NULL;
+ YCHAR oldName[YAFFS_MAX_NAME_LENGTH + 1];
+
+ yaffs_ObjectHeader *oh = NULL;
+
+ yaffs_strcpy(oldName,"silly old name");
+
+ if (!in->fake || force) {
+
+ yaffs_CheckGarbageCollection(dev);
+ yaffs_CheckObjectDetailsLoaded(in);
+
+ buffer = yaffs_GetTempBuffer(in->myDev, __LINE__);
+ oh = (yaffs_ObjectHeader *) buffer;
+
+ prevChunkId = in->chunkId;
+
+ if (prevChunkId >= 0) {
+ result = yaffs_ReadChunkWithTagsFromNAND(dev, prevChunkId,
+ buffer, &oldTags);
+
+ yaffs_VerifyObjectHeader(in,oh,&oldTags,0);
+
+ memcpy(oldName, oh->name, sizeof(oh->name));
+ }
+
+ memset(buffer, 0xFF, dev->nDataBytesPerChunk);
+
+ oh->type = in->variantType;
+ oh->yst_mode = in->yst_mode;
+ oh->shadowsObject = shadows;
+
+#ifdef CONFIG_YAFFS_WINCE
+ oh->win_atime[0] = in->win_atime[0];
+ oh->win_ctime[0] = in->win_ctime[0];
+ oh->win_mtime[0] = in->win_mtime[0];
+ oh->win_atime[1] = in->win_atime[1];
+ oh->win_ctime[1] = in->win_ctime[1];
+ oh->win_mtime[1] = in->win_mtime[1];
+#else
+ oh->yst_uid = in->yst_uid;
+ oh->yst_gid = in->yst_gid;
+ oh->yst_atime = in->yst_atime;
+ oh->yst_mtime = in->yst_mtime;
+ oh->yst_ctime = in->yst_ctime;
+ oh->yst_rdev = in->yst_rdev;
+#endif
+ if (in->parent) {
+ oh->parentObjectId = in->parent->objectId;
+ } else {
+ oh->parentObjectId = 0;
+ }
+
+ if (name && *name) {
+ memset(oh->name, 0, sizeof(oh->name));
+ yaffs_strncpy(oh->name, name, YAFFS_MAX_NAME_LENGTH);
+ } else if (prevChunkId>=0) {
+ memcpy(oh->name, oldName, sizeof(oh->name));
+ } else {
+ memset(oh->name, 0, sizeof(oh->name));
+ }
+
+ oh->isShrink = isShrink;
+
+ switch (in->variantType) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Should not happen */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ oh->fileSize =
+ (oh->parentObjectId == YAFFS_OBJECTID_DELETED
+ || oh->parentObjectId ==
+ YAFFS_OBJECTID_UNLINKED) ? 0 : in->variant.
+ fileVariant.fileSize;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ oh->equivalentObjectId =
+ in->variant.hardLinkVariant.equivalentObjectId;
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ yaffs_strncpy(oh->alias,
+ in->variant.symLinkVariant.alias,
+ YAFFS_MAX_ALIAS_LENGTH);
+ oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
+ break;
+ }
+
+ /* Tags */
+ yaffs_InitialiseTags(&newTags);
+ in->serial++;
+ newTags.chunkId = 0;
+ newTags.objectId = in->objectId;
+ newTags.serialNumber = in->serial;
+
+ /* Add extra info for file header */
+
+ newTags.extraHeaderInfoAvailable = 1;
+ newTags.extraParentObjectId = oh->parentObjectId;
+ newTags.extraFileLength = oh->fileSize;
+ newTags.extraIsShrinkHeader = oh->isShrink;
+ newTags.extraEquivalentObjectId = oh->equivalentObjectId;
+ newTags.extraShadows = (oh->shadowsObject > 0) ? 1 : 0;
+ newTags.extraObjectType = in->variantType;
+
+ yaffs_VerifyObjectHeader(in,oh,&newTags,1);
+
+ /* Create new chunk in NAND */
+ newChunkId =
+ yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &newTags,
+ (prevChunkId >= 0) ? 1 : 0);
+
+ if (newChunkId >= 0) {
+
+ in->chunkId = newChunkId;
+
+ if (prevChunkId >= 0) {
+ yaffs_DeleteChunk(dev, prevChunkId, 1,
+ __LINE__);
+ }
+
+ if(!yaffs_ObjectHasCachedWriteData(in))
+ in->dirty = 0;
+
+ /* If this was a shrink, then mark the block that the chunk lives on */
+ if (isShrink) {
+ bi = yaffs_GetBlockInfo(in->myDev,
+						newChunkId / in->myDev->nChunksPerBlock);
+ bi->hasShrinkHeader = 1;
+ }
+
+ }
+
+ retVal = newChunkId;
+
+ }
+
+ if (buffer)
+ yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
+
+ return retVal;
+}
+
+/*------------------------ Short Operations Cache ----------------------------------------
+ * In many situations where there is no high level buffering (eg WinCE) a lot of
+ * reads might be short sequential reads, and a lot of writes may be short
+ * sequential writes. eg. scanning/writing a jpeg file.
+ * In these cases, a short read/write cache can provide a huge performance benefit
+ * with dumb-as-a-rock code.
+ * In Linux, the page cache provides read buffering and the short op cache provides write
+ * buffering.
+ *
+ * There are a limited number (~10) of cache chunks per device so that we don't
+ * need a very intelligent search.
+ */
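+/* A minimal sketch of the idea (illustrative only; the entry layout below is
+ * simplified, the real one is yaffs_ChunkCache): the cache is a small array
+ * scanned linearly, with an incrementing counter standing in for LRU order.
+ *
+ *	struct sketch_cache_entry {
+ *		void *object;	// owning object, NULL when the entry is free
+ *		int chunkId;	// chunk within the file
+ *		int lastUse;	// bigger value == more recently used
+ *		int dirty;	// must be written back before reuse
+ *	};
+ *
+ *	static struct sketch_cache_entry *sketch_find(struct sketch_cache_entry *c,
+ *						      int n, void *obj, int chunkId)
+ *	{
+ *		int i;
+ *		for (i = 0; i < n; i++)
+ *			if (c[i].object == obj && c[i].chunkId == chunkId)
+ *				return &c[i];
+ *		return NULL;
+ *	}
+ *
+ * yaffs_FindChunkCache() and yaffs_UseChunkCache() below follow the same
+ * pattern on dev->srCache[], and yaffs_FlushFilesChunkCache() writes the
+ * dirty entries out through yaffs_WriteChunkDataToObject().
+ */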
+
+static int yaffs_ObjectHasCachedWriteData(yaffs_Object *obj)
+{
+ yaffs_Device *dev = obj->myDev;
+ int i;
+ yaffs_ChunkCache *cache;
+ int nCaches = obj->myDev->nShortOpCaches;
+
+ for(i = 0; i < nCaches; i++){
+ cache = &dev->srCache[i];
+ if (cache->object == obj &&
+ cache->dirty)
+ return 1;
+ }
+
+ return 0;
+}
+
+
+static void yaffs_FlushFilesChunkCache(yaffs_Object * obj)
+{
+ yaffs_Device *dev = obj->myDev;
+ int lowest = -99; /* Stop compiler whining. */
+ int i;
+ yaffs_ChunkCache *cache;
+ int chunkWritten = 0;
+ int nCaches = obj->myDev->nShortOpCaches;
+
+ if (nCaches > 0) {
+ do {
+ cache = NULL;
+
+ /* Find the dirty cache for this object with the lowest chunk id. */
+ for (i = 0; i < nCaches; i++) {
+ if (dev->srCache[i].object == obj &&
+ dev->srCache[i].dirty) {
+ if (!cache
+ || dev->srCache[i].chunkId <
+ lowest) {
+ cache = &dev->srCache[i];
+ lowest = cache->chunkId;
+ }
+ }
+ }
+
+ if (cache && !cache->locked) {
+ /* Write it out and free it up */
+
+ chunkWritten =
+ yaffs_WriteChunkDataToObject(cache->object,
+ cache->chunkId,
+ cache->data,
+ cache->nBytes,
+ 1);
+ cache->dirty = 0;
+ cache->object = NULL;
+ }
+
+ } while (cache && chunkWritten > 0);
+
+ if (cache) {
+ /* Hoosterman, disk full while writing cache out. */
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("yaffs tragedy: no space during cache write" TENDSTR)));
+
+ }
+ }
+
+}
+
+/* yaffs_FlushEntireDeviceCache(dev)
+ *
+ * Flush the chunk caches of every object on the device, writing out
+ * any dirty cached data.
+ */
+
+void yaffs_FlushEntireDeviceCache(yaffs_Device *dev)
+{
+ yaffs_Object *obj;
+ int nCaches = dev->nShortOpCaches;
+ int i;
+
+ /* Find a dirty object in the cache and flush it...
+ * until there are no further dirty objects.
+ */
+ do {
+ obj = NULL;
+ for( i = 0; i < nCaches && !obj; i++) {
+ if (dev->srCache[i].object &&
+ dev->srCache[i].dirty)
+ obj = dev->srCache[i].object;
+
+ }
+ if(obj)
+ yaffs_FlushFilesChunkCache(obj);
+
+ } while(obj);
+
+}
+
+
+/* Grab us a cache chunk for use.
+ * First look for an empty one.
+ * Then look for the least recently used non-dirty one.
+ * Then look for the least recently used dirty one...., flush and look again.
+ */
+static yaffs_ChunkCache *yaffs_GrabChunkCacheWorker(yaffs_Device * dev)
+{
+ int i;
+ int usage;
+ int theOne;
+
+ if (dev->nShortOpCaches > 0) {
+ for (i = 0; i < dev->nShortOpCaches; i++) {
+ if (!dev->srCache[i].object)
+ return &dev->srCache[i];
+ }
+
+ return NULL;
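+		/* NB: the early return above means the LRU scan below is never
+		 * reached; when every entry is in use the caller,
+		 * yaffs_GrabChunkCache(), flushes an object's cache and retries.
+		 */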
+
+ theOne = -1;
+ usage = 0; /* just to stop the compiler grizzling */
+
+ for (i = 0; i < dev->nShortOpCaches; i++) {
+ if (!dev->srCache[i].dirty &&
+ ((dev->srCache[i].lastUse < usage && theOne >= 0) ||
+ theOne < 0)) {
+ usage = dev->srCache[i].lastUse;
+ theOne = i;
+ }
+ }
+
+
+ return theOne >= 0 ? &dev->srCache[theOne] : NULL;
+ } else {
+ return NULL;
+ }
+
+}
+
+static yaffs_ChunkCache *yaffs_GrabChunkCache(yaffs_Device * dev)
+{
+ yaffs_ChunkCache *cache;
+ yaffs_Object *theObj;
+ int usage;
+ int i;
+ int pushout;
+
+ if (dev->nShortOpCaches > 0) {
+ /* Try find a non-dirty one... */
+
+ cache = yaffs_GrabChunkCacheWorker(dev);
+
+ if (!cache) {
+			/* They were all dirty, find the least recently used object and flush
+			 * its cache, then look again.
+			 * NB what's here is not very accurate: we actually flush the object
+			 * that owns the least recently used page.
+ */
+
+ /* With locking we can't assume we can use entry zero */
+
+ theObj = NULL;
+ usage = -1;
+ cache = NULL;
+ pushout = -1;
+
+ for (i = 0; i < dev->nShortOpCaches; i++) {
+ if (dev->srCache[i].object &&
+ !dev->srCache[i].locked &&
+ (dev->srCache[i].lastUse < usage || !cache))
+ {
+ usage = dev->srCache[i].lastUse;
+ theObj = dev->srCache[i].object;
+ cache = &dev->srCache[i];
+ pushout = i;
+ }
+ }
+
+ if (!cache || cache->dirty) {
+ /* Flush and try again */
+ yaffs_FlushFilesChunkCache(theObj);
+ cache = yaffs_GrabChunkCacheWorker(dev);
+ }
+
+ }
+ return cache;
+ } else
+ return NULL;
+
+}
+
+/* Find a cached chunk */
+static yaffs_ChunkCache *yaffs_FindChunkCache(const yaffs_Object * obj,
+ int chunkId)
+{
+ yaffs_Device *dev = obj->myDev;
+ int i;
+ if (dev->nShortOpCaches > 0) {
+ for (i = 0; i < dev->nShortOpCaches; i++) {
+ if (dev->srCache[i].object == obj &&
+ dev->srCache[i].chunkId == chunkId) {
+ dev->cacheHits++;
+
+ return &dev->srCache[i];
+ }
+ }
+ }
+ return NULL;
+}
+
+/* Mark the chunk for the least recently used algorithm */
+static void yaffs_UseChunkCache(yaffs_Device * dev, yaffs_ChunkCache * cache,
+ int isAWrite)
+{
+
+ if (dev->nShortOpCaches > 0) {
+ if (dev->srLastUse < 0 || dev->srLastUse > 100000000) {
+ /* Reset the cache usages */
+ int i;
+ for (i = 1; i < dev->nShortOpCaches; i++) {
+ dev->srCache[i].lastUse = 0;
+ }
+ dev->srLastUse = 0;
+ }
+
+ dev->srLastUse++;
+
+ cache->lastUse = dev->srLastUse;
+
+ if (isAWrite) {
+ cache->dirty = 1;
+ }
+ }
+}
+
+/* Invalidate a single cache page.
+ * Do this when a whole page gets written,
+ * ie the short cache for this page is no longer valid.
+ */
+static void yaffs_InvalidateChunkCache(yaffs_Object * object, int chunkId)
+{
+ if (object->myDev->nShortOpCaches > 0) {
+ yaffs_ChunkCache *cache = yaffs_FindChunkCache(object, chunkId);
+
+ if (cache) {
+ cache->object = NULL;
+ }
+ }
+}
+
+/* Invalidate all the cache pages associated with this object
+ * Do this whenever the file is deleted or resized.
+ */
+static void yaffs_InvalidateWholeChunkCache(yaffs_Object * in)
+{
+ int i;
+ yaffs_Device *dev = in->myDev;
+
+ if (dev->nShortOpCaches > 0) {
+ /* Invalidate it. */
+ for (i = 0; i < dev->nShortOpCaches; i++) {
+ if (dev->srCache[i].object == in) {
+ dev->srCache[i].object = NULL;
+ }
+ }
+ }
+}
+
+/*--------------------- Checkpointing --------------------*/
+
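+/* The checkpoint is written as a flat stream, in the order used by
+ * yaffs_WriteCheckpointData() below:
+ *	validity marker (head)
+ *	device state, then block info, then chunk bitmaps
+ *	one record per object, each file followed by its tnodes
+ *	validity marker (tail)
+ *	checksum
+ * yaffs_ReadCheckpointData() reads the same records back in the same order
+ * and treats any mismatch as an invalid checkpoint.
+ */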
+
+static int yaffs_WriteCheckpointValidityMarker(yaffs_Device *dev,int head)
+{
+ yaffs_CheckpointValidity cp;
+
+ memset(&cp,0,sizeof(cp));
+
+ cp.structType = sizeof(cp);
+ cp.magic = YAFFS_MAGIC;
+ cp.version = YAFFS_CHECKPOINT_VERSION;
+ cp.head = (head) ? 1 : 0;
+
+ return (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp))?
+ 1 : 0;
+}
+
+static int yaffs_ReadCheckpointValidityMarker(yaffs_Device *dev, int head)
+{
+ yaffs_CheckpointValidity cp;
+ int ok;
+
+ ok = (yaffs_CheckpointRead(dev,&cp,sizeof(cp)) == sizeof(cp));
+
+ if(ok)
+ ok = (cp.structType == sizeof(cp)) &&
+ (cp.magic == YAFFS_MAGIC) &&
+ (cp.version == YAFFS_CHECKPOINT_VERSION) &&
+ (cp.head == ((head) ? 1 : 0));
+ return ok ? 1 : 0;
+}
+
+static void yaffs_DeviceToCheckpointDevice(yaffs_CheckpointDevice *cp,
+ yaffs_Device *dev)
+{
+ cp->nErasedBlocks = dev->nErasedBlocks;
+ cp->allocationBlock = dev->allocationBlock;
+ cp->allocationPage = dev->allocationPage;
+ cp->nFreeChunks = dev->nFreeChunks;
+
+ cp->nDeletedFiles = dev->nDeletedFiles;
+ cp->nUnlinkedFiles = dev->nUnlinkedFiles;
+ cp->nBackgroundDeletions = dev->nBackgroundDeletions;
+ cp->sequenceNumber = dev->sequenceNumber;
+ cp->oldestDirtySequence = dev->oldestDirtySequence;
+
+}
+
+static void yaffs_CheckpointDeviceToDevice(yaffs_Device *dev,
+ yaffs_CheckpointDevice *cp)
+{
+ dev->nErasedBlocks = cp->nErasedBlocks;
+ dev->allocationBlock = cp->allocationBlock;
+ dev->allocationPage = cp->allocationPage;
+ dev->nFreeChunks = cp->nFreeChunks;
+
+ dev->nDeletedFiles = cp->nDeletedFiles;
+ dev->nUnlinkedFiles = cp->nUnlinkedFiles;
+ dev->nBackgroundDeletions = cp->nBackgroundDeletions;
+ dev->sequenceNumber = cp->sequenceNumber;
+ dev->oldestDirtySequence = cp->oldestDirtySequence;
+}
+
+
+static int yaffs_WriteCheckpointDevice(yaffs_Device *dev)
+{
+ yaffs_CheckpointDevice cp;
+ __u32 nBytes;
+ __u32 nBlocks = (dev->internalEndBlock - dev->internalStartBlock + 1);
+
+ int ok;
+
+ /* Write device runtime values*/
+ yaffs_DeviceToCheckpointDevice(&cp,dev);
+ cp.structType = sizeof(cp);
+
+ ok = (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp));
+
+ /* Write block info */
+ if(ok) {
+ nBytes = nBlocks * sizeof(yaffs_BlockInfo);
+ ok = (yaffs_CheckpointWrite(dev,dev->blockInfo,nBytes) == nBytes);
+ }
+
+ /* Write chunk bits */
+ if(ok) {
+ nBytes = nBlocks * dev->chunkBitmapStride;
+ ok = (yaffs_CheckpointWrite(dev,dev->chunkBits,nBytes) == nBytes);
+ }
+ return ok ? 1 : 0;
+
+}
+
+static int yaffs_ReadCheckpointDevice(yaffs_Device *dev)
+{
+ yaffs_CheckpointDevice cp;
+ __u32 nBytes;
+ __u32 nBlocks = (dev->internalEndBlock - dev->internalStartBlock + 1);
+
+ int ok;
+
+ ok = (yaffs_CheckpointRead(dev,&cp,sizeof(cp)) == sizeof(cp));
+ if(!ok)
+ return 0;
+
+ if(cp.structType != sizeof(cp))
+ return 0;
+
+
+ yaffs_CheckpointDeviceToDevice(dev,&cp);
+
+ nBytes = nBlocks * sizeof(yaffs_BlockInfo);
+
+ ok = (yaffs_CheckpointRead(dev,dev->blockInfo,nBytes) == nBytes);
+
+ if(!ok)
+ return 0;
+ nBytes = nBlocks * dev->chunkBitmapStride;
+
+ ok = (yaffs_CheckpointRead(dev,dev->chunkBits,nBytes) == nBytes);
+
+ return ok ? 1 : 0;
+}
+
+static void yaffs_ObjectToCheckpointObject(yaffs_CheckpointObject *cp,
+ yaffs_Object *obj)
+{
+
+ cp->objectId = obj->objectId;
+ cp->parentId = (obj->parent) ? obj->parent->objectId : 0;
+ cp->chunkId = obj->chunkId;
+ cp->variantType = obj->variantType;
+ cp->deleted = obj->deleted;
+ cp->softDeleted = obj->softDeleted;
+ cp->unlinked = obj->unlinked;
+ cp->fake = obj->fake;
+ cp->renameAllowed = obj->renameAllowed;
+ cp->unlinkAllowed = obj->unlinkAllowed;
+ cp->serial = obj->serial;
+ cp->nDataChunks = obj->nDataChunks;
+
+ if(obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+ cp->fileSizeOrEquivalentObjectId = obj->variant.fileVariant.fileSize;
+ else if(obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
+ cp->fileSizeOrEquivalentObjectId = obj->variant.hardLinkVariant.equivalentObjectId;
+}
+
+static void yaffs_CheckpointObjectToObject( yaffs_Object *obj,yaffs_CheckpointObject *cp)
+{
+
+ yaffs_Object *parent;
+
+ obj->objectId = cp->objectId;
+
+ if(cp->parentId)
+ parent = yaffs_FindOrCreateObjectByNumber(
+ obj->myDev,
+ cp->parentId,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ else
+ parent = NULL;
+
+ if(parent)
+ yaffs_AddObjectToDirectory(parent, obj);
+
+ obj->chunkId = cp->chunkId;
+ obj->variantType = cp->variantType;
+ obj->deleted = cp->deleted;
+ obj->softDeleted = cp->softDeleted;
+ obj->unlinked = cp->unlinked;
+ obj->fake = cp->fake;
+ obj->renameAllowed = cp->renameAllowed;
+ obj->unlinkAllowed = cp->unlinkAllowed;
+ obj->serial = cp->serial;
+ obj->nDataChunks = cp->nDataChunks;
+
+ if(obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+ obj->variant.fileVariant.fileSize = cp->fileSizeOrEquivalentObjectId;
+ else if(obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
+ obj->variant.hardLinkVariant.equivalentObjectId = cp->fileSizeOrEquivalentObjectId;
+
+ if(obj->objectId >= YAFFS_NOBJECT_BUCKETS)
+ obj->lazyLoaded = 1;
+}
+
+
+
+static int yaffs_CheckpointTnodeWorker(yaffs_Object * in, yaffs_Tnode * tn,
+ __u32 level, int chunkOffset)
+{
+ int i;
+ yaffs_Device *dev = in->myDev;
+ int ok = 1;
+ int nTnodeBytes = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+
+ if (tn) {
+ if (level > 0) {
+
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++){
+ if (tn->internal[i]) {
+ ok = yaffs_CheckpointTnodeWorker(in,
+ tn->internal[i],
+ level - 1,
+ (chunkOffset<<YAFFS_TNODES_INTERNAL_BITS) + i);
+ }
+ }
+ } else if (level == 0) {
+ __u32 baseOffset = chunkOffset << YAFFS_TNODES_LEVEL0_BITS;
+ /* printf("write tnode at %d\n",baseOffset); */
+ ok = (yaffs_CheckpointWrite(dev,&baseOffset,sizeof(baseOffset)) == sizeof(baseOffset));
+ if(ok)
+ ok = (yaffs_CheckpointWrite(dev,tn,nTnodeBytes) == nTnodeBytes);
+ }
+ }
+
+ return ok;
+
+}
+
+static int yaffs_WriteCheckpointTnodes(yaffs_Object *obj)
+{
+ __u32 endMarker = ~0;
+ int ok = 1;
+
+ if(obj->variantType == YAFFS_OBJECT_TYPE_FILE){
+ ok = yaffs_CheckpointTnodeWorker(obj,
+ obj->variant.fileVariant.top,
+ obj->variant.fileVariant.topLevel,
+ 0);
+ if(ok)
+ ok = (yaffs_CheckpointWrite(obj->myDev,&endMarker,sizeof(endMarker)) ==
+ sizeof(endMarker));
+ }
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs_ReadCheckpointTnodes(yaffs_Object *obj)
+{
+ __u32 baseChunk;
+ int ok = 1;
+ yaffs_Device *dev = obj->myDev;
+ yaffs_FileStructure *fileStructPtr = &obj->variant.fileVariant;
+ yaffs_Tnode *tn;
+ int nread = 0;
+
+ ok = (yaffs_CheckpointRead(dev,&baseChunk,sizeof(baseChunk)) == sizeof(baseChunk));
+
+ while(ok && (~baseChunk)){
+ nread++;
+ /* Read level 0 tnode */
+
+
+ /* printf("read tnode at %d\n",baseChunk); */
+ tn = yaffs_GetTnodeRaw(dev);
+ if(tn)
+ ok = (yaffs_CheckpointRead(dev,tn,(dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8) ==
+ (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
+ else
+ ok = 0;
+
+ if(tn && ok){
+ ok = yaffs_AddOrFindLevel0Tnode(dev,
+ fileStructPtr,
+ baseChunk,
+ tn) ? 1 : 0;
+
+ }
+
+ if(ok)
+ ok = (yaffs_CheckpointRead(dev,&baseChunk,sizeof(baseChunk)) == sizeof(baseChunk));
+
+ }
+
+ T(YAFFS_TRACE_CHECKPOINT,(
+ TSTR("Checkpoint read tnodes %d records, last %d. ok %d" TENDSTR),
+ nread,baseChunk,ok));
+
+ return ok ? 1 : 0;
+}
+
+
+static int yaffs_WriteCheckpointObjects(yaffs_Device *dev)
+{
+ yaffs_Object *obj;
+ yaffs_CheckpointObject cp;
+ int i;
+ int ok = 1;
+ struct list_head *lh;
+
+
+ /* Iterate through the objects in each hash entry,
+ * dumping them to the checkpointing stream.
+ */
+
+ for(i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++){
+ list_for_each(lh, &dev->objectBucket[i].list) {
+ if (lh) {
+ obj = list_entry(lh, yaffs_Object, hashLink);
+ if (!obj->deferedFree) {
+ yaffs_ObjectToCheckpointObject(&cp,obj);
+ cp.structType = sizeof(cp);
+
+ T(YAFFS_TRACE_CHECKPOINT,(
+ TSTR("Checkpoint write object %d parent %d type %d chunk %d obj addr %x" TENDSTR),
+ cp.objectId,cp.parentId,cp.variantType,cp.chunkId,(unsigned) obj));
+
+ ok = (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp));
+
+ if(ok && obj->variantType == YAFFS_OBJECT_TYPE_FILE){
+ ok = yaffs_WriteCheckpointTnodes(obj);
+ }
+ }
+ }
+ }
+ }
+
+ /* Dump end of list */
+ memset(&cp,0xFF,sizeof(yaffs_CheckpointObject));
+ cp.structType = sizeof(cp);
+
+ if(ok)
+ ok = (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp));
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs_ReadCheckpointObjects(yaffs_Device *dev)
+{
+ yaffs_Object *obj;
+ yaffs_CheckpointObject cp;
+ int ok = 1;
+ int done = 0;
+ yaffs_Object *hardList = NULL;
+
+ while(ok && !done) {
+ ok = (yaffs_CheckpointRead(dev,&cp,sizeof(cp)) == sizeof(cp));
+ if(cp.structType != sizeof(cp)) {
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("struct size %d instead of %d ok %d"TENDSTR),
+ cp.structType,sizeof(cp),ok));
+ ok = 0;
+ }
+
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("Checkpoint read object %d parent %d type %d chunk %d " TENDSTR),
+ cp.objectId,cp.parentId,cp.variantType,cp.chunkId));
+
+ if(ok && cp.objectId == ~0)
+ done = 1;
+ else if(ok){
+ obj = yaffs_FindOrCreateObjectByNumber(dev,cp.objectId, cp.variantType);
+ if(obj) {
+ yaffs_CheckpointObjectToObject(obj,&cp);
+ if(obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
+ ok = yaffs_ReadCheckpointTnodes(obj);
+ } else if(obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
+ obj->hardLinks.next =
+ (struct list_head *)
+ hardList;
+ hardList = obj;
+ }
+
+ }
+ }
+ }
+
+ if(ok)
+ yaffs_HardlinkFixup(dev,hardList);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs_WriteCheckpointSum(yaffs_Device *dev)
+{
+ __u32 checkpointSum;
+ int ok;
+
+ yaffs_GetCheckpointSum(dev,&checkpointSum);
+
+ ok = (yaffs_CheckpointWrite(dev,&checkpointSum,sizeof(checkpointSum)) == sizeof(checkpointSum));
+
+ if(!ok)
+ return 0;
+
+ return 1;
+}
+
+static int yaffs_ReadCheckpointSum(yaffs_Device *dev)
+{
+ __u32 checkpointSum0;
+ __u32 checkpointSum1;
+ int ok;
+
+ yaffs_GetCheckpointSum(dev,&checkpointSum0);
+
+ ok = (yaffs_CheckpointRead(dev,&checkpointSum1,sizeof(checkpointSum1)) == sizeof(checkpointSum1));
+
+ if(!ok)
+ return 0;
+
+ if(checkpointSum0 != checkpointSum1)
+ return 0;
+
+ return 1;
+}
+
+
+static int yaffs_WriteCheckpointData(yaffs_Device *dev)
+{
+
+ int ok = 1;
+
+ if(dev->skipCheckpointWrite || !dev->isYaffs2){
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("skipping checkpoint write" TENDSTR)));
+ ok = 0;
+ }
+
+ if(ok)
+ ok = yaffs_CheckpointOpen(dev,1);
+
+ if(ok){
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint validity" TENDSTR)));
+ ok = yaffs_WriteCheckpointValidityMarker(dev,1);
+ }
+ if(ok){
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint device" TENDSTR)));
+ ok = yaffs_WriteCheckpointDevice(dev);
+ }
+ if(ok){
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint objects" TENDSTR)));
+ ok = yaffs_WriteCheckpointObjects(dev);
+ }
+ if(ok){
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint validity" TENDSTR)));
+ ok = yaffs_WriteCheckpointValidityMarker(dev,0);
+ }
+
+ if(ok){
+ ok = yaffs_WriteCheckpointSum(dev);
+ }
+
+
+ if(!yaffs_CheckpointClose(dev))
+ ok = 0;
+
+ if(ok)
+ dev->isCheckpointed = 1;
+ else
+ dev->isCheckpointed = 0;
+
+ return dev->isCheckpointed;
+}
+
+static int yaffs_ReadCheckpointData(yaffs_Device *dev)
+{
+ int ok = 1;
+
+ if(dev->skipCheckpointRead || !dev->isYaffs2){
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("skipping checkpoint read" TENDSTR)));
+ ok = 0;
+ }
+
+ if(ok)
+ ok = yaffs_CheckpointOpen(dev,0); /* open for read */
+
+ if(ok){
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint validity" TENDSTR)));
+ ok = yaffs_ReadCheckpointValidityMarker(dev,1);
+ }
+ if(ok){
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint device" TENDSTR)));
+ ok = yaffs_ReadCheckpointDevice(dev);
+ }
+ if(ok){
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint objects" TENDSTR)));
+ ok = yaffs_ReadCheckpointObjects(dev);
+ }
+ if(ok){
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint validity" TENDSTR)));
+ ok = yaffs_ReadCheckpointValidityMarker(dev,0);
+ }
+
+ if(ok){
+ ok = yaffs_ReadCheckpointSum(dev);
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint checksum %d" TENDSTR),ok));
+ }
+
+ if(!yaffs_CheckpointClose(dev))
+ ok = 0;
+
+ if(ok)
+ dev->isCheckpointed = 1;
+ else
+ dev->isCheckpointed = 0;
+
+ return ok ? 1 : 0;
+
+}
+
+static void yaffs_InvalidateCheckpoint(yaffs_Device *dev)
+{
+ if(dev->isCheckpointed ||
+ dev->blocksInCheckpoint > 0){
+ dev->isCheckpointed = 0;
+ yaffs_CheckpointInvalidateStream(dev);
+ if(dev->superBlock && dev->markSuperBlockDirty)
+ dev->markSuperBlockDirty(dev->superBlock);
+ }
+}
+
+
+int yaffs_CheckpointSave(yaffs_Device *dev)
+{
+
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("save entry: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
+
+ yaffs_VerifyObjects(dev);
+ yaffs_VerifyBlocks(dev);
+ yaffs_VerifyFreeChunks(dev);
+
+ if(!dev->isCheckpointed) {
+ yaffs_InvalidateCheckpoint(dev);
+ yaffs_WriteCheckpointData(dev);
+ }
+
+ T(YAFFS_TRACE_ALWAYS,(TSTR("save exit: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
+
+ return dev->isCheckpointed;
+}
+
+int yaffs_CheckpointRestore(yaffs_Device *dev)
+{
+ int retval;
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("restore entry: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
+
+ retval = yaffs_ReadCheckpointData(dev);
+
+ if(dev->isCheckpointed){
+ yaffs_VerifyObjects(dev);
+ yaffs_VerifyBlocks(dev);
+ yaffs_VerifyFreeChunks(dev);
+ }
+
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("restore exit: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
+
+ return retval;
+}
+
+/*--------------------- File read/write ------------------------
+ * Read and write have very similar structures.
+ * In general the read/write has three parts to it
+ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
+ * Some complete chunks
+ * An incomplete chunk to end off with
+ *
+ * Curve-balls: the first chunk might also be the last chunk.
+ */
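+/* A worked example of the split (illustrative only; the 2048-byte chunk size
+ * is an assumption, the real value is dev->nDataBytesPerChunk): reading 5000
+ * bytes at offset 1000 breaks down as
+ *	chunk 1: start 1000, copy 1048 bytes	(partial first chunk)
+ *	chunk 2: start    0, copy 2048 bytes	(whole chunk)
+ *	chunk 3: start    0, copy 1904 bytes	(partial last chunk)
+ * which is exactly the nToCopy calculation in the loops below:
+ *
+ *	if ((start + n) < dev->nDataBytesPerChunk)
+ *		nToCopy = n;			// first chunk is also the last
+ *	else
+ *		nToCopy = dev->nDataBytesPerChunk - start;
+ */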
+
+int yaffs_ReadDataFromFile(yaffs_Object * in, __u8 * buffer, loff_t offset,
+ int nBytes)
+{
+
+ int chunk;
+ int start;
+ int nToCopy;
+ int n = nBytes;
+ int nDone = 0;
+ yaffs_ChunkCache *cache;
+
+ yaffs_Device *dev;
+
+ dev = in->myDev;
+
+ while (n > 0) {
+ //chunk = offset / dev->nDataBytesPerChunk + 1;
+ //start = offset % dev->nDataBytesPerChunk;
+ yaffs_AddrToChunk(dev,offset,&chunk,&start);
+ chunk++;
+
+ /* OK now check for the curveball where the start and end are in
+ * the same chunk.
+ */
+ if ((start + n) < dev->nDataBytesPerChunk) {
+ nToCopy = n;
+ } else {
+ nToCopy = dev->nDataBytesPerChunk - start;
+ }
+
+ cache = yaffs_FindChunkCache(in, chunk);
+
+ /* If the chunk is already in the cache or it is less than a whole chunk
+ * then use the cache (if there is caching)
+ * else bypass the cache.
+ */
+ if (cache || nToCopy != dev->nDataBytesPerChunk) {
+ if (dev->nShortOpCaches > 0) {
+
+ /* If we can't find the data in the cache, then load it up. */
+
+ if (!cache) {
+ cache = yaffs_GrabChunkCache(in->myDev);
+ cache->object = in;
+ cache->chunkId = chunk;
+ cache->dirty = 0;
+ cache->locked = 0;
+ yaffs_ReadChunkDataFromObject(in, chunk,
+ cache->
+ data);
+ cache->nBytes = 0;
+ }
+
+ yaffs_UseChunkCache(dev, cache, 0);
+
+ cache->locked = 1;
+
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_UnlockYAFFS(TRUE);
+#endif
+ memcpy(buffer, &cache->data[start], nToCopy);
+
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_LockYAFFS(TRUE);
+#endif
+ cache->locked = 0;
+ } else {
+ /* Read into the local buffer then copy..*/
+
+ __u8 *localBuffer =
+ yaffs_GetTempBuffer(dev, __LINE__);
+ yaffs_ReadChunkDataFromObject(in, chunk,
+ localBuffer);
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_UnlockYAFFS(TRUE);
+#endif
+ memcpy(buffer, &localBuffer[start], nToCopy);
+
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_LockYAFFS(TRUE);
+#endif
+ yaffs_ReleaseTempBuffer(dev, localBuffer,
+ __LINE__);
+ }
+
+ } else {
+#ifdef CONFIG_YAFFS_WINCE
+ __u8 *localBuffer = yaffs_GetTempBuffer(dev, __LINE__);
+
+ /* Under WinCE can't do direct transfer. Need to use a local buffer.
+ * This is because we otherwise screw up WinCE's memory mapper
+ */
+ yaffs_ReadChunkDataFromObject(in, chunk, localBuffer);
+
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_UnlockYAFFS(TRUE);
+#endif
+ memcpy(buffer, localBuffer, dev->nDataBytesPerChunk);
+
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_LockYAFFS(TRUE);
+ yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__);
+#endif
+
+#else
+ /* A full chunk. Read directly into the supplied buffer. */
+ yaffs_ReadChunkDataFromObject(in, chunk, buffer);
+#endif
+ }
+
+ n -= nToCopy;
+ offset += nToCopy;
+ buffer += nToCopy;
+ nDone += nToCopy;
+
+ }
+
+ return nDone;
+}
+
+int yaffs_WriteDataToFile(yaffs_Object * in, const __u8 * buffer, loff_t offset,
+ int nBytes, int writeThrough)
+{
+
+ int chunk;
+ int start;
+ int nToCopy;
+ int n = nBytes;
+ int nDone = 0;
+ int nToWriteBack;
+ int startOfWrite = offset;
+ int chunkWritten = 0;
+ int nBytesRead;
+
+ yaffs_Device *dev;
+
+ dev = in->myDev;
+
+ while (n > 0 && chunkWritten >= 0) {
+ //chunk = offset / dev->nDataBytesPerChunk + 1;
+ //start = offset % dev->nDataBytesPerChunk;
+ yaffs_AddrToChunk(dev,offset,&chunk,&start);
+ chunk++;
+
+ /* OK now check for the curveball where the start and end are in
+ * the same chunk.
+ */
+
+ if ((start + n) < dev->nDataBytesPerChunk) {
+ nToCopy = n;
+
+ /* Now folks, to calculate how many bytes to write back....
+			 * If we're overwriting and not writing to the end of file then
+ * we need to write back as much as was there before.
+ */
+
+ nBytesRead =
+ in->variant.fileVariant.fileSize -
+ ((chunk - 1) * dev->nDataBytesPerChunk);
+
+ if (nBytesRead > dev->nDataBytesPerChunk) {
+ nBytesRead = dev->nDataBytesPerChunk;
+ }
+
+ nToWriteBack =
+ (nBytesRead >
+ (start + n)) ? nBytesRead : (start + n);
+
+ } else {
+ nToCopy = dev->nDataBytesPerChunk - start;
+ nToWriteBack = dev->nDataBytesPerChunk;
+ }
+
+ if (nToCopy != dev->nDataBytesPerChunk) {
+ /* An incomplete start or end chunk (or maybe both start and end chunk) */
+ if (dev->nShortOpCaches > 0) {
+ yaffs_ChunkCache *cache;
+ /* If we can't find the data in the cache, then load the cache */
+ cache = yaffs_FindChunkCache(in, chunk);
+
+ if (!cache
+ && yaffs_CheckSpaceForAllocation(in->
+ myDev)) {
+ cache = yaffs_GrabChunkCache(in->myDev);
+ cache->object = in;
+ cache->chunkId = chunk;
+ cache->dirty = 0;
+ cache->locked = 0;
+ yaffs_ReadChunkDataFromObject(in, chunk,
+ cache->
+ data);
+ }
+ else if(cache &&
+ !cache->dirty &&
+ !yaffs_CheckSpaceForAllocation(in->myDev)){
+ /* Drop the cache if it was a read cache item and
+ * no space check has been made for it.
+ */
+ cache = NULL;
+ }
+
+ if (cache) {
+ yaffs_UseChunkCache(dev, cache, 1);
+ cache->locked = 1;
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_UnlockYAFFS(TRUE);
+#endif
+
+ memcpy(&cache->data[start], buffer,
+ nToCopy);
+
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_LockYAFFS(TRUE);
+#endif
+ cache->locked = 0;
+ cache->nBytes = nToWriteBack;
+
+ if (writeThrough) {
+ chunkWritten =
+ yaffs_WriteChunkDataToObject
+ (cache->object,
+ cache->chunkId,
+ cache->data, cache->nBytes,
+ 1);
+ cache->dirty = 0;
+ }
+
+ } else {
+ chunkWritten = -1; /* fail the write */
+ }
+ } else {
+				/* An incomplete start or end chunk (or maybe both start and end chunk).
+				 * Read into the local buffer, then copy over and write back.
+ */
+
+ __u8 *localBuffer =
+ yaffs_GetTempBuffer(dev, __LINE__);
+
+ yaffs_ReadChunkDataFromObject(in, chunk,
+ localBuffer);
+
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_UnlockYAFFS(TRUE);
+#endif
+
+ memcpy(&localBuffer[start], buffer, nToCopy);
+
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_LockYAFFS(TRUE);
+#endif
+ chunkWritten =
+ yaffs_WriteChunkDataToObject(in, chunk,
+ localBuffer,
+ nToWriteBack,
+ 0);
+
+ yaffs_ReleaseTempBuffer(dev, localBuffer,
+ __LINE__);
+
+ }
+
+ } else {
+
+#ifdef CONFIG_YAFFS_WINCE
+ /* Under WinCE can't do direct transfer. Need to use a local buffer.
+ * This is because we otherwise screw up WinCE's memory mapper
+ */
+ __u8 *localBuffer = yaffs_GetTempBuffer(dev, __LINE__);
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_UnlockYAFFS(TRUE);
+#endif
+ memcpy(localBuffer, buffer, dev->nDataBytesPerChunk);
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_LockYAFFS(TRUE);
+#endif
+ chunkWritten =
+ yaffs_WriteChunkDataToObject(in, chunk, localBuffer,
+ dev->nDataBytesPerChunk,
+ 0);
+ yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__);
+#else
+ /* A full chunk. Write directly from the supplied buffer. */
+ chunkWritten =
+ yaffs_WriteChunkDataToObject(in, chunk, buffer,
+ dev->nDataBytesPerChunk,
+ 0);
+#endif
+ /* Since we've overwritten the cached data, we better invalidate it. */
+ yaffs_InvalidateChunkCache(in, chunk);
+ }
+
+ if (chunkWritten >= 0) {
+ n -= nToCopy;
+ offset += nToCopy;
+ buffer += nToCopy;
+ nDone += nToCopy;
+ }
+
+ }
+
+ /* Update file object */
+
+ if ((startOfWrite + nDone) > in->variant.fileVariant.fileSize) {
+ in->variant.fileVariant.fileSize = (startOfWrite + nDone);
+ }
+
+ in->dirty = 1;
+
+ return nDone;
+}
+
+
+/* ---------------------- File resizing stuff ------------------ */
+
+static void yaffs_PruneResizedChunks(yaffs_Object * in, int newSize)
+{
+
+ yaffs_Device *dev = in->myDev;
+ int oldFileSize = in->variant.fileVariant.fileSize;
+
+ int lastDel = 1 + (oldFileSize - 1) / dev->nDataBytesPerChunk;
+
+ int startDel = 1 + (newSize + dev->nDataBytesPerChunk - 1) /
+ dev->nDataBytesPerChunk;
+ int i;
+ int chunkId;
+
+ /* Delete backwards so that we don't end up with holes if
+ * power is lost part-way through the operation.
+ */
+ for (i = lastDel; i >= startDel; i--) {
+ /* NB this could be optimised somewhat,
+ * eg. could retrieve the tags and write them without
+ * using yaffs_DeleteChunk
+ */
+
+ chunkId = yaffs_FindAndDeleteChunkInFile(in, i, NULL);
+ if (chunkId > 0) {
+ if (chunkId <
+ (dev->internalStartBlock * dev->nChunksPerBlock)
+ || chunkId >=
+ ((dev->internalEndBlock +
+ 1) * dev->nChunksPerBlock)) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("Found daft chunkId %d for %d" TENDSTR),
+ chunkId, i));
+ } else {
+ in->nDataChunks--;
+ yaffs_DeleteChunk(dev, chunkId, 1, __LINE__);
+ }
+ }
+ }
+
+}
+
+int yaffs_ResizeFile(yaffs_Object * in, loff_t newSize)
+{
+
+ int oldFileSize = in->variant.fileVariant.fileSize;
+ int newSizeOfPartialChunk;
+ int newFullChunks;
+
+ yaffs_Device *dev = in->myDev;
+
+ yaffs_AddrToChunk(dev, newSize, &newFullChunks, &newSizeOfPartialChunk);
+
+ yaffs_FlushFilesChunkCache(in);
+ yaffs_InvalidateWholeChunkCache(in);
+
+ yaffs_CheckGarbageCollection(dev);
+
+ if (in->variantType != YAFFS_OBJECT_TYPE_FILE) {
+ return yaffs_GetFileSize(in);
+ }
+
+ if (newSize == oldFileSize) {
+ return oldFileSize;
+ }
+
+ if (newSize < oldFileSize) {
+
+ yaffs_PruneResizedChunks(in, newSize);
+
+ if (newSizeOfPartialChunk != 0) {
+ int lastChunk = 1 + newFullChunks;
+
+ __u8 *localBuffer = yaffs_GetTempBuffer(dev, __LINE__);
+
+ /* Got to read and rewrite the last chunk with its new size and zero pad */
+ yaffs_ReadChunkDataFromObject(in, lastChunk,
+ localBuffer);
+
+ memset(localBuffer + newSizeOfPartialChunk, 0,
+ dev->nDataBytesPerChunk - newSizeOfPartialChunk);
+
+ yaffs_WriteChunkDataToObject(in, lastChunk, localBuffer,
+ newSizeOfPartialChunk, 1);
+
+ yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__);
+ }
+
+ in->variant.fileVariant.fileSize = newSize;
+
+ yaffs_PruneFileStructure(dev, &in->variant.fileVariant);
+ } else {
+		/* newSize > oldFileSize */
+ in->variant.fileVariant.fileSize = newSize;
+ }
+
+
+
+	/* Write a new object header to show that we've shrunk the file, if need be.
+	 * Do this only if the file is not in the deleted directories.
+	 */
+ if (in->parent->objectId != YAFFS_OBJECTID_UNLINKED &&
+ in->parent->objectId != YAFFS_OBJECTID_DELETED) {
+ yaffs_UpdateObjectHeader(in, NULL, 0,
+ (newSize < oldFileSize) ? 1 : 0, 0);
+ }
+
+ return newSize;
+}
+
+loff_t yaffs_GetFileSize(yaffs_Object * obj)
+{
+ obj = yaffs_GetEquivalentObject(obj);
+
+ switch (obj->variantType) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return obj->variant.fileVariant.fileSize;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ return yaffs_strlen(obj->variant.symLinkVariant.alias);
+ default:
+ return 0;
+ }
+}
+
+
+
+int yaffs_FlushFile(yaffs_Object * in, int updateTime)
+{
+ int retVal;
+ if (in->dirty) {
+ yaffs_FlushFilesChunkCache(in);
+ if (updateTime) {
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_WinFileTimeNow(in->win_mtime);
+#else
+
+ in->yst_mtime = Y_CURRENT_TIME;
+
+#endif
+ }
+
+ retVal =
+ (yaffs_UpdateObjectHeader(in, NULL, 0, 0, 0) >=
+ 0) ? YAFFS_OK : YAFFS_FAIL;
+ } else {
+ retVal = YAFFS_OK;
+ }
+
+ return retVal;
+
+}
+
+static int yaffs_DoGenericObjectDeletion(yaffs_Object * in)
+{
+
+ /* First off, invalidate the file's data in the cache, without flushing. */
+ yaffs_InvalidateWholeChunkCache(in);
+
+ if (in->myDev->isYaffs2 && (in->parent != in->myDev->deletedDir)) {
+		/* Move to the deleted directory so we have a record that it was deleted. */
+ yaffs_ChangeObjectName(in, in->myDev->deletedDir,"deleted", 0, 0);
+
+ }
+
+ yaffs_RemoveObjectFromDirectory(in);
+ yaffs_DeleteChunk(in->myDev, in->chunkId, 1, __LINE__);
+ in->chunkId = -1;
+
+ yaffs_FreeObject(in);
+ return YAFFS_OK;
+
+}
+
+/* yaffs_DeleteFile deletes the whole file data
+ * and the inode associated with the file.
+ * It does not delete the links associated with the file.
+ */
+static int yaffs_UnlinkFile(yaffs_Object * in)
+{
+
+ int retVal;
+ int immediateDeletion = 0;
+
+ if (1) {
+#ifdef __KERNEL__
+ if (!in->myInode) {
+ immediateDeletion = 1;
+
+ }
+#else
+ if (in->inUse <= 0) {
+ immediateDeletion = 1;
+
+ }
+#endif
+ if (immediateDeletion) {
+ retVal =
+ yaffs_ChangeObjectName(in, in->myDev->deletedDir,
+ "deleted", 0, 0);
+ T(YAFFS_TRACE_TRACING,
+ (TSTR("yaffs: immediate deletion of file %d" TENDSTR),
+ in->objectId));
+ in->deleted = 1;
+ in->myDev->nDeletedFiles++;
+ if (0 && in->myDev->isYaffs2) {
+ yaffs_ResizeFile(in, 0);
+ }
+ yaffs_SoftDeleteFile(in);
+ } else {
+ retVal =
+ yaffs_ChangeObjectName(in, in->myDev->unlinkedDir,
+ "unlinked", 0, 0);
+ }
+
+ }
+ return retVal;
+}
+
+int yaffs_DeleteFile(yaffs_Object * in)
+{
+ int retVal = YAFFS_OK;
+
+ if (in->nDataChunks > 0) {
+ /* Use soft deletion if there is data in the file */
+ if (!in->unlinked) {
+ retVal = yaffs_UnlinkFile(in);
+ }
+ if (retVal == YAFFS_OK && in->unlinked && !in->deleted) {
+ in->deleted = 1;
+ in->myDev->nDeletedFiles++;
+ yaffs_SoftDeleteFile(in);
+ }
+ return in->deleted ? YAFFS_OK : YAFFS_FAIL;
+ } else {
+ /* The file has no data chunks so we toss it immediately */
+ yaffs_FreeTnode(in->myDev, in->variant.fileVariant.top);
+ in->variant.fileVariant.top = NULL;
+ yaffs_DoGenericObjectDeletion(in);
+
+ return YAFFS_OK;
+ }
+}
+
+static int yaffs_DeleteDirectory(yaffs_Object * in)
+{
+ /* First check that the directory is empty. */
+ if (list_empty(&in->variant.directoryVariant.children)) {
+ return yaffs_DoGenericObjectDeletion(in);
+ }
+
+ return YAFFS_FAIL;
+
+}
+
+static int yaffs_DeleteSymLink(yaffs_Object * in)
+{
+ YFREE(in->variant.symLinkVariant.alias);
+
+ return yaffs_DoGenericObjectDeletion(in);
+}
+
+static int yaffs_DeleteHardLink(yaffs_Object * in)
+{
+	/* remove this hardlink from the list associated with the equivalent
+ * object
+ */
+ list_del(&in->hardLinks);
+ return yaffs_DoGenericObjectDeletion(in);
+}
+
+static void yaffs_DestroyObject(yaffs_Object * obj)
+{
+ switch (obj->variantType) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ yaffs_DeleteFile(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ yaffs_DeleteDirectory(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ yaffs_DeleteSymLink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ yaffs_DeleteHardLink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ yaffs_DoGenericObjectDeletion(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ break; /* should not happen. */
+ }
+}
+
+static int yaffs_UnlinkWorker(yaffs_Object * obj)
+{
+
+ if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
+ return yaffs_DeleteHardLink(obj);
+ } else if (!list_empty(&obj->hardLinks)) {
+ /* Curve ball: We're unlinking an object that has a hardlink.
+ *
+ * This problem arises because we are not strictly following
+		 * the Linux link/inode model.
+ *
+ * We can't really delete the object.
+ * Instead, we do the following:
+ * - Select a hardlink.
+ * - Unhook it from the hard links
+ * - Unhook it from its parent directory (so that the rename can work)
+ * - Rename the object to the hardlink's name.
+ * - Delete the hardlink
+ */
+
+ yaffs_Object *hl;
+ int retVal;
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ hl = list_entry(obj->hardLinks.next, yaffs_Object, hardLinks);
+
+ list_del_init(&hl->hardLinks);
+ list_del_init(&hl->siblings);
+
+ yaffs_GetObjectName(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
+
+ retVal = yaffs_ChangeObjectName(obj, hl->parent, name, 0, 0);
+
+ if (retVal == YAFFS_OK) {
+ retVal = yaffs_DoGenericObjectDeletion(hl);
+ }
+ return retVal;
+
+ } else {
+ switch (obj->variantType) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return yaffs_UnlinkFile(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ return yaffs_DeleteDirectory(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ return yaffs_DeleteSymLink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ return yaffs_DoGenericObjectDeletion(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ default:
+ return YAFFS_FAIL;
+ }
+ }
+}
+
+
+static int yaffs_UnlinkObject( yaffs_Object *obj)
+{
+
+ if (obj && obj->unlinkAllowed) {
+ return yaffs_UnlinkWorker(obj);
+ }
+
+ return YAFFS_FAIL;
+
+}
+int yaffs_Unlink(yaffs_Object * dir, const YCHAR * name)
+{
+ yaffs_Object *obj;
+
+ obj = yaffs_FindObjectByName(dir, name);
+ return yaffs_UnlinkObject(obj);
+}
+
+/*----------------------- Initialisation Scanning ---------------------- */
+
+static void yaffs_HandleShadowedObject(yaffs_Device * dev, int objId,
+ int backwardScanning)
+{
+ yaffs_Object *obj;
+
+ if (!backwardScanning) {
+ /* Handle YAFFS1 forward scanning case
+ * For YAFFS1 we always do the deletion
+ */
+
+ } else {
+ /* Handle YAFFS2 case (backward scanning)
+ * If the shadowed object exists then ignore.
+ */
+ if (yaffs_FindObjectByNumber(dev, objId)) {
+ return;
+ }
+ }
+
+ /* Let's create it (if it does not exist) assuming it is a file so that it can do shrinking etc.
+ * We put it in unlinked dir to be cleaned up after the scanning
+ */
+ obj =
+ yaffs_FindOrCreateObjectByNumber(dev, objId,
+ YAFFS_OBJECT_TYPE_FILE);
+ yaffs_AddObjectToDirectory(dev->unlinkedDir, obj);
+ obj->variant.fileVariant.shrinkSize = 0;
+ obj->valid = 1; /* So that we don't read any other info for this file */
+
+}
+
+typedef struct {
+ int seq;
+ int block;
+} yaffs_BlockIndex;
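+
+/* yaffs_BlockIndex entries are sorted by sequence number before scanning:
+ * yaffs_Scan() walks the sorted list oldest-first, while
+ * yaffs_ScanBackwards() walks it newest-first.
+ */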
+
+
+static void yaffs_HardlinkFixup(yaffs_Device *dev, yaffs_Object *hardList)
+{
+ yaffs_Object *hl;
+ yaffs_Object *in;
+
+ while (hardList) {
+ hl = hardList;
+ hardList = (yaffs_Object *) (hardList->hardLinks.next);
+
+ in = yaffs_FindObjectByNumber(dev,
+ hl->variant.hardLinkVariant.
+ equivalentObjectId);
+
+ if (in) {
+ /* Add the hardlink pointers */
+ hl->variant.hardLinkVariant.equivalentObject = in;
+ list_add(&hl->hardLinks, &in->hardLinks);
+ } else {
+ /* Todo Need to report/handle this better.
+			 * Got a problem... hardlink to a non-existent object
+ */
+ hl->variant.hardLinkVariant.equivalentObject = NULL;
+ INIT_LIST_HEAD(&hl->hardLinks);
+
+ }
+
+ }
+
+}
+
+
+
+
+
+static int ybicmp(const void *a, const void *b){
+ register int aseq = ((yaffs_BlockIndex *)a)->seq;
+ register int bseq = ((yaffs_BlockIndex *)b)->seq;
+ register int ablock = ((yaffs_BlockIndex *)a)->block;
+ register int bblock = ((yaffs_BlockIndex *)b)->block;
+ if( aseq == bseq )
+ return ablock - bblock;
+ else
+ return aseq - bseq;
+
+}
+
+static int yaffs_Scan(yaffs_Device * dev)
+{
+ yaffs_ExtendedTags tags;
+ int blk;
+ int blockIterator;
+ int startIterator;
+ int endIterator;
+ int nBlocksToScan = 0;
+ int result;
+
+ int chunk;
+ int c;
+ int deleted;
+ yaffs_BlockState state;
+ yaffs_Object *hardList = NULL;
+ yaffs_BlockInfo *bi;
+ int sequenceNumber;
+ yaffs_ObjectHeader *oh;
+ yaffs_Object *in;
+ yaffs_Object *parent;
+ int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
+
+ int alloc_failed = 0;
+
+
+ __u8 *chunkData;
+
+ yaffs_BlockIndex *blockIndex = NULL;
+
+ if (dev->isYaffs2) {
+ T(YAFFS_TRACE_SCAN,
+ (TSTR("yaffs_Scan is not for YAFFS2!" TENDSTR)));
+ return YAFFS_FAIL;
+ }
+
+	//TODO Throw all the yaffs2 stuff out of yaffs_Scan since it is only for yaffs1 format.
+
+ T(YAFFS_TRACE_SCAN,
+ (TSTR("yaffs_Scan starts intstartblk %d intendblk %d..." TENDSTR),
+ dev->internalStartBlock, dev->internalEndBlock));
+
+ chunkData = yaffs_GetTempBuffer(dev, __LINE__);
+
+ dev->sequenceNumber = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+ if (dev->isYaffs2) {
+ blockIndex = YMALLOC(nBlocks * sizeof(yaffs_BlockIndex));
+ if(!blockIndex)
+ return YAFFS_FAIL;
+ }
+
+ /* Scan all the blocks to determine their state */
+ for (blk = dev->internalStartBlock; blk <= dev->internalEndBlock; blk++) {
+ bi = yaffs_GetBlockInfo(dev, blk);
+ yaffs_ClearChunkBits(dev, blk);
+ bi->pagesInUse = 0;
+ bi->softDeletions = 0;
+
+ yaffs_QueryInitialBlockState(dev, blk, &state, &sequenceNumber);
+
+ bi->blockState = state;
+ bi->sequenceNumber = sequenceNumber;
+
+ T(YAFFS_TRACE_SCAN_DEBUG,
+ (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
+ state, sequenceNumber));
+
+ if (state == YAFFS_BLOCK_STATE_DEAD) {
+ T(YAFFS_TRACE_BAD_BLOCKS,
+ (TSTR("block %d is bad" TENDSTR), blk));
+ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+ T(YAFFS_TRACE_SCAN_DEBUG,
+ (TSTR("Block empty " TENDSTR)));
+ dev->nErasedBlocks++;
+ dev->nFreeChunks += dev->nChunksPerBlock;
+ } else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+
+ /* Determine the highest sequence number */
+ if (dev->isYaffs2 &&
+ sequenceNumber >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+ sequenceNumber < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
+
+ blockIndex[nBlocksToScan].seq = sequenceNumber;
+ blockIndex[nBlocksToScan].block = blk;
+
+ nBlocksToScan++;
+
+ if (sequenceNumber >= dev->sequenceNumber) {
+ dev->sequenceNumber = sequenceNumber;
+ }
+ } else if (dev->isYaffs2) {
+ /* TODO: Nasty sequence number! */
+ T(YAFFS_TRACE_SCAN,
+ (TSTR
+ ("Block scanning block %d has bad sequence number %d"
+ TENDSTR), blk, sequenceNumber));
+
+ }
+ }
+ }
+
+ /* Sort the blocks
+ * Dungy old bubble sort for now...
+ */
+ if (dev->isYaffs2) {
+ yaffs_BlockIndex temp;
+ int i;
+ int j;
+
+ for (i = 0; i < nBlocksToScan; i++)
+ for (j = i + 1; j < nBlocksToScan; j++)
+ if (blockIndex[i].seq > blockIndex[j].seq) {
+ temp = blockIndex[j];
+ blockIndex[j] = blockIndex[i];
+ blockIndex[i] = temp;
+ }
+ }
+
+ /* Now scan the blocks looking at the data. */
+ if (dev->isYaffs2) {
+ startIterator = 0;
+ endIterator = nBlocksToScan - 1;
+ T(YAFFS_TRACE_SCAN_DEBUG,
+ (TSTR("%d blocks to be scanned" TENDSTR), nBlocksToScan));
+ } else {
+ startIterator = dev->internalStartBlock;
+ endIterator = dev->internalEndBlock;
+ }
+
+ /* For each block.... */
+ for (blockIterator = startIterator; !alloc_failed && blockIterator <= endIterator;
+ blockIterator++) {
+
+ if (dev->isYaffs2) {
+ /* get the block to scan in the correct order */
+ blk = blockIndex[blockIterator].block;
+ } else {
+ blk = blockIterator;
+ }
+
+ bi = yaffs_GetBlockInfo(dev, blk);
+ state = bi->blockState;
+
+ deleted = 0;
+
+ /* For each chunk in each block that needs scanning....*/
+ for (c = 0; !alloc_failed && c < dev->nChunksPerBlock &&
+ state == YAFFS_BLOCK_STATE_NEEDS_SCANNING; c++) {
+ /* Read the tags and decide what to do */
+ chunk = blk * dev->nChunksPerBlock + c;
+
+ result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
+ &tags);
+
+ /* Let's have a good look at this chunk... */
+
+ if (!dev->isYaffs2 && tags.chunkDeleted) {
+ /* YAFFS1 only...
+ * A deleted chunk
+ */
+ deleted++;
+ dev->nFreeChunks++;
+ /*T((" %d %d deleted\n",blk,c)); */
+ } else if (!tags.chunkUsed) {
+ /* An unassigned chunk in the block
+ * This means that either the block is empty or
+ * this is the one being allocated from
+ */
+
+ if (c == 0) {
+ /* We're looking at the first chunk in the block so the block is unused */
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->nErasedBlocks++;
+ } else {
+ /* this is the block being allocated from */
+ T(YAFFS_TRACE_SCAN,
+ (TSTR
+ (" Allocating from %d %d" TENDSTR),
+ blk, c));
+ state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->allocationBlock = blk;
+ dev->allocationPage = c;
+ dev->allocationBlockFinder = blk;
+ /* Set it to here to encourage the allocator to go forth from here. */
+
+ /* Yaffs2 sanity check:
+ * This should be the one with the highest sequence number
+ */
+ if (dev->isYaffs2
+ && (dev->sequenceNumber !=
+ bi->sequenceNumber)) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("yaffs: Allocation block %d was not highest sequence id:"
+ " block seq = %d, dev seq = %d"
+ TENDSTR), blk,bi->sequenceNumber,dev->sequenceNumber));
+ }
+ }
+
+ dev->nFreeChunks += (dev->nChunksPerBlock - c);
+ } else if (tags.chunkId > 0) {
+ /* chunkId > 0 so it is a data chunk... */
+ unsigned int endpos;
+
+ yaffs_SetChunkBit(dev, blk, c);
+ bi->pagesInUse++;
+
+ in = yaffs_FindOrCreateObjectByNumber(dev,
+ tags.
+ objectId,
+ YAFFS_OBJECT_TYPE_FILE);
+ /* PutChunkIntoFile checks for a clash (two data chunks with
+ * the same chunkId).
+ */
+
+ if(!in)
+ alloc_failed = 1;
+
+ if(in){
+ if(!yaffs_PutChunkIntoFile(in, tags.chunkId, chunk,1))
+ alloc_failed = 1;
+ }
+
+ endpos =
+ (tags.chunkId - 1) * dev->nDataBytesPerChunk +
+ tags.byteCount;
+ if (in &&
+ in->variantType == YAFFS_OBJECT_TYPE_FILE
+ && in->variant.fileVariant.scannedFileSize <
+ endpos) {
+ in->variant.fileVariant.
+ scannedFileSize = endpos;
+ if (!dev->useHeaderFileSize) {
+ in->variant.fileVariant.
+ fileSize =
+ in->variant.fileVariant.
+ scannedFileSize;
+ }
+
+ }
+ /* T((" %d %d data %d %d\n",blk,c,tags.objectId,tags.chunkId)); */
+ } else {
+ /* chunkId == 0, so it is an ObjectHeader.
+ * Thus, we read in the object header and make the object
+ */
+ yaffs_SetChunkBit(dev, blk, c);
+ bi->pagesInUse++;
+
+ result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk,
+ chunkData,
+ NULL);
+
+ oh = (yaffs_ObjectHeader *) chunkData;
+
+ in = yaffs_FindObjectByNumber(dev,
+ tags.objectId);
+ if (in && in->variantType != oh->type) {
+ /* This should not happen, but somehow
+				 * We've ended up with an objectId that has been reused but not yet
+ * deleted, and worse still it has changed type. Delete the old object.
+ */
+
+ yaffs_DestroyObject(in);
+
+ in = 0;
+ }
+
+ in = yaffs_FindOrCreateObjectByNumber(dev,
+ tags.
+ objectId,
+ oh->type);
+
+ if(!in)
+ alloc_failed = 1;
+
+ if (in && oh->shadowsObject > 0) {
+ yaffs_HandleShadowedObject(dev,
+ oh->
+ shadowsObject,
+ 0);
+ }
+
+ if (in && in->valid) {
+ /* We have already filled this one. We have a duplicate and need to resolve it. */
+
+ unsigned existingSerial = in->serial;
+ unsigned newSerial = tags.serialNumber;
+
+ if (dev->isYaffs2 ||
+ ((existingSerial + 1) & 3) ==
+ newSerial) {
+					/* Use new one - destroy the existing one */
+ yaffs_DeleteChunk(dev,
+ in->chunkId,
+ 1, __LINE__);
+ in->valid = 0;
+ } else {
+ /* Use existing - destroy this one. */
+ yaffs_DeleteChunk(dev, chunk, 1,
+ __LINE__);
+ }
+ }
+
+ if (in && !in->valid &&
+ (tags.objectId == YAFFS_OBJECTID_ROOT ||
+ tags.objectId == YAFFS_OBJECTID_LOSTNFOUND)) {
+ /* We only load some info, don't fiddle with directory structure */
+ in->valid = 1;
+ in->variantType = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+#ifdef CONFIG_YAFFS_WINCE
+ in->win_atime[0] = oh->win_atime[0];
+ in->win_ctime[0] = oh->win_ctime[0];
+ in->win_mtime[0] = oh->win_mtime[0];
+ in->win_atime[1] = oh->win_atime[1];
+ in->win_ctime[1] = oh->win_ctime[1];
+ in->win_mtime[1] = oh->win_mtime[1];
+#else
+ in->yst_uid = oh->yst_uid;
+ in->yst_gid = oh->yst_gid;
+ in->yst_atime = oh->yst_atime;
+ in->yst_mtime = oh->yst_mtime;
+ in->yst_ctime = oh->yst_ctime;
+ in->yst_rdev = oh->yst_rdev;
+#endif
+ in->chunkId = chunk;
+
+ } else if (in && !in->valid) {
+ /* we need to load this info */
+
+ in->valid = 1;
+ in->variantType = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+#ifdef CONFIG_YAFFS_WINCE
+ in->win_atime[0] = oh->win_atime[0];
+ in->win_ctime[0] = oh->win_ctime[0];
+ in->win_mtime[0] = oh->win_mtime[0];
+ in->win_atime[1] = oh->win_atime[1];
+ in->win_ctime[1] = oh->win_ctime[1];
+ in->win_mtime[1] = oh->win_mtime[1];
+#else
+ in->yst_uid = oh->yst_uid;
+ in->yst_gid = oh->yst_gid;
+ in->yst_atime = oh->yst_atime;
+ in->yst_mtime = oh->yst_mtime;
+ in->yst_ctime = oh->yst_ctime;
+ in->yst_rdev = oh->yst_rdev;
+#endif
+ in->chunkId = chunk;
+
+ yaffs_SetObjectName(in, oh->name);
+ in->dirty = 0;
+
+ /* directory stuff...
+ * hook up to parent
+ */
+
+ parent =
+ yaffs_FindOrCreateObjectByNumber
+ (dev, oh->parentObjectId,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ if (parent->variantType ==
+ YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variantType =
+ YAFFS_OBJECT_TYPE_DIRECTORY;
+ INIT_LIST_HEAD(&parent->variant.
+ directoryVariant.
+ children);
+ } else if (parent->variantType !=
+ YAFFS_OBJECT_TYPE_DIRECTORY)
+ {
+ /* Hoosterman, another problem....
+ * We're trying to use a non-directory as a directory
+ */
+
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("yaffs tragedy: attempting to use non-directory as"
+ " a directory in scan. Put in lost+found."
+ TENDSTR)));
+ parent = dev->lostNFoundDir;
+ }
+
+ yaffs_AddObjectToDirectory(parent, in);
+
+ if (0 && (parent == dev->deletedDir ||
+ parent == dev->unlinkedDir)) {
+ in->deleted = 1; /* If it is unlinked at start up then it wants deleting */
+ dev->nDeletedFiles++;
+ }
+ /* Note re hardlinks.
+ * Since we might scan a hardlink before its equivalent object is scanned
+ * we put them all in a list.
+ * After scanning is complete, we should have all the objects, so we run through this
+ * list and fix up all the chains.
+ */
+
+ switch (in->variantType) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (dev->isYaffs2
+ && oh->isShrink) {
+ /* Prune back the shrunken chunks */
+ yaffs_PruneResizedChunks
+ (in, oh->fileSize);
+ /* Mark the block as having a shrinkHeader */
+ bi->hasShrinkHeader = 1;
+ }
+
+ if (dev->useHeaderFileSize)
+
+ in->variant.fileVariant.
+ fileSize =
+ oh->fileSize;
+
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.hardLinkVariant.
+ equivalentObjectId =
+ oh->equivalentObjectId;
+ in->hardLinks.next =
+ (struct list_head *)
+ hardList;
+ hardList = in;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symLinkVariant.alias =
+ yaffs_CloneString(oh->alias);
+ if(!in->variant.symLinkVariant.alias)
+ alloc_failed = 1;
+ break;
+ }
+
+ if (parent == dev->deletedDir) {
+ yaffs_DestroyObject(in);
+ bi->hasShrinkHeader = 1;
+ }
+ }
+ }
+ }
+
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+ /* If we got this far while scanning, then the block is fully allocated.*/
+ state = YAFFS_BLOCK_STATE_FULL;
+ }
+
+ bi->blockState = state;
+
+ /* Now let's see if it was dirty */
+ if (bi->pagesInUse == 0 &&
+ !bi->hasShrinkHeader &&
+ bi->blockState == YAFFS_BLOCK_STATE_FULL) {
+ yaffs_BlockBecameDirty(dev, blk);
+ }
+
+ }
+
+ if (blockIndex) {
+ YFREE(blockIndex);
+ }
+
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+ * We should now have scanned all the objects, now it's time to add these
+ * hardlinks.
+ */
+
+ yaffs_HardlinkFixup(dev,hardList);
+
+ /* Handle the unlinked files. Since they were left in an unlinked state we should
+ * just delete them.
+ */
+ {
+ struct list_head *i;
+ struct list_head *n;
+
+ yaffs_Object *l;
+ /* Soft delete all the unlinked files */
+ list_for_each_safe(i, n,
+ &dev->unlinkedDir->variant.directoryVariant.
+ children) {
+ if (i) {
+ l = list_entry(i, yaffs_Object, siblings);
+ yaffs_DestroyObject(l);
+ }
+ }
+ }
+
+ yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
+
+ if(alloc_failed){
+ return YAFFS_FAIL;
+ }
+
+ T(YAFFS_TRACE_SCAN, (TSTR("yaffs_Scan ends" TENDSTR)));
+
+
+ return YAFFS_OK;
+}
+
+static void yaffs_CheckObjectDetailsLoaded(yaffs_Object *in)
+{
+ __u8 *chunkData;
+ yaffs_ObjectHeader *oh;
+ yaffs_Device *dev = in->myDev;
+ yaffs_ExtendedTags tags;
+ int result;
+ int alloc_failed = 0;
+
+ if(!in)
+ return;
+
+#if 0
+ T(YAFFS_TRACE_SCAN,(TSTR("details for object %d %s loaded" TENDSTR),
+ in->objectId,
+ in->lazyLoaded ? "not yet" : "already"));
+#endif
+
+ if(in->lazyLoaded){
+ in->lazyLoaded = 0;
+ chunkData = yaffs_GetTempBuffer(dev, __LINE__);
+
+ result = yaffs_ReadChunkWithTagsFromNAND(dev,in->chunkId,chunkData,&tags);
+ oh = (yaffs_ObjectHeader *) chunkData;
+
+ in->yst_mode = oh->yst_mode;
+#ifdef CONFIG_YAFFS_WINCE
+ in->win_atime[0] = oh->win_atime[0];
+ in->win_ctime[0] = oh->win_ctime[0];
+ in->win_mtime[0] = oh->win_mtime[0];
+ in->win_atime[1] = oh->win_atime[1];
+ in->win_ctime[1] = oh->win_ctime[1];
+ in->win_mtime[1] = oh->win_mtime[1];
+#else
+ in->yst_uid = oh->yst_uid;
+ in->yst_gid = oh->yst_gid;
+ in->yst_atime = oh->yst_atime;
+ in->yst_mtime = oh->yst_mtime;
+ in->yst_ctime = oh->yst_ctime;
+ in->yst_rdev = oh->yst_rdev;
+
+#endif
+ yaffs_SetObjectName(in, oh->name);
+
+ if(in->variantType == YAFFS_OBJECT_TYPE_SYMLINK){
+ in->variant.symLinkVariant.alias =
+ yaffs_CloneString(oh->alias);
+ if(!in->variant.symLinkVariant.alias)
+ alloc_failed = 1; /* Not returned to caller */
+ }
+
+ yaffs_ReleaseTempBuffer(dev,chunkData, __LINE__);
+ }
+}
+
+static int yaffs_ScanBackwards(yaffs_Device * dev)
+{
+ yaffs_ExtendedTags tags;
+ int blk;
+ int blockIterator;
+ int startIterator;
+ int endIterator;
+ int nBlocksToScan = 0;
+
+ int chunk;
+ int result;
+ int c;
+ int deleted;
+ yaffs_BlockState state;
+ yaffs_Object *hardList = NULL;
+ yaffs_BlockInfo *bi;
+ int sequenceNumber;
+ yaffs_ObjectHeader *oh;
+ yaffs_Object *in;
+ yaffs_Object *parent;
+ int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
+ int itsUnlinked;
+ __u8 *chunkData;
+
+ int fileSize;
+ int isShrink;
+ int foundChunksInBlock;
+ int equivalentObjectId;
+ int alloc_failed = 0;
+
+
+ yaffs_BlockIndex *blockIndex = NULL;
+ int altBlockIndex = 0;
+
+ if (!dev->isYaffs2) {
+ T(YAFFS_TRACE_SCAN,
+ (TSTR("yaffs_ScanBackwards is only for YAFFS2!" TENDSTR)));
+ return YAFFS_FAIL;
+ }
+
+ T(YAFFS_TRACE_SCAN,
+ (TSTR
+ ("yaffs_ScanBackwards starts intstartblk %d intendblk %d..."
+ TENDSTR), dev->internalStartBlock, dev->internalEndBlock));
+
+
+ dev->sequenceNumber = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+ blockIndex = YMALLOC(nBlocks * sizeof(yaffs_BlockIndex));
+
+ if(!blockIndex) {
+ blockIndex = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockIndex));
+ altBlockIndex = 1;
+ }
+
+ if(!blockIndex) {
+ T(YAFFS_TRACE_SCAN,
+		  (TSTR("yaffs_ScanBackwards() could not allocate block index!" TENDSTR)));
+ return YAFFS_FAIL;
+ }
+
+ dev->blocksInCheckpoint = 0;
+
+ chunkData = yaffs_GetTempBuffer(dev, __LINE__);
+
+ /* Scan all the blocks to determine their state */
+ for (blk = dev->internalStartBlock; blk <= dev->internalEndBlock; blk++) {
+ bi = yaffs_GetBlockInfo(dev, blk);
+ yaffs_ClearChunkBits(dev, blk);
+ bi->pagesInUse = 0;
+ bi->softDeletions = 0;
+
+ yaffs_QueryInitialBlockState(dev, blk, &state, &sequenceNumber);
+
+ bi->blockState = state;
+ bi->sequenceNumber = sequenceNumber;
+
+ if(bi->sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ bi->blockState = state = YAFFS_BLOCK_STATE_CHECKPOINT;
+
+ T(YAFFS_TRACE_SCAN_DEBUG,
+ (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
+ state, sequenceNumber));
+
+
+ if(state == YAFFS_BLOCK_STATE_CHECKPOINT){
+ dev->blocksInCheckpoint++;
+
+ } else if (state == YAFFS_BLOCK_STATE_DEAD) {
+ T(YAFFS_TRACE_BAD_BLOCKS,
+ (TSTR("block %d is bad" TENDSTR), blk));
+ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+ T(YAFFS_TRACE_SCAN_DEBUG,
+ (TSTR("Block empty " TENDSTR)));
+ dev->nErasedBlocks++;
+ dev->nFreeChunks += dev->nChunksPerBlock;
+ } else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+
+ /* Determine the highest sequence number */
+ if (dev->isYaffs2 &&
+ sequenceNumber >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+ sequenceNumber < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
+
+ blockIndex[nBlocksToScan].seq = sequenceNumber;
+ blockIndex[nBlocksToScan].block = blk;
+
+ nBlocksToScan++;
+
+ if (sequenceNumber >= dev->sequenceNumber) {
+ dev->sequenceNumber = sequenceNumber;
+ }
+ } else if (dev->isYaffs2) {
+ /* TODO: Nasty sequence number! */
+ T(YAFFS_TRACE_SCAN,
+ (TSTR
+ ("Block scanning block %d has bad sequence number %d"
+ TENDSTR), blk, sequenceNumber));
+
+ }
+ }
+ }
+
+ T(YAFFS_TRACE_SCAN,
+ (TSTR("%d blocks to be sorted..." TENDSTR), nBlocksToScan));
+
+
+
+ YYIELD();
+
+ /* Sort the blocks */
+#ifndef CONFIG_YAFFS_USE_OWN_SORT
+ yaffs_qsort(blockIndex, nBlocksToScan,
+ sizeof(yaffs_BlockIndex), ybicmp);
+#else
+ {
+ /* Dungy old bubble sort... */
+
+ yaffs_BlockIndex temp;
+ int i;
+ int j;
+
+ for (i = 0; i < nBlocksToScan; i++)
+ for (j = i + 1; j < nBlocksToScan; j++)
+ if (blockIndex[i].seq > blockIndex[j].seq) {
+ temp = blockIndex[j];
+ blockIndex[j] = blockIndex[i];
+ blockIndex[i] = temp;
+ }
+ }
+#endif
+
+ YYIELD();
+
+ T(YAFFS_TRACE_SCAN, (TSTR("...done" TENDSTR)));
+
+ /* Now scan the blocks looking at the data. */
+ startIterator = 0;
+ endIterator = nBlocksToScan - 1;
+ T(YAFFS_TRACE_SCAN_DEBUG,
+ (TSTR("%d blocks to be scanned" TENDSTR), nBlocksToScan));
+
+ /* For each block.... backwards */
+ for (blockIterator = endIterator; !alloc_failed && blockIterator >= startIterator;
+ blockIterator--) {
+ /* Cooperative multitasking! This loop can run for so
+ long that watchdog timers expire. */
+ YYIELD();
+
+ /* get the block to scan in the correct order */
+ blk = blockIndex[blockIterator].block;
+
+ bi = yaffs_GetBlockInfo(dev, blk);
+
+
+ state = bi->blockState;
+
+ deleted = 0;
+
+ /* For each chunk in each block that needs scanning.... */
+ foundChunksInBlock = 0;
+ for (c = dev->nChunksPerBlock - 1;
+ !alloc_failed && c >= 0 &&
+ (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+ state == YAFFS_BLOCK_STATE_ALLOCATING); c--) {
+ /* Scan backwards...
+ * Read the tags and decide what to do
+ */
+
+ chunk = blk * dev->nChunksPerBlock + c;
+
+ result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
+ &tags);
+
+ /* Let's have a good look at this chunk... */
+
+ if (!tags.chunkUsed) {
+ /* An unassigned chunk in the block.
+ * If there are used chunks after this one, then
+ * it is a chunk that was skipped due to failing the erased
+ * check. Just skip it so that it can be deleted.
+				 * But, more typically, we get here when this is an unallocated
+				 * chunk and this means that either the block is empty or
+ * this is the one being allocated from
+ */
+
+ if(foundChunksInBlock)
+ {
+ /* This is a chunk that was skipped due to failing the erased check */
+
+ } else if (c == 0) {
+ /* We're looking at the first chunk in the block so the block is unused */
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->nErasedBlocks++;
+ } else {
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+ state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ if(dev->sequenceNumber == bi->sequenceNumber) {
+ /* this is the block being allocated from */
+
+ T(YAFFS_TRACE_SCAN,
+ (TSTR
+ (" Allocating from %d %d"
+ TENDSTR), blk, c));
+
+ state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->allocationBlock = blk;
+ dev->allocationPage = c;
+ dev->allocationBlockFinder = blk;
+ }
+ else {
+ /* This is a partially written block that is not
+ * the current allocation block. This block must have
+ * had a write failure, so set up for retirement.
+ */
+
+ bi->needsRetiring = 1;
+ bi->gcPrioritise = 1;
+
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("Partially written block %d being set for retirement" TENDSTR),
+ blk));
+ }
+
+ }
+
+ }
+
+ dev->nFreeChunks++;
+
+ } else if (tags.chunkId > 0) {
+ /* chunkId > 0 so it is a data chunk... */
+ unsigned int endpos;
+ __u32 chunkBase =
+ (tags.chunkId - 1) * dev->nDataBytesPerChunk;
+
+ foundChunksInBlock = 1;
+
+
+ yaffs_SetChunkBit(dev, blk, c);
+ bi->pagesInUse++;
+
+ in = yaffs_FindOrCreateObjectByNumber(dev,
+ tags.
+ objectId,
+ YAFFS_OBJECT_TYPE_FILE);
+ if(!in){
+ /* Out of memory */
+ alloc_failed = 1;
+ }
+
+ if (in &&
+ in->variantType == YAFFS_OBJECT_TYPE_FILE
+ && chunkBase <
+ in->variant.fileVariant.shrinkSize) {
+ /* This has not been invalidated by a resize */
+ if(!yaffs_PutChunkIntoFile(in, tags.chunkId,
+ chunk, -1)){
+ alloc_failed = 1;
+ }
+
+ /* File size is calculated by looking at the data chunks if we have not
+ * seen an object header yet. Stop this practice once we find an object header.
+ */
+ endpos =
+ (tags.chunkId -
+ 1) * dev->nDataBytesPerChunk +
+ tags.byteCount;
+
+ if (!in->valid && /* have not got an object header yet */
+ in->variant.fileVariant.
+ scannedFileSize < endpos) {
+ in->variant.fileVariant.
+ scannedFileSize = endpos;
+ in->variant.fileVariant.
+ fileSize =
+ in->variant.fileVariant.
+ scannedFileSize;
+ }
+
+ } else if(in) {
+ /* This chunk has been invalidated by a resize, so delete */
+ yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
+
+ }
+ } else {
+ /* chunkId == 0, so it is an ObjectHeader.
+ * Thus, we read in the object header and make the object
+ */
+ foundChunksInBlock = 1;
+
+ yaffs_SetChunkBit(dev, blk, c);
+ bi->pagesInUse++;
+
+ oh = NULL;
+ in = NULL;
+
+ if (tags.extraHeaderInfoAvailable) {
+ in = yaffs_FindOrCreateObjectByNumber
+ (dev, tags.objectId,
+ tags.extraObjectType);
+ }
+
+ if (!in ||
+#ifdef CONFIG_YAFFS_DISABLE_LAZY_LOAD
+ !in->valid ||
+#endif
+ tags.extraShadows ||
+ (!in->valid &&
+ (tags.objectId == YAFFS_OBJECTID_ROOT ||
+ tags.objectId == YAFFS_OBJECTID_LOSTNFOUND))
+ ) {
+
+ /* If we don't have valid info then we need to read the chunk
+ * TODO In future we can probably defer reading the chunk and
+ * living with invalid data until needed.
+ */
+
+ result = yaffs_ReadChunkWithTagsFromNAND(dev,
+ chunk,
+ chunkData,
+ NULL);
+
+ oh = (yaffs_ObjectHeader *) chunkData;
+
+ if (!in)
+ in = yaffs_FindOrCreateObjectByNumber(dev, tags.objectId, oh->type);
+
+ }
+
+ if (!in) {
+ /* TODO Hoosterman we have a problem! */
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("yaffs tragedy: Could not make object for object %d "
+ "at chunk %d during scan"
+ TENDSTR), tags.objectId, chunk));
+
+ }
+
+ if (in->valid) {
+ /* We have already filled this one.
+ * We have a duplicate that will be discarded, but
+ * we first have to suck out resize info if it is a file.
+ */
+
+ if ((in->variantType == YAFFS_OBJECT_TYPE_FILE) &&
+ ((oh &&
+ oh-> type == YAFFS_OBJECT_TYPE_FILE)||
+ (tags.extraHeaderInfoAvailable &&
+ tags.extraObjectType == YAFFS_OBJECT_TYPE_FILE))
+ ) {
+ __u32 thisSize =
+ (oh) ? oh->fileSize : tags.
+ extraFileLength;
+ __u32 parentObjectId =
+ (oh) ? oh->
+ parentObjectId : tags.
+ extraParentObjectId;
+ unsigned isShrink =
+ (oh) ? oh->isShrink : tags.
+ extraIsShrinkHeader;
+
+ /* If it is deleted (unlinked at start also means deleted)
+ * we treat the file size as being zeroed at this point.
+ */
+ if (parentObjectId ==
+ YAFFS_OBJECTID_DELETED
+ || parentObjectId ==
+ YAFFS_OBJECTID_UNLINKED) {
+ thisSize = 0;
+ isShrink = 1;
+ }
+
+ if (isShrink &&
+ in->variant.fileVariant.
+ shrinkSize > thisSize) {
+ in->variant.fileVariant.
+ shrinkSize =
+ thisSize;
+ }
+
+ if (isShrink) {
+ bi->hasShrinkHeader = 1;
+ }
+
+ }
+ /* Use existing - destroy this one. */
+ yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
+
+ }
+
+ if (!in->valid &&
+ (tags.objectId == YAFFS_OBJECTID_ROOT ||
+ tags.objectId ==
+ YAFFS_OBJECTID_LOSTNFOUND)) {
+ /* We only load some info, don't fiddle with directory structure */
+ in->valid = 1;
+
+ if(oh) {
+ in->variantType = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+#ifdef CONFIG_YAFFS_WINCE
+ in->win_atime[0] = oh->win_atime[0];
+ in->win_ctime[0] = oh->win_ctime[0];
+ in->win_mtime[0] = oh->win_mtime[0];
+ in->win_atime[1] = oh->win_atime[1];
+ in->win_ctime[1] = oh->win_ctime[1];
+ in->win_mtime[1] = oh->win_mtime[1];
+#else
+ in->yst_uid = oh->yst_uid;
+ in->yst_gid = oh->yst_gid;
+ in->yst_atime = oh->yst_atime;
+ in->yst_mtime = oh->yst_mtime;
+ in->yst_ctime = oh->yst_ctime;
+ in->yst_rdev = oh->yst_rdev;
+
+#endif
+ } else {
+ in->variantType = tags.extraObjectType;
+ in->lazyLoaded = 1;
+ }
+
+ in->chunkId = chunk;
+
+ } else if (!in->valid) {
+ /* we need to load this info */
+
+ in->valid = 1;
+ in->chunkId = chunk;
+
+ if(oh) {
+ in->variantType = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+#ifdef CONFIG_YAFFS_WINCE
+ in->win_atime[0] = oh->win_atime[0];
+ in->win_ctime[0] = oh->win_ctime[0];
+ in->win_mtime[0] = oh->win_mtime[0];
+ in->win_atime[1] = oh->win_atime[1];
+ in->win_ctime[1] = oh->win_ctime[1];
+ in->win_mtime[1] = oh->win_mtime[1];
+#else
+ in->yst_uid = oh->yst_uid;
+ in->yst_gid = oh->yst_gid;
+ in->yst_atime = oh->yst_atime;
+ in->yst_mtime = oh->yst_mtime;
+ in->yst_ctime = oh->yst_ctime;
+ in->yst_rdev = oh->yst_rdev;
+#endif
+
+ if (oh->shadowsObject > 0)
+ yaffs_HandleShadowedObject(dev,
+ oh->
+ shadowsObject,
+ 1);
+
+
+ yaffs_SetObjectName(in, oh->name);
+ parent =
+ yaffs_FindOrCreateObjectByNumber
+ (dev, oh->parentObjectId,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+
+ fileSize = oh->fileSize;
+ isShrink = oh->isShrink;
+ equivalentObjectId = oh->equivalentObjectId;
+
+ }
+ else {
+ in->variantType = tags.extraObjectType;
+ parent =
+ yaffs_FindOrCreateObjectByNumber
+ (dev, tags.extraParentObjectId,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ fileSize = tags.extraFileLength;
+ isShrink = tags.extraIsShrinkHeader;
+ equivalentObjectId = tags.extraEquivalentObjectId;
+ in->lazyLoaded = 1;
+
+ }
+ in->dirty = 0;
+
+ /* directory stuff...
+ * hook up to parent
+ */
+
+ if (parent->variantType ==
+ YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variantType =
+ YAFFS_OBJECT_TYPE_DIRECTORY;
+ INIT_LIST_HEAD(&parent->variant.
+ directoryVariant.
+ children);
+ } else if (parent->variantType !=
+ YAFFS_OBJECT_TYPE_DIRECTORY)
+ {
+ /* Hoosterman, another problem....
+ * We're trying to use a non-directory as a directory
+ */
+
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("yaffs tragedy: attempting to use non-directory as"
+ " a directory in scan. Put in lost+found."
+ TENDSTR)));
+ parent = dev->lostNFoundDir;
+ }
+
+ yaffs_AddObjectToDirectory(parent, in);
+
+ itsUnlinked = (parent == dev->deletedDir) ||
+ (parent == dev->unlinkedDir);
+
+ if (isShrink) {
+ /* Mark the block as having a shrinkHeader */
+ bi->hasShrinkHeader = 1;
+ }
+
+ /* Note re hardlinks.
+ * Since we might scan a hardlink before its equivalent object is scanned
+ * we put them all in a list.
+ * After scanning is complete, we should have all the objects, so we run
+ * through this list and fix up all the chains.
+ */
+
+ switch (in->variantType) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+
+ if (in->variant.fileVariant.
+ scannedFileSize < fileSize) {
+ /* This covers the case where the file size is greater
+ * than where the data is
+ * This will happen if the file is resized to be larger
+ * than its current data extents.
+ */
+ in->variant.fileVariant.fileSize = fileSize;
+ in->variant.fileVariant.scannedFileSize =
+ in->variant.fileVariant.fileSize;
+ }
+
+ if (isShrink &&
+ in->variant.fileVariant.shrinkSize > fileSize) {
+ in->variant.fileVariant.shrinkSize = fileSize;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ if(!itsUnlinked) {
+ in->variant.hardLinkVariant.equivalentObjectId =
+ equivalentObjectId;
+ in->hardLinks.next =
+ (struct list_head *) hardList;
+ hardList = in;
+ }
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ if(oh){
+ in->variant.symLinkVariant.alias =
+ yaffs_CloneString(oh->
+ alias);
+ if(!in->variant.symLinkVariant.alias)
+ alloc_failed = 1;
+ }
+ break;
+ }
+
+ }
+
+ }
+
+ } /* End of scanning for each chunk */
+
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+ /* If we got this far while scanning, then the block is fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ }
+
+ bi->blockState = state;
+
+ /* Now let's see if it was dirty */
+ if (bi->pagesInUse == 0 &&
+ !bi->hasShrinkHeader &&
+ bi->blockState == YAFFS_BLOCK_STATE_FULL) {
+ yaffs_BlockBecameDirty(dev, blk);
+ }
+
+ }
+
+ if (altBlockIndex)
+ YFREE_ALT(blockIndex);
+ else
+ YFREE(blockIndex);
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+ * We should now have scanned all the objects, now it's time to add these
+ * hardlinks.
+ */
+ yaffs_HardlinkFixup(dev,hardList);
+
+
+ /*
+ * Sort out state of unlinked and deleted objects.
+ */
+ {
+ struct list_head *i;
+ struct list_head *n;
+
+ yaffs_Object *l;
+
+ /* Soft delete all the unlinked files */
+ list_for_each_safe(i, n,
+ &dev->unlinkedDir->variant.directoryVariant.
+ children) {
+ if (i) {
+ l = list_entry(i, yaffs_Object, siblings);
+ yaffs_DestroyObject(l);
+ }
+ }
+
+ /* Soft delete all the deletedDir files */
+ list_for_each_safe(i, n,
+ &dev->deletedDir->variant.directoryVariant.
+ children) {
+ if (i) {
+ l = list_entry(i, yaffs_Object, siblings);
+ yaffs_DestroyObject(l);
+
+ }
+ }
+ }
+
+ yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
+
+ if(alloc_failed){
+ return YAFFS_FAIL;
+ }
+
+ T(YAFFS_TRACE_SCAN, (TSTR("yaffs_ScanBackwards ends" TENDSTR)));
+
+ return YAFFS_OK;
+}
+
+/*------------------------------ Directory Functions ----------------------------- */
+
+static void yaffs_RemoveObjectFromDirectory(yaffs_Object * obj)
+{
+ yaffs_Device *dev = obj->myDev;
+
+ if(dev && dev->removeObjectCallback)
+ dev->removeObjectCallback(obj);
+
+ list_del_init(&obj->siblings);
+ obj->parent = NULL;
+}
+
+
+static void yaffs_AddObjectToDirectory(yaffs_Object * directory,
+ yaffs_Object * obj)
+{
+
+ if (!directory) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("tragedy: Trying to add an object to a null pointer directory"
+ TENDSTR)));
+ YBUG();
+ }
+ if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("tragedy: Trying to add an object to a non-directory"
+ TENDSTR)));
+ YBUG();
+ }
+
+ if (obj->siblings.prev == NULL) {
+ /* Not initialised */
+ INIT_LIST_HEAD(&obj->siblings);
+
+ } else if (!list_empty(&obj->siblings)) {
+		/* If it is holed up somewhere else, unhook it */
+ yaffs_RemoveObjectFromDirectory(obj);
+ }
+ /* Now add it */
+ list_add(&obj->siblings, &directory->variant.directoryVariant.children);
+ obj->parent = directory;
+
+ if (directory == obj->myDev->unlinkedDir
+ || directory == obj->myDev->deletedDir) {
+ obj->unlinked = 1;
+ obj->myDev->nUnlinkedFiles++;
+ obj->renameAllowed = 0;
+ }
+}
+
+yaffs_Object *yaffs_FindObjectByName(yaffs_Object * directory,
+ const YCHAR * name)
+{
+ int sum;
+
+ struct list_head *i;
+ YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
+
+ yaffs_Object *l;
+
+ if (!name) {
+ return NULL;
+ }
+
+ if (!directory) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("tragedy: yaffs_FindObjectByName: null pointer directory"
+ TENDSTR)));
+ YBUG();
+ }
+ if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("tragedy: yaffs_FindObjectByName: non-directory" TENDSTR)));
+ YBUG();
+ }
+
+ sum = yaffs_CalcNameSum(name);
+
+ list_for_each(i, &directory->variant.directoryVariant.children) {
+ if (i) {
+ l = list_entry(i, yaffs_Object, siblings);
+
+ yaffs_CheckObjectDetailsLoaded(l);
+
+ /* Special case for lost-n-found */
+ if (l->objectId == YAFFS_OBJECTID_LOSTNFOUND) {
+ if (yaffs_strcmp(name, YAFFS_LOSTNFOUND_NAME) == 0) {
+ return l;
+ }
+ } else if (yaffs_SumCompare(l->sum, sum) || l->chunkId <= 0)
+ {
+				/* LostnFound chunk called Objxxx
+ * Do a real check
+ */
+ yaffs_GetObjectName(l, buffer,
+ YAFFS_MAX_NAME_LENGTH);
+ if (yaffs_strncmp(name, buffer,YAFFS_MAX_NAME_LENGTH) == 0) {
+ return l;
+ }
+
+ }
+ }
+ }
+
+ return NULL;
+}
+
+
+#if 0
+int yaffs_ApplyToDirectoryChildren(yaffs_Object * theDir,
+ int (*fn) (yaffs_Object *))
+{
+ struct list_head *i;
+ yaffs_Object *l;
+
+ if (!theDir) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("tragedy: yaffs_FindObjectByName: null pointer directory"
+ TENDSTR)));
+ YBUG();
+ }
+ if (theDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("tragedy: yaffs_FindObjectByName: non-directory" TENDSTR)));
+ YBUG();
+ }
+
+ list_for_each(i, &theDir->variant.directoryVariant.children) {
+ if (i) {
+ l = list_entry(i, yaffs_Object, siblings);
+ if (l && !fn(l)) {
+ return YAFFS_FAIL;
+ }
+ }
+ }
+
+ return YAFFS_OK;
+
+}
+#endif
+
+/* GetEquivalentObject dereferences any hard links to get to the
+ * actual object.
+ */
+
+yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object * obj)
+{
+ if (obj && obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
+ /* We want the object id of the equivalent object, not this one */
+ obj = obj->variant.hardLinkVariant.equivalentObject;
+ yaffs_CheckObjectDetailsLoaded(obj);
+ }
+ return obj;
+
+}
+
+int yaffs_GetObjectName(yaffs_Object * obj, YCHAR * name, int buffSize)
+{
+ memset(name, 0, buffSize * sizeof(YCHAR));
+
+ yaffs_CheckObjectDetailsLoaded(obj);
+
+ if (obj->objectId == YAFFS_OBJECTID_LOSTNFOUND) {
+ yaffs_strncpy(name, YAFFS_LOSTNFOUND_NAME, buffSize - 1);
+ } else if (obj->chunkId <= 0) {
+ YCHAR locName[20];
+ /* make up a name */
+ yaffs_sprintf(locName, _Y("%s%d"), YAFFS_LOSTNFOUND_PREFIX,
+ obj->objectId);
+ yaffs_strncpy(name, locName, buffSize - 1);
+
+ }
+#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+ else if (obj->shortName[0]) {
+ yaffs_strcpy(name, obj->shortName);
+ }
+#endif
+ else {
+ int result;
+ __u8 *buffer = yaffs_GetTempBuffer(obj->myDev, __LINE__);
+
+ yaffs_ObjectHeader *oh = (yaffs_ObjectHeader *) buffer;
+
+ memset(buffer, 0, obj->myDev->nDataBytesPerChunk);
+
+ if (obj->chunkId >= 0) {
+ result = yaffs_ReadChunkWithTagsFromNAND(obj->myDev,
+ obj->chunkId, buffer,
+ NULL);
+ }
+ yaffs_strncpy(name, oh->name, buffSize - 1);
+
+ yaffs_ReleaseTempBuffer(obj->myDev, buffer, __LINE__);
+ }
+
+ return yaffs_strlen(name);
+}
+
+int yaffs_GetObjectFileLength(yaffs_Object * obj)
+{
+
+ /* Dereference any hard linking */
+ obj = yaffs_GetEquivalentObject(obj);
+
+ if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
+ return obj->variant.fileVariant.fileSize;
+ }
+ if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK) {
+ return yaffs_strlen(obj->variant.symLinkVariant.alias);
+ } else {
+ /* Only a directory should drop through to here */
+ return obj->myDev->nDataBytesPerChunk;
+ }
+}
+
+int yaffs_GetObjectLinkCount(yaffs_Object * obj)
+{
+ int count = 0;
+ struct list_head *i;
+
+ if (!obj->unlinked) {
+ count++; /* the object itself */
+ }
+ list_for_each(i, &obj->hardLinks) {
+ count++; /* add the hard links; */
+ }
+ return count;
+
+}
+
+int yaffs_GetObjectInode(yaffs_Object * obj)
+{
+ obj = yaffs_GetEquivalentObject(obj);
+
+ return obj->objectId;
+}
+
+unsigned yaffs_GetObjectType(yaffs_Object * obj)
+{
+ obj = yaffs_GetEquivalentObject(obj);
+
+ switch (obj->variantType) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return DT_REG;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ return DT_DIR;
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ return DT_LNK;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ return DT_REG;
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ if (S_ISFIFO(obj->yst_mode))
+ return DT_FIFO;
+ if (S_ISCHR(obj->yst_mode))
+ return DT_CHR;
+ if (S_ISBLK(obj->yst_mode))
+ return DT_BLK;
+ if (S_ISSOCK(obj->yst_mode))
+ return DT_SOCK;
+ default:
+ return DT_REG;
+ break;
+ }
+}
+
+YCHAR *yaffs_GetSymlinkAlias(yaffs_Object * obj)
+{
+ obj = yaffs_GetEquivalentObject(obj);
+ if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK) {
+ return yaffs_CloneString(obj->variant.symLinkVariant.alias);
+ } else {
+ return yaffs_CloneString(_Y(""));
+ }
+}
+
+#ifndef CONFIG_YAFFS_WINCE
+
+int yaffs_SetAttributes(yaffs_Object * obj, struct iattr *attr)
+{
+ unsigned int valid = attr->ia_valid;
+
+ if (valid & ATTR_MODE)
+ obj->yst_mode = attr->ia_mode;
+ if (valid & ATTR_UID)
+ obj->yst_uid = attr->ia_uid;
+ if (valid & ATTR_GID)
+ obj->yst_gid = attr->ia_gid;
+
+ if (valid & ATTR_ATIME)
+ obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
+ if (valid & ATTR_CTIME)
+ obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
+ if (valid & ATTR_MTIME)
+ obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
+
+ if (valid & ATTR_SIZE)
+ yaffs_ResizeFile(obj, attr->ia_size);
+
+ yaffs_UpdateObjectHeader(obj, NULL, 1, 0, 0);
+
+ return YAFFS_OK;
+
+}
+int yaffs_GetAttributes(yaffs_Object * obj, struct iattr *attr)
+{
+ unsigned int valid = 0;
+
+ attr->ia_mode = obj->yst_mode;
+ valid |= ATTR_MODE;
+ attr->ia_uid = obj->yst_uid;
+ valid |= ATTR_UID;
+ attr->ia_gid = obj->yst_gid;
+ valid |= ATTR_GID;
+
+ Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
+ valid |= ATTR_ATIME;
+ Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
+ valid |= ATTR_CTIME;
+ Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
+ valid |= ATTR_MTIME;
+
+ attr->ia_size = yaffs_GetFileSize(obj);
+ valid |= ATTR_SIZE;
+
+ attr->ia_valid = valid;
+
+ return YAFFS_OK;
+
+}
+
+#endif
+
+#if 0
+int yaffs_DumpObject(yaffs_Object * obj)
+{
+ YCHAR name[257];
+
+ yaffs_GetObjectName(obj, name, 256);
+
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("Object %d, inode %d \"%s\"\n dirty %d valid %d serial %d sum %d"
+ " chunk %d type %d size %d\n"
+ TENDSTR), obj->objectId, yaffs_GetObjectInode(obj), name,
+ obj->dirty, obj->valid, obj->serial, obj->sum, obj->chunkId,
+ yaffs_GetObjectType(obj), yaffs_GetObjectFileLength(obj)));
+
+ return YAFFS_OK;
+}
+#endif
+
+/*---------------------------- Initialisation code -------------------------------------- */
+
+static int yaffs_CheckDevFunctions(const yaffs_Device * dev)
+{
+
+ /* Common functions, gotta have */
+ if (!dev->eraseBlockInNAND || !dev->initialiseNAND)
+ return 0;
+
+#ifdef CONFIG_YAFFS_YAFFS2
+
+ /* Can use the "with tags" style interface for yaffs1 or yaffs2 */
+ if (dev->writeChunkWithTagsToNAND &&
+ dev->readChunkWithTagsFromNAND &&
+ !dev->writeChunkToNAND &&
+ !dev->readChunkFromNAND &&
+ dev->markNANDBlockBad && dev->queryNANDBlock)
+ return 1;
+#endif
+
+ /* Can use the "spare" style interface for yaffs1 */
+ if (!dev->isYaffs2 &&
+ !dev->writeChunkWithTagsToNAND &&
+ !dev->readChunkWithTagsFromNAND &&
+ dev->writeChunkToNAND &&
+ dev->readChunkFromNAND &&
+ !dev->markNANDBlockBad && !dev->queryNANDBlock)
+ return 1;
+
+ return 0; /* bad */
+}
+
+
+static int yaffs_CreateInitialDirectories(yaffs_Device *dev)
+{
+ /* Initialise the unlinked, deleted, root and lost and found directories */
+
+ dev->lostNFoundDir = dev->rootDir = NULL;
+ dev->unlinkedDir = dev->deletedDir = NULL;
+
+ dev->unlinkedDir =
+ yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
+
+ dev->deletedDir =
+ yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
+
+ dev->rootDir =
+ yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_ROOT,
+ YAFFS_ROOT_MODE | S_IFDIR);
+ dev->lostNFoundDir =
+ yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_LOSTNFOUND,
+ YAFFS_LOSTNFOUND_MODE | S_IFDIR);
+
+ if(dev->lostNFoundDir && dev->rootDir && dev->unlinkedDir && dev->deletedDir){
+ yaffs_AddObjectToDirectory(dev->rootDir, dev->lostNFoundDir);
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+}
+
+int yaffs_GutsInitialise(yaffs_Device * dev)
+{
+ int init_failed = 0;
+ unsigned x;
+ int bits;
+
+ T(YAFFS_TRACE_TRACING, (TSTR("yaffs: yaffs_GutsInitialise()" TENDSTR)));
+
+ /* Check stuff that must be set */
+
+ if (!dev) {
+ T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Need a device" TENDSTR)));
+ return YAFFS_FAIL;
+ }
+
+ dev->internalStartBlock = dev->startBlock;
+ dev->internalEndBlock = dev->endBlock;
+ dev->blockOffset = 0;
+ dev->chunkOffset = 0;
+ dev->nFreeChunks = 0;
+
+ if (dev->startBlock == 0) {
+ dev->internalStartBlock = dev->startBlock + 1;
+ dev->internalEndBlock = dev->endBlock + 1;
+ dev->blockOffset = 1;
+ dev->chunkOffset = dev->nChunksPerBlock;
+ }
+
+ /* Check geometry parameters. */
+
+ if ((dev->isYaffs2 && dev->nDataBytesPerChunk < 1024) ||
+ (!dev->isYaffs2 && dev->nDataBytesPerChunk != 512) ||
+ dev->nChunksPerBlock < 2 ||
+ dev->nReservedBlocks < 2 ||
+ dev->internalStartBlock <= 0 ||
+ dev->internalEndBlock <= 0 ||
+ dev->internalEndBlock <= (dev->internalStartBlock + dev->nReservedBlocks + 2) // otherwise it is too small
+ ) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("yaffs: NAND geometry problems: chunk size %d, type is yaffs%s "
+ TENDSTR), dev->nDataBytesPerChunk, dev->isYaffs2 ? "2" : ""));
+ return YAFFS_FAIL;
+ }
+
+ if (yaffs_InitialiseNAND(dev) != YAFFS_OK) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs: InitialiseNAND failed" TENDSTR)));
+ return YAFFS_FAIL;
+ }
+
+ /* Got the right mix of functions? */
+ if (!yaffs_CheckDevFunctions(dev)) {
+ /* Function missing */
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("yaffs: device function(s) missing or wrong\n" TENDSTR)));
+
+ return YAFFS_FAIL;
+ }
+
+ /* This is really a compilation check. */
+ if (!yaffs_CheckStructures()) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs_CheckStructures failed\n" TENDSTR)));
+ return YAFFS_FAIL;
+ }
+
+ if (dev->isMounted) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs: device already mounted\n" TENDSTR)));
+ return YAFFS_FAIL;
+ }
+
+ /* Finished with most checks. One or two more checks happen later on too. */
+
+ dev->isMounted = 1;
+
+
+
+ /* OK now calculate a few things for the device */
+
+ /*
+ * Calculate all the chunk size manipulation numbers:
+ */
+ /* Start off assuming it is a power of 2 */
+ dev->chunkShift = ShiftDiv(dev->nDataBytesPerChunk);
+ dev->chunkMask = (1<<dev->chunkShift) - 1;
+
+ if(dev->nDataBytesPerChunk == (dev->chunkMask + 1)){
+ /* Yes it is a power of 2, disable crumbs */
+ dev->crumbMask = 0;
+ dev->crumbShift = 0;
+ dev->crumbsPerChunk = 0;
+ } else {
+ /* Not a power of 2, use crumbs instead */
+ dev->crumbShift = ShiftDiv(sizeof(yaffs_PackedTags2TagsPart));
+ dev->crumbMask = (1<<dev->crumbShift)-1;
+ dev->crumbsPerChunk = dev->nDataBytesPerChunk/(1 << dev->crumbShift);
+ dev->chunkShift = 0;
+ dev->chunkMask = 0;
+ }
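+	/* Worked example (illustrative, assuming ShiftDiv() returns the shift
+	 * for an exact power of 2): a 2048-byte chunk gives chunkShift = 11 and
+	 * chunkMask = 0x7ff, so the crumb fields stay zero; crumbs only come
+	 * into play when nDataBytesPerChunk is not a power of 2.
+	 */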
+
+
+ /*
+ * Calculate chunkGroupBits.
+	 * We need enough bits to cover the highest possible internal chunk number.
+ */
+
+ x = dev->nChunksPerBlock * (dev->internalEndBlock + 1);
+
+ bits = ShiftsGE(x);
+
+ /* Set up tnode width if wide tnodes are enabled. */
+ if(!dev->wideTnodesDisabled){
+ /* bits must be even so that we end up with 32-bit words */
+ if(bits & 1)
+ bits++;
+ if(bits < 16)
+ dev->tnodeWidth = 16;
+ else
+ dev->tnodeWidth = bits;
+ }
+ else
+ dev->tnodeWidth = 16;
+
+ dev->tnodeMask = (1<<dev->tnodeWidth)-1;
+
+ /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
+ * so if the bitwidth of the
+ * chunk range we're using is greater than 16 we need
+ * to figure out chunk shift and chunkGroupSize
+ */
+
+ if (bits <= dev->tnodeWidth)
+ dev->chunkGroupBits = 0;
+ else
+ dev->chunkGroupBits = bits - dev->tnodeWidth;
+
+
+ dev->chunkGroupSize = 1 << dev->chunkGroupBits;
+
+ if (dev->nChunksPerBlock < dev->chunkGroupSize) {
+ /* We have a problem because the soft delete won't work if
+ * the chunk group size > chunks per block.
+ * This can be remedied by using larger "virtual blocks".
+ */
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs: chunk group too large\n" TENDSTR)));
+
+ return YAFFS_FAIL;
+ }
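+	/* Worked example (illustrative, assuming ShiftsGE(x) returns the
+	 * smallest n with (1 << n) >= x): 2048 blocks of 64 chunks give
+	 * x = 131072, so bits = 17, rounded up to 18 for wide tnodes, giving
+	 * tnodeWidth = 18 and chunkGroupBits = 0 (no chunk grouping needed).
+	 */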
+
+ /* OK, we've finished verifying the device, lets continue with initialisation */
+
+ /* More device initialisation */
+ dev->garbageCollections = 0;
+ dev->passiveGarbageCollections = 0;
+ dev->currentDirtyChecker = 0;
+ dev->bufferedBlock = -1;
+ dev->doingBufferedBlockRewrite = 0;
+ dev->nDeletedFiles = 0;
+ dev->nBackgroundDeletions = 0;
+ dev->nUnlinkedFiles = 0;
+ dev->eccFixed = 0;
+ dev->eccUnfixed = 0;
+ dev->tagsEccFixed = 0;
+ dev->tagsEccUnfixed = 0;
+ dev->nErasureFailures = 0;
+ dev->nErasedBlocks = 0;
+ dev->isDoingGC = 0;
+ dev->hasPendingPrioritisedGCs = 1; /* Assume the worst for now, will get fixed on first GC */
+
+ /* Initialise temporary buffers and caches. */
+ if(!yaffs_InitialiseTempBuffers(dev))
+ init_failed = 1;
+
+ dev->srCache = NULL;
+ dev->gcCleanupList = NULL;
+
+
+ if (!init_failed &&
+ dev->nShortOpCaches > 0) {
+ int i;
+ __u8 *buf;
+ int srCacheBytes = dev->nShortOpCaches * sizeof(yaffs_ChunkCache);
+
+ if (dev->nShortOpCaches > YAFFS_MAX_SHORT_OP_CACHES) {
+ dev->nShortOpCaches = YAFFS_MAX_SHORT_OP_CACHES;
+ }
+
+ buf = dev->srCache = YMALLOC(srCacheBytes);
+
+ if(dev->srCache)
+ memset(dev->srCache,0,srCacheBytes);
+
+ for (i = 0; i < dev->nShortOpCaches && buf; i++) {
+ dev->srCache[i].object = NULL;
+ dev->srCache[i].lastUse = 0;
+ dev->srCache[i].dirty = 0;
+ dev->srCache[i].data = buf = YMALLOC_DMA(dev->nDataBytesPerChunk);
+ }
+ if(!buf)
+ init_failed = 1;
+
+ dev->srLastUse = 0;
+ }
+
+ dev->cacheHits = 0;
+
+ if(!init_failed){
+ dev->gcCleanupList = YMALLOC(dev->nChunksPerBlock * sizeof(__u32));
+ if(!dev->gcCleanupList)
+ init_failed = 1;
+ }
+
+ if (dev->isYaffs2) {
+ dev->useHeaderFileSize = 1;
+ }
+ if(!init_failed && !yaffs_InitialiseBlocks(dev))
+ init_failed = 1;
+
+ yaffs_InitialiseTnodes(dev);
+ yaffs_InitialiseObjects(dev);
+
+ if(!init_failed && !yaffs_CreateInitialDirectories(dev))
+ init_failed = 1;
+
+
+ if(!init_failed){
+ /* Now scan the flash. */
+ if (dev->isYaffs2) {
+ if(yaffs_CheckpointRestore(dev)) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs: restored from checkpoint" TENDSTR)));
+ } else {
+
+ /* Clean up the mess caused by an aborted checkpoint load
+ * and scan backwards.
+ */
+ yaffs_DeinitialiseBlocks(dev);
+ yaffs_DeinitialiseTnodes(dev);
+ yaffs_DeinitialiseObjects(dev);
+
+
+ dev->nErasedBlocks = 0;
+ dev->nFreeChunks = 0;
+ dev->allocationBlock = -1;
+ dev->allocationPage = -1;
+ dev->nDeletedFiles = 0;
+ dev->nUnlinkedFiles = 0;
+ dev->nBackgroundDeletions = 0;
+ dev->oldestDirtySequence = 0;
+
+ if(!init_failed && !yaffs_InitialiseBlocks(dev))
+ init_failed = 1;
+
+ yaffs_InitialiseTnodes(dev);
+ yaffs_InitialiseObjects(dev);
+
+ if(!init_failed && !yaffs_CreateInitialDirectories(dev))
+ init_failed = 1;
+
+ if(!init_failed && !yaffs_ScanBackwards(dev))
+ init_failed = 1;
+ }
+ }else
+ if(!yaffs_Scan(dev))
+ init_failed = 1;
+ }
+
+ if(init_failed){
+ /* Clean up the mess */
+ T(YAFFS_TRACE_TRACING,
+ (TSTR("yaffs: yaffs_GutsInitialise() aborted.\n" TENDSTR)));
+
+ yaffs_Deinitialise(dev);
+ return YAFFS_FAIL;
+ }
+
+ /* Zero out stats */
+ dev->nPageReads = 0;
+ dev->nPageWrites = 0;
+ dev->nBlockErasures = 0;
+ dev->nGCCopies = 0;
+ dev->nRetriedWrites = 0;
+
+ dev->nRetiredBlocks = 0;
+
+ yaffs_VerifyFreeChunks(dev);
+ yaffs_VerifyBlocks(dev);
+
+
+ T(YAFFS_TRACE_TRACING,
+ (TSTR("yaffs: yaffs_GutsInitialise() done.\n" TENDSTR)));
+ return YAFFS_OK;
+
+}
+
+void yaffs_Deinitialise(yaffs_Device * dev)
+{
+ if (dev->isMounted) {
+ int i;
+
+ yaffs_DeinitialiseBlocks(dev);
+ yaffs_DeinitialiseTnodes(dev);
+ yaffs_DeinitialiseObjects(dev);
+ if (dev->nShortOpCaches > 0 &&
+ dev->srCache) {
+
+ for (i = 0; i < dev->nShortOpCaches; i++) {
+ if(dev->srCache[i].data)
+ YFREE(dev->srCache[i].data);
+ dev->srCache[i].data = NULL;
+ }
+
+ YFREE(dev->srCache);
+ dev->srCache = NULL;
+ }
+
+ YFREE(dev->gcCleanupList);
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ YFREE(dev->tempBuffer[i].buffer);
+ }
+
+ dev->isMounted = 0;
+ }
+
+}
+
+static int yaffs_CountFreeChunks(yaffs_Device * dev)
+{
+ int nFree;
+ int b;
+
+ yaffs_BlockInfo *blk;
+
+ for (nFree = 0, b = dev->internalStartBlock; b <= dev->internalEndBlock;
+ b++) {
+ blk = yaffs_GetBlockInfo(dev, b);
+
+ switch (blk->blockState) {
+ case YAFFS_BLOCK_STATE_EMPTY:
+ case YAFFS_BLOCK_STATE_ALLOCATING:
+ case YAFFS_BLOCK_STATE_COLLECTING:
+ case YAFFS_BLOCK_STATE_FULL:
+ nFree +=
+ (dev->nChunksPerBlock - blk->pagesInUse +
+ blk->softDeletions);
+ break;
+ default:
+ break;
+ }
+
+ }
+
+ return nFree;
+}
+
+int yaffs_GetNumberOfFreeChunks(yaffs_Device * dev)
+{
+ /* This is what we report to the outside world */
+
+ int nFree;
+ int nDirtyCacheChunks;
+ int blocksForCheckpoint;
+
+#if 1
+ nFree = dev->nFreeChunks;
+#else
+ nFree = yaffs_CountFreeChunks(dev);
+#endif
+
+ nFree += dev->nDeletedFiles;
+
+ /* Now count the number of dirty chunks in the cache and subtract those */
+
+ {
+ int i;
+ for (nDirtyCacheChunks = 0, i = 0; i < dev->nShortOpCaches; i++) {
+ if (dev->srCache[i].dirty)
+ nDirtyCacheChunks++;
+ }
+ }
+
+ nFree -= nDirtyCacheChunks;
+
+ nFree -= ((dev->nReservedBlocks + 1) * dev->nChunksPerBlock);
+
+ /* Now we figure out how much to reserve for the checkpoint and report that... */
+ blocksForCheckpoint = dev->nCheckpointReservedBlocks - dev->blocksInCheckpoint;
+ if(blocksForCheckpoint < 0)
+ blocksForCheckpoint = 0;
+
+ nFree -= (blocksForCheckpoint * dev->nChunksPerBlock);
+
+ if (nFree < 0)
+ nFree = 0;
+
+ return nFree;
+
+}
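+/* Illustrative arithmetic for the reported figure above: with
+ * nFreeChunks = 10000, no deleted files, 2 dirty cache chunks, 64 chunks per
+ * block, 5 reserved blocks and no outstanding checkpoint reservation, the
+ * reported value is 10000 - 2 - (5 + 1) * 64 = 9614.
+ */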
+
+static int yaffs_freeVerificationFailures;
+
+static void yaffs_VerifyFreeChunks(yaffs_Device * dev)
+{
+ int counted;
+ int difference;
+
+ if(yaffs_SkipVerification(dev))
+ return;
+
+ counted = yaffs_CountFreeChunks(dev);
+
+ difference = dev->nFreeChunks - counted;
+
+ if (difference) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("Freechunks verification failure %d %d %d" TENDSTR),
+ dev->nFreeChunks, counted, difference));
+ yaffs_freeVerificationFailures++;
+ }
+}
+
+/*---------------------------------------- YAFFS test code ----------------------*/
+
+#define yaffs_CheckStruct(structure,syze, name) \
+ if(sizeof(structure) != syze) \
+ { \
+ T(YAFFS_TRACE_ALWAYS,(TSTR("%s should be %d but is %d\n" TENDSTR),\
+ name,syze,sizeof(structure))); \
+ return YAFFS_FAIL; \
+ }
+
+static int yaffs_CheckStructures(void)
+{
+/* yaffs_CheckStruct(yaffs_Tags,8,"yaffs_Tags") */
+/* yaffs_CheckStruct(yaffs_TagsUnion,8,"yaffs_TagsUnion") */
+/* yaffs_CheckStruct(yaffs_Spare,16,"yaffs_Spare") */
+#ifndef CONFIG_YAFFS_TNODE_LIST_DEBUG
+ yaffs_CheckStruct(yaffs_Tnode, 2 * YAFFS_NTNODES_LEVEL0, "yaffs_Tnode")
+#endif
+ yaffs_CheckStruct(yaffs_ObjectHeader, 512, "yaffs_ObjectHeader")
+
+ return YAFFS_OK;
+}
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_guts.h b/target/linux/generic/files/fs/yaffs2/yaffs_guts.h
new file mode 100644
index 000000000..ea06c1a38
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_guts.h
@@ -0,0 +1,902 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GUTS_H__
+#define __YAFFS_GUTS_H__
+
+#include "devextras.h"
+#include "yportenv.h"
+
+#define YAFFS_OK 1
+#define YAFFS_FAIL 0
+
+/* Give us a Y=0x59,
+ * Give us an A=0x41,
+ * Give us an FF=0xFF
+ * Give us an S=0x53
+ * And what have we got...
+ */
+#define YAFFS_MAGIC 0x5941FF53
+
+#define YAFFS_NTNODES_LEVEL0 16
+#define YAFFS_TNODES_LEVEL0_BITS 4
+#define YAFFS_TNODES_LEVEL0_MASK 0xf
+
+#define YAFFS_NTNODES_INTERNAL (YAFFS_NTNODES_LEVEL0 / 2)
+#define YAFFS_TNODES_INTERNAL_BITS (YAFFS_TNODES_LEVEL0_BITS - 1)
+#define YAFFS_TNODES_INTERNAL_MASK 0x7
+#define YAFFS_TNODES_MAX_LEVEL 6
+
+#ifndef CONFIG_YAFFS_NO_YAFFS1
+#define YAFFS_BYTES_PER_SPARE 16
+#define YAFFS_BYTES_PER_CHUNK 512
+#define YAFFS_CHUNK_SIZE_SHIFT 9
+#define YAFFS_CHUNKS_PER_BLOCK 32
+#define YAFFS_BYTES_PER_BLOCK (YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
+#endif
+
+#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 1024
+#define YAFFS_MIN_YAFFS2_SPARE_SIZE 32
+
+#define YAFFS_MAX_CHUNK_ID 0x000FFFFF
+
+#define YAFFS_UNUSED_OBJECT_ID 0x0003FFFF
+
+#define YAFFS_ALLOCATION_NOBJECTS 100
+#define YAFFS_ALLOCATION_NTNODES 100
+#define YAFFS_ALLOCATION_NLINKS 100
+
+#define YAFFS_NOBJECT_BUCKETS 256
+
+
+#define YAFFS_OBJECT_SPACE 0x40000
+
+#define YAFFS_CHECKPOINT_VERSION 3
+
+#ifdef CONFIG_YAFFS_UNICODE
+#define YAFFS_MAX_NAME_LENGTH 127
+#define YAFFS_MAX_ALIAS_LENGTH 79
+#else
+#define YAFFS_MAX_NAME_LENGTH 255
+#define YAFFS_MAX_ALIAS_LENGTH 159
+#endif
+
+#define YAFFS_SHORT_NAME_LENGTH 15
+
+/* Some special object ids for pseudo objects */
+#define YAFFS_OBJECTID_ROOT 1
+#define YAFFS_OBJECTID_LOSTNFOUND 2
+#define YAFFS_OBJECTID_UNLINKED 3
+#define YAFFS_OBJECTID_DELETED 4
+
+/* Pseudo object ids for checkpointing */
+#define YAFFS_OBJECTID_SB_HEADER 0x10
+#define YAFFS_OBJECTID_CHECKPOINT_DATA 0x20
+#define YAFFS_SEQUENCE_CHECKPOINT_DATA 0x21
+
+/* */
+
+#define YAFFS_MAX_SHORT_OP_CACHES 20
+
+#define YAFFS_N_TEMP_BUFFERS 4
+
+/* We limit the number of attempts at successfully saving a chunk of data.
+ * Small-page devices have 32 pages per block; large-page devices have 64.
+ * Default to something in the order of 5 to 10 blocks worth of chunks.
+ */
+#define YAFFS_WR_ATTEMPTS (5*64)
+
+/* Sequence numbers are used in YAFFS2 to determine block allocation order.
+ * The range is limited slightly to help distinguish bad numbers from good.
+ * This also allows us to perhaps in the future use special numbers for
+ * special purposes.
+ * EFFFFF00 allows the allocation of 8 blocks per second (~1Mbytes) for 15 years,
+ * and is a larger number than the lifetime of a 2GB device.
+ */
+#define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000
+#define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xEFFFFF00
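+/* Rough check of the lifetime claim above (illustrative): 8 blocks per second
+ * for 15 years is about 8 * 60 * 60 * 24 * 365 * 15 ~= 3.8e9 allocations,
+ * which still fits below 0xEFFFFF00 (~4.0e9).
+ */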
+
+/* ChunkCache is used for short read/write operations.*/
+typedef struct {
+ struct yaffs_ObjectStruct *object;
+ int chunkId;
+ int lastUse;
+ int dirty;
+ int nBytes; /* Only valid if the cache is dirty */
+ int locked; /* Can't push out or flush while locked. */
+#ifdef CONFIG_YAFFS_YAFFS2
+ __u8 *data;
+#else
+ __u8 data[YAFFS_BYTES_PER_CHUNK];
+#endif
+} yaffs_ChunkCache;
+
+
+
+/* Tags structures in RAM
+ * NB This uses bitfields. Bitfields should not straddle a u32 boundary, otherwise
+ * the structure size will get blown out.
+ */
+
+#ifndef CONFIG_YAFFS_NO_YAFFS1
+typedef struct {
+ unsigned chunkId:20;
+ unsigned serialNumber:2;
+ unsigned byteCount:10;
+ unsigned objectId:18;
+ unsigned ecc:12;
+ unsigned unusedStuff:2;
+
+} yaffs_Tags;
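+/* Bit accounting for the note above (illustrative): 20 + 2 + 10 = 32 bits in
+ * the first word and 18 + 12 + 2 = 32 bits in the second, so no field
+ * straddles a __u32 boundary and the union below can alias it as 8 bytes.
+ */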
+
+typedef union {
+ yaffs_Tags asTags;
+ __u8 asBytes[8];
+} yaffs_TagsUnion;
+
+#endif
+
+/* Stuff used for extended tags in YAFFS2 */
+
+typedef enum {
+ YAFFS_ECC_RESULT_UNKNOWN,
+ YAFFS_ECC_RESULT_NO_ERROR,
+ YAFFS_ECC_RESULT_FIXED,
+ YAFFS_ECC_RESULT_UNFIXED
+} yaffs_ECCResult;
+
+typedef enum {
+ YAFFS_OBJECT_TYPE_UNKNOWN,
+ YAFFS_OBJECT_TYPE_FILE,
+ YAFFS_OBJECT_TYPE_SYMLINK,
+ YAFFS_OBJECT_TYPE_DIRECTORY,
+ YAFFS_OBJECT_TYPE_HARDLINK,
+ YAFFS_OBJECT_TYPE_SPECIAL
+} yaffs_ObjectType;
+
+#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
+
+typedef struct {
+
+ unsigned validMarker0;
+ unsigned chunkUsed; /* Status of the chunk: used or unused */
+ unsigned objectId; /* If 0 then this is not part of an object (unused) */
+ unsigned chunkId; /* If 0 then this is a header, else a data chunk */
+ unsigned byteCount; /* Only valid for data chunks */
+
+ /* The following stuff only has meaning when we read */
+ yaffs_ECCResult eccResult;
+ unsigned blockBad;
+
+ /* YAFFS 1 stuff */
+ unsigned chunkDeleted; /* The chunk is marked deleted */
+ unsigned serialNumber; /* Yaffs1 2-bit serial number */
+
+ /* YAFFS2 stuff */
+ unsigned sequenceNumber; /* The sequence number of this block */
+
+ /* Extra info if this is an object header (YAFFS2 only) */
+
+ unsigned extraHeaderInfoAvailable; /* There is extra info available if this is not zero */
+ unsigned extraParentObjectId; /* The parent object */
+ unsigned extraIsShrinkHeader; /* Is it a shrink header? */
+ unsigned extraShadows; /* Does this shadow another object? */
+
+ yaffs_ObjectType extraObjectType; /* What object type? */
+
+ unsigned extraFileLength; /* Length if it is a file */
+ unsigned extraEquivalentObjectId; /* Equivalent object Id if it is a hard link */
+
+ unsigned validMarker1;
+
+} yaffs_ExtendedTags;
+
+/* Spare structure for YAFFS1 */
+typedef struct {
+ __u8 tagByte0;
+ __u8 tagByte1;
+ __u8 tagByte2;
+ __u8 tagByte3;
+ __u8 pageStatus; /* set to 0 to delete the chunk */
+ __u8 blockStatus;
+ __u8 tagByte4;
+ __u8 tagByte5;
+ __u8 ecc1[3];
+ __u8 tagByte6;
+ __u8 tagByte7;
+ __u8 ecc2[3];
+} yaffs_Spare;
+
+/* Special structure for passing through to mtd */
+struct yaffs_NANDSpare {
+ yaffs_Spare spare;
+ int eccres1;
+ int eccres2;
+};
+
+/* Block data in RAM */
+
+typedef enum {
+ YAFFS_BLOCK_STATE_UNKNOWN = 0,
+
+ YAFFS_BLOCK_STATE_SCANNING,
+ YAFFS_BLOCK_STATE_NEEDS_SCANNING,
+ /* The block might have something on it (ie it is allocating or full, perhaps empty)
+ * but it needs to be scanned to determine its true state.
+ * This state is only valid during yaffs_Scan.
+ * NB We tolerate empty because the pre-scanner might be incapable of deciding
+ * However, if this state is returned on a YAFFS2 device, then we expect a sequence number
+ */
+
+ YAFFS_BLOCK_STATE_EMPTY,
+ /* This block is empty */
+
+ YAFFS_BLOCK_STATE_ALLOCATING,
+ /* This block is partially allocated.
+ * At least one page holds valid data.
+ * This is the one currently being used for page
+ * allocation. Should never be more than one of these
+ */
+
+ YAFFS_BLOCK_STATE_FULL,
+ /* All the pages in this block have been allocated.
+ */
+
+ YAFFS_BLOCK_STATE_DIRTY,
+ /* All pages have been allocated and deleted.
+ * Erase me, reuse me.
+ */
+
+ YAFFS_BLOCK_STATE_CHECKPOINT,
+ /* This block is assigned to holding checkpoint data.
+ */
+
+ YAFFS_BLOCK_STATE_COLLECTING,
+ /* This block is being garbage collected */
+
+ YAFFS_BLOCK_STATE_DEAD
+ /* This block has failed and is not in use */
+} yaffs_BlockState;
+
+#define YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
+
+
+typedef struct {
+
+ int softDeletions:10; /* number of soft deleted pages */
+ int pagesInUse:10; /* number of pages in use */
+ yaffs_BlockState blockState:4; /* One of the above block states */
+ __u32 needsRetiring:1; /* Data has failed on this block, need to get valid data off */
+ /* and retire the block. */
+ __u32 skipErasedCheck: 1; /* If this is set we can skip the erased check on this block */
+ __u32 gcPrioritise: 1; /* An ECC check or blank check has failed on this block.
+ It should be prioritised for GC */
+ __u32 chunkErrorStrikes:3; /* How many times we've had ecc etc failures on this block and tried to reuse it */
+
+#ifdef CONFIG_YAFFS_YAFFS2
+ __u32 hasShrinkHeader:1; /* This block has at least one shrink object header */
+ __u32 sequenceNumber; /* block sequence number for yaffs2 */
+#endif
+
+} yaffs_BlockInfo;
+
+/* -------------------------- Object structure -------------------------------*/
+/* This is the object structure as stored on NAND */
+
+typedef struct {
+ yaffs_ObjectType type;
+
+ /* Apply to everything */
+ int parentObjectId;
+ __u16 sum__NoLongerUsed; /* checksum of name. No longer used */
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+	/* The following apply to directories, files, symlinks - not hard links */
+ __u32 yst_mode; /* protection */
+
+#ifdef CONFIG_YAFFS_WINCE
+ __u32 notForWinCE[5];
+#else
+ __u32 yst_uid;
+ __u32 yst_gid;
+ __u32 yst_atime;
+ __u32 yst_mtime;
+ __u32 yst_ctime;
+#endif
+
+ /* File size applies to files only */
+ int fileSize;
+
+ /* Equivalent object id applies to hard links only. */
+ int equivalentObjectId;
+
+ /* Alias is for symlinks only. */
+ YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
+
+ __u32 yst_rdev; /* device stuff for block and char devices (major/min) */
+
+#ifdef CONFIG_YAFFS_WINCE
+ __u32 win_ctime[2];
+ __u32 win_atime[2];
+ __u32 win_mtime[2];
+ __u32 roomToGrow[4];
+#else
+ __u32 roomToGrow[10];
+#endif
+
+ int shadowsObject; /* This object header shadows the specified object if > 0 */
+
+ /* isShrink applies to object headers written when we shrink the file (ie resize) */
+ __u32 isShrink;
+
+} yaffs_ObjectHeader;
+
+/*--------------------------- Tnode -------------------------- */
+
+union yaffs_Tnode_union {
+#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+ union yaffs_Tnode_union *internal[YAFFS_NTNODES_INTERNAL + 1];
+#else
+ union yaffs_Tnode_union *internal[YAFFS_NTNODES_INTERNAL];
+#endif
+/* __u16 level0[YAFFS_NTNODES_LEVEL0]; */
+
+};
+
+typedef union yaffs_Tnode_union yaffs_Tnode;
+
+struct yaffs_TnodeList_struct {
+ struct yaffs_TnodeList_struct *next;
+ yaffs_Tnode *tnodes;
+};
+
+typedef struct yaffs_TnodeList_struct yaffs_TnodeList;
+
+/*------------------------ Object -----------------------------*/
+/* An object can be one of:
+ * - a directory (no data, has links to its children).
+ * - a regular file (data.... not prunes :->).
+ * - a symlink [symbolic link] (the alias).
+ * - a hard link
+ */
+
+typedef struct {
+ __u32 fileSize;
+ __u32 scannedFileSize;
+ __u32 shrinkSize;
+ int topLevel;
+ yaffs_Tnode *top;
+} yaffs_FileStructure;
+
+typedef struct {
+ struct list_head children; /* list of child links */
+} yaffs_DirectoryStructure;
+
+typedef struct {
+ YCHAR *alias;
+} yaffs_SymLinkStructure;
+
+typedef struct {
+ struct yaffs_ObjectStruct *equivalentObject;
+ __u32 equivalentObjectId;
+} yaffs_HardLinkStructure;
+
+typedef union {
+ yaffs_FileStructure fileVariant;
+ yaffs_DirectoryStructure directoryVariant;
+ yaffs_SymLinkStructure symLinkVariant;
+ yaffs_HardLinkStructure hardLinkVariant;
+} yaffs_ObjectVariant;
+
+struct yaffs_ObjectStruct {
+ __u8 deleted:1; /* This should only apply to unlinked files. */
+ __u8 softDeleted:1; /* it has also been soft deleted */
+ __u8 unlinked:1; /* An unlinked file. The file should be in the unlinked directory.*/
+ __u8 fake:1; /* A fake object has no presence on NAND. */
+ __u8 renameAllowed:1; /* Some objects are not allowed to be renamed. */
+ __u8 unlinkAllowed:1;
+ __u8 dirty:1; /* the object needs to be written to flash */
+ __u8 valid:1; /* When the file system is being loaded up, this
+ * object might be created before the data
+ * is available (ie. file data records appear before the header).
+ */
+ __u8 lazyLoaded:1; /* This object has been lazy loaded and is missing some detail */
+
+ __u8 deferedFree:1; /* For Linux kernel. Object is removed from NAND, but is
+				 * still in the inode cache. Freeing of the object is deferred
+				 * until the inode is released.
+ */
+
+ __u8 serial; /* serial number of chunk in NAND. Cached here */
+ __u16 sum; /* sum of the name to speed searching */
+
+ struct yaffs_DeviceStruct *myDev; /* The device I'm on */
+
+ struct list_head hashLink; /* list of objects in this hash bucket */
+
+ struct list_head hardLinks; /* all the equivalent hard linked objects */
+
+ /* directory structure stuff */
+ /* also used for linking up the free list */
+ struct yaffs_ObjectStruct *parent;
+ struct list_head siblings;
+
+ /* Where's my object header in NAND? */
+ int chunkId;
+
+ int nDataChunks; /* Number of data chunks attached to the file. */
+
+ __u32 objectId; /* the object id value */
+
+ __u32 yst_mode;
+
+#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+ YCHAR shortName[YAFFS_SHORT_NAME_LENGTH + 1];
+#endif
+
+#ifndef __KERNEL__
+ __u32 inUse;
+#endif
+
+#ifdef CONFIG_YAFFS_WINCE
+ __u32 win_ctime[2];
+ __u32 win_mtime[2];
+ __u32 win_atime[2];
+#else
+ __u32 yst_uid;
+ __u32 yst_gid;
+ __u32 yst_atime;
+ __u32 yst_mtime;
+ __u32 yst_ctime;
+#endif
+
+ __u32 yst_rdev;
+
+#ifdef __KERNEL__
+ struct inode *myInode;
+
+#endif
+
+ yaffs_ObjectType variantType;
+
+ yaffs_ObjectVariant variant;
+
+};
+
+typedef struct yaffs_ObjectStruct yaffs_Object;
+
+struct yaffs_ObjectList_struct {
+ yaffs_Object *objects;
+ struct yaffs_ObjectList_struct *next;
+};
+
+typedef struct yaffs_ObjectList_struct yaffs_ObjectList;
+
+typedef struct {
+ struct list_head list;
+ int count;
+} yaffs_ObjectBucket;
+
+
+/* yaffs_CheckpointObject holds the definition of an object as dumped
+ * by checkpointing.
+ */
+
+typedef struct {
+ int structType;
+ __u32 objectId;
+ __u32 parentId;
+ int chunkId;
+
+ yaffs_ObjectType variantType:3;
+ __u8 deleted:1;
+ __u8 softDeleted:1;
+ __u8 unlinked:1;
+ __u8 fake:1;
+ __u8 renameAllowed:1;
+ __u8 unlinkAllowed:1;
+ __u8 serial;
+
+ int nDataChunks;
+ __u32 fileSizeOrEquivalentObjectId;
+
+}yaffs_CheckpointObject;
+
+/*--------------------- Temporary buffers ----------------
+ *
+ * These are chunk-sized working buffers. Each device has a few.
+ */
+
+typedef struct {
+ __u8 *buffer;
+ int line; /* track from whence this buffer was allocated */
+ int maxLine;
+} yaffs_TempBuffer;
+
+/*----------------- Device ---------------------------------*/
+
+struct yaffs_DeviceStruct {
+ struct list_head devList;
+ const char *name;
+
+ /* Entry parameters set up way early. Yaffs sets up the rest.*/
+ int nDataBytesPerChunk; /* Should be a power of 2 >= 512 */
+ int nChunksPerBlock; /* does not need to be a power of 2 */
+ int nBytesPerSpare; /* spare area size */
+ int startBlock; /* Start block we're allowed to use */
+ int endBlock; /* End block we're allowed to use */
+ int nReservedBlocks; /* We want this tuneable so that we can reduce */
+ /* reserved blocks on NOR and RAM. */
+
+
+ /* Stuff used by the shared space checkpointing mechanism */
+ /* If this value is zero, then this mechanism is disabled */
+
+ int nCheckpointReservedBlocks; /* Blocks to reserve for checkpoint data */
+
+
+
+
+ int nShortOpCaches; /* If <= 0, then short op caching is disabled, else
+ * the number of short op caches (don't use too many)
+ */
+
+ int useHeaderFileSize; /* Flag to determine if we should use file sizes from the header */
+
+ int useNANDECC; /* Flag to decide whether or not to use NANDECC */
+
+ void *genericDevice; /* Pointer to device context
+ * On an mtd this holds the mtd pointer.
+ */
+ void *superBlock;
+
+ /* NAND access functions (Must be set before calling YAFFS)*/
+
+ int (*writeChunkToNAND) (struct yaffs_DeviceStruct * dev,
+ int chunkInNAND, const __u8 * data,
+ const yaffs_Spare * spare);
+ int (*readChunkFromNAND) (struct yaffs_DeviceStruct * dev,
+ int chunkInNAND, __u8 * data,
+ yaffs_Spare * spare);
+ int (*eraseBlockInNAND) (struct yaffs_DeviceStruct * dev,
+ int blockInNAND);
+ int (*initialiseNAND) (struct yaffs_DeviceStruct * dev);
+
+#ifdef CONFIG_YAFFS_YAFFS2
+ int (*writeChunkWithTagsToNAND) (struct yaffs_DeviceStruct * dev,
+ int chunkInNAND, const __u8 * data,
+ const yaffs_ExtendedTags * tags);
+ int (*readChunkWithTagsFromNAND) (struct yaffs_DeviceStruct * dev,
+ int chunkInNAND, __u8 * data,
+ yaffs_ExtendedTags * tags);
+ int (*markNANDBlockBad) (struct yaffs_DeviceStruct * dev, int blockNo);
+ int (*queryNANDBlock) (struct yaffs_DeviceStruct * dev, int blockNo,
+ yaffs_BlockState * state, int *sequenceNumber);
+#endif
+
+ int isYaffs2;
+
+ /* The removeObjectCallback function must be supplied by OS flavours that
+ * need it. The Linux kernel does not use this, but yaffs direct does use
+ * it to implement the faster readdir
+ */
+ void (*removeObjectCallback)(struct yaffs_ObjectStruct *obj);
+
+	/* Callback to mark the superblock dirty */
+ void (*markSuperBlockDirty)(void * superblock);
+
+ int wideTnodesDisabled; /* Set to disable wide tnodes */
+
+
+ /* End of stuff that must be set before initialisation. */
+
+ /* Checkpoint control. Can be set before or after initialisation */
+ __u8 skipCheckpointRead;
+ __u8 skipCheckpointWrite;
+
+ /* Runtime parameters. Set up by YAFFS. */
+
+ __u16 chunkGroupBits; /* 0 for devices <= 32MB. else log2(nchunks) - 16 */
+ __u16 chunkGroupSize; /* == 2^^chunkGroupBits */
+
+ /* Stuff to support wide tnodes */
+ __u32 tnodeWidth;
+ __u32 tnodeMask;
+
+	/* Stuff to support various file offset to chunk/offset translations */
+ /* "Crumbs" for nDataBytesPerChunk not being a power of 2 */
+ __u32 crumbMask;
+ __u32 crumbShift;
+ __u32 crumbsPerChunk;
+
+ /* Straight shifting for nDataBytesPerChunk being a power of 2 */
+ __u32 chunkShift;
+ __u32 chunkMask;
+
+
+#ifdef __KERNEL__
+
+ struct semaphore sem; /* Semaphore for waiting on erasure.*/
+ struct semaphore grossLock; /* Gross locking semaphore */
+ __u8 *spareBuffer; /* For mtdif2 use. Don't know the size of the buffer
+ * at compile time so we have to allocate it.
+ */
+ void (*putSuperFunc) (struct super_block * sb);
+#endif
+
+ int isMounted;
+
+ int isCheckpointed;
+
+
+ /* Stuff to support block offsetting to support start block zero */
+ int internalStartBlock;
+ int internalEndBlock;
+ int blockOffset;
+ int chunkOffset;
+
+
+ /* Runtime checkpointing stuff */
+ int checkpointPageSequence; /* running sequence number of checkpoint pages */
+ int checkpointByteCount;
+ int checkpointByteOffset;
+ __u8 *checkpointBuffer;
+ int checkpointOpenForWrite;
+ int blocksInCheckpoint;
+ int checkpointCurrentChunk;
+ int checkpointCurrentBlock;
+ int checkpointNextBlock;
+ int *checkpointBlockList;
+ int checkpointMaxBlocks;
+ __u32 checkpointSum;
+ __u32 checkpointXor;
+
+ /* Block Info */
+ yaffs_BlockInfo *blockInfo;
+ __u8 *chunkBits; /* bitmap of chunks in use */
+ unsigned blockInfoAlt:1; /* was allocated using alternative strategy */
+ unsigned chunkBitsAlt:1; /* was allocated using alternative strategy */
+ int chunkBitmapStride; /* Number of bytes of chunkBits per block.
+ * Must be consistent with nChunksPerBlock.
+ */
+
+ int nErasedBlocks;
+ int allocationBlock; /* Current block being allocated off */
+ __u32 allocationPage;
+ int allocationBlockFinder; /* Used to search for next allocation block */
+
+ /* Runtime state */
+ int nTnodesCreated;
+ yaffs_Tnode *freeTnodes;
+ int nFreeTnodes;
+ yaffs_TnodeList *allocatedTnodeList;
+
+ int isDoingGC;
+
+ int nObjectsCreated;
+ yaffs_Object *freeObjects;
+ int nFreeObjects;
+
+ yaffs_ObjectList *allocatedObjectList;
+
+ yaffs_ObjectBucket objectBucket[YAFFS_NOBJECT_BUCKETS];
+
+ int nFreeChunks;
+
+ int currentDirtyChecker; /* Used to find current dirtiest block */
+
+ __u32 *gcCleanupList; /* objects to delete at the end of a GC. */
+ int nonAggressiveSkip; /* GC state/mode */
+
+	/* Statistics */
+ int nPageWrites;
+ int nPageReads;
+ int nBlockErasures;
+ int nErasureFailures;
+ int nGCCopies;
+ int garbageCollections;
+ int passiveGarbageCollections;
+ int nRetriedWrites;
+ int nRetiredBlocks;
+ int eccFixed;
+ int eccUnfixed;
+ int tagsEccFixed;
+ int tagsEccUnfixed;
+ int nDeletions;
+ int nUnmarkedDeletions;
+
+ int hasPendingPrioritisedGCs; /* We think this device might have pending prioritised gcs */
+
+ /* Special directories */
+ yaffs_Object *rootDir;
+ yaffs_Object *lostNFoundDir;
+
+ /* Buffer areas for storing data to recover from write failures TODO
+ * __u8 bufferedData[YAFFS_CHUNKS_PER_BLOCK][YAFFS_BYTES_PER_CHUNK];
+ * yaffs_Spare bufferedSpare[YAFFS_CHUNKS_PER_BLOCK];
+ */
+
+ int bufferedBlock; /* Which block is buffered here? */
+ int doingBufferedBlockRewrite;
+
+ yaffs_ChunkCache *srCache;
+ int srLastUse;
+
+ int cacheHits;
+
+ /* Stuff for background deletion and unlinked files.*/
+ yaffs_Object *unlinkedDir; /* Directory where unlinked and deleted files live. */
+ yaffs_Object *deletedDir; /* Directory where deleted objects are sent to disappear. */
+ yaffs_Object *unlinkedDeletion; /* Current file being background deleted.*/
+ int nDeletedFiles; /* Count of files awaiting deletion;*/
+ int nUnlinkedFiles; /* Count of unlinked files. */
+ int nBackgroundDeletions; /* Count of background deletions. */
+
+
+ yaffs_TempBuffer tempBuffer[YAFFS_N_TEMP_BUFFERS];
+ int maxTemp;
+ int unmanagedTempAllocations;
+ int unmanagedTempDeallocations;
+
+ /* yaffs2 runtime stuff */
+ unsigned sequenceNumber; /* Sequence number of currently allocating block */
+ unsigned oldestDirtySequence;
+
+};
+
+typedef struct yaffs_DeviceStruct yaffs_Device;
+
+/* The static layout of block usage etc is stored in the super block header */
+typedef struct {
+ int StructType;
+ int version;
+ int checkpointStartBlock;
+ int checkpointEndBlock;
+ int startBlock;
+ int endBlock;
+ int rfu[100];
+} yaffs_SuperBlockHeader;
+
+/* The CheckpointDevice structure holds the device information that changes at runtime and
+ * must be preserved over unmount/mount cycles.
+ */
+typedef struct {
+ int structType;
+ int nErasedBlocks;
+ int allocationBlock; /* Current block being allocated off */
+ __u32 allocationPage;
+ int nFreeChunks;
+
+ int nDeletedFiles; /* Count of files awaiting deletion;*/
+ int nUnlinkedFiles; /* Count of unlinked files. */
+ int nBackgroundDeletions; /* Count of background deletions. */
+
+ /* yaffs2 runtime stuff */
+ unsigned sequenceNumber; /* Sequence number of currently allocating block */
+ unsigned oldestDirtySequence;
+
+} yaffs_CheckpointDevice;
+
+
+typedef struct {
+ int structType;
+ __u32 magic;
+ __u32 version;
+ __u32 head;
+} yaffs_CheckpointValidity;
+
+/* Function to manipulate block info */
+static Y_INLINE yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device * dev, int blk)
+{
+ if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>> yaffs: getBlockInfo block %d is not valid" TENDSTR),
+ blk));
+ YBUG();
+ }
+ return &dev->blockInfo[blk - dev->internalStartBlock];
+}
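+/* Typical use, as in the scanning code in yaffs_guts.c:
+ *	bi = yaffs_GetBlockInfo(dev, blk);
+ *	bi->pagesInUse++;
+ */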
+
+/*----------------------- YAFFS Functions -----------------------*/
+
+int yaffs_GutsInitialise(yaffs_Device * dev);
+void yaffs_Deinitialise(yaffs_Device * dev);
+
+int yaffs_GetNumberOfFreeChunks(yaffs_Device * dev);
+
+int yaffs_RenameObject(yaffs_Object * oldDir, const YCHAR * oldName,
+ yaffs_Object * newDir, const YCHAR * newName);
+
+int yaffs_Unlink(yaffs_Object * dir, const YCHAR * name);
+int yaffs_DeleteFile(yaffs_Object * obj);
+
+int yaffs_GetObjectName(yaffs_Object * obj, YCHAR * name, int buffSize);
+int yaffs_GetObjectFileLength(yaffs_Object * obj);
+int yaffs_GetObjectInode(yaffs_Object * obj);
+unsigned yaffs_GetObjectType(yaffs_Object * obj);
+int yaffs_GetObjectLinkCount(yaffs_Object * obj);
+
+int yaffs_SetAttributes(yaffs_Object * obj, struct iattr *attr);
+int yaffs_GetAttributes(yaffs_Object * obj, struct iattr *attr);
+
+/* File operations */
+int yaffs_ReadDataFromFile(yaffs_Object * obj, __u8 * buffer, loff_t offset,
+ int nBytes);
+int yaffs_WriteDataToFile(yaffs_Object * obj, const __u8 * buffer, loff_t offset,
+ int nBytes, int writeThrough);
+int yaffs_ResizeFile(yaffs_Object * obj, loff_t newSize);
+
+yaffs_Object *yaffs_MknodFile(yaffs_Object * parent, const YCHAR * name,
+ __u32 mode, __u32 uid, __u32 gid);
+int yaffs_FlushFile(yaffs_Object * obj, int updateTime);
+
+/* Flushing and checkpointing */
+void yaffs_FlushEntireDeviceCache(yaffs_Device *dev);
+
+int yaffs_CheckpointSave(yaffs_Device *dev);
+int yaffs_CheckpointRestore(yaffs_Device *dev);
+
+/* Directory operations */
+yaffs_Object *yaffs_MknodDirectory(yaffs_Object * parent, const YCHAR * name,
+ __u32 mode, __u32 uid, __u32 gid);
+yaffs_Object *yaffs_FindObjectByName(yaffs_Object * theDir, const YCHAR * name);
+int yaffs_ApplyToDirectoryChildren(yaffs_Object * theDir,
+ int (*fn) (yaffs_Object *));
+
+yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device * dev, __u32 number);
+
+/* Link operations */
+yaffs_Object *yaffs_Link(yaffs_Object * parent, const YCHAR * name,
+ yaffs_Object * equivalentObject);
+
+yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object * obj);
+
+/* Symlink operations */
+yaffs_Object *yaffs_MknodSymLink(yaffs_Object * parent, const YCHAR * name,
+ __u32 mode, __u32 uid, __u32 gid,
+ const YCHAR * alias);
+YCHAR *yaffs_GetSymlinkAlias(yaffs_Object * obj);
+
+/* Special inodes (fifos, sockets and devices) */
+yaffs_Object *yaffs_MknodSpecial(yaffs_Object * parent, const YCHAR * name,
+ __u32 mode, __u32 uid, __u32 gid, __u32 rdev);
+
+/* Special directories */
+yaffs_Object *yaffs_Root(yaffs_Device * dev);
+yaffs_Object *yaffs_LostNFound(yaffs_Device * dev);
+
+#ifdef CONFIG_YAFFS_WINCE
+/* CONFIG_YAFFS_WINCE special stuff */
+void yfsd_WinFileTimeNow(__u32 target[2]);
+#endif
+
+#ifdef __KERNEL__
+
+void yaffs_HandleDeferedFree(yaffs_Object * obj);
+#endif
+
+/* Debug dump */
+int yaffs_DumpObject(yaffs_Object * obj);
+
+void yaffs_GutsTest(yaffs_Device * dev);
+
+/* A few useful functions */
+void yaffs_InitialiseTags(yaffs_ExtendedTags * tags);
+void yaffs_DeleteChunk(yaffs_Device * dev, int chunkId, int markNAND, int lyn);
+int yaffs_CheckFF(__u8 * buffer, int nBytes);
+void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi);
+
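+/* Editor's illustration, not part of the original YAFFS sources: a minimal
+ * sketch of how the guts API declared above might be driven, assuming a
+ * yaffs_Device whose geometry and NAND access functions have already been
+ * configured. The helper name and the "example" file name are invented for
+ * illustration; _Y() is assumed to be the YCHAR string wrapper from
+ * yportenv.h. Kept inside #if 0, following the convention used for other
+ * illustrative code in these files.
+ */
+#if 0
+static void example_guts_usage(yaffs_Device *dev)
+{
+	static const __u8 payload[] = "hello";
+	yaffs_Object *root;
+	yaffs_Object *f;
+
+	if (yaffs_GutsInitialise(dev) != YAFFS_OK)
+		return;
+
+	root = yaffs_Root(dev);
+
+	/* Create a regular file, write a few bytes, then flush it out. */
+	f = yaffs_MknodFile(root, _Y("example"), S_IFREG | 0644, 0, 0);
+	if (f) {
+		yaffs_WriteDataToFile(f, payload, 0, sizeof(payload), 0);
+		yaffs_FlushFile(f, 1);
+	}
+
+	yaffs_Deinitialise(dev);
+}
+#endif
+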
+#endif
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_mtdif.c b/target/linux/generic/files/fs/yaffs2/yaffs_mtdif.c
new file mode 100644
index 000000000..466e5a44c
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_mtdif.c
@@ -0,0 +1,241 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+const char *yaffs_mtdif_c_version =
+ "$Id: yaffs_mtdif.c,v 1.19 2007-02-14 01:09:06 wookey Exp $";
+
+#include "yportenv.h"
+
+
+#include "yaffs_mtdif.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+#include "linux/mtd/nand.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
+static struct nand_oobinfo yaffs_oobinfo = {
+ .useecc = 1,
+ .eccbytes = 6,
+ .eccpos = {8, 9, 10, 13, 14, 15}
+};
+
+static struct nand_oobinfo yaffs_noeccinfo = {
+ .useecc = 0,
+};
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+static inline void translate_spare2oob(const yaffs_Spare *spare, __u8 *oob)
+{
+ oob[0] = spare->tagByte0;
+ oob[1] = spare->tagByte1;
+ oob[2] = spare->tagByte2;
+ oob[3] = spare->tagByte3;
+ oob[4] = spare->tagByte4;
+ oob[5] = spare->tagByte5 & 0x3f;
+ oob[5] |= spare->blockStatus == 'Y' ? 0: 0x80;
+ oob[5] |= spare->pageStatus == 0 ? 0: 0x40;
+ oob[6] = spare->tagByte6;
+ oob[7] = spare->tagByte7;
+}
+
+static inline void translate_oob2spare(yaffs_Spare *spare, __u8 *oob)
+{
+ struct yaffs_NANDSpare *nspare = (struct yaffs_NANDSpare *)spare;
+ spare->tagByte0 = oob[0];
+ spare->tagByte1 = oob[1];
+ spare->tagByte2 = oob[2];
+ spare->tagByte3 = oob[3];
+ spare->tagByte4 = oob[4];
+ spare->tagByte5 = oob[5] == 0xff ? 0xff : oob[5] & 0x3f;
+ spare->blockStatus = oob[5] & 0x80 ? 0xff : 'Y';
+ spare->pageStatus = oob[5] & 0x40 ? 0xff : 0;
+ spare->ecc1[0] = spare->ecc1[1] = spare->ecc1[2] = 0xff;
+ spare->tagByte6 = oob[6];
+ spare->tagByte7 = oob[7];
+ spare->ecc2[0] = spare->ecc2[1] = spare->ecc2[2] = 0xff;
+
+ nspare->eccres1 = nspare->eccres2 = 0; /* FIXME */
+}
+#endif
+
+int nandmtd_WriteChunkToNAND(yaffs_Device * dev, int chunkInNAND,
+ const __u8 * data, const yaffs_Spare * spare)
+{
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+ struct mtd_oob_ops ops;
+#endif
+ size_t dummy;
+ int retval = 0;
+
+ loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+ __u8 spareAsBytes[8]; /* OOB */
+
+ if (data && !spare)
+ retval = mtd->write(mtd, addr, dev->nDataBytesPerChunk,
+ &dummy, data);
+ else if (spare) {
+ if (dev->useNANDECC) {
+ translate_spare2oob(spare, spareAsBytes);
+ ops.mode = MTD_OOB_AUTO;
+ ops.ooblen = 8; /* temp hack */
+ } else {
+ ops.mode = MTD_OOB_RAW;
+ ops.ooblen = YAFFS_BYTES_PER_SPARE;
+ }
+ ops.len = data ? dev->nDataBytesPerChunk : ops.ooblen;
+ ops.datbuf = (u8 *)data;
+ ops.ooboffs = 0;
+ ops.oobbuf = spareAsBytes;
+ retval = mtd->write_oob(mtd, addr, &ops);
+ }
+#else
+ __u8 *spareAsBytes = (__u8 *) spare;
+
+ if (data && spare) {
+ if (dev->useNANDECC)
+ retval =
+ mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
+ &dummy, data, spareAsBytes,
+ &yaffs_oobinfo);
+ else
+ retval =
+ mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
+ &dummy, data, spareAsBytes,
+ &yaffs_noeccinfo);
+ } else {
+ if (data)
+ retval =
+ mtd->write(mtd, addr, dev->nDataBytesPerChunk, &dummy,
+ data);
+ if (spare)
+ retval =
+ mtd->write_oob(mtd, addr, YAFFS_BYTES_PER_SPARE,
+ &dummy, spareAsBytes);
+ }
+#endif
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd_ReadChunkFromNAND(yaffs_Device * dev, int chunkInNAND, __u8 * data,
+ yaffs_Spare * spare)
+{
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+ struct mtd_oob_ops ops;
+#endif
+ size_t dummy;
+ int retval = 0;
+
+ loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+ __u8 spareAsBytes[8]; /* OOB */
+
+ if (data && !spare)
+ retval = mtd->read(mtd, addr, dev->nDataBytesPerChunk,
+ &dummy, data);
+ else if (spare) {
+ if (dev->useNANDECC) {
+ ops.mode = MTD_OOB_AUTO;
+ ops.ooblen = 8; /* temp hack */
+ } else {
+ ops.mode = MTD_OOB_RAW;
+ ops.ooblen = YAFFS_BYTES_PER_SPARE;
+ }
+ ops.len = data ? dev->nDataBytesPerChunk : ops.ooblen;
+ ops.datbuf = data;
+ ops.ooboffs = 0;
+ ops.oobbuf = spareAsBytes;
+ retval = mtd->read_oob(mtd, addr, &ops);
+ if (dev->useNANDECC)
+ translate_oob2spare(spare, spareAsBytes);
+ }
+#else
+ __u8 *spareAsBytes = (__u8 *) spare;
+
+ if (data && spare) {
+ if (dev->useNANDECC) {
+ /* Careful, this call adds 2 ints */
+ /* to the end of the spare data. Calling function */
+ /* should allocate enough memory for spare, */
+ /* i.e. [YAFFS_BYTES_PER_SPARE+2*sizeof(int)]. */
+ retval =
+ mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
+ &dummy, data, spareAsBytes,
+ &yaffs_oobinfo);
+ } else {
+ retval =
+ mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
+ &dummy, data, spareAsBytes,
+ &yaffs_noeccinfo);
+ }
+ } else {
+ if (data)
+ retval =
+ mtd->read(mtd, addr, dev->nDataBytesPerChunk, &dummy,
+ data);
+ if (spare)
+ retval =
+ mtd->read_oob(mtd, addr, YAFFS_BYTES_PER_SPARE,
+ &dummy, spareAsBytes);
+ }
+#endif
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd_EraseBlockInNAND(yaffs_Device * dev, int blockNumber)
+{
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+ __u32 addr =
+ ((loff_t) blockNumber) * dev->nDataBytesPerChunk
+ * dev->nChunksPerBlock;
+ struct erase_info ei;
+ int retval = 0;
+
+ ei.mtd = mtd;
+ ei.addr = addr;
+ ei.len = dev->nDataBytesPerChunk * dev->nChunksPerBlock;
+ ei.time = 1000;
+ ei.retries = 2;
+ ei.callback = NULL;
+ ei.priv = (u_long) dev;
+
+ /* Todo finish off the ei if required */
+
+ sema_init(&dev->sem, 0);
+
+ retval = mtd->erase(mtd, &ei);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd_InitialiseNAND(yaffs_Device * dev)
+{
+ return YAFFS_OK;
+}
+
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_mtdif.h b/target/linux/generic/files/fs/yaffs2/yaffs_mtdif.h
new file mode 100644
index 000000000..317600cac
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_mtdif.h
@@ -0,0 +1,27 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF_H__
+#define __YAFFS_MTDIF_H__
+
+#include "yaffs_guts.h"
+
+int nandmtd_WriteChunkToNAND(yaffs_Device * dev, int chunkInNAND,
+ const __u8 * data, const yaffs_Spare * spare);
+int nandmtd_ReadChunkFromNAND(yaffs_Device * dev, int chunkInNAND, __u8 * data,
+ yaffs_Spare * spare);
+int nandmtd_EraseBlockInNAND(yaffs_Device * dev, int blockNumber);
+int nandmtd_InitialiseNAND(yaffs_Device * dev);
+#endif
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_mtdif1-compat.c b/target/linux/generic/files/fs/yaffs2/yaffs_mtdif1-compat.c
new file mode 100644
index 000000000..6a376f1f7
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_mtdif1-compat.c
@@ -0,0 +1,434 @@
+From ian@brightstareng.com Fri May 18 15:06:49 2007
+From ian@brightstareng.com Fri May 18 15:08:21 2007
+Received: from 206.173.66.57.ptr.us.xo.net ([206.173.66.57] helo=zebra.brightstareng.com)
+ by apollo.linkchoose.co.uk with esmtp (Exim 4.60)
+ (envelope-from <ian@brightstareng.com>)
+ id 1Hp380-00011e-T6
+ for david.goodenough@linkchoose.co.uk; Fri, 18 May 2007 15:08:21 +0100
+Received: from localhost (localhost.localdomain [127.0.0.1])
+ by zebra.brightstareng.com (Postfix) with ESMTP
+ id 4819F28C004; Fri, 18 May 2007 10:07:49 -0400 (EDT)
+Received: from zebra.brightstareng.com ([127.0.0.1])
+ by localhost (zebra [127.0.0.1]) (amavisd-new, port 10024) with ESMTP
+ id 05328-06; Fri, 18 May 2007 10:07:16 -0400 (EDT)
+Received: from pippin (unknown [192.168.1.25])
+ by zebra.brightstareng.com (Postfix) with ESMTP
+ id 8BEF528C1BC; Fri, 18 May 2007 10:06:53 -0400 (EDT)
+From: Ian McDonnell <ian@brightstareng.com>
+To: David Goodenough <david.goodenough@linkchoose.co.uk>
+Subject: Re: something tested this time -- yaffs_mtdif1-compat.c
+Date: Fri, 18 May 2007 10:06:49 -0400
+User-Agent: KMail/1.9.1
+References: <200705142207.06909.ian@brightstareng.com> <200705171131.53536.ian@brightstareng.com> <200705181334.32166.david.goodenough@linkchoose.co.uk>
+In-Reply-To: <200705181334.32166.david.goodenough@linkchoose.co.uk>
+Cc: Andrea Conti <alyf@alyf.net>,
+ Charles Manning <manningc2@actrix.gen.nz>
+MIME-Version: 1.0
+Content-Type: Multipart/Mixed;
+ boundary="Boundary-00=_5LbTGmt62YoutxM"
+Message-Id: <200705181006.49860.ian@brightstareng.com>
+X-Virus-Scanned: by amavisd-new at brightstareng.com
+Status: R
+X-Status: NT
+X-KMail-EncryptionState:
+X-KMail-SignatureState:
+X-KMail-MDN-Sent:
+
+--Boundary-00=_5LbTGmt62YoutxM
+Content-Type: text/plain;
+ charset="iso-8859-15"
+Content-Transfer-Encoding: 7bit
+Content-Disposition: inline
+
+David, Andrea,
+
+On Friday 18 May 2007 08:34, you wrote:
+> Yea team. With this fix in place (I put it in the wrong place
+> at first) I can now mount and ls the Yaffs partition without
+> an error messages!
+
+Good news!
+
+Attached is a newer yaffs_mtdif1.c with a bandaid to help the
+2.6.18 and 2.6.19 versions of MTD not trip on the oob read.
+See the LINUX_VERSION_CODE conditional in
+nandmtd1_ReadChunkWithTagsFromNAND.
+
+-imcd
+
+--Boundary-00=_5LbTGmt62YoutxM
+Content-Type: text/x-csrc;
+ charset="iso-8859-15";
+ name="yaffs_mtdif1.c"
+Content-Transfer-Encoding: 7bit
+Content-Disposition: attachment;
+ filename="yaffs_mtdif1.c"
+
+/*
+ * YAFFS: Yet another FFS. A NAND-flash specific file system.
+ * yaffs_mtdif1.c NAND mtd interface functions for small-page NAND.
+ *
+ * Copyright (C) 2002 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This module provides the interface between yaffs_nand.c and the
+ * MTD API. This version is used when the MTD interface supports the
+ * 'mtd_oob_ops' style calls to read_oob and write_oob, circa 2.6.17,
+ * and we have a small-page NAND device.
+ *
+ * These functions are invoked via function pointers in yaffs_nand.c.
+ * This replaces functionality provided by functions in yaffs_mtdif.c
+ * and the yaffs_TagsCompatability functions in yaffs_tagscompat.c that are
+ * called in yaffs_nand.c when the function pointers are NULL.
+ * We assume the MTD layer is performing ECC (useNANDECC is true).
+ */
+
+#include "yportenv.h"
+#include "yaffs_guts.h"
+#include "yaffs_packedtags1.h"
+#include "yaffs_tagscompat.h" // for yaffs_CalcTagsECC
+
+#include "linux/kernel.h"
+#include "linux/version.h"
+#include "linux/types.h"
+#include "linux/mtd/mtd.h"
+
+/* Don't compile this module if we don't have MTD's mtd_oob_ops interface */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+
+const char *yaffs_mtdif1_c_version = "$Id$";
+
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+# define YTAG1_SIZE 8
+#else
+# define YTAG1_SIZE 9
+#endif
+
+#if 0
+/* Use the following nand_ecclayout with MTD when using
+ * CONFIG_YAFFS_9BYTE_TAGS and the older on-NAND tags layout.
+ * If you have existing Yaffs images and the byte order differs from this,
+ * adjust 'oobfree' to match your existing Yaffs data.
+ *
+ * This nand_ecclayout scatters/gathers to/from the old-yaffs layout with the
+ * pageStatus byte (at NAND spare offset 4) scattered/gathered from/to
+ * the 9th byte.
+ *
+ * Old-style on-NAND format: T0,T1,T2,T3,P,B,T4,T5,E0,E1,E2,T6,T7,E3,E4,E5
+ * We have/need PackedTags1 plus pageStatus: T0,T1,T2,T3,T4,T5,T6,T7,P
+ * where Tn are the tag bytes, En are MTD's ECC bytes, P is the pageStatus
+ * byte and B is the small-page bad-block indicator byte.
+ */
+static struct nand_ecclayout nand_oob_16 = {
+ .eccbytes = 6,
+ .eccpos = { 8, 9, 10, 13, 14, 15 },
+ .oobavail = 9,
+ .oobfree = { { 0, 4 }, { 6, 2 }, { 11, 2 }, { 4, 1 } }
+};
+#endif
+
+/* Write a chunk (page) of data to NAND.
+ *
+ * Caller always provides ExtendedTags data which are converted to a more
+ * compact (packed) form for storage in NAND. A mini-ECC runs over the
+ * contents of the tags meta-data and is used to validate the tags when read.
+ *
+ * - Pack ExtendedTags to PackedTags1 form
+ * - Compute mini-ECC for PackedTags1
+ * - Write data and packed tags to NAND.
+ *
+ * Note: Due to the use of the PackedTags1 meta-data which does not include
+ * a full sequence number (as found in the larger PackedTags2 form) it is
+ * necessary for Yaffs to re-write a chunk/page (just once) to mark it as
+ * discarded and dirty. This is not ideal: newer NAND parts are supposed
+ * to be written just once. When Yaffs performs this operation, this
+ * function is called with a NULL data pointer -- calling MTD write_oob
+ * without data is valid usage (2.6.17).
+ *
+ * Any underlying MTD error results in YAFFS_FAIL.
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev,
+ int chunkInNAND, const __u8 * data, const yaffs_ExtendedTags * etags)
+{
+ struct mtd_info * mtd = dev->genericDevice;
+ int chunkBytes = dev->nDataBytesPerChunk;
+ loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
+ struct mtd_oob_ops ops;
+ yaffs_PackedTags1 pt1;
+ int retval;
+
+ /* we assume that PackedTags1 and yaffs_Tags are compatible */
+ compile_time_assertion(sizeof(yaffs_PackedTags1) == 12);
+ compile_time_assertion(sizeof(yaffs_Tags) == 8);
+
+ yaffs_PackTags1(&pt1, etags);
+ yaffs_CalcTagsECC((yaffs_Tags *)&pt1);
+
+ /* When deleting a chunk, the upper layer provides only skeletal
+ * etags, one with chunkDeleted set. However, we need to update the
+ * tags, not erase them completely. So we use the NAND write property
+ * that only zeroed-bits stick and set tag bytes to all-ones and
+ * zero just the (not) deleted bit.
+ */
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+ if (etags->chunkDeleted) {
+ memset(&pt1, 0xff, 8);
+ /* clear delete status bit to indicate deleted */
+ pt1.deleted = 0;
+ }
+#else
+ ((__u8 *)&pt1)[8] = 0xff;
+ if (etags->chunkDeleted) {
+ memset(&pt1, 0xff, 8);
+ /* zero pageStatus byte to indicate deleted */
+ ((__u8 *)&pt1)[8] = 0;
+ }
+#endif
+
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = (data) ? chunkBytes : 0;
+ ops.ooblen = YTAG1_SIZE;
+ ops.datbuf = (__u8 *)data;
+ ops.oobbuf = (__u8 *)&pt1;
+
+ retval = mtd->write_oob(mtd, addr, &ops);
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "write_oob failed, chunk %d, mtd error %d\n",
+ chunkInNAND, retval);
+ }
+ return retval ? YAFFS_FAIL : YAFFS_OK;
+}
+
+/* Return with empty ExtendedTags but add eccResult.
+ */
+static int rettags(yaffs_ExtendedTags * etags, int eccResult, int retval)
+{
+ if (etags) {
+ memset(etags, 0, sizeof(*etags));
+ etags->eccResult = eccResult;
+ }
+ return retval;
+}
+
+/* Read a chunk (page) from NAND.
+ *
+ * Caller expects ExtendedTags data to be usable even on error; that is,
+ * all members except eccResult and blockBad are zeroed.
+ *
+ * - Check ECC results for data (if applicable)
+ * - Check for blank/erased block (return empty ExtendedTags if blank)
+ * - Check the PackedTags1 mini-ECC (correct if necessary/possible)
+ * - Convert PackedTags1 to ExtendedTags
+ * - Update eccResult and blockBad members to reflect state.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev,
+ int chunkInNAND, __u8 * data, yaffs_ExtendedTags * etags)
+{
+ struct mtd_info * mtd = dev->genericDevice;
+ int chunkBytes = dev->nDataBytesPerChunk;
+ loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
+ int eccres = YAFFS_ECC_RESULT_NO_ERROR;
+ struct mtd_oob_ops ops;
+ yaffs_PackedTags1 pt1;
+ int retval;
+ int deleted;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = (data) ? chunkBytes : 0;
+ ops.ooblen = YTAG1_SIZE;
+ ops.datbuf = data;
+ ops.oobbuf = (__u8 *)&pt1;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
+ /* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
+ * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
+ */
+ ops.len = (ops.datbuf) ? ops.len : ops.ooblen;
+#endif
+ /* Read page and oob using MTD.
+ * Check status and determine ECC result.
+ */
+ retval = mtd->read_oob(mtd, addr, &ops);
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "read_oob failed, chunk %d, mtd error %d\n",
+ chunkInNAND, retval);
+ }
+
+ switch (retval) {
+ case 0:
+ /* no error */
+ break;
+
+ case -EUCLEAN:
+ /* MTD's ECC fixed the data */
+ eccres = YAFFS_ECC_RESULT_FIXED;
+ dev->eccFixed++;
+ break;
+
+ case -EBADMSG:
+ /* MTD's ECC could not fix the data */
+ dev->eccUnfixed++;
+ /* fall into... */
+ default:
+ rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
+ etags->blockBad = (mtd->block_isbad)(mtd, addr);
+ return YAFFS_FAIL;
+ }
+
+ /* Check for a blank/erased chunk.
+ */
+ if (yaffs_CheckFF((__u8 *)&pt1, 8)) {
+ /* when blank, upper layers want eccResult to be <= NO_ERROR */
+ return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK);
+ }
+
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+	/* Read deleted status (bit) then return it to its non-deleted
+ * state before performing tags mini-ECC check. pt1.deleted is
+ * inverted.
+ */
+ deleted = !pt1.deleted;
+ pt1.deleted = 1;
+#else
+ (void) deleted; /* not used */
+#endif
+
+ /* Check the packed tags mini-ECC and correct if necessary/possible.
+ */
+ retval = yaffs_CheckECCOnTags((yaffs_Tags *)&pt1);
+ switch (retval) {
+ case 0:
+ /* no tags error, use MTD result */
+ break;
+ case 1:
+ /* recovered tags-ECC error */
+ dev->tagsEccFixed++;
+ eccres = YAFFS_ECC_RESULT_FIXED;
+ break;
+ default:
+ /* unrecovered tags-ECC error */
+ dev->tagsEccUnfixed++;
+ return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL);
+ }
+
+ /* Unpack the tags to extended form and set ECC result.
+ * [set shouldBeFF just to keep yaffs_UnpackTags1 happy]
+ */
+ pt1.shouldBeFF = 0xFFFFFFFF;
+ yaffs_UnpackTags1(etags, &pt1);
+ etags->eccResult = eccres;
+
+ /* Set deleted state.
+ */
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+ etags->chunkDeleted = deleted;
+#else
+ etags->chunkDeleted = (yaffs_CountBits(((__u8 *)&pt1)[8]) < 7);
+#endif
+ return YAFFS_OK;
+}
+
+/* Mark a block bad.
+ *
+ * This is a persistent state.
+ * Use of this function should be rare.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
+{
+ struct mtd_info * mtd = dev->genericDevice;
+ int blocksize = dev->nChunksPerBlock * dev->nDataBytesPerChunk;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", blockNo);
+
+ retval = mtd->block_markbad(mtd, (loff_t)blocksize * blockNo);
+ return (retval) ? YAFFS_FAIL : YAFFS_OK;
+}
+
+/* Check any MTD prerequisites.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+static int nandmtd1_TestPrerequists(struct mtd_info * mtd)
+{
+ /* 2.6.18 has mtd->ecclayout->oobavail */
+ /* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */
+ int oobavail = mtd->ecclayout->oobavail;
+
+ if (oobavail < YTAG1_SIZE) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "mtd device has only %d bytes for tags, need %d",
+ oobavail, YTAG1_SIZE);
+ return YAFFS_FAIL;
+ }
+ return YAFFS_OK;
+}
+
+/* Query for the current state of a specific block.
+ *
+ * Examine the tags of the first chunk of the block and return the state:
+ * - YAFFS_BLOCK_STATE_DEAD, the block is marked bad
+ * - YAFFS_BLOCK_STATE_NEEDS_SCANNING, the block is in use
+ * - YAFFS_BLOCK_STATE_EMPTY, the block is clean
+ *
+ * Always returns YAFFS_OK.
+ */
+int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+ yaffs_BlockState * pState, int *pSequenceNumber)
+{
+ struct mtd_info * mtd = dev->genericDevice;
+ int chunkNo = blockNo * dev->nChunksPerBlock;
+ yaffs_ExtendedTags etags;
+ int state = YAFFS_BLOCK_STATE_DEAD;
+ int seqnum = 0;
+ int retval;
+
+	/* We don't yet have a good place to test for MTD config prerequisites.
+ * Do it here as we are called during the initial scan.
+ */
+ if (nandmtd1_TestPrerequists(mtd) != YAFFS_OK) {
+ return YAFFS_FAIL;
+ }
+
+ retval = nandmtd1_ReadChunkWithTagsFromNAND(dev, chunkNo, NULL, &etags);
+ if (etags.blockBad) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is marked bad", blockNo);
+ state = YAFFS_BLOCK_STATE_DEAD;
+ }
+ else if (etags.chunkUsed) {
+ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ seqnum = etags.sequenceNumber;
+ }
+ else {
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+
+ *pState = state;
+ *pSequenceNumber = seqnum;
+
+ /* query always succeeds */
+ return YAFFS_OK;
+}
+
+#endif /*KERNEL_VERSION*/
+
+--Boundary-00=_5LbTGmt62YoutxM--
+
+
+
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_mtdif1.c b/target/linux/generic/files/fs/yaffs2/yaffs_mtdif1.c
new file mode 100644
index 000000000..36d5adec5
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_mtdif1.c
@@ -0,0 +1,363 @@
+/*
+ * YAFFS: Yet another FFS. A NAND-flash specific file system.
+ * yaffs_mtdif1.c NAND mtd interface functions for small-page NAND.
+ *
+ * Copyright (C) 2002 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This module provides the interface between yaffs_nand.c and the
+ * MTD API. This version is used when the MTD interface supports the
+ * 'mtd_oob_ops' style calls to read_oob and write_oob, circa 2.6.17,
+ * and we have a small-page NAND device.
+ *
+ * These functions are invoked via function pointers in yaffs_nand.c.
+ * This replaces functionality provided by functions in yaffs_mtdif.c
+ * and the yaffs_TagsCompatability functions in yaffs_tagscompat.c that are
+ * called in yaffs_nand.c when the function pointers are NULL.
+ * We assume the MTD layer is performing ECC (useNANDECC is true).
+ */
+
+#include "yportenv.h"
+#include "yaffs_guts.h"
+#include "yaffs_packedtags1.h"
+#include "yaffs_tagscompat.h" // for yaffs_CalcTagsECC
+
+#include "linux/kernel.h"
+#include "linux/version.h"
+#include "linux/types.h"
+#include "linux/mtd/mtd.h"
+
+/* Don't compile this module if we don't have MTD's mtd_oob_ops interface */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+
+const char *yaffs_mtdif1_c_version = "$Id: yaffs_mtdif1.c,v 1.3 2007/05/15 20:16:11 ian Exp $";
+
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+# define YTAG1_SIZE 8
+#else
+# define YTAG1_SIZE 9
+#endif
+
+#if 0
+/* Use the following nand_ecclayout with MTD when using
+ * CONFIG_YAFFS_9BYTE_TAGS and the older on-NAND tags layout.
+ * If you have existing Yaffs images and the byte order differs from this,
+ * adjust 'oobfree' to match your existing Yaffs data.
+ *
+ * This nand_ecclayout scatters/gathers to/from the old-yaffs layout with the
+ * pageStatus byte (at NAND spare offset 4) scattered/gathered from/to
+ * the 9th byte.
+ *
+ * Old-style on-NAND format: T0,T1,T2,T3,P,B,T4,T5,E0,E1,E2,T6,T7,E3,E4,E5
+ * We have/need PackedTags1 plus pageStatus: T0,T1,T2,T3,T4,T5,T6,T7,P
+ * where Tn are the tag bytes, En are MTD's ECC bytes, P is the pageStatus
+ * byte and B is the small-page bad-block indicator byte.
+ */
+static struct nand_ecclayout nand_oob_16 = {
+ .eccbytes = 6,
+ .eccpos = { 8, 9, 10, 13, 14, 15 },
+ .oobavail = 9,
+ .oobfree = { { 0, 4 }, { 6, 2 }, { 11, 2 }, { 4, 1 } }
+};
+#endif
+
+/* Write a chunk (page) of data to NAND.
+ *
+ * Caller always provides ExtendedTags data which are converted to a more
+ * compact (packed) form for storage in NAND. A mini-ECC runs over the
+ * contents of the tags meta-data and is used to validate the tags when read.
+ *
+ * - Pack ExtendedTags to PackedTags1 form
+ * - Compute mini-ECC for PackedTags1
+ * - Write data and packed tags to NAND.
+ *
+ * Note: Due to the use of the PackedTags1 meta-data which does not include
+ * a full sequence number (as found in the larger PackedTags2 form) it is
+ * necessary for Yaffs to re-write a chunk/page (just once) to mark it as
+ * discarded and dirty. This is not ideal: newer NAND parts are supposed
+ * to be written just once. When Yaffs performs this operation, this
+ * function is called with a NULL data pointer -- calling MTD write_oob
+ * without data is valid usage (2.6.17).
+ *
+ * Any underlying MTD error results in YAFFS_FAIL.
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev,
+ int chunkInNAND, const __u8 * data, const yaffs_ExtendedTags * etags)
+{
+ struct mtd_info * mtd = dev->genericDevice;
+ int chunkBytes = dev->nDataBytesPerChunk;
+ loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
+ struct mtd_oob_ops ops;
+ yaffs_PackedTags1 pt1;
+ int retval;
+
+ /* we assume that PackedTags1 and yaffs_Tags are compatible */
+ compile_time_assertion(sizeof(yaffs_PackedTags1) == 12);
+ compile_time_assertion(sizeof(yaffs_Tags) == 8);
+
+ dev->nPageWrites++;
+
+ yaffs_PackTags1(&pt1, etags);
+ yaffs_CalcTagsECC((yaffs_Tags *)&pt1);
+
+ /* When deleting a chunk, the upper layer provides only skeletal
+ * etags, one with chunkDeleted set. However, we need to update the
+ * tags, not erase them completely. So we use the NAND write property
+ * that only zeroed-bits stick and set tag bytes to all-ones and
+ * zero just the (not) deleted bit.
+ */
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+ if (etags->chunkDeleted) {
+ memset(&pt1, 0xff, 8);
+ /* clear delete status bit to indicate deleted */
+ pt1.deleted = 0;
+ }
+#else
+ ((__u8 *)&pt1)[8] = 0xff;
+ if (etags->chunkDeleted) {
+ memset(&pt1, 0xff, 8);
+ /* zero pageStatus byte to indicate deleted */
+ ((__u8 *)&pt1)[8] = 0;
+ }
+#endif
+
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = (data) ? chunkBytes : 0;
+ ops.ooblen = YTAG1_SIZE;
+ ops.datbuf = (__u8 *)data;
+ ops.oobbuf = (__u8 *)&pt1;
+
+ retval = mtd->write_oob(mtd, addr, &ops);
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "write_oob failed, chunk %d, mtd error %d\n",
+ chunkInNAND, retval);
+ }
+ return retval ? YAFFS_FAIL : YAFFS_OK;
+}
+
+/* Return with empty ExtendedTags but add eccResult.
+ */
+static int rettags(yaffs_ExtendedTags * etags, int eccResult, int retval)
+{
+ if (etags) {
+ memset(etags, 0, sizeof(*etags));
+ etags->eccResult = eccResult;
+ }
+ return retval;
+}
+
+/* Read a chunk (page) from NAND.
+ *
+ * Caller expects ExtendedTags data to be usable even on error; that is,
+ * all members except eccResult and blockBad are zeroed.
+ *
+ * - Check ECC results for data (if applicable)
+ * - Check for blank/erased block (return empty ExtendedTags if blank)
+ * - Check the PackedTags1 mini-ECC (correct if necessary/possible)
+ * - Convert PackedTags1 to ExtendedTags
+ * - Update eccResult and blockBad members to reflect state.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev,
+ int chunkInNAND, __u8 * data, yaffs_ExtendedTags * etags)
+{
+ struct mtd_info * mtd = dev->genericDevice;
+ int chunkBytes = dev->nDataBytesPerChunk;
+ loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
+ int eccres = YAFFS_ECC_RESULT_NO_ERROR;
+ struct mtd_oob_ops ops;
+ yaffs_PackedTags1 pt1;
+ int retval;
+ int deleted;
+
+ dev->nPageReads++;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = (data) ? chunkBytes : 0;
+ ops.ooblen = YTAG1_SIZE;
+ ops.datbuf = data;
+ ops.oobbuf = (__u8 *)&pt1;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
+ /* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
+ * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
+ */
+ ops.len = (ops.datbuf) ? ops.len : ops.ooblen;
+#endif
+ /* Read page and oob using MTD.
+ * Check status and determine ECC result.
+ */
+ retval = mtd->read_oob(mtd, addr, &ops);
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "read_oob failed, chunk %d, mtd error %d\n",
+ chunkInNAND, retval);
+ }
+
+ switch (retval) {
+ case 0:
+ /* no error */
+ break;
+
+ case -EUCLEAN:
+ /* MTD's ECC fixed the data */
+ eccres = YAFFS_ECC_RESULT_FIXED;
+ dev->eccFixed++;
+ break;
+
+ case -EBADMSG:
+ /* MTD's ECC could not fix the data */
+ dev->eccUnfixed++;
+ /* fall into... */
+ default:
+ rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
+ etags->blockBad = (mtd->block_isbad)(mtd, addr);
+ return YAFFS_FAIL;
+ }
+
+ /* Check for a blank/erased chunk.
+ */
+ if (yaffs_CheckFF((__u8 *)&pt1, 8)) {
+ /* when blank, upper layers want eccResult to be <= NO_ERROR */
+ return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK);
+ }
+
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+	/* Read deleted status (bit) then return it to its non-deleted
+ * state before performing tags mini-ECC check. pt1.deleted is
+ * inverted.
+ */
+ deleted = !pt1.deleted;
+ pt1.deleted = 1;
+#else
+ deleted = (yaffs_CountBits(((__u8 *)&pt1)[8]) < 7);
+#endif
+
+ /* Check the packed tags mini-ECC and correct if necessary/possible.
+ */
+ retval = yaffs_CheckECCOnTags((yaffs_Tags *)&pt1);
+ switch (retval) {
+ case 0:
+ /* no tags error, use MTD result */
+ break;
+ case 1:
+ /* recovered tags-ECC error */
+ dev->tagsEccFixed++;
+ if (eccres == YAFFS_ECC_RESULT_NO_ERROR)
+ eccres = YAFFS_ECC_RESULT_FIXED;
+ break;
+ default:
+ /* unrecovered tags-ECC error */
+ dev->tagsEccUnfixed++;
+ return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL);
+ }
+
+ /* Unpack the tags to extended form and set ECC result.
+ * [set shouldBeFF just to keep yaffs_UnpackTags1 happy]
+ */
+ pt1.shouldBeFF = 0xFFFFFFFF;
+ yaffs_UnpackTags1(etags, &pt1);
+ etags->eccResult = eccres;
+
+ /* Set deleted state */
+ etags->chunkDeleted = deleted;
+ return YAFFS_OK;
+}
+
+/* Mark a block bad.
+ *
+ * This is a persistent state.
+ * Use of this function should be rare.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
+{
+ struct mtd_info * mtd = dev->genericDevice;
+ int blocksize = dev->nChunksPerBlock * dev->nDataBytesPerChunk;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", blockNo);
+
+ retval = mtd->block_markbad(mtd, (loff_t)blocksize * blockNo);
+ return (retval) ? YAFFS_FAIL : YAFFS_OK;
+}
+
+/* Check any MTD prerequisites.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+static int nandmtd1_TestPrerequists(struct mtd_info * mtd)
+{
+ /* 2.6.18 has mtd->ecclayout->oobavail */
+ /* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */
+ int oobavail = mtd->ecclayout->oobavail;
+
+ if (oobavail < YTAG1_SIZE) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "mtd device has only %d bytes for tags, need %d\n",
+ oobavail, YTAG1_SIZE);
+ return YAFFS_FAIL;
+ }
+ return YAFFS_OK;
+}
+
+/* Query for the current state of a specific block.
+ *
+ * Examine the tags of the first chunk of the block and return the state:
+ * - YAFFS_BLOCK_STATE_DEAD, the block is marked bad
+ * - YAFFS_BLOCK_STATE_NEEDS_SCANNING, the block is in use
+ * - YAFFS_BLOCK_STATE_EMPTY, the block is clean
+ *
+ * Always returns YAFFS_OK.
+ */
+int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+ yaffs_BlockState * pState, int *pSequenceNumber)
+{
+ struct mtd_info * mtd = dev->genericDevice;
+ int chunkNo = blockNo * dev->nChunksPerBlock;
+ yaffs_ExtendedTags etags;
+ int state = YAFFS_BLOCK_STATE_DEAD;
+ int seqnum = 0;
+ int retval;
+
+	/* We don't yet have a good place to test for MTD config prerequisites.
+ * Do it here as we are called during the initial scan.
+ */
+ if (nandmtd1_TestPrerequists(mtd) != YAFFS_OK) {
+ return YAFFS_FAIL;
+ }
+
+ retval = nandmtd1_ReadChunkWithTagsFromNAND(dev, chunkNo, NULL, &etags);
+ if (etags.blockBad) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is marked bad", blockNo);
+ state = YAFFS_BLOCK_STATE_DEAD;
+ }
+ else if (etags.chunkUsed) {
+ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ seqnum = etags.sequenceNumber;
+ }
+ else {
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+
+ *pState = state;
+ *pSequenceNumber = seqnum;
+
+ /* query always succeeds */
+ return YAFFS_OK;
+}
+
+#endif /*KERNEL_VERSION*/
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_mtdif1.h b/target/linux/generic/files/fs/yaffs2/yaffs_mtdif1.h
new file mode 100644
index 000000000..c4f6197d6
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_mtdif1.h
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF1_H__
+#define __YAFFS_MTDIF1_H__
+
+int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device * dev, int chunkInNAND,
+ const __u8 * data, const yaffs_ExtendedTags * tags);
+
+int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
+ __u8 * data, yaffs_ExtendedTags * tags);
+
+int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
+
+int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+ yaffs_BlockState * state, int *sequenceNumber);
+
+#endif
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_mtdif2.c b/target/linux/generic/files/fs/yaffs2/yaffs_mtdif2.c
new file mode 100644
index 000000000..cdad0734b
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_mtdif2.c
@@ -0,0 +1,232 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* mtd interface for YAFFS2 */
+
+const char *yaffs_mtdif2_c_version =
+ "$Id: yaffs_mtdif2.c,v 1.17 2007-02-14 01:09:06 wookey Exp $";
+
+#include "yportenv.h"
+
+
+#include "yaffs_mtdif2.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+
+#include "yaffs_packedtags2.h"
+
+int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device * dev, int chunkInNAND,
+ const __u8 * data,
+ const yaffs_ExtendedTags * tags)
+{
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+ struct mtd_oob_ops ops;
+#else
+ size_t dummy;
+#endif
+ int retval = 0;
+
+ loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
+
+ yaffs_PackedTags2 pt;
+
+ T(YAFFS_TRACE_MTD,
+ (TSTR
+ ("nandmtd2_WriteChunkWithTagsToNAND chunk %d data %p tags %p"
+ TENDSTR), chunkInNAND, data, tags));
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+ if (tags)
+ yaffs_PackTags2(&pt, tags);
+ else
+ BUG(); /* both tags and data should always be present */
+
+ if (data) {
+ ops.mode = MTD_OOB_AUTO;
+ ops.ooblen = sizeof(pt);
+ ops.len = dev->nDataBytesPerChunk;
+ ops.ooboffs = 0;
+ ops.datbuf = (__u8 *)data;
+ ops.oobbuf = (void *)&pt;
+ retval = mtd->write_oob(mtd, addr, &ops);
+ } else
+ BUG(); /* both tags and data should always be present */
+#else
+ if (tags) {
+ yaffs_PackTags2(&pt, tags);
+ }
+
+ if (data && tags) {
+ if (dev->useNANDECC)
+ retval =
+ mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
+ &dummy, data, (__u8 *) & pt, NULL);
+ else
+ retval =
+ mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
+ &dummy, data, (__u8 *) & pt, NULL);
+ } else {
+ if (data)
+ retval =
+ mtd->write(mtd, addr, dev->nDataBytesPerChunk, &dummy,
+ data);
+ if (tags)
+ retval =
+ mtd->write_oob(mtd, addr, mtd->oobsize, &dummy,
+ (__u8 *) & pt);
+
+ }
+#endif
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
+ __u8 * data, yaffs_ExtendedTags * tags)
+{
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+ struct mtd_oob_ops ops;
+#endif
+ size_t dummy;
+ int retval = 0;
+
+ loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
+
+ yaffs_PackedTags2 pt;
+
+ T(YAFFS_TRACE_MTD,
+ (TSTR
+ ("nandmtd2_ReadChunkWithTagsFromNAND chunk %d data %p tags %p"
+ TENDSTR), chunkInNAND, data, tags));
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+ if (data && !tags)
+ retval = mtd->read(mtd, addr, dev->nDataBytesPerChunk,
+ &dummy, data);
+ else if (tags) {
+ ops.mode = MTD_OOB_AUTO;
+ ops.ooblen = sizeof(pt);
+ ops.len = data ? dev->nDataBytesPerChunk : sizeof(pt);
+ ops.ooboffs = 0;
+ ops.datbuf = data;
+ ops.oobbuf = dev->spareBuffer;
+ retval = mtd->read_oob(mtd, addr, &ops);
+ }
+#else
+ if (data && tags) {
+ if (dev->useNANDECC) {
+ retval =
+ mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
+ &dummy, data, dev->spareBuffer,
+ NULL);
+ } else {
+ retval =
+ mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
+ &dummy, data, dev->spareBuffer,
+ NULL);
+ }
+ } else {
+ if (data)
+ retval =
+ mtd->read(mtd, addr, dev->nDataBytesPerChunk, &dummy,
+ data);
+ if (tags)
+ retval =
+ mtd->read_oob(mtd, addr, mtd->oobsize, &dummy,
+ dev->spareBuffer);
+ }
+#endif
+
+ memcpy(&pt, dev->spareBuffer, sizeof(pt));
+
+ if (tags)
+ yaffs_UnpackTags2(tags, &pt);
+
+ if(tags && retval == -EBADMSG && tags->eccResult == YAFFS_ECC_RESULT_NO_ERROR)
+ tags->eccResult = YAFFS_ECC_RESULT_UNFIXED;
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
+{
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+ int retval;
+ T(YAFFS_TRACE_MTD,
+ (TSTR("nandmtd2_MarkNANDBlockBad %d" TENDSTR), blockNo));
+
+ retval =
+ mtd->block_markbad(mtd,
+ blockNo * dev->nChunksPerBlock *
+ dev->nDataBytesPerChunk);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+
+}
+
+int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+ yaffs_BlockState * state, int *sequenceNumber)
+{
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+ int retval;
+
+ T(YAFFS_TRACE_MTD,
+ (TSTR("nandmtd2_QueryNANDBlock %d" TENDSTR), blockNo));
+ retval =
+ mtd->block_isbad(mtd,
+ blockNo * dev->nChunksPerBlock *
+ dev->nDataBytesPerChunk);
+
+ if (retval) {
+ T(YAFFS_TRACE_MTD, (TSTR("block is bad" TENDSTR)));
+
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ *sequenceNumber = 0;
+ } else {
+ yaffs_ExtendedTags t;
+ nandmtd2_ReadChunkWithTagsFromNAND(dev,
+ blockNo *
+ dev->nChunksPerBlock, NULL,
+ &t);
+
+ if (t.chunkUsed) {
+ *sequenceNumber = t.sequenceNumber;
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ } else {
+ *sequenceNumber = 0;
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+ }
+ T(YAFFS_TRACE_MTD,
+	  (TSTR("block query returns seq %d state %d" TENDSTR), *sequenceNumber,
+ *state));
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
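+/* Editor's illustration, not part of the original YAFFS sources: a sketch of
+ * how a porting layer might bind a yaffs_Device to the MTD interface above,
+ * using the function pointer names referenced by yaffs_nand.c in this patch.
+ * The helper name is invented; the erase and initialise hooks are assumed to
+ * come from yaffs_mtdif.c (declared in yaffs_mtdif.h). Kept inside #if 0, as
+ * done for other illustrative code in these files.
+ */
+#if 0
+static void example_bind_mtd2(yaffs_Device *dev, struct mtd_info *mtd)
+{
+	dev->genericDevice = mtd;
+
+	/* YAFFS2-style access with packed tags stored in the OOB area */
+	dev->writeChunkWithTagsToNAND = nandmtd2_WriteChunkWithTagsToNAND;
+	dev->readChunkWithTagsFromNAND = nandmtd2_ReadChunkWithTagsFromNAND;
+	dev->markNANDBlockBad = nandmtd2_MarkNANDBlockBad;
+	dev->queryNANDBlock = nandmtd2_QueryNANDBlock;
+
+	/* Erase and init are shared with the generic MTD glue */
+	dev->eraseBlockInNAND = nandmtd_EraseBlockInNAND;
+	dev->initialiseNAND = nandmtd_InitialiseNAND;
+}
+#endif
+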
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_mtdif2.h b/target/linux/generic/files/fs/yaffs2/yaffs_mtdif2.h
new file mode 100644
index 000000000..b67ba52aa
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_mtdif2.h
@@ -0,0 +1,29 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF2_H__
+#define __YAFFS_MTDIF2_H__
+
+#include "yaffs_guts.h"
+int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device * dev, int chunkInNAND,
+ const __u8 * data,
+ const yaffs_ExtendedTags * tags);
+int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
+ __u8 * data, yaffs_ExtendedTags * tags);
+int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
+int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+ yaffs_BlockState * state, int *sequenceNumber);
+
+#endif
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_nand.c b/target/linux/generic/files/fs/yaffs2/yaffs_nand.c
new file mode 100644
index 000000000..4e250338d
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_nand.c
@@ -0,0 +1,134 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+const char *yaffs_nand_c_version =
+ "$Id: yaffs_nand.c,v 1.7 2007-02-14 01:09:06 wookey Exp $";
+
+#include "yaffs_nand.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_tagsvalidity.h"
+
+
+int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
+ __u8 * buffer,
+ yaffs_ExtendedTags * tags)
+{
+ int result;
+ yaffs_ExtendedTags localTags;
+
+ int realignedChunkInNAND = chunkInNAND - dev->chunkOffset;
+
+ /* If there are no tags provided, use local tags to get prioritised gc working */
+ if(!tags)
+ tags = &localTags;
+
+ if (dev->readChunkWithTagsFromNAND)
+ result = dev->readChunkWithTagsFromNAND(dev, realignedChunkInNAND, buffer,
+ tags);
+ else
+ result = yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(dev,
+ realignedChunkInNAND,
+ buffer,
+ tags);
+ if(tags &&
+ tags->eccResult > YAFFS_ECC_RESULT_NO_ERROR){
+
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, chunkInNAND/dev->nChunksPerBlock);
+ yaffs_HandleChunkError(dev,bi);
+ }
+
+ return result;
+}
+
+int yaffs_WriteChunkWithTagsToNAND(yaffs_Device * dev,
+ int chunkInNAND,
+ const __u8 * buffer,
+ yaffs_ExtendedTags * tags)
+{
+ chunkInNAND -= dev->chunkOffset;
+
+
+ if (tags) {
+ tags->sequenceNumber = dev->sequenceNumber;
+ tags->chunkUsed = 1;
+ if (!yaffs_ValidateTags(tags)) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("Writing uninitialised tags" TENDSTR)));
+ YBUG();
+ }
+ T(YAFFS_TRACE_WRITE,
+ (TSTR("Writing chunk %d tags %d %d" TENDSTR), chunkInNAND,
+ tags->objectId, tags->chunkId));
+ } else {
+ T(YAFFS_TRACE_ERROR, (TSTR("Writing with no tags" TENDSTR)));
+ YBUG();
+ }
+
+ if (dev->writeChunkWithTagsToNAND)
+ return dev->writeChunkWithTagsToNAND(dev, chunkInNAND, buffer,
+ tags);
+ else
+ return yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(dev,
+ chunkInNAND,
+ buffer,
+ tags);
+}
+
+int yaffs_MarkBlockBad(yaffs_Device * dev, int blockNo)
+{
+ blockNo -= dev->blockOffset;
+
+ if (dev->markNANDBlockBad)
+ return dev->markNANDBlockBad(dev, blockNo);
+ else
+ return yaffs_TagsCompatabilityMarkNANDBlockBad(dev, blockNo);
+}
+
+int yaffs_QueryInitialBlockState(yaffs_Device * dev,
+ int blockNo,
+ yaffs_BlockState * state,
+ unsigned *sequenceNumber)
+{
+ blockNo -= dev->blockOffset;
+
+ if (dev->queryNANDBlock)
+ return dev->queryNANDBlock(dev, blockNo, state, sequenceNumber);
+ else
+ return yaffs_TagsCompatabilityQueryNANDBlock(dev, blockNo,
+ state,
+ sequenceNumber);
+}
+
+
+int yaffs_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
+ int blockInNAND)
+{
+ int result;
+
+ blockInNAND -= dev->blockOffset;
+
+
+ dev->nBlockErasures++;
+ result = dev->eraseBlockInNAND(dev, blockInNAND);
+
+ return result;
+}
+
+int yaffs_InitialiseNAND(struct yaffs_DeviceStruct *dev)
+{
+ return dev->initialiseNAND(dev);
+}
+
+
+
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_nand.h b/target/linux/generic/files/fs/yaffs2/yaffs_nand.h
new file mode 100644
index 000000000..5fa334bdc
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_nand.h
@@ -0,0 +1,44 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_NAND_H__
+#define __YAFFS_NAND_H__
+#include "yaffs_guts.h"
+
+
+
+int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
+ __u8 * buffer,
+ yaffs_ExtendedTags * tags);
+
+int yaffs_WriteChunkWithTagsToNAND(yaffs_Device * dev,
+ int chunkInNAND,
+ const __u8 * buffer,
+ yaffs_ExtendedTags * tags);
+
+int yaffs_MarkBlockBad(yaffs_Device * dev, int blockNo);
+
+int yaffs_QueryInitialBlockState(yaffs_Device * dev,
+ int blockNo,
+ yaffs_BlockState * state,
+ unsigned *sequenceNumber);
+
+int yaffs_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
+ int blockInNAND);
+
+int yaffs_InitialiseNAND(struct yaffs_DeviceStruct *dev);
+
+#endif
+
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_nandemul2k.h b/target/linux/generic/files/fs/yaffs2/yaffs_nandemul2k.h
new file mode 100644
index 000000000..cd2e96f7a
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_nandemul2k.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* Interface to emulated NAND functions (2k page size) */
+
+#ifndef __YAFFS_NANDEMUL2K_H__
+#define __YAFFS_NANDEMUL2K_H__
+
+#include "yaffs_guts.h"
+
+int nandemul2k_WriteChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev,
+ int chunkInNAND, const __u8 * data,
+ yaffs_ExtendedTags * tags);
+int nandemul2k_ReadChunkWithTagsFromNAND(struct yaffs_DeviceStruct *dev,
+ int chunkInNAND, __u8 * data,
+ yaffs_ExtendedTags * tags);
+int nandemul2k_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
+int nandemul2k_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+ yaffs_BlockState * state, int *sequenceNumber);
+int nandemul2k_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
+ int blockInNAND);
+int nandemul2k_InitialiseNAND(struct yaffs_DeviceStruct *dev);
+int nandemul2k_GetBytesPerChunk(void);
+int nandemul2k_GetChunksPerBlock(void);
+int nandemul2k_GetNumberOfBlocks(void);
+
+#endif
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_packedtags1.c b/target/linux/generic/files/fs/yaffs2/yaffs_packedtags1.c
new file mode 100644
index 000000000..f480bf1df
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_packedtags1.c
@@ -0,0 +1,52 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags1.h"
+#include "yportenv.h"
+
+void yaffs_PackTags1(yaffs_PackedTags1 * pt, const yaffs_ExtendedTags * t)
+{
+ pt->chunkId = t->chunkId;
+ pt->serialNumber = t->serialNumber;
+ pt->byteCount = t->byteCount;
+ pt->objectId = t->objectId;
+ pt->ecc = 0;
+ pt->deleted = (t->chunkDeleted) ? 0 : 1;
+ pt->unusedStuff = 0;
+ pt->shouldBeFF = 0xFFFFFFFF;
+
+}
+
+void yaffs_UnpackTags1(yaffs_ExtendedTags * t, const yaffs_PackedTags1 * pt)
+{
+ static const __u8 allFF[] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff };
+
+ if (memcmp(allFF, pt, sizeof(yaffs_PackedTags1))) {
+ t->blockBad = 0;
+ if (pt->shouldBeFF != 0xFFFFFFFF) {
+ t->blockBad = 1;
+ }
+ t->chunkUsed = 1;
+ t->objectId = pt->objectId;
+ t->chunkId = pt->chunkId;
+ t->byteCount = pt->byteCount;
+ t->eccResult = YAFFS_ECC_RESULT_NO_ERROR;
+ t->chunkDeleted = (pt->deleted) ? 0 : 1;
+ t->serialNumber = pt->serialNumber;
+ } else {
+ memset(t, 0, sizeof(yaffs_ExtendedTags));
+
+ }
+}
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_packedtags1.h b/target/linux/generic/files/fs/yaffs2/yaffs_packedtags1.h
new file mode 100644
index 000000000..776c5c256
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_packedtags1.h
@@ -0,0 +1,37 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS1 tags, not YAFFS2 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS1_H__
+#define __YAFFS_PACKEDTAGS1_H__
+
+#include "yaffs_guts.h"
+
+typedef struct {
+ unsigned chunkId:20;
+ unsigned serialNumber:2;
+ unsigned byteCount:10;
+ unsigned objectId:18;
+ unsigned ecc:12;
+ unsigned deleted:1;
+ unsigned unusedStuff:1;
+ unsigned shouldBeFF;
+
+} yaffs_PackedTags1;
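+
+/* Editor's note (not in the original header): the bitfields above are
+ * expected to pack into 8 bytes, followed by the 4-byte shouldBeFF marker,
+ * giving sizeof(yaffs_PackedTags1) == 12. yaffs_mtdif1.c relies on this via
+ * compile_time_assertion(sizeof(yaffs_PackedTags1) == 12) and writes only the
+ * first YTAG1_SIZE (8 or 9) bytes of the structure into the NAND spare area.
+ */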
+
+void yaffs_PackTags1(yaffs_PackedTags1 * pt, const yaffs_ExtendedTags * t);
+void yaffs_UnpackTags1(yaffs_ExtendedTags * t, const yaffs_PackedTags1 * pt);
+#endif
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_packedtags2.c b/target/linux/generic/files/fs/yaffs2/yaffs_packedtags2.c
new file mode 100644
index 000000000..e420f95da
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_packedtags2.c
@@ -0,0 +1,182 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags2.h"
+#include "yportenv.h"
+#include "yaffs_tagsvalidity.h"
+
+/* This code packs a set of extended tags into a binary structure for
+ * NAND storage
+ */
+
+/* Some of the information is "extra" stuff which can be packed in to
+ * speed scanning
+ * This is defined by having the EXTRA_HEADER_INFO_FLAG set.
+ */
+
+/* Extra flags applied to chunkId */
+
+#define EXTRA_HEADER_INFO_FLAG 0x80000000
+#define EXTRA_SHRINK_FLAG 0x40000000
+#define EXTRA_SHADOWS_FLAG 0x20000000
+#define EXTRA_SPARE_FLAGS 0x10000000
+
+#define ALL_EXTRA_FLAGS 0xF0000000
+
+/* Also, the top 4 bits of the object Id are set to the object type. */
+#define EXTRA_OBJECT_TYPE_SHIFT (28)
+#define EXTRA_OBJECT_TYPE_MASK ((0x0F) << EXTRA_OBJECT_TYPE_SHIFT)
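+
+/* Worked example (editor's addition, values invented for illustration): for
+ * an object header chunk (chunkId == 0) carrying extra info, a parent object
+ * id of 0x123 with the shrink flag set would be stored as
+ *
+ *   pt->t.chunkId = EXTRA_HEADER_INFO_FLAG | EXTRA_SHRINK_FLAG | 0x123;
+ *
+ * and the object type (here assumed YAFFS_OBJECT_TYPE_FILE) goes into the
+ * top four bits of objectId:
+ *
+ *   pt->t.objectId |= (YAFFS_OBJECT_TYPE_FILE << EXTRA_OBJECT_TYPE_SHIFT);
+ *
+ * yaffs_UnpackTags2() below reverses exactly this packing.
+ */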
+
+static void yaffs_DumpPackedTags2(const yaffs_PackedTags2 * pt)
+{
+ T(YAFFS_TRACE_MTD,
+ (TSTR("packed tags obj %d chunk %d byte %d seq %d" TENDSTR),
+ pt->t.objectId, pt->t.chunkId, pt->t.byteCount,
+ pt->t.sequenceNumber));
+}
+
+static void yaffs_DumpTags2(const yaffs_ExtendedTags * t)
+{
+ T(YAFFS_TRACE_MTD,
+ (TSTR
+	  ("ext.tags eccres %d blkbad %d chused %d obj %d chunk %d byte "
+ "%d del %d ser %d seq %d"
+ TENDSTR), t->eccResult, t->blockBad, t->chunkUsed, t->objectId,
+ t->chunkId, t->byteCount, t->chunkDeleted, t->serialNumber,
+ t->sequenceNumber));
+
+}
+
+void yaffs_PackTags2(yaffs_PackedTags2 * pt, const yaffs_ExtendedTags * t)
+{
+ pt->t.chunkId = t->chunkId;
+ pt->t.sequenceNumber = t->sequenceNumber;
+ pt->t.byteCount = t->byteCount;
+ pt->t.objectId = t->objectId;
+
+ if (t->chunkId == 0 && t->extraHeaderInfoAvailable) {
+ /* Store the extra header info instead */
+ /* We save the parent object in the chunkId */
+ pt->t.chunkId = EXTRA_HEADER_INFO_FLAG
+ | t->extraParentObjectId;
+ if (t->extraIsShrinkHeader) {
+ pt->t.chunkId |= EXTRA_SHRINK_FLAG;
+ }
+ if (t->extraShadows) {
+ pt->t.chunkId |= EXTRA_SHADOWS_FLAG;
+ }
+
+ pt->t.objectId &= ~EXTRA_OBJECT_TYPE_MASK;
+ pt->t.objectId |=
+ (t->extraObjectType << EXTRA_OBJECT_TYPE_SHIFT);
+
+ if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK) {
+ pt->t.byteCount = t->extraEquivalentObjectId;
+ } else if (t->extraObjectType == YAFFS_OBJECT_TYPE_FILE) {
+ pt->t.byteCount = t->extraFileLength;
+ } else {
+ pt->t.byteCount = 0;
+ }
+ }
+
+ yaffs_DumpPackedTags2(pt);
+ yaffs_DumpTags2(t);
+
+#ifndef YAFFS_IGNORE_TAGS_ECC
+ {
+ yaffs_ECCCalculateOther((unsigned char *)&pt->t,
+ sizeof(yaffs_PackedTags2TagsPart),
+ &pt->ecc);
+ }
+#endif
+}
+
+void yaffs_UnpackTags2(yaffs_ExtendedTags * t, yaffs_PackedTags2 * pt)
+{
+
+ memset(t, 0, sizeof(yaffs_ExtendedTags));
+
+ yaffs_InitialiseTags(t);
+
+ if (pt->t.sequenceNumber != 0xFFFFFFFF) {
+ /* Page is in use */
+#ifdef YAFFS_IGNORE_TAGS_ECC
+ {
+ t->eccResult = YAFFS_ECC_RESULT_NO_ERROR;
+ }
+#else
+ {
+ yaffs_ECCOther ecc;
+ int result;
+ yaffs_ECCCalculateOther((unsigned char *)&pt->t,
+ sizeof
+ (yaffs_PackedTags2TagsPart),
+ &ecc);
+ result =
+ yaffs_ECCCorrectOther((unsigned char *)&pt->t,
+ sizeof
+ (yaffs_PackedTags2TagsPart),
+ &pt->ecc, &ecc);
+ switch(result){
+ case 0:
+ t->eccResult = YAFFS_ECC_RESULT_NO_ERROR;
+ break;
+ case 1:
+ t->eccResult = YAFFS_ECC_RESULT_FIXED;
+ break;
+ case -1:
+ t->eccResult = YAFFS_ECC_RESULT_UNFIXED;
+ break;
+ default:
+ t->eccResult = YAFFS_ECC_RESULT_UNKNOWN;
+ }
+ }
+#endif
+ t->blockBad = 0;
+ t->chunkUsed = 1;
+ t->objectId = pt->t.objectId;
+ t->chunkId = pt->t.chunkId;
+ t->byteCount = pt->t.byteCount;
+ t->chunkDeleted = 0;
+ t->serialNumber = 0;
+ t->sequenceNumber = pt->t.sequenceNumber;
+
+ /* Do extra header info stuff */
+
+ if (pt->t.chunkId & EXTRA_HEADER_INFO_FLAG) {
+ t->chunkId = 0;
+ t->byteCount = 0;
+
+ t->extraHeaderInfoAvailable = 1;
+ t->extraParentObjectId =
+ pt->t.chunkId & (~(ALL_EXTRA_FLAGS));
+ t->extraIsShrinkHeader =
+ (pt->t.chunkId & EXTRA_SHRINK_FLAG) ? 1 : 0;
+ t->extraShadows =
+ (pt->t.chunkId & EXTRA_SHADOWS_FLAG) ? 1 : 0;
+ t->extraObjectType =
+ pt->t.objectId >> EXTRA_OBJECT_TYPE_SHIFT;
+ t->objectId &= ~EXTRA_OBJECT_TYPE_MASK;
+
+ if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK) {
+ t->extraEquivalentObjectId = pt->t.byteCount;
+ } else {
+ t->extraFileLength = pt->t.byteCount;
+ }
+ }
+ }
+
+ yaffs_DumpPackedTags2(pt);
+ yaffs_DumpTags2(t);
+
+}
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_packedtags2.h b/target/linux/generic/files/fs/yaffs2/yaffs_packedtags2.h
new file mode 100644
index 000000000..c2242ffe7
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_packedtags2.h
@@ -0,0 +1,38 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS2 tags, not YAFFS1 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS2_H__
+#define __YAFFS_PACKEDTAGS2_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_ecc.h"
+
+typedef struct {
+ unsigned sequenceNumber;
+ unsigned objectId;
+ unsigned chunkId;
+ unsigned byteCount;
+} yaffs_PackedTags2TagsPart;
+
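+/* On-NAND form: the tags part followed by an ECC computed over it
+ * (see yaffs_PackTags2() in yaffs_packedtags2.c). */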
+typedef struct {
+ yaffs_PackedTags2TagsPart t;
+ yaffs_ECCOther ecc;
+} yaffs_PackedTags2;
+
+void yaffs_PackTags2(yaffs_PackedTags2 * pt, const yaffs_ExtendedTags * t);
+void yaffs_UnpackTags2(yaffs_ExtendedTags * t, yaffs_PackedTags2 * pt);
+#endif
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_qsort.c b/target/linux/generic/files/fs/yaffs2/yaffs_qsort.c
new file mode 100644
index 000000000..474be9cea
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_qsort.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "yportenv.h"
+//#include <linux/string.h>
+
+/*
+ * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
+ */
+#define swapcode(TYPE, parmi, parmj, n) { \
+ long i = (n) / sizeof (TYPE); \
+ register TYPE *pi = (TYPE *) (parmi); \
+ register TYPE *pj = (TYPE *) (parmj); \
+ do { \
+ register TYPE t = *pi; \
+ *pi++ = *pj; \
+ *pj++ = t; \
+ } while (--i > 0); \
+}
+
+#define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
+ es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1;
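+
+/*
+ * swaptype selects the cheapest safe swap: 0 = single long swap
+ * (long-aligned data, element size equal to sizeof(long)); 1 = long-wise
+ * copy (long-aligned, element size a multiple of sizeof(long));
+ * 2 = byte-wise copy for everything else.
+ */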
+
+static __inline void
+swapfunc(char *a, char *b, int n, int swaptype)
+{
+ if (swaptype <= 1)
+ swapcode(long, a, b, n)
+ else
+ swapcode(char, a, b, n)
+}
+
+#define swap(a, b) \
+ if (swaptype == 0) { \
+ long t = *(long *)(a); \
+ *(long *)(a) = *(long *)(b); \
+ *(long *)(b) = t; \
+ } else \
+ swapfunc(a, b, es, swaptype)
+
+#define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype)
+
+static __inline char *
+med3(char *a, char *b, char *c, int (*cmp)(const void *, const void *))
+{
+ return cmp(a, b) < 0 ?
+ (cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a ))
+ :(cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? a : c ));
+}
+
+#ifndef min
+#define min(a,b) (((a) < (b)) ? (a) : (b))
+#endif
+
+void
+yaffs_qsort(void *aa, size_t n, size_t es,
+ int (*cmp)(const void *, const void *))
+{
+ char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
+ int d, r, swaptype, swap_cnt;
+ register char *a = aa;
+
+loop: SWAPINIT(a, es);
+ swap_cnt = 0;
+ if (n < 7) {
+ for (pm = (char *)a + es; pm < (char *) a + n * es; pm += es)
+ for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0;
+ pl -= es)
+ swap(pl, pl - es);
+ return;
+ }
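+ /* Pivot selection: middle element for small n, median of three
+ * for n > 7, median of three medians ("ninther") for n > 40. */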
+ pm = (char *)a + (n / 2) * es;
+ if (n > 7) {
+ pl = (char *)a;
+ pn = (char *)a + (n - 1) * es;
+ if (n > 40) {
+ d = (n / 8) * es;
+ pl = med3(pl, pl + d, pl + 2 * d, cmp);
+ pm = med3(pm - d, pm, pm + d, cmp);
+ pn = med3(pn - 2 * d, pn - d, pn, cmp);
+ }
+ pm = med3(pl, pm, pn, cmp);
+ }
+ swap(a, pm);
+ pa = pb = (char *)a + es;
+
+ pc = pd = (char *)a + (n - 1) * es;
+ for (;;) {
+ while (pb <= pc && (r = cmp(pb, a)) <= 0) {
+ if (r == 0) {
+ swap_cnt = 1;
+ swap(pa, pb);
+ pa += es;
+ }
+ pb += es;
+ }
+ while (pb <= pc && (r = cmp(pc, a)) >= 0) {
+ if (r == 0) {
+ swap_cnt = 1;
+ swap(pc, pd);
+ pd -= es;
+ }
+ pc -= es;
+ }
+ if (pb > pc)
+ break;
+ swap(pb, pc);
+ swap_cnt = 1;
+ pb += es;
+ pc -= es;
+ }
+ if (swap_cnt == 0) { /* Switch to insertion sort */
+ for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es)
+ for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0;
+ pl -= es)
+ swap(pl, pl - es);
+ return;
+ }
+
+ pn = (char *)a + n * es;
+ r = min(pa - (char *)a, pb - pa);
+ vecswap(a, pb - r, r);
+ r = min((long)(pd - pc), (long)(pn - pd - es));
+ vecswap(pb, pn - r, r);
+ if ((r = pb - pa) > es)
+ yaffs_qsort(a, r / es, es, cmp);
+ if ((r = pd - pc) > es) {
+ /* Iterate rather than recurse to save stack space */
+ a = pn - r;
+ n = r / es;
+ goto loop;
+ }
+/* yaffs_qsort(pn - r, r / es, es, cmp);*/
+}
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_qsort.h b/target/linux/generic/files/fs/yaffs2/yaffs_qsort.h
new file mode 100644
index 000000000..610b7ec84
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_qsort.h
@@ -0,0 +1,23 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+
+#ifndef __YAFFS_QSORT_H__
+#define __YAFFS_QSORT_H__
+
+extern void yaffs_qsort (void *const base, size_t total_elems, size_t size,
+ int (*cmp)(const void *, const void *));
+
+#endif
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_tagscompat.c b/target/linux/generic/files/fs/yaffs2/yaffs_tagscompat.c
new file mode 100644
index 000000000..7622b1af7
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_tagscompat.c
@@ -0,0 +1,530 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_ecc.h"
+
+static void yaffs_HandleReadDataError(yaffs_Device * dev, int chunkInNAND);
+#ifdef NOTYET
+static void yaffs_CheckWrittenBlock(yaffs_Device * dev, int chunkInNAND);
+static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
+ const __u8 * data,
+ const yaffs_Spare * spare);
+static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
+ const yaffs_Spare * spare);
+static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND);
+#endif
+
+static const char yaffs_countBitsTable[256] = {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
+};
+
+int yaffs_CountBits(__u8 x)
+{
+ int retVal;
+ retVal = yaffs_countBitsTable[x];
+ return retVal;
+}
+
+/********** Tags ECC calculations *********/
+
+void yaffs_CalcECC(const __u8 * data, yaffs_Spare * spare)
+{
+ yaffs_ECCCalculate(data, spare->ecc1);
+ yaffs_ECCCalculate(&data[256], spare->ecc2);
+}
+
+void yaffs_CalcTagsECC(yaffs_Tags * tags)
+{
+ /* Calculate an ecc */
+
+ unsigned char *b = ((yaffs_TagsUnion *) tags)->asBytes;
+ unsigned i, j;
+ unsigned ecc = 0;
+ unsigned bit = 0;
+
+ tags->ecc = 0;
+
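+ /* ecc accumulates the XOR of the 1-based positions of all set bits
+ * in the eight tag bytes; a later single-bit flip changes ecc by
+ * exactly that position, which yaffs_CheckECCOnTags() uses to
+ * locate and repair the flipped bit. */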
+ for (i = 0; i < 8; i++) {
+ for (j = 1; j & 0xff; j <<= 1) {
+ bit++;
+ if (b[i] & j) {
+ ecc ^= bit;
+ }
+ }
+ }
+
+ tags->ecc = ecc;
+
+}
+
+int yaffs_CheckECCOnTags(yaffs_Tags * tags)
+{
+ unsigned ecc = tags->ecc;
+
+ yaffs_CalcTagsECC(tags);
+
+ ecc ^= tags->ecc;
+
+ if (ecc && ecc <= 64) {
+ /* TODO: Handle the failure better. Retire? */
+ unsigned char *b = ((yaffs_TagsUnion *) tags)->asBytes;
+
+ ecc--;
+
+ b[ecc / 8] ^= (1 << (ecc & 7));
+
+ /* Now recalculate the ecc */
+ yaffs_CalcTagsECC(tags);
+
+ return 1; /* recovered error */
+ } else if (ecc) {
+ /* Weird ecc failure value */
+ /* TODO Need to do something here */
+ return -1; /* unrecovered error */
+ }
+
+ return 0;
+}
+
+/********** Tags **********/
+
+static void yaffs_LoadTagsIntoSpare(yaffs_Spare * sparePtr,
+ yaffs_Tags * tagsPtr)
+{
+ yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr;
+
+ yaffs_CalcTagsECC(tagsPtr);
+
+ sparePtr->tagByte0 = tu->asBytes[0];
+ sparePtr->tagByte1 = tu->asBytes[1];
+ sparePtr->tagByte2 = tu->asBytes[2];
+ sparePtr->tagByte3 = tu->asBytes[3];
+ sparePtr->tagByte4 = tu->asBytes[4];
+ sparePtr->tagByte5 = tu->asBytes[5];
+ sparePtr->tagByte6 = tu->asBytes[6];
+ sparePtr->tagByte7 = tu->asBytes[7];
+}
+
+static void yaffs_GetTagsFromSpare(yaffs_Device * dev, yaffs_Spare * sparePtr,
+ yaffs_Tags * tagsPtr)
+{
+ yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr;
+ int result;
+
+ tu->asBytes[0] = sparePtr->tagByte0;
+ tu->asBytes[1] = sparePtr->tagByte1;
+ tu->asBytes[2] = sparePtr->tagByte2;
+ tu->asBytes[3] = sparePtr->tagByte3;
+ tu->asBytes[4] = sparePtr->tagByte4;
+ tu->asBytes[5] = sparePtr->tagByte5;
+ tu->asBytes[6] = sparePtr->tagByte6;
+ tu->asBytes[7] = sparePtr->tagByte7;
+
+ result = yaffs_CheckECCOnTags(tagsPtr);
+ if (result > 0) {
+ dev->tagsEccFixed++;
+ } else if (result < 0) {
+ dev->tagsEccUnfixed++;
+ }
+}
+
+static void yaffs_SpareInitialise(yaffs_Spare * spare)
+{
+ memset(spare, 0xFF, sizeof(yaffs_Spare));
+}
+
+static int yaffs_WriteChunkToNAND(struct yaffs_DeviceStruct *dev,
+ int chunkInNAND, const __u8 * data,
+ yaffs_Spare * spare)
+{
+ if (chunkInNAND < dev->startBlock * dev->nChunksPerBlock) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("**>> yaffs chunk %d is not valid" TENDSTR),
+ chunkInNAND));
+ return YAFFS_FAIL;
+ }
+
+ dev->nPageWrites++;
+ return dev->writeChunkToNAND(dev, chunkInNAND, data, spare);
+}
+
+static int yaffs_ReadChunkFromNAND(struct yaffs_DeviceStruct *dev,
+ int chunkInNAND,
+ __u8 * data,
+ yaffs_Spare * spare,
+ yaffs_ECCResult * eccResult,
+ int doErrorCorrection)
+{
+ int retVal;
+ yaffs_Spare localSpare;
+
+ dev->nPageReads++;
+
+ if (!spare && data) {
+ /* If we don't have a real spare, then we use a local one. */
+ /* Need this for the calculation of the ecc */
+ spare = &localSpare;
+ }
+
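+ /* Two paths: do the ECC correction in software from the ecc bytes
+ * kept in the spare area, or let the lower layer correct the data
+ * and interpret its per-region results from yaffs_NANDSpare. */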
+ if (!dev->useNANDECC) {
+ retVal = dev->readChunkFromNAND(dev, chunkInNAND, data, spare);
+ if (data && doErrorCorrection) {
+ /* Do ECC correction */
+ /* Todo handle any errors */
+ int eccResult1, eccResult2;
+ __u8 calcEcc[3];
+
+ yaffs_ECCCalculate(data, calcEcc);
+ eccResult1 =
+ yaffs_ECCCorrect(data, spare->ecc1, calcEcc);
+ yaffs_ECCCalculate(&data[256], calcEcc);
+ eccResult2 =
+ yaffs_ECCCorrect(&data[256], spare->ecc2, calcEcc);
+
+ if (eccResult1 > 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>yaffs ecc error fix performed on chunk %d:0"
+ TENDSTR), chunkInNAND));
+ dev->eccFixed++;
+ } else if (eccResult1 < 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>yaffs ecc error unfixed on chunk %d:0"
+ TENDSTR), chunkInNAND));
+ dev->eccUnfixed++;
+ }
+
+ if (eccResult2 > 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>yaffs ecc error fix performed on chunk %d:1"
+ TENDSTR), chunkInNAND));
+ dev->eccFixed++;
+ } else if (eccResult2 < 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>yaffs ecc error unfixed on chunk %d:1"
+ TENDSTR), chunkInNAND));
+ dev->eccUnfixed++;
+ }
+
+ if (eccResult1 || eccResult2) {
+ /* We had a data problem on this page */
+ yaffs_HandleReadDataError(dev, chunkInNAND);
+ }
+
+ if (eccResult1 < 0 || eccResult2 < 0)
+ *eccResult = YAFFS_ECC_RESULT_UNFIXED;
+ else if (eccResult1 > 0 || eccResult2 > 0)
+ *eccResult = YAFFS_ECC_RESULT_FIXED;
+ else
+ *eccResult = YAFFS_ECC_RESULT_NO_ERROR;
+ }
+ } else {
+ /* Must allocate enough memory for spare+2*sizeof(int) */
+ /* for ecc results from device. */
+ struct yaffs_NANDSpare nspare;
+ retVal =
+ dev->readChunkFromNAND(dev, chunkInNAND, data,
+ (yaffs_Spare *) & nspare);
+ memcpy(spare, &nspare, sizeof(yaffs_Spare));
+ if (data && doErrorCorrection) {
+ if (nspare.eccres1 > 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>mtd ecc error fix performed on chunk %d:0"
+ TENDSTR), chunkInNAND));
+ } else if (nspare.eccres1 < 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>mtd ecc error unfixed on chunk %d:0"
+ TENDSTR), chunkInNAND));
+ }
+
+ if (nspare.eccres2 > 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>mtd ecc error fix performed on chunk %d:1"
+ TENDSTR), chunkInNAND));
+ } else if (nspare.eccres2 < 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>mtd ecc error unfixed on chunk %d:1"
+ TENDSTR), chunkInNAND));
+ }
+
+ if (nspare.eccres1 || nspare.eccres2) {
+ /* We had a data problem on this page */
+ yaffs_HandleReadDataError(dev, chunkInNAND);
+ }
+
+ if (nspare.eccres1 < 0 || nspare.eccres2 < 0)
+ *eccResult = YAFFS_ECC_RESULT_UNFIXED;
+ else if (nspare.eccres1 > 0 || nspare.eccres2 > 0)
+ *eccResult = YAFFS_ECC_RESULT_FIXED;
+ else
+ *eccResult = YAFFS_ECC_RESULT_NO_ERROR;
+
+ }
+ }
+ return retVal;
+}
+
+#ifdef NOTYET
+static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
+ int chunkInNAND)
+{
+
+ static int init = 0;
+ static __u8 cmpbuf[YAFFS_BYTES_PER_CHUNK];
+ static __u8 data[YAFFS_BYTES_PER_CHUNK];
+ /* Might as well always allocate the larger size for */
+ /* dev->useNANDECC == true; */
+ static __u8 spare[sizeof(struct yaffs_NANDSpare)];
+
+ dev->readChunkFromNAND(dev, chunkInNAND, data, (yaffs_Spare *) spare);
+
+ if (!init) {
+ memset(cmpbuf, 0xff, YAFFS_BYTES_PER_CHUNK);
+ init = 1;
+ }
+
+ if (memcmp(cmpbuf, data, YAFFS_BYTES_PER_CHUNK))
+ return YAFFS_FAIL;
+ if (memcmp(cmpbuf, spare, 16))
+ return YAFFS_FAIL;
+
+ return YAFFS_OK;
+
+}
+#endif
+
+/*
+ * Functions for robustness handling
+ */
+
+static void yaffs_HandleReadDataError(yaffs_Device * dev, int chunkInNAND)
+{
+ int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
+
+ /* Mark the block for retirement */
+ yaffs_GetBlockInfo(dev, blockInNAND)->needsRetiring = 1;
+ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ (TSTR("**>>Block %d marked for retirement" TENDSTR), blockInNAND));
+
+ /* TODO:
+ * Just do a garbage collection on the affected block
+ * then retire the block
+ * NB recursion
+ */
+}
+
+#ifdef NOTYET
+static void yaffs_CheckWrittenBlock(yaffs_Device * dev, int chunkInNAND)
+{
+}
+
+static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
+ const __u8 * data,
+ const yaffs_Spare * spare)
+{
+}
+
+static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
+ const yaffs_Spare * spare)
+{
+}
+
+static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND)
+{
+ int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
+
+ /* Mark the block for retirement */
+ yaffs_GetBlockInfo(dev, blockInNAND)->needsRetiring = 1;
+ /* Delete the chunk */
+ yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
+}
+
+static int yaffs_VerifyCompare(const __u8 * d0, const __u8 * d1,
+ const yaffs_Spare * s0, const yaffs_Spare * s1)
+{
+
+ if (memcmp(d0, d1, YAFFS_BYTES_PER_CHUNK) != 0 ||
+ s0->tagByte0 != s1->tagByte0 ||
+ s0->tagByte1 != s1->tagByte1 ||
+ s0->tagByte2 != s1->tagByte2 ||
+ s0->tagByte3 != s1->tagByte3 ||
+ s0->tagByte4 != s1->tagByte4 ||
+ s0->tagByte5 != s1->tagByte5 ||
+ s0->tagByte6 != s1->tagByte6 ||
+ s0->tagByte7 != s1->tagByte7 ||
+ s0->ecc1[0] != s1->ecc1[0] ||
+ s0->ecc1[1] != s1->ecc1[1] ||
+ s0->ecc1[2] != s1->ecc1[2] ||
+ s0->ecc2[0] != s1->ecc2[0] ||
+ s0->ecc2[1] != s1->ecc2[1] || s0->ecc2[2] != s1->ecc2[2]) {
+ return 0;
+ }
+
+ return 1;
+}
+#endif /* NOTYET */
+
+int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device * dev,
+ int chunkInNAND,
+ const __u8 * data,
+ const yaffs_ExtendedTags *
+ eTags)
+{
+ yaffs_Spare spare;
+ yaffs_Tags tags;
+
+ yaffs_SpareInitialise(&spare);
+
+ if (eTags->chunkDeleted) {
+ spare.pageStatus = 0;
+ } else {
+ tags.objectId = eTags->objectId;
+ tags.chunkId = eTags->chunkId;
+ tags.byteCount = eTags->byteCount;
+ tags.serialNumber = eTags->serialNumber;
+
+ if (!dev->useNANDECC && data) {
+ yaffs_CalcECC(data, &spare);
+ }
+ yaffs_LoadTagsIntoSpare(&spare, &tags);
+
+ }
+
+ return yaffs_WriteChunkToNAND(dev, chunkInNAND, data, &spare);
+}
+
+int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device * dev,
+ int chunkInNAND,
+ __u8 * data,
+ yaffs_ExtendedTags * eTags)
+{
+
+ yaffs_Spare spare;
+ yaffs_Tags tags;
+ yaffs_ECCResult eccResult;
+
+ static yaffs_Spare spareFF;
+ static int init;
+
+ if (!init) {
+ memset(&spareFF, 0xFF, sizeof(spareFF));
+ init = 1;
+ }
+
+ if (yaffs_ReadChunkFromNAND
+ (dev, chunkInNAND, data, &spare, &eccResult, 1)) {
+ /* eTags may be NULL */
+ if (eTags) {
+
+ int deleted =
+ (yaffs_CountBits(spare.pageStatus) < 7) ? 1 : 0;
+
+ eTags->chunkDeleted = deleted;
+ eTags->eccResult = eccResult;
+ eTags->blockBad = 0; /* We're reading it */
+ /* therefore it is not a bad block */
+ eTags->chunkUsed =
+ (memcmp(&spareFF, &spare, sizeof(spareFF)) !=
+ 0) ? 1 : 0;
+
+ if (eTags->chunkUsed) {
+ yaffs_GetTagsFromSpare(dev, &spare, &tags);
+
+ eTags->objectId = tags.objectId;
+ eTags->chunkId = tags.chunkId;
+ eTags->byteCount = tags.byteCount;
+ eTags->serialNumber = tags.serialNumber;
+ }
+ }
+
+ return YAFFS_OK;
+ } else {
+ return YAFFS_FAIL;
+ }
+}
+
+int yaffs_TagsCompatabilityMarkNANDBlockBad(struct yaffs_DeviceStruct *dev,
+ int blockInNAND)
+{
+
+ yaffs_Spare spare;
+
+ memset(&spare, 0xff, sizeof(yaffs_Spare));
+
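+ /* 'Y' (0x59) has fewer than 7 bits set, so a later scan (see
+ * yaffs_TagsCompatabilityQueryNANDBlock below) classifies the
+ * block as YAFFS_BLOCK_STATE_DEAD. */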
+ spare.blockStatus = 'Y';
+
+ yaffs_WriteChunkToNAND(dev, blockInNAND * dev->nChunksPerBlock, NULL,
+ &spare);
+ yaffs_WriteChunkToNAND(dev, blockInNAND * dev->nChunksPerBlock + 1,
+ NULL, &spare);
+
+ return YAFFS_OK;
+
+}
+
+int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev,
+ int blockNo, yaffs_BlockState *
+ state,
+ int *sequenceNumber)
+{
+
+ yaffs_Spare spare0, spare1;
+ static yaffs_Spare spareFF;
+ static int init;
+ yaffs_ECCResult dummy;
+
+ if (!init) {
+ memset(&spareFF, 0xFF, sizeof(spareFF));
+ init = 1;
+ }
+
+ *sequenceNumber = 0;
+
+ yaffs_ReadChunkFromNAND(dev, blockNo * dev->nChunksPerBlock, NULL,
+ &spare0, &dummy, 1);
+ yaffs_ReadChunkFromNAND(dev, blockNo * dev->nChunksPerBlock + 1, NULL,
+ &spare1, &dummy, 1);
+
+ if (yaffs_CountBits(spare0.blockStatus & spare1.blockStatus) < 7)
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ else if (memcmp(&spareFF, &spare0, sizeof(spareFF)) == 0)
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ else
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+
+ return YAFFS_OK;
+}
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_tagscompat.h b/target/linux/generic/files/fs/yaffs2/yaffs_tagscompat.h
new file mode 100644
index 000000000..a61e3ba14
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_tagscompat.h
@@ -0,0 +1,40 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGSCOMPAT_H__
+#define __YAFFS_TAGSCOMPAT_H__
+
+#include "yaffs_guts.h"
+int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device * dev,
+ int chunkInNAND,
+ const __u8 * data,
+ const yaffs_ExtendedTags *
+ tags);
+int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device * dev,
+ int chunkInNAND,
+ __u8 * data,
+ yaffs_ExtendedTags *
+ tags);
+int yaffs_TagsCompatabilityMarkNANDBlockBad(struct yaffs_DeviceStruct *dev,
+ int blockNo);
+int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev,
+ int blockNo, yaffs_BlockState *
+ state, int *sequenceNumber);
+
+void yaffs_CalcTagsECC(yaffs_Tags * tags);
+int yaffs_CheckECCOnTags(yaffs_Tags * tags);
+int yaffs_CountBits(__u8 byte);
+
+#endif
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_tagsvalidity.c b/target/linux/generic/files/fs/yaffs2/yaffs_tagsvalidity.c
new file mode 100644
index 000000000..9e0bd1cf5
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_tagsvalidity.c
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_tagsvalidity.h"
+
+void yaffs_InitialiseTags(yaffs_ExtendedTags * tags)
+{
+ memset(tags, 0, sizeof(yaffs_ExtendedTags));
+ tags->validMarker0 = 0xAAAAAAAA;
+ tags->validMarker1 = 0x55555555;
+}
+
+int yaffs_ValidateTags(yaffs_ExtendedTags * tags)
+{
+ return (tags->validMarker0 == 0xAAAAAAAA &&
+ tags->validMarker1 == 0x55555555);
+
+}
diff --git a/target/linux/generic/files/fs/yaffs2/yaffs_tagsvalidity.h b/target/linux/generic/files/fs/yaffs2/yaffs_tagsvalidity.h
new file mode 100644
index 000000000..2fd0c24ed
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffs_tagsvalidity.h
@@ -0,0 +1,24 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+
+#ifndef __YAFFS_TAGS_VALIDITY_H__
+#define __YAFFS_TAGS_VALIDITY_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_InitialiseTags(yaffs_ExtendedTags * tags);
+int yaffs_ValidateTags(yaffs_ExtendedTags * tags);
+#endif
diff --git a/target/linux/generic/files/fs/yaffs2/yaffsinterface.h b/target/linux/generic/files/fs/yaffs2/yaffsinterface.h
new file mode 100644
index 000000000..810837a32
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yaffsinterface.h
@@ -0,0 +1,21 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFSINTERFACE_H__
+#define __YAFFSINTERFACE_H__
+
+int yaffs_Initialise(unsigned nBlocks);
+
+#endif
diff --git a/target/linux/generic/files/fs/yaffs2/yportenv.h b/target/linux/generic/files/fs/yaffs2/yportenv.h
new file mode 100644
index 000000000..15ac28121
--- /dev/null
+++ b/target/linux/generic/files/fs/yaffs2/yportenv.h
@@ -0,0 +1,187 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+
+#ifndef __YPORTENV_H__
+#define __YPORTENV_H__
+
+#if defined CONFIG_YAFFS_WINCE
+
+#include "ywinceenv.h"
+
+#elif defined __KERNEL__
+
+#include "moduleconfig.h"
+
+/* Linux kernel */
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+#include <linux/config.h>
+#endif
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#define YCHAR char
+#define YUCHAR unsigned char
+#define _Y(x) x
+#define yaffs_strcpy(a,b) strcpy(a,b)
+#define yaffs_strncpy(a,b,c) strncpy(a,b,c)
+#define yaffs_strncmp(a,b,c) strncmp(a,b,c)
+#define yaffs_strlen(s) strlen(s)
+#define yaffs_sprintf sprintf
+#define yaffs_toupper(a) toupper(a)
+
+#define Y_INLINE inline
+
+#define YAFFS_LOSTNFOUND_NAME "lost+found"
+#define YAFFS_LOSTNFOUND_PREFIX "obj"
+
+/* #define YPRINTF(x) printk x */
+#define YMALLOC(x) kmalloc(x,GFP_KERNEL)
+#define YFREE(x) kfree(x)
+#define YMALLOC_ALT(x) vmalloc(x)
+#define YFREE_ALT(x) vfree(x)
+#define YMALLOC_DMA(x) YMALLOC(x)
+
+// KR - added for use in scan so processes aren't blocked indefinitely.
+#define YYIELD() schedule()
+
+#define YAFFS_ROOT_MODE 0666
+#define YAFFS_LOSTNFOUND_MODE 0666
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+#define Y_CURRENT_TIME CURRENT_TIME.tv_sec
+#define Y_TIME_CONVERT(x) (x).tv_sec
+#else
+#define Y_CURRENT_TIME CURRENT_TIME
+#define Y_TIME_CONVERT(x) (x)
+#endif
+
+#define yaffs_SumCompare(x,y) ((x) == (y))
+#define yaffs_strcmp(a,b) strcmp(a,b)
+
+#define TENDSTR "\n"
+#define TSTR(x) KERN_WARNING x
+#define TOUT(p) printk p
+
+#define yaffs_trace(mask, fmt, args...) \
+ do { if ((mask) & (yaffs_traceMask|YAFFS_TRACE_ERROR)) \
+ printk(KERN_WARNING "yaffs: " fmt, ## args); \
+ } while (0)
+
+#define compile_time_assertion(assertion) \
+ ({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
+
+#elif defined CONFIG_YAFFS_DIRECT
+
+/* Direct interface */
+#include "ydirectenv.h"
+
+#elif defined CONFIG_YAFFS_UTIL
+
+/* Stuff for YAFFS utilities */
+
+#include "stdlib.h"
+#include "stdio.h"
+#include "string.h"
+
+#include "devextras.h"
+
+#define YMALLOC(x) malloc(x)
+#define YFREE(x) free(x)
+#define YMALLOC_ALT(x) malloc(x)
+#define YFREE_ALT(x) free(x)
+
+#define YCHAR char
+#define YUCHAR unsigned char
+#define _Y(x) x
+#define yaffs_strcpy(a,b) strcpy(a,b)
+#define yaffs_strncpy(a,b,c) strncpy(a,b,c)
+#define yaffs_strlen(s) strlen(s)
+#define yaffs_sprintf sprintf
+#define yaffs_toupper(a) toupper(a)
+
+#define Y_INLINE inline
+
+/* #define YINFO(s) YPRINTF(( __FILE__ " %d %s\n",__LINE__,s)) */
+/* #define YALERT(s) YINFO(s) */
+
+#define TENDSTR "\n"
+#define TSTR(x) x
+#define TOUT(p) printf p
+
+#define YAFFS_LOSTNFOUND_NAME "lost+found"
+#define YAFFS_LOSTNFOUND_PREFIX "obj"
+/* #define YPRINTF(x) printf x */
+
+#define YAFFS_ROOT_MODE 0666
+#define YAFFS_LOSTNFOUND_MODE 0666
+
+#define yaffs_SumCompare(x,y) ((x) == (y))
+#define yaffs_strcmp(a,b) strcmp(a,b)
+
+#else
+/* Should have specified a configuration type */
+#error Unknown configuration
+
+#endif
+
+/* see yaffs_fs.c */
+extern unsigned int yaffs_traceMask;
+extern unsigned int yaffs_wr_attempts;
+
+/*
+ * Tracing flags.
+ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
+ */
+
+#define YAFFS_TRACE_OS 0x00000002
+#define YAFFS_TRACE_ALLOCATE 0x00000004
+#define YAFFS_TRACE_SCAN 0x00000008
+#define YAFFS_TRACE_BAD_BLOCKS 0x00000010
+#define YAFFS_TRACE_ERASE 0x00000020
+#define YAFFS_TRACE_GC 0x00000040
+#define YAFFS_TRACE_WRITE 0x00000080
+#define YAFFS_TRACE_TRACING 0x00000100
+#define YAFFS_TRACE_DELETION 0x00000200
+#define YAFFS_TRACE_BUFFERS 0x00000400
+#define YAFFS_TRACE_NANDACCESS 0x00000800
+#define YAFFS_TRACE_GC_DETAIL 0x00001000
+#define YAFFS_TRACE_SCAN_DEBUG 0x00002000
+#define YAFFS_TRACE_MTD 0x00004000
+#define YAFFS_TRACE_CHECKPOINT 0x00008000
+
+#define YAFFS_TRACE_VERIFY 0x00010000
+#define YAFFS_TRACE_VERIFY_NAND 0x00020000
+#define YAFFS_TRACE_VERIFY_FULL 0x00040000
+#define YAFFS_TRACE_VERIFY_ALL 0x000F0000
+
+
+#define YAFFS_TRACE_ERROR 0x40000000
+#define YAFFS_TRACE_BUG 0x80000000
+#define YAFFS_TRACE_ALWAYS 0xF0000000
+
+
+#define T(mask,p) do{ if((mask) & (yaffs_traceMask | YAFFS_TRACE_ALWAYS)) TOUT(p);} while(0)
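+/* Typical call, as used throughout the YAFFS code:
+ *	T(YAFFS_TRACE_SCAN, (TSTR("block %d" TENDSTR), blk));
+ */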
+
+#ifndef CONFIG_YAFFS_WINCE
+#define YBUG() T(YAFFS_TRACE_BUG,(TSTR("==>> yaffs bug: " __FILE__ " %d" TENDSTR),__LINE__))
+#endif
+
+#endif
diff --git a/target/linux/generic/files/include/linux/ar8216_platform.h b/target/linux/generic/files/include/linux/ar8216_platform.h
new file mode 100644
index 000000000..f5c2ef065
--- /dev/null
+++ b/target/linux/generic/files/include/linux/ar8216_platform.h
@@ -0,0 +1,81 @@
+/*
+ * AR8216 switch driver platform data
+ *
+ * Copyright (C) 2012 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef AR8216_PLATFORM_H
+#define AR8216_PLATFORM_H
+
+enum ar8327_pad_mode {
+ AR8327_PAD_NC = 0,
+ AR8327_PAD_MAC2MAC_MII,
+ AR8327_PAD_MAC2MAC_GMII,
+ AR8327_PAD_MAC_SGMII,
+ AR8327_PAD_MAC2PHY_MII,
+ AR8327_PAD_MAC2PHY_GMII,
+ AR8327_PAD_MAC_RGMII,
+ AR8327_PAD_PHY_GMII,
+ AR8327_PAD_PHY_RGMII,
+ AR8327_PAD_PHY_MII,
+};
+
+enum ar8327_clk_delay_sel {
+ AR8327_CLK_DELAY_SEL0 = 0,
+ AR8327_CLK_DELAY_SEL1,
+ AR8327_CLK_DELAY_SEL2,
+ AR8327_CLK_DELAY_SEL3,
+};
+
+struct ar8327_pad_cfg {
+ enum ar8327_pad_mode mode;
+ bool rxclk_sel;
+ bool txclk_sel;
+ bool pipe_rxclk_sel;
+ bool txclk_delay_en;
+ bool rxclk_delay_en;
+ enum ar8327_clk_delay_sel txclk_delay_sel;
+ enum ar8327_clk_delay_sel rxclk_delay_sel;
+};
+
+enum ar8327_port_speed {
+ AR8327_PORT_SPEED_10 = 0,
+ AR8327_PORT_SPEED_100,
+ AR8327_PORT_SPEED_1000,
+};
+
+struct ar8327_port_cfg {
+ int force_link:1;
+ enum ar8327_port_speed speed;
+ int txpause:1;
+ int rxpause:1;
+ int duplex:1;
+};
+
+struct ar8327_led_cfg {
+ u32 led_ctrl0;
+ u32 led_ctrl1;
+ u32 led_ctrl2;
+ u32 led_ctrl3;
+ bool open_drain;
+};
+
+struct ar8327_platform_data {
+ struct ar8327_pad_cfg *pad0_cfg;
+ struct ar8327_pad_cfg *pad5_cfg;
+ struct ar8327_pad_cfg *pad6_cfg;
+ struct ar8327_port_cfg cpuport_cfg;
+ struct ar8327_led_cfg *led_cfg;
+};
+
+#endif /* AR8216_PLATFORM_H */
\ No newline at end of file
diff --git a/target/linux/generic/files/include/linux/ath5k_platform.h b/target/linux/generic/files/include/linux/ath5k_platform.h
new file mode 100644
index 000000000..ec8522452
--- /dev/null
+++ b/target/linux/generic/files/include/linux/ath5k_platform.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (c) 2010 Daniel Golle <daniel.golle@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _LINUX_ATH5K_PLATFORM_H
+#define _LINUX_ATH5K_PLATFORM_H
+
+#define ATH5K_PLAT_EEP_MAX_WORDS 2048
+
+struct ath5k_platform_data {
+ u16 *eeprom_data;
+ u8 *macaddr;
+};
+
+#endif /* _LINUX_ATH5K_PLATFORM_H */
diff --git a/target/linux/generic/files/include/linux/ath9k_platform.h b/target/linux/generic/files/include/linux/ath9k_platform.h
new file mode 100644
index 000000000..15b913730
--- /dev/null
+++ b/target/linux/generic/files/include/linux/ath9k_platform.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _LINUX_ATH9K_PLATFORM_H
+#define _LINUX_ATH9K_PLATFORM_H
+
+#define ATH9K_PLAT_EEP_MAX_WORDS 2048
+
+struct ath9k_platform_data {
+ u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS];
+ u8 *macaddr;
+
+ int led_pin;
+ u32 gpio_mask;
+ u32 gpio_val;
+
+ bool endian_check;
+ bool is_clk_25mhz;
+ int (*get_mac_revision)(void);
+ int (*external_reset)(void);
+
+ int num_leds;
+ const struct gpio_led *leds;
+};
+
+#endif /* _LINUX_ATH9K_PLATFORM_H */
diff --git a/target/linux/generic/files/include/linux/glamo-engine.h b/target/linux/generic/files/include/linux/glamo-engine.h
new file mode 100644
index 000000000..516d45fa6
--- /dev/null
+++ b/target/linux/generic/files/include/linux/glamo-engine.h
@@ -0,0 +1,27 @@
+#ifndef __GLAMO_ENGINE_H
+#define __GLAMO_ENGINE_H
+
+enum glamo_engine {
+ GLAMO_ENGINE_CAPTURE = 0,
+ GLAMO_ENGINE_ISP = 1,
+ GLAMO_ENGINE_JPEG = 2,
+ GLAMO_ENGINE_MPEG_ENC = 3,
+ GLAMO_ENGINE_MPEG_DEC = 4,
+ GLAMO_ENGINE_LCD = 5,
+ GLAMO_ENGINE_CMDQ = 6,
+ GLAMO_ENGINE_2D = 7,
+ GLAMO_ENGINE_3D = 8,
+ GLAMO_ENGINE_MMC = 9,
+ GLAMO_ENGINE_MICROP0 = 10,
+ GLAMO_ENGINE_RISC = 11,
+ GLAMO_ENGINE_MICROP1_MPEG_ENC = 12,
+ GLAMO_ENGINE_MICROP1_MPEG_DEC = 13,
+#if 0
+ GLAMO_ENGINE_H264_DEC = 14,
+ GLAMO_ENGINE_RISC1 = 15,
+ GLAMO_ENGINE_SPI = 16,
+#endif
+ __NUM_GLAMO_ENGINES
+};
+
+#endif
diff --git a/target/linux/generic/files/include/linux/glamofb.h b/target/linux/generic/files/include/linux/glamofb.h
new file mode 100644
index 000000000..5f9fab5a4
--- /dev/null
+++ b/target/linux/generic/files/include/linux/glamofb.h
@@ -0,0 +1,35 @@
+#ifndef _LINUX_GLAMOFB_H
+#define _LINUX_GLAMOFB_H
+
+#include <linux/fb.h>
+
+#ifdef __KERNEL__
+
+struct glamo_core;
+struct glamofb_handle;
+
+struct glamo_fb_platform_data {
+ int width, height;
+
+ int num_modes;
+ struct fb_videomode *modes;
+
+ struct glamo_core *core;
+};
+
+int glamofb_cmd_mode(struct glamofb_handle *gfb, int on);
+int glamofb_cmd_write(struct glamofb_handle *gfb, u_int16_t val);
+
+#ifdef CONFIG_MFD_GLAMO
+void glamo_lcm_reset(struct platform_device *pdev, int level);
+#else
+#define glamo_lcm_reset(...) do {} while (0)
+#endif
+
+#endif
+
+#define GLAMOFB_ENGINE_ENABLE _IOW('F', 0x1, __u32)
+#define GLAMOFB_ENGINE_DISABLE _IOW('F', 0x2, __u32)
+#define GLAMOFB_ENGINE_RESET _IOW('F', 0x3, __u32)
+
+#endif
diff --git a/target/linux/generic/files/include/linux/gpio_buttons.h b/target/linux/generic/files/include/linux/gpio_buttons.h
new file mode 100644
index 000000000..f85b993ed
--- /dev/null
+++ b/target/linux/generic/files/include/linux/gpio_buttons.h
@@ -0,0 +1,33 @@
+/*
+ * Definitions for the GPIO buttons interface driver
+ *
+ * Copyright (C) 2007-2010 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was based on: /include/linux/gpio_keys.h
+ * The original gpio_keys.h seems not to have a license.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _GPIO_BUTTONS_H_
+#define _GPIO_BUTTONS_H_
+
+struct gpio_button {
+ int gpio; /* GPIO line number */
+ int active_low;
+ char *desc; /* button description */
+ int type; /* input event type (EV_KEY, EV_SW) */
+ int code; /* input event code (KEY_*, SW_*) */
+ int threshold; /* count threshold */
+};
+
+struct gpio_buttons_platform_data {
+ struct gpio_button *buttons;
+ int nbuttons; /* number of buttons */
+ int poll_interval; /* polling interval */
+};
+
+#endif /* _GPIO_BUTTONS_H_ */
diff --git a/target/linux/generic/files/include/linux/gpio_dev.h b/target/linux/generic/files/include/linux/gpio_dev.h
new file mode 100644
index 000000000..a2a4b51c7
--- /dev/null
+++ b/target/linux/generic/files/include/linux/gpio_dev.h
@@ -0,0 +1,42 @@
+#ifndef _GPIO_DEV_H__
+#define _GPIO_DEV_H__
+
+/*********************************************************************
+ *
+ * This Linux kernel header is expanded from the original driver
+ * (gpio_dev) by John Crispin. It provides an ioctl based interface to
+ * GPIO pins via the /dev/gpio char device and gpiolib within the kernel.
+ * The third argument to each ioctl is the GPIO pin number.
+ *
+ * This driver has been tested with lk 2.6.31 and works. The original
+ * driver fails quietly with this version. The protocol is now a bit
+ * different: the ioctl(fd, GPIO_REQUEST, <pin>) should be called
+ * after the open("/dev/gpio", O_RDWR) to determine if the <pin> is
+ * already in use. If the ioctl is successful (i.e. returns 0 for not
+ * in use) then the <pin> is claimed by this driver and
+ * ioctl(fd, GPIO_FREE, <pin>) should be called prior to close(fd).
+ *
+ * See <kernel_source>/Documentation/gpio.txt
+ * Note that kernel designers prefer the use of the sysfs gpio interface.
+ * This char driver is easier to use from code and faster.
+ ********************************************************************/
+
+/* This header can be included in both the user and kernel spaces */
+/* The _IO macro is defined in sys/ioctl.h */
+
+#define IOC_GPIODEV_MAGIC 'B'
+
+#define GPIO_GET _IO(IOC_GPIODEV_MAGIC, 10)
+#define GPIO_SET _IO(IOC_GPIODEV_MAGIC, 11)
+#define GPIO_CLEAR _IO(IOC_GPIODEV_MAGIC, 12)
+#define GPIO_DIR_IN _IO(IOC_GPIODEV_MAGIC, 13)
+#define GPIO_DIR_OUT _IO(IOC_GPIODEV_MAGIC, 14)
+ /* Sets the direction out and clears the <pin> (low) */
+
+#define GPIO_DIR_HIGH _IO(IOC_GPIODEV_MAGIC, 15)
+ /* Sets the direction out and sets the <pin> (high) */
+#define GPIO_REQUEST _IO(IOC_GPIODEV_MAGIC, 16)
+#define GPIO_FREE _IO(IOC_GPIODEV_MAGIC, 17)
+#define GPIO_CAN_SLEEP _IO(IOC_GPIODEV_MAGIC, 18)
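+
+/*
+ * Minimal user-space sketch of the protocol described above; the pin
+ * number 3 is only an example:
+ *
+ *	int fd = open("/dev/gpio", O_RDWR);
+ *	if (ioctl(fd, GPIO_REQUEST, 3) == 0) {	  returns 0 if pin free
+ *		ioctl(fd, GPIO_DIR_OUT, 3);	  direction out, pin low
+ *		ioctl(fd, GPIO_SET, 3);		  pin high
+ *		ioctl(fd, GPIO_FREE, 3);
+ *	}
+ *	close(fd);
+ */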
+
+#endif
diff --git a/target/linux/generic/files/include/linux/myloader.h b/target/linux/generic/files/include/linux/myloader.h
new file mode 100644
index 000000000..d89e415fb
--- /dev/null
+++ b/target/linux/generic/files/include/linux/myloader.h
@@ -0,0 +1,121 @@
+/*
+ * Compex's MyLoader specific definitions
+ *
+ * Copyright (C) 2006-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#ifndef _MYLOADER_H_
+#define _MYLOADER_H_
+
+/* Myloader specific magic numbers */
+#define MYLO_MAGIC_SYS_PARAMS 0x20021107
+#define MYLO_MAGIC_PARTITIONS 0x20021103
+#define MYLO_MAGIC_BOARD_PARAMS 0x20021103
+
+/* Vendor ID's (seems to be same as the PCI vendor ID's) */
+#define VENID_COMPEX 0x11F6
+
+/* Devices based on the ADM5120 */
+#define DEVID_COMPEX_NP27G 0x0078
+#define DEVID_COMPEX_NP28G 0x044C
+#define DEVID_COMPEX_NP28GHS 0x044E
+#define DEVID_COMPEX_WP54Gv1C 0x0514
+#define DEVID_COMPEX_WP54G 0x0515
+#define DEVID_COMPEX_WP54AG 0x0546
+#define DEVID_COMPEX_WPP54AG 0x0550
+#define DEVID_COMPEX_WPP54G 0x0555
+
+/* Devices based on the Atheros AR2317 */
+#define DEVID_COMPEX_NP25G 0x05E6
+#define DEVID_COMPEX_WPE53G 0x05DC
+
+/* Devices based on the Atheros AR71xx */
+#define DEVID_COMPEX_WP543 0x0640
+#define DEVID_COMPEX_WPE72 0x0672
+
+/* Devices based on the IXP422 */
+#define DEVID_COMPEX_WP18 0x047E
+#define DEVID_COMPEX_NP18A 0x0489
+
+/* Other devices */
+#define DEVID_COMPEX_NP26G8M 0x03E8
+#define DEVID_COMPEX_NP26G16M 0x03E9
+
+struct mylo_partition {
+ uint16_t flags; /* partition flags */
+ uint16_t type; /* type of the partition */
+ uint32_t addr; /* relative address of the partition from the
+ flash start */
+ uint32_t size; /* size of the partition in bytes */
+ uint32_t param; /* if this is the active partition,
+ MyLoader loads code to this address */
+};
+
+#define PARTITION_FLAG_ACTIVE 0x8000 /* this is the active partition,
+ * MyLoader loads firmware from here */
+#define PARTITION_FLAG_ISRAM 0x2000 /* FIXME: this is a RAM partition? */
+#define PARTIIION_FLAG_RAMLOAD 0x1000 /* FIXME: load this partition into the RAM? */
+#define PARTITION_FLAG_PRELOAD 0x0800 /* the partition data is preloaded to RAM
+ * before decompression */
+#define PARTITION_FLAG_LZMA 0x0100 /* partition data compressed by LZMA */
+#define PARTITION_FLAG_HAVEHDR 0x0002 /* the partition data has a header */
+
+#define PARTITION_TYPE_FREE 0
+#define PARTITION_TYPE_USED 1
+
+#define MYLO_MAX_PARTITIONS 8 /* maximum number of partitions in the
+ partition table */
+
+struct mylo_partition_table {
+ uint32_t magic; /* must be MYLO_MAGIC_PARTITIONS */
+ uint32_t res0; /* unknown/unused */
+ uint32_t res1; /* unknown/unused */
+ uint32_t res2; /* unknown/unused */
+ struct mylo_partition partitions[MYLO_MAX_PARTITIONS];
+};
+
+struct mylo_partition_header {
+ uint32_t len; /* length of the partition data */
+ uint32_t crc; /* CRC value of the partition data */
+};
+
+struct mylo_system_params {
+ uint32_t magic; /* must be MYLO_MAGIC_SYS_PARAMS */
+ uint32_t res0;
+ uint32_t res1;
+ uint32_t mylo_ver;
+ uint16_t vid; /* Vendor ID */
+ uint16_t did; /* Device ID */
+ uint16_t svid; /* Sub Vendor ID */
+ uint16_t sdid; /* Sub Device ID */
+ uint32_t rev; /* device revision */
+ uint32_t fwhi;
+ uint32_t fwlo;
+ uint32_t tftp_addr;
+ uint32_t prog_start;
+ uint32_t flash_size; /* size of boot FLASH in bytes */
+ uint32_t dram_size; /* size of onboard RAM in bytes */
+};
+
+struct mylo_eth_addr {
+ uint8_t mac[6];
+ uint8_t csum[2];
+};
+
+#define MYLO_ETHADDR_COUNT 8 /* maximum number of ethernet addresses
+ in the board parameters */
+
+struct mylo_board_params {
+ uint32_t magic; /* must be MYLO_MAGIC_BOARD_PARAMS */
+ uint32_t res0;
+ uint32_t res1;
+ uint32_t res2;
+ struct mylo_eth_addr addr[MYLO_ETHADDR_COUNT];
+};
+
+#endif /* _MYLOADER_H_*/
diff --git a/target/linux/generic/files/include/linux/pwm/pwm.h b/target/linux/generic/files/include/linux/pwm/pwm.h
new file mode 100644
index 000000000..e01cca903
--- /dev/null
+++ b/target/linux/generic/files/include/linux/pwm/pwm.h
@@ -0,0 +1,165 @@
+/*
+ * include/linux/pwm.h
+ *
+ * Copyright (C) 2008 Bill Gatliff <bgat@billgatliff.com>
+ *
+ * This program is free software; you may redistribute and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+#ifndef __LINUX_PWM_H
+#define __LINUX_PWM_H
+
+enum {
+ PWM_CONFIG_DUTY_TICKS = BIT(0),
+ PWM_CONFIG_PERIOD_TICKS = BIT(1),
+ PWM_CONFIG_POLARITY = BIT(2),
+ PWM_CONFIG_START = BIT(3),
+ PWM_CONFIG_STOP = BIT(4),
+
+ PWM_CONFIG_HANDLER = BIT(5),
+
+ PWM_CONFIG_DUTY_NS = BIT(6),
+ PWM_CONFIG_DUTY_PERCENT = BIT(7),
+ PWM_CONFIG_PERIOD_NS = BIT(8),
+};
+
+struct pwm_channel;
+struct work_struct;
+
+typedef int (*pwm_handler_t)(struct pwm_channel *p, void *data);
+typedef void (*pwm_callback_t)(struct pwm_channel *p);
+
+struct pwm_channel_config {
+ int config_mask;
+ unsigned long duty_ticks;
+ unsigned long period_ticks;
+ int polarity;
+
+ pwm_handler_t handler;
+
+ unsigned long duty_ns;
+ unsigned long period_ns;
+ int duty_percent;
+};
+
+struct pwm_device {
+ struct list_head list;
+ spinlock_t list_lock;
+ struct device *dev;
+ struct module *owner;
+ struct pwm_channel *channels;
+
+ const char *bus_id;
+ int nchan;
+
+ int (*request) (struct pwm_channel *p);
+ void (*free) (struct pwm_channel *p);
+ int (*config) (struct pwm_channel *p,
+ struct pwm_channel_config *c);
+ int (*config_nosleep)(struct pwm_channel *p,
+ struct pwm_channel_config *c);
+ int (*synchronize) (struct pwm_channel *p,
+ struct pwm_channel *to_p);
+ int (*unsynchronize)(struct pwm_channel *p,
+ struct pwm_channel *from_p);
+ int (*set_callback) (struct pwm_channel *p,
+ pwm_callback_t callback);
+};
+
+int pwm_register(struct pwm_device *pwm);
+int pwm_unregister(struct pwm_device *pwm);
+
+enum {
+ FLAG_REQUESTED = 0,
+ FLAG_STOP = 1,
+};
+
+struct pwm_channel {
+ struct list_head list;
+ struct pwm_device *pwm;
+ const char *requester;
+ pid_t pid;
+ int chan;
+ unsigned long flags;
+ unsigned long tick_hz;
+
+ spinlock_t lock;
+ struct completion complete;
+
+ pwm_callback_t callback;
+
+ struct work_struct handler_work;
+ pwm_handler_t handler;
+ void *handler_data;
+
+ int active_high;
+ unsigned long period_ticks;
+ unsigned long duty_ticks;
+};
+
+struct gpio_pwm_platform_data {
+ int gpio;
+};
+
+struct pwm_channel *
+pwm_request(const char *bus_id, int chan,
+ const char *requester);
+
+void pwm_free(struct pwm_channel *pwm);
+
+int pwm_config_nosleep(struct pwm_channel *pwm,
+ struct pwm_channel_config *c);
+
+int pwm_config(struct pwm_channel *pwm,
+ struct pwm_channel_config *c);
+
+unsigned long pwm_ns_to_ticks(struct pwm_channel *pwm,
+ unsigned long nsecs);
+
+unsigned long pwm_ticks_to_ns(struct pwm_channel *pwm,
+ unsigned long ticks);
+
+int pwm_set_period_ns(struct pwm_channel *pwm,
+ unsigned long period_ns);
+
+unsigned long int pwm_get_period_ns(struct pwm_channel *pwm);
+
+int pwm_set_duty_ns(struct pwm_channel *pwm,
+ unsigned long duty_ns);
+
+int pwm_set_duty_percent(struct pwm_channel *pwm,
+ int percent);
+
+unsigned long pwm_get_duty_ns(struct pwm_channel *pwm);
+
+int pwm_set_polarity(struct pwm_channel *pwm,
+ int active_high);
+
+int pwm_start(struct pwm_channel *pwm);
+
+int pwm_stop(struct pwm_channel *pwm);
+
+int pwm_set_handler(struct pwm_channel *pwm,
+ pwm_handler_t handler,
+ void *data);
+
+int pwm_synchronize(struct pwm_channel *p,
+ struct pwm_channel *to_p);
+
+
+int pwm_unsynchronize(struct pwm_channel *p,
+ struct pwm_channel *from_p);
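+
+/*
+ * Typical call sequence (illustrative sketch only; the bus id, channel
+ * number and timing values are examples, and error handling is omitted):
+ *
+ *	struct pwm_channel_config c = {
+ *		.config_mask = PWM_CONFIG_PERIOD_NS | PWM_CONFIG_DUTY_NS
+ *			     | PWM_CONFIG_START,
+ *		.period_ns = 1000000,
+ *		.duty_ns = 250000,
+ *	};
+ *	struct pwm_channel *p = pwm_request("gpio_pwm", 0, "backlight");
+ *
+ *	pwm_config(p, &c);
+ *	...
+ *	pwm_stop(p);
+ *	pwm_free(p);
+ */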
+
+
+#endif /* __LINUX_PWM_H */
diff --git a/target/linux/generic/files/include/linux/routerboot.h b/target/linux/generic/files/include/linux/routerboot.h
new file mode 100644
index 000000000..0244843d6
--- /dev/null
+++ b/target/linux/generic/files/include/linux/routerboot.h
@@ -0,0 +1,105 @@
+/*
+ * Mikrotik's RouterBOOT definitions
+ *
+ * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#ifndef _ROUTERBOOT_H
+#define _ROUTERBOOT_H
+
+#define RB_MAC_SIZE 6
+
+/*
+ * Magic numbers
+ */
+#define RB_MAGIC_HARD 0x64726148 /* "Hard" */
+#define RB_MAGIC_SOFT 0x74666F53 /* "Soft" */
+#define RB_MAGIC_DAWN 0x6E776144 /* "Dawn" */
+
+#define RB_ID_TERMINATOR 0
+
+/*
+ * ID values for Hardware settings
+ */
+#define RB_ID_HARD_01 1
+#define RB_ID_HARD_02 2
+#define RB_ID_FLASH_INFO 3
+#define RB_ID_MAC_ADDRESS_PACK 4
+#define RB_ID_BOARD_NAME 5
+#define RB_ID_BIOS_VERSION 6
+#define RB_ID_HARD_07 7
+#define RB_ID_SDRAM_TIMINGS 8
+#define RB_ID_DEVICE_TIMINGS 9
+#define RB_ID_SOFTWARE_ID 10
+#define RB_ID_SERIAL_NUMBER 11
+#define RB_ID_HARD_12 12
+#define RB_ID_MEMORY_SIZE 13
+#define RB_ID_MAC_ADDRESS_COUNT 14
+#define RB_ID_WLAN_DATA 22
+
+/*
+ * ID values for Software settings
+ */
+#define RB_ID_UART_SPEED 1
+#define RB_ID_BOOT_DELAY 2
+#define RB_ID_BOOT_DEVICE 3
+#define RB_ID_BOOT_KEY 4
+#define RB_ID_CPU_MODE 5
+#define RB_ID_FW_VERSION 6
+#define RB_ID_SOFT_07 7
+#define RB_ID_SOFT_08 8
+#define RB_ID_BOOT_PROTOCOL 9
+#define RB_ID_SOFT_10 10
+#define RB_ID_SOFT_11 11
+
+/*
+ * UART_SPEED values
+ */
+#define RB_UART_SPEED_115200 0
+#define RB_UART_SPEED_57600 1
+#define RB_UART_SPEED_38400 2
+#define RB_UART_SPEED_19200 3
+#define RB_UART_SPEED_9600 4
+#define RB_UART_SPEED_4800 5
+#define RB_UART_SPEED_2400 6
+#define RB_UART_SPEED_1200 7
+
+/*
+ * BOOT_DELAY values
+ */
+#define RB_BOOT_DELAY_0SEC 0
+#define RB_BOOT_DELAY_1SEC 1
+#define RB_BOOT_DELAY_2SEC 2
+
+/*
+ * BOOT_DEVICE values
+ */
+#define RB_BOOT_DEVICE_ETHER 0
+#define RB_BOOT_DEVICE_NANDETH 1
+#define RB_BOOT_DEVICE_ETHONCE 2
+#define RB_BOOT_DEVICE_NANDONLY 3
+
+/*
+ * BOOT_KEY values
+ */
+#define RB_BOOT_KEY_ANY 0
+#define RB_BOOT_KEY_DEL 1
+
+/*
+ * CPU_MODE values
+ */
+#define RB_CPU_MODE_POWERSAVE 0
+#define RB_CPU_MODE_REGULAR 1
+
+/*
+ * BOOT_PROTOCOL values
+ */
+#define RB_BOOT_PROTOCOL_BOOTP 0
+#define RB_BOOT_PROTOCOL_DHCP 1
+
+#endif /* _ROUTERBOOT_H */
diff --git a/target/linux/generic/files/include/linux/rt2x00_platform.h b/target/linux/generic/files/include/linux/rt2x00_platform.h
new file mode 100644
index 000000000..e10377e21
--- /dev/null
+++ b/target/linux/generic/files/include/linux/rt2x00_platform.h
@@ -0,0 +1,23 @@
+/*
+ * Platform data definition for the rt2x00 driver
+ *
+ * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#ifndef _RT2X00_PLATFORM_H
+#define _RT2X00_PLATFORM_H
+
+struct rt2x00_platform_data {
+ char *eeprom_file_name;
+ const u8 *mac_address;
+
+ int disable_2ghz;
+ int disable_5ghz;
+};
+
+#endif /* _RT2X00_PLATFORM_H */
diff --git a/target/linux/generic/files/include/linux/rtl8366.h b/target/linux/generic/files/include/linux/rtl8366.h
new file mode 100644
index 000000000..78daed220
--- /dev/null
+++ b/target/linux/generic/files/include/linux/rtl8366.h
@@ -0,0 +1,40 @@
+/*
+ * Platform data definition for the Realtek RTL8366RB/S ethernet switch driver
+ *
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _RTL8366_H
+#define _RTL8366_H
+
+#define RTL8366_DRIVER_NAME "rtl8366"
+#define RTL8366S_DRIVER_NAME "rtl8366s"
+#define RTL8366RB_DRIVER_NAME "rtl8366rb"
+
+enum rtl8366_type {
+ RTL8366_TYPE_UNKNOWN,
+ RTL8366_TYPE_S,
+ RTL8366_TYPE_RB,
+};
+
+struct rtl8366_initval {
+ unsigned reg;
+ u16 val;
+};
+
+struct rtl8366_platform_data {
+ unsigned gpio_sda;
+ unsigned gpio_sck;
+ void (*hw_reset)(bool active);
+
+ unsigned num_initvals;
+ struct rtl8366_initval *initvals;
+};
+
+enum rtl8366_type rtl8366_smi_detect(struct rtl8366_platform_data *pdata);
+
+#endif /* _RTL8366_H */
diff --git a/target/linux/generic/files/include/linux/rtl8367.h b/target/linux/generic/files/include/linux/rtl8367.h
new file mode 100644
index 000000000..470c5f381
--- /dev/null
+++ b/target/linux/generic/files/include/linux/rtl8367.h
@@ -0,0 +1,59 @@
+/*
+ * Platform data definition for the Realtek RTL8367 ethernet switch driver
+ *
+ * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _RTL8367_H
+#define _RTL8367_H
+
+#define RTL8367_DRIVER_NAME "rtl8367"
+
+enum rtl8367_port_speed {
+ RTL8367_PORT_SPEED_10 = 0,
+ RTL8367_PORT_SPEED_100,
+ RTL8367_PORT_SPEED_1000,
+};
+
+struct rtl8367_port_ability {
+ int force_mode;
+ int nway;
+ int txpause;
+ int rxpause;
+ int link;
+ int duplex;
+ enum rtl8367_port_speed speed;
+};
+
+enum rtl8367_extif_mode {
+ RTL8367_EXTIF_MODE_DISABLED = 0,
+ RTL8367_EXTIF_MODE_RGMII,
+ RTL8367_EXTIF_MODE_MII_MAC,
+ RTL8367_EXTIF_MODE_MII_PHY,
+ RTL8367_EXTIF_MODE_TMII_MAC,
+ RTL8367_EXTIF_MODE_TMII_PHY,
+ RTL8367_EXTIF_MODE_GMII,
+ RTL8367_EXTIF_MODE_RGMII_33V,
+};
+
+struct rtl8367_extif_config {
+ unsigned int txdelay;
+ unsigned int rxdelay;
+ enum rtl8367_extif_mode mode;
+ struct rtl8367_port_ability ability;
+};
+
+struct rtl8367_platform_data {
+ unsigned gpio_sda;
+ unsigned gpio_sck;
+ void (*hw_reset)(bool active);
+
+ struct rtl8367_extif_config *extif0_cfg;
+ struct rtl8367_extif_config *extif1_cfg;
+};
+
+#endif /* _RTL8367_H */
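The RTL8367 variant adds per-external-interface configuration. A sketch only, with made-up GPIO numbers and delay values, showing extif0 forced to RGMII at gigabit/full duplex:

#include <linux/platform_device.h>
#include <linux/rtl8367.h>

/* Force external interface 0 into RGMII, link up, 1000/full, pause on.
 * The TX/RX delay values are placeholders; real boards tune them. */
static struct rtl8367_extif_config board_rtl8367_extif0 = {
	.mode		= RTL8367_EXTIF_MODE_RGMII,
	.txdelay	= 1,
	.rxdelay	= 0,
	.ability = {
		.force_mode	= 1,
		.nway		= 0,
		.txpause	= 1,
		.rxpause	= 1,
		.link		= 1,
		.duplex		= 1,
		.speed		= RTL8367_PORT_SPEED_1000,
	},
};

static struct rtl8367_platform_data board_rtl8367_data = {
	.gpio_sda	= 19,	/* assumed SMI data GPIO */
	.gpio_sck	= 20,	/* assumed SMI clock GPIO */
	.extif0_cfg	= &board_rtl8367_extif0,
};

static struct platform_device board_rtl8367_device = {
	.name	= RTL8367_DRIVER_NAME,
	.id	= -1,
	.dev	= {
		.platform_data = &board_rtl8367_data,
	},
};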
diff --git a/target/linux/generic/files/include/linux/switch.h b/target/linux/generic/files/include/linux/switch.h
new file mode 100644
index 000000000..4f4085ef8
--- /dev/null
+++ b/target/linux/generic/files/include/linux/switch.h
@@ -0,0 +1,237 @@
+/*
+ * switch.h: Switch configuration API
+ *
+ * Copyright (C) 2008 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_SWITCH_H
+#define __LINUX_SWITCH_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/genetlink.h>
+#ifndef __KERNEL__
+#include <netlink/netlink.h>
+#include <netlink/genl/genl.h>
+#include <netlink/genl/ctrl.h>
+#else
+#include <net/genetlink.h>
+#endif
+
+/* main attributes */
+enum {
+ SWITCH_ATTR_UNSPEC,
+ /* global */
+ SWITCH_ATTR_TYPE,
+ /* device */
+ SWITCH_ATTR_ID,
+ SWITCH_ATTR_DEV_NAME,
+ SWITCH_ATTR_ALIAS,
+ SWITCH_ATTR_NAME,
+ SWITCH_ATTR_VLANS,
+ SWITCH_ATTR_PORTS,
+ SWITCH_ATTR_CPU_PORT,
+ /* attributes */
+ SWITCH_ATTR_OP_ID,
+ SWITCH_ATTR_OP_TYPE,
+ SWITCH_ATTR_OP_NAME,
+ SWITCH_ATTR_OP_PORT,
+ SWITCH_ATTR_OP_VLAN,
+ SWITCH_ATTR_OP_VALUE_INT,
+ SWITCH_ATTR_OP_VALUE_STR,
+ SWITCH_ATTR_OP_VALUE_PORTS,
+ SWITCH_ATTR_OP_DESCRIPTION,
+ /* port lists */
+ SWITCH_ATTR_PORT,
+ SWITCH_ATTR_MAX
+};
+
+/* commands */
+enum {
+ SWITCH_CMD_UNSPEC,
+ SWITCH_CMD_GET_SWITCH,
+ SWITCH_CMD_NEW_ATTR,
+ SWITCH_CMD_LIST_GLOBAL,
+ SWITCH_CMD_GET_GLOBAL,
+ SWITCH_CMD_SET_GLOBAL,
+ SWITCH_CMD_LIST_PORT,
+ SWITCH_CMD_GET_PORT,
+ SWITCH_CMD_SET_PORT,
+ SWITCH_CMD_LIST_VLAN,
+ SWITCH_CMD_GET_VLAN,
+ SWITCH_CMD_SET_VLAN
+};
+
+/* data types */
+enum switch_val_type {
+ SWITCH_TYPE_UNSPEC,
+ SWITCH_TYPE_INT,
+ SWITCH_TYPE_STRING,
+ SWITCH_TYPE_PORTS,
+ SWITCH_TYPE_NOVAL,
+};
+
+/* port nested attributes */
+enum {
+ SWITCH_PORT_UNSPEC,
+ SWITCH_PORT_ID,
+ SWITCH_PORT_FLAG_TAGGED,
+ SWITCH_PORT_ATTR_MAX
+};
+
+#define SWITCH_ATTR_DEFAULTS_OFFSET 0x1000
+
+#ifdef __KERNEL__
+
+struct switch_dev;
+struct switch_op;
+struct switch_val;
+struct switch_attr;
+struct switch_attrlist;
+struct switch_led_trigger;
+
+int register_switch(struct switch_dev *dev, struct net_device *netdev);
+void unregister_switch(struct switch_dev *dev);
+
+/**
+ * struct switch_attrlist - attribute list
+ *
+ * @n_attr: number of attributes
+ * @attr: pointer to the attributes array
+ */
+struct switch_attrlist {
+ int n_attr;
+ const struct switch_attr *attr;
+};
+
+enum switch_port_speed {
+ SWITCH_PORT_SPEED_UNKNOWN = 0,
+ SWITCH_PORT_SPEED_10 = 10,
+ SWITCH_PORT_SPEED_100 = 100,
+ SWITCH_PORT_SPEED_1000 = 1000,
+};
+
+struct switch_port_link {
+ bool link;
+ bool duplex;
+ bool aneg;
+ bool tx_flow;
+ bool rx_flow;
+ enum switch_port_speed speed;
+};
+
+struct switch_port_stats {
+ unsigned long tx_bytes;
+ unsigned long rx_bytes;
+};
+
+/**
+ * struct switch_dev_ops - switch driver operations
+ *
+ * @attr_global: global switch attribute list
+ * @attr_port: port attribute list
+ * @attr_vlan: vlan attribute list
+ *
+ * Callbacks:
+ *
+ * @get_vlan_ports: read the port list of a VLAN
+ * @set_vlan_ports: set the port list of a VLAN
+ *
+ * @get_port_pvid: get the primary VLAN ID of a port
+ * @set_port_pvid: set the primary VLAN ID of a port
+ *
+ * @apply_config: apply all changed settings to the switch
+ * @reset_switch: reset the switch
+ */
+struct switch_dev_ops {
+ struct switch_attrlist attr_global, attr_port, attr_vlan;
+
+ int (*get_vlan_ports)(struct switch_dev *dev, struct switch_val *val);
+ int (*set_vlan_ports)(struct switch_dev *dev, struct switch_val *val);
+
+ int (*get_port_pvid)(struct switch_dev *dev, int port, int *val);
+ int (*set_port_pvid)(struct switch_dev *dev, int port, int val);
+
+ int (*apply_config)(struct switch_dev *dev);
+ int (*reset_switch)(struct switch_dev *dev);
+
+ int (*get_port_link)(struct switch_dev *dev, int port,
+ struct switch_port_link *link);
+ int (*get_port_stats)(struct switch_dev *dev, int port,
+ struct switch_port_stats *stats);
+};
+
+struct switch_dev {
+ const struct switch_dev_ops *ops;
+ /* will be automatically filled */
+ char devname[IFNAMSIZ];
+
+ const char *name;
+ /* NB: either alias or netdev must be set */
+ const char *alias;
+ struct net_device *netdev;
+
+ int ports;
+ int vlans;
+ int cpu_port;
+
+ /* the following fields are internal for swconfig */
+ int id;
+ struct list_head dev_list;
+ unsigned long def_global, def_port, def_vlan;
+
+ struct mutex sw_mutex;
+ struct switch_port *portbuf;
+
+ char buf[128];
+
+#ifdef CONFIG_SWCONFIG_LEDS
+ struct switch_led_trigger *led_trigger;
+#endif
+};
+
+struct switch_port {
+ u32 id;
+ u32 flags;
+};
+
+struct switch_val {
+ const struct switch_attr *attr;
+ int port_vlan;
+ int len;
+ union {
+ const char *s;
+ u32 i;
+ struct switch_port *ports;
+ } value;
+};
+
+struct switch_attr {
+ int disabled;
+ int type;
+ const char *name;
+ const char *description;
+
+ int (*set)(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val);
+ int (*get)(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val);
+
+ /* for driver internal use */
+ int id;
+ int ofs;
+ int max;
+};
+
+#endif
+
+#endif
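To make the kernel-side half of this API concrete, a minimal sketch of a driver registering with swconfig. The switch name, port/VLAN counts and the fixed link state reported below are invented for illustration; a real driver would also populate the attribute lists and VLAN callbacks.

#include <linux/switch.h>

/* Report a fixed 100 Mbit/full link on every port (illustration only). */
static int demo_get_port_link(struct switch_dev *dev, int port,
			      struct switch_port_link *link)
{
	link->link = true;
	link->duplex = true;
	link->aneg = false;
	link->speed = SWITCH_PORT_SPEED_100;
	return 0;
}

/* Nothing is cached in this sketch, so applying the config is a no-op. */
static int demo_apply_config(struct switch_dev *dev)
{
	return 0;
}

static const struct switch_dev_ops demo_switch_ops = {
	.get_port_link	= demo_get_port_link,
	.apply_config	= demo_apply_config,
};

static struct switch_dev demo_switch = {
	.name		= "demo-switch",	/* assumed name */
	.alias		= "demo0",		/* used since netdev is NULL */
	.ops		= &demo_switch_ops,
	.ports		= 6,
	.vlans		= 16,
	.cpu_port	= 5,
};

/* Typically called from the switch driver's probe routine. */
static int demo_register(void)
{
	return register_switch(&demo_switch, NULL);
}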
diff --git a/target/linux/generic/image/Makefile b/target/linux/generic/image/Makefile
new file mode 100644
index 000000000..e733e0a37
--- /dev/null
+++ b/target/linux/generic/image/Makefile
@@ -0,0 +1,12 @@
+#
+# Copyright (C) 2006-2010 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+include $(TOPDIR)/rules.mk
+include $(INCLUDE_DIR)/image.mk
+
+# use default targets for everything
+
+$(eval $(call BuildImage))
diff --git a/target/linux/generic/image/initramfs-base-files.txt b/target/linux/generic/image/initramfs-base-files.txt
new file mode 100644
index 000000000..eda5d0d27
--- /dev/null
+++ b/target/linux/generic/image/initramfs-base-files.txt
@@ -0,0 +1,9 @@
+nod /dev/console 600 0 0 c 5 1
+nod /dev/null 666 0 0 c 1 3
+nod /dev/zero 666 0 0 c 1 5
+nod /dev/tty 666 0 0 c 5 0
+nod /dev/tty0 660 0 0 c 4 0
+nod /dev/tty1 660 0 0 c 4 1
+nod /dev/random 666 0 0 c 1 8
+nod /dev/urandom 666 0 0 c 1 9
+dir /dev/pts 755 0 0
diff --git a/target/linux/generic/image/lzma-loader/Makefile b/target/linux/generic/image/lzma-loader/Makefile
new file mode 100644
index 000000000..d75a4468b
--- /dev/null
+++ b/target/linux/generic/image/lzma-loader/Makefile
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2006 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME := loader
+PKG_VERSION := 0.05
+
+PKG_BUILD_DIR := $(KDIR)/$(PKG_NAME)-$(PKG_VERSION)$(LOADER_TYPE)
+
+$(PKG_BUILD_DIR)/.prepared:
+ mkdir $(PKG_BUILD_DIR)
+ $(CP) ./src/* $(PKG_BUILD_DIR)/
+ touch $@
+
+$(PKG_BUILD_DIR)/lzma.elf: $(PKG_BUILD_DIR)/.prepared $(PKG_BUILD_DIR)/vmlinux.lzma
+ PATH="$(TARGET_PATH)" $(MAKE) -C $(PKG_BUILD_DIR) \
+ CC="$(TARGET_CC)" CROSS_COMPILE="$(TARGET_CROSS)" \
+ RAMSIZE=$(RAMSIZE) \
+ LOADADDR=$(LOADADDR) \
+ KERNEL_ENTRY=$(KERNEL_ENTRY) \
+ IMAGE_COPY=$(IMAGE_COPY)
+
+
+$(PKG_BUILD_DIR)/vmlinux.lzma: $(KDIR)/vmlinux.lzma
+ $(CP) $< $@
+
+$(KDIR)/loader$(LOADER_TYPE).elf: $(PKG_BUILD_DIR)/lzma.elf
+ $(CP) $< $@
+
+$(KDIR)/loader$(LOADER_TYPE).bin: $(PKG_BUILD_DIR)/lzma.bin
+ $(CP) $< $@
+
+download:
+prepare: $(PKG_BUILD_DIR)/.prepared
+compile: $(KDIR)/loader$(LOADER_TYPE).elf $(KDIR)/loader$(LOADER_TYPE).bin
+install:
+
+clean:
+ rm -rf $(PKG_BUILD_DIR)
+ rm -f $(KDIR)/loader.elf
+ rm -f $(KDIR)/loader.bin
diff --git a/target/linux/generic/image/lzma-loader/src/LzmaDecode.c b/target/linux/generic/image/lzma-loader/src/LzmaDecode.c
new file mode 100644
index 000000000..c90a0d3ef
--- /dev/null
+++ b/target/linux/generic/image/lzma-loader/src/LzmaDecode.c
@@ -0,0 +1,590 @@
+/*
+ LzmaDecode.c
+ LZMA Decoder (optimized for Speed version)
+
+ LZMA SDK 4.22 Copyright (c) 1999-2005 Igor Pavlov (2005-06-10)
+ http://www.7-zip.org/
+
+ LZMA SDK is licensed under two licenses:
+ 1) GNU Lesser General Public License (GNU LGPL)
+ 2) Common Public License (CPL)
+ It means that you can select one of these two licenses and
+ follow rules of that license.
+
+ SPECIAL EXCEPTION:
+ Igor Pavlov, as the author of this Code, expressly permits you to
+ statically or dynamically link your Code (or bind by name) to the
+ interfaces of this file without subjecting your linked Code to the
+ terms of the CPL or GNU LGPL. Any modifications or additions
+ to this file, however, are subject to the LGPL or CPL terms.
+*/
+
+#include "LzmaDecode.h"
+
+#ifndef Byte
+#define Byte unsigned char
+#endif
+
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+
+#define RC_READ_BYTE (*Buffer++)
+
+#define RC_INIT2 Code = 0; Range = 0xFFFFFFFF; \
+ { int i; for(i = 0; i < 5; i++) { RC_TEST; Code = (Code << 8) | RC_READ_BYTE; }}
+
+#ifdef _LZMA_IN_CB
+
+#define RC_TEST { if (Buffer == BufferLim) \
+ { SizeT size; int result = InCallback->Read(InCallback, &Buffer, &size); if (result != LZMA_RESULT_OK) return result; \
+ BufferLim = Buffer + size; if (size == 0) return LZMA_RESULT_DATA_ERROR; }}
+
+#define RC_INIT Buffer = BufferLim = 0; RC_INIT2
+
+#else
+
+#define RC_TEST { if (Buffer == BufferLim) return LZMA_RESULT_DATA_ERROR; }
+
+#define RC_INIT(buffer, bufferSize) Buffer = buffer; BufferLim = buffer + bufferSize; RC_INIT2
+
+#endif
+
+#define RC_NORMALIZE if (Range < kTopValue) { RC_TEST; Range <<= 8; Code = (Code << 8) | RC_READ_BYTE; }
+
+#define IfBit0(p) RC_NORMALIZE; bound = (Range >> kNumBitModelTotalBits) * *(p); if (Code < bound)
+#define UpdateBit0(p) Range = bound; *(p) += (kBitModelTotal - *(p)) >> kNumMoveBits;
+#define UpdateBit1(p) Range -= bound; Code -= bound; *(p) -= (*(p)) >> kNumMoveBits;
+
+#define RC_GET_BIT2(p, mi, A0, A1) IfBit0(p) \
+ { UpdateBit0(p); mi <<= 1; A0; } else \
+ { UpdateBit1(p); mi = (mi + mi) + 1; A1; }
+
+#define RC_GET_BIT(p, mi) RC_GET_BIT2(p, mi, ; , ;)
+
+#define RangeDecoderBitTreeDecode(probs, numLevels, res) \
+ { int i = numLevels; res = 1; \
+ do { CProb *p = probs + res; RC_GET_BIT(p, res) } while(--i != 0); \
+ res -= (1 << numLevels); }
+
+
+#define kNumPosBitsMax 4
+#define kNumPosStatesMax (1 << kNumPosBitsMax)
+
+#define kLenNumLowBits 3
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
+#define kLenNumMidBits 3
+#define kLenNumMidSymbols (1 << kLenNumMidBits)
+#define kLenNumHighBits 8
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
+
+#define LenChoice 0
+#define LenChoice2 (LenChoice + 1)
+#define LenLow (LenChoice2 + 1)
+#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
+#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
+#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
+
+
+#define kNumStates 12
+#define kNumLitStates 7
+
+#define kStartPosModelIndex 4
+#define kEndPosModelIndex 14
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
+
+#define kNumPosSlotBits 6
+#define kNumLenToPosStates 4
+
+#define kNumAlignBits 4
+#define kAlignTableSize (1 << kNumAlignBits)
+
+#define kMatchMinLen 2
+
+#define IsMatch 0
+#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
+#define IsRepG0 (IsRep + kNumStates)
+#define IsRepG1 (IsRepG0 + kNumStates)
+#define IsRepG2 (IsRepG1 + kNumStates)
+#define IsRep0Long (IsRepG2 + kNumStates)
+#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
+#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
+#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
+#define LenCoder (Align + kAlignTableSize)
+#define RepLenCoder (LenCoder + kNumLenProbs)
+#define Literal (RepLenCoder + kNumLenProbs)
+
+#if Literal != LZMA_BASE_SIZE
+StopCompilingDueBUG
+#endif
+
+#if 0
+int LzmaDecodeProperties(CLzmaProperties *propsRes, const unsigned char *propsData, int size)
+{
+ unsigned char prop0;
+ if (size < LZMA_PROPERTIES_SIZE)
+ return LZMA_RESULT_DATA_ERROR;
+ prop0 = propsData[0];
+ if (prop0 >= (9 * 5 * 5))
+ return LZMA_RESULT_DATA_ERROR;
+ {
+ for (propsRes->pb = 0; prop0 >= (9 * 5); propsRes->pb++, prop0 -= (9 * 5));
+ for (propsRes->lp = 0; prop0 >= 9; propsRes->lp++, prop0 -= 9);
+ propsRes->lc = prop0;
+ /*
+ unsigned char remainder = (unsigned char)(prop0 / 9);
+ propsRes->lc = prop0 % 9;
+ propsRes->pb = remainder / 5;
+ propsRes->lp = remainder % 5;
+ */
+ }
+
+ #ifdef _LZMA_OUT_READ
+ {
+ int i;
+ propsRes->DictionarySize = 0;
+ for (i = 0; i < 4; i++)
+ propsRes->DictionarySize += (UInt32)(propsData[1 + i]) << (i * 8);
+ if (propsRes->DictionarySize == 0)
+ propsRes->DictionarySize = 1;
+ }
+ #endif
+ return LZMA_RESULT_OK;
+}
+#endif
+
+#define kLzmaStreamWasFinishedId (-1)
+
+int LzmaDecode(CLzmaDecoderState *vs,
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *InCallback,
+ #else
+ const unsigned char *inStream, SizeT inSize, SizeT *inSizeProcessed,
+ #endif
+ unsigned char *outStream, SizeT outSize, SizeT *outSizeProcessed)
+{
+ CProb *p = vs->Probs;
+ SizeT nowPos = 0;
+ Byte previousByte = 0;
+ UInt32 posStateMask = (1 << (vs->Properties.pb)) - 1;
+ UInt32 literalPosMask = (1 << (vs->Properties.lp)) - 1;
+ int lc = vs->Properties.lc;
+
+ #ifdef _LZMA_OUT_READ
+
+ UInt32 Range = vs->Range;
+ UInt32 Code = vs->Code;
+ #ifdef _LZMA_IN_CB
+ const Byte *Buffer = vs->Buffer;
+ const Byte *BufferLim = vs->BufferLim;
+ #else
+ const Byte *Buffer = inStream;
+ const Byte *BufferLim = inStream + inSize;
+ #endif
+ int state = vs->State;
+ UInt32 rep0 = vs->Reps[0], rep1 = vs->Reps[1], rep2 = vs->Reps[2], rep3 = vs->Reps[3];
+ int len = vs->RemainLen;
+ UInt32 globalPos = vs->GlobalPos;
+ UInt32 distanceLimit = vs->DistanceLimit;
+
+ Byte *dictionary = vs->Dictionary;
+ UInt32 dictionarySize = vs->Properties.DictionarySize;
+ UInt32 dictionaryPos = vs->DictionaryPos;
+
+ Byte tempDictionary[4];
+
+ #ifndef _LZMA_IN_CB
+ *inSizeProcessed = 0;
+ #endif
+ *outSizeProcessed = 0;
+ if (len == kLzmaStreamWasFinishedId)
+ return LZMA_RESULT_OK;
+
+ if (dictionarySize == 0)
+ {
+ dictionary = tempDictionary;
+ dictionarySize = 1;
+ tempDictionary[0] = vs->TempDictionary[0];
+ }
+
+ if (len == kLzmaNeedInitId)
+ {
+ {
+ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + vs->Properties.lp));
+ UInt32 i;
+ for (i = 0; i < numProbs; i++)
+ p[i] = kBitModelTotal >> 1;
+ rep0 = rep1 = rep2 = rep3 = 1;
+ state = 0;
+ globalPos = 0;
+ distanceLimit = 0;
+ dictionaryPos = 0;
+ dictionary[dictionarySize - 1] = 0;
+ #ifdef _LZMA_IN_CB
+ RC_INIT;
+ #else
+ RC_INIT(inStream, inSize);
+ #endif
+ }
+ len = 0;
+ }
+ while(len != 0 && nowPos < outSize)
+ {
+ UInt32 pos = dictionaryPos - rep0;
+ if (pos >= dictionarySize)
+ pos += dictionarySize;
+ outStream[nowPos++] = dictionary[dictionaryPos] = dictionary[pos];
+ if (++dictionaryPos == dictionarySize)
+ dictionaryPos = 0;
+ len--;
+ }
+ if (dictionaryPos == 0)
+ previousByte = dictionary[dictionarySize - 1];
+ else
+ previousByte = dictionary[dictionaryPos - 1];
+
+ #else /* if !_LZMA_OUT_READ */
+
+ int state = 0;
+ UInt32 rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1;
+ int len = 0;
+ const Byte *Buffer;
+ const Byte *BufferLim;
+ UInt32 Range;
+ UInt32 Code;
+
+ #ifndef _LZMA_IN_CB
+ *inSizeProcessed = 0;
+ #endif
+ *outSizeProcessed = 0;
+
+ {
+ UInt32 i;
+ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + vs->Properties.lp));
+ for (i = 0; i < numProbs; i++)
+ p[i] = kBitModelTotal >> 1;
+ }
+
+ #ifdef _LZMA_IN_CB
+ RC_INIT;
+ #else
+ RC_INIT(inStream, inSize);
+ #endif
+
+ #endif /* _LZMA_OUT_READ */
+
+ while(nowPos < outSize)
+ {
+ CProb *prob;
+ UInt32 bound;
+ int posState = (int)(
+ (nowPos
+ #ifdef _LZMA_OUT_READ
+ + globalPos
+ #endif
+ )
+ & posStateMask);
+
+ prob = p + IsMatch + (state << kNumPosBitsMax) + posState;
+ IfBit0(prob)
+ {
+ int symbol = 1;
+ UpdateBit0(prob)
+ prob = p + Literal + (LZMA_LIT_SIZE *
+ (((
+ (nowPos
+ #ifdef _LZMA_OUT_READ
+ + globalPos
+ #endif
+ )
+ & literalPosMask) << lc) + (previousByte >> (8 - lc))));
+
+ if (state >= kNumLitStates)
+ {
+ int matchByte;
+ #ifdef _LZMA_OUT_READ
+ UInt32 pos = dictionaryPos - rep0;
+ if (pos >= dictionarySize)
+ pos += dictionarySize;
+ matchByte = dictionary[pos];
+ #else
+ matchByte = outStream[nowPos - rep0];
+ #endif
+ do
+ {
+ int bit;
+ CProb *probLit;
+ matchByte <<= 1;
+ bit = (matchByte & 0x100);
+ probLit = prob + 0x100 + bit + symbol;
+ RC_GET_BIT2(probLit, symbol, if (bit != 0) break, if (bit == 0) break)
+ }
+ while (symbol < 0x100);
+ }
+ while (symbol < 0x100)
+ {
+ CProb *probLit = prob + symbol;
+ RC_GET_BIT(probLit, symbol)
+ }
+ previousByte = (Byte)symbol;
+
+ outStream[nowPos++] = previousByte;
+ #ifdef _LZMA_OUT_READ
+ if (distanceLimit < dictionarySize)
+ distanceLimit++;
+
+ dictionary[dictionaryPos] = previousByte;
+ if (++dictionaryPos == dictionarySize)
+ dictionaryPos = 0;
+ #endif
+ if (state < 4) state = 0;
+ else if (state < 10) state -= 3;
+ else state -= 6;
+ }
+ else
+ {
+ UpdateBit1(prob);
+ prob = p + IsRep + state;
+ IfBit0(prob)
+ {
+ UpdateBit0(prob);
+ rep3 = rep2;
+ rep2 = rep1;
+ rep1 = rep0;
+ state = state < kNumLitStates ? 0 : 3;
+ prob = p + LenCoder;
+ }
+ else
+ {
+ UpdateBit1(prob);
+ prob = p + IsRepG0 + state;
+ IfBit0(prob)
+ {
+ UpdateBit0(prob);
+ prob = p + IsRep0Long + (state << kNumPosBitsMax) + posState;
+ IfBit0(prob)
+ {
+ #ifdef _LZMA_OUT_READ
+ UInt32 pos;
+ #endif
+ UpdateBit0(prob);
+
+ #ifdef _LZMA_OUT_READ
+ if (distanceLimit == 0)
+ #else
+ if (nowPos == 0)
+ #endif
+ return LZMA_RESULT_DATA_ERROR;
+
+ state = state < kNumLitStates ? 9 : 11;
+ #ifdef _LZMA_OUT_READ
+ pos = dictionaryPos - rep0;
+ if (pos >= dictionarySize)
+ pos += dictionarySize;
+ previousByte = dictionary[pos];
+ dictionary[dictionaryPos] = previousByte;
+ if (++dictionaryPos == dictionarySize)
+ dictionaryPos = 0;
+ #else
+ previousByte = outStream[nowPos - rep0];
+ #endif
+ outStream[nowPos++] = previousByte;
+ #ifdef _LZMA_OUT_READ
+ if (distanceLimit < dictionarySize)
+ distanceLimit++;
+ #endif
+
+ continue;
+ }
+ else
+ {
+ UpdateBit1(prob);
+ }
+ }
+ else
+ {
+ UInt32 distance;
+ UpdateBit1(prob);
+ prob = p + IsRepG1 + state;
+ IfBit0(prob)
+ {
+ UpdateBit0(prob);
+ distance = rep1;
+ }
+ else
+ {
+ UpdateBit1(prob);
+ prob = p + IsRepG2 + state;
+ IfBit0(prob)
+ {
+ UpdateBit0(prob);
+ distance = rep2;
+ }
+ else
+ {
+ UpdateBit1(prob);
+ distance = rep3;
+ rep3 = rep2;
+ }
+ rep2 = rep1;
+ }
+ rep1 = rep0;
+ rep0 = distance;
+ }
+ state = state < kNumLitStates ? 8 : 11;
+ prob = p + RepLenCoder;
+ }
+ {
+ int numBits, offset;
+ CProb *probLen = prob + LenChoice;
+ IfBit0(probLen)
+ {
+ UpdateBit0(probLen);
+ probLen = prob + LenLow + (posState << kLenNumLowBits);
+ offset = 0;
+ numBits = kLenNumLowBits;
+ }
+ else
+ {
+ UpdateBit1(probLen);
+ probLen = prob + LenChoice2;
+ IfBit0(probLen)
+ {
+ UpdateBit0(probLen);
+ probLen = prob + LenMid + (posState << kLenNumMidBits);
+ offset = kLenNumLowSymbols;
+ numBits = kLenNumMidBits;
+ }
+ else
+ {
+ UpdateBit1(probLen);
+ probLen = prob + LenHigh;
+ offset = kLenNumLowSymbols + kLenNumMidSymbols;
+ numBits = kLenNumHighBits;
+ }
+ }
+ RangeDecoderBitTreeDecode(probLen, numBits, len);
+ len += offset;
+ }
+
+ if (state < 4)
+ {
+ int posSlot;
+ state += kNumLitStates;
+ prob = p + PosSlot +
+ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) <<
+ kNumPosSlotBits);
+ RangeDecoderBitTreeDecode(prob, kNumPosSlotBits, posSlot);
+ if (posSlot >= kStartPosModelIndex)
+ {
+ int numDirectBits = ((posSlot >> 1) - 1);
+ rep0 = (2 | ((UInt32)posSlot & 1));
+ if (posSlot < kEndPosModelIndex)
+ {
+ rep0 <<= numDirectBits;
+ prob = p + SpecPos + rep0 - posSlot - 1;
+ }
+ else
+ {
+ numDirectBits -= kNumAlignBits;
+ do
+ {
+ RC_NORMALIZE
+ Range >>= 1;
+ rep0 <<= 1;
+ if (Code >= Range)
+ {
+ Code -= Range;
+ rep0 |= 1;
+ }
+ }
+ while (--numDirectBits != 0);
+ prob = p + Align;
+ rep0 <<= kNumAlignBits;
+ numDirectBits = kNumAlignBits;
+ }
+ {
+ int i = 1;
+ int mi = 1;
+ do
+ {
+ CProb *prob3 = prob + mi;
+ RC_GET_BIT2(prob3, mi, ; , rep0 |= i);
+ i <<= 1;
+ }
+ while(--numDirectBits != 0);
+ }
+ }
+ else
+ rep0 = posSlot;
+ if (++rep0 == (UInt32)(0))
+ {
+ /* it's for stream version */
+ len = kLzmaStreamWasFinishedId;
+ break;
+ }
+ }
+
+ len += kMatchMinLen;
+ #ifdef _LZMA_OUT_READ
+ if (rep0 > distanceLimit)
+ #else
+ if (rep0 > nowPos)
+ #endif
+ return LZMA_RESULT_DATA_ERROR;
+
+ #ifdef _LZMA_OUT_READ
+ if (dictionarySize - distanceLimit > (UInt32)len)
+ distanceLimit += len;
+ else
+ distanceLimit = dictionarySize;
+ #endif
+
+ do
+ {
+ #ifdef _LZMA_OUT_READ
+ UInt32 pos = dictionaryPos - rep0;
+ if (pos >= dictionarySize)
+ pos += dictionarySize;
+ previousByte = dictionary[pos];
+ dictionary[dictionaryPos] = previousByte;
+ if (++dictionaryPos == dictionarySize)
+ dictionaryPos = 0;
+ #else
+ previousByte = outStream[nowPos - rep0];
+ #endif
+ len--;
+ outStream[nowPos++] = previousByte;
+ }
+ while(len != 0 && nowPos < outSize);
+ }
+ }
+ RC_NORMALIZE;
+
+ #ifdef _LZMA_OUT_READ
+ vs->Range = Range;
+ vs->Code = Code;
+ vs->DictionaryPos = dictionaryPos;
+ vs->GlobalPos = globalPos + (UInt32)nowPos;
+ vs->DistanceLimit = distanceLimit;
+ vs->Reps[0] = rep0;
+ vs->Reps[1] = rep1;
+ vs->Reps[2] = rep2;
+ vs->Reps[3] = rep3;
+ vs->State = state;
+ vs->RemainLen = len;
+ vs->TempDictionary[0] = tempDictionary[0];
+ #endif
+
+ #ifdef _LZMA_IN_CB
+ vs->Buffer = Buffer;
+ vs->BufferLim = BufferLim;
+ #else
+ *inSizeProcessed = (SizeT)(Buffer - inStream);
+ #endif
+ *outSizeProcessed = nowPos;
+ return LZMA_RESULT_OK;
+}
diff --git a/target/linux/generic/image/lzma-loader/src/LzmaDecode.h b/target/linux/generic/image/lzma-loader/src/LzmaDecode.h
new file mode 100644
index 000000000..213062af1
--- /dev/null
+++ b/target/linux/generic/image/lzma-loader/src/LzmaDecode.h
@@ -0,0 +1,131 @@
+/*
+ LzmaDecode.h
+ LZMA Decoder interface
+
+ LZMA SDK 4.21 Copyright (c) 1999-2005 Igor Pavlov (2005-06-08)
+ http://www.7-zip.org/
+
+ LZMA SDK is licensed under two licenses:
+ 1) GNU Lesser General Public License (GNU LGPL)
+ 2) Common Public License (CPL)
+ It means that you can select one of these two licenses and
+ follow rules of that license.
+
+ SPECIAL EXCEPTION:
+ Igor Pavlov, as the author of this code, expressly permits you to
+ statically or dynamically link your code (or bind by name) to the
+ interfaces of this file without subjecting your linked code to the
+ terms of the CPL or GNU LGPL. Any modifications or additions
+ to this file, however, are subject to the LGPL or CPL terms.
+*/
+
+#ifndef __LZMADECODE_H
+#define __LZMADECODE_H
+
+/* #define _LZMA_IN_CB */
+/* Use callback for input data */
+
+/* #define _LZMA_OUT_READ */
+/* Use read function for output data */
+
+/* #define _LZMA_PROB32 */
+/* It can increase speed on some 32-bit CPUs,
+ but memory usage will be doubled in that case */
+
+/* #define _LZMA_LOC_OPT */
+/* Enable local speed optimizations inside code */
+
+/* #define _LZMA_SYSTEM_SIZE_T */
+/* Use the system's size_t. You can use it to enable support for 64-bit sizes. */
+
+#ifndef UInt32
+#ifdef _LZMA_UINT32_IS_ULONG
+#define UInt32 unsigned long
+#else
+#define UInt32 unsigned int
+#endif
+#endif
+
+#ifndef SizeT
+#ifdef _LZMA_SYSTEM_SIZE_T
+#include <stddef.h>
+#define SizeT size_t
+#else
+#define SizeT UInt32
+#endif
+#endif
+
+#ifdef _LZMA_PROB32
+#define CProb UInt32
+#else
+#define CProb unsigned short
+#endif
+
+#define LZMA_RESULT_OK 0
+#define LZMA_RESULT_DATA_ERROR 1
+
+#ifdef _LZMA_IN_CB
+typedef struct _ILzmaInCallback
+{
+ int (*Read)(void *object, const unsigned char **buffer, SizeT *bufferSize);
+} ILzmaInCallback;
+#endif
+
+#define LZMA_BASE_SIZE 1846
+#define LZMA_LIT_SIZE 768
+
+#define LZMA_PROPERTIES_SIZE 5
+
+typedef struct _CLzmaProperties
+{
+ int lc;
+ int lp;
+ int pb;
+ #ifdef _LZMA_OUT_READ
+ UInt32 DictionarySize;
+ #endif
+}CLzmaProperties;
+
+int LzmaDecodeProperties(CLzmaProperties *propsRes, const unsigned char *propsData, int size);
+
+#define LzmaGetNumProbs(Properties) (LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((Properties)->lc + (Properties)->lp)))
+
+#define kLzmaNeedInitId (-2)
+
+typedef struct _CLzmaDecoderState
+{
+ CLzmaProperties Properties;
+ CProb *Probs;
+
+ #ifdef _LZMA_IN_CB
+ const unsigned char *Buffer;
+ const unsigned char *BufferLim;
+ #endif
+
+ #ifdef _LZMA_OUT_READ
+ unsigned char *Dictionary;
+ UInt32 Range;
+ UInt32 Code;
+ UInt32 DictionaryPos;
+ UInt32 GlobalPos;
+ UInt32 DistanceLimit;
+ UInt32 Reps[4];
+ int State;
+ int RemainLen;
+ unsigned char TempDictionary[4];
+ #endif
+} CLzmaDecoderState;
+
+#ifdef _LZMA_OUT_READ
+#define LzmaDecoderInit(vs) { (vs)->RemainLen = kLzmaNeedInitId; }
+#endif
+
+int LzmaDecode(CLzmaDecoderState *vs,
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *inCallback,
+ #else
+ const unsigned char *inStream, SizeT inSize, SizeT *inSizeProcessed,
+ #endif
+ unsigned char *outStream, SizeT outSize, SizeT *outSizeProcessed);
+
+#endif
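A hedged usage sketch for this interface with neither _LZMA_IN_CB nor _LZMA_OUT_READ defined. The way the first header byte is split into lc/lp/pb and the 13-byte header skip mirror the loader's decompress.c later in this commit; the wrapper function itself is illustrative and omits error checking for short inputs.

#include "LzmaDecode.h"

/* Decode 'in_size' bytes of an LZMA stream (including its 13-byte header)
 * into 'out'.  'probs_mem' must point at scratch space holding at least
 * LzmaGetNumProbs(&state.Properties) CProb entries. */
static int demo_lzma_decode(const unsigned char *in, SizeT in_size,
			    unsigned char *out, SizeT out_size,
			    CProb *probs_mem)
{
	CLzmaDecoderState state;
	SizeT in_done, out_done;
	unsigned char prop0 = in[0];

	/* The first header byte packs the properties as lc + 9 * (lp + 5 * pb). */
	state.Properties.lc = prop0 % 9;
	prop0 /= 9;
	state.Properties.lp = prop0 % 5;
	state.Properties.pb = prop0 / 5;
	state.Probs = probs_mem;

	/* Skip the 5 property bytes and the 8-byte uncompressed size field. */
	return LzmaDecode(&state, in + 13, in_size - 13, &in_done,
			  out, out_size, &out_done);
}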
diff --git a/target/linux/generic/image/lzma-loader/src/Makefile b/target/linux/generic/image/lzma-loader/src/Makefile
new file mode 100644
index 000000000..910172c4f
--- /dev/null
+++ b/target/linux/generic/image/lzma-loader/src/Makefile
@@ -0,0 +1,68 @@
+#
+# Copyright (C) 2006 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+RAMSTART = 0x80000000
+RAMSIZE = 0x00100000 # 1MB
+LOADADDR = 0x80400000 # RAM start + 4M
+KERNEL_ENTRY = 0x80001000
+IMAGE_COPY:=0
+
+CROSS_COMPILE = mips-linux-
+
+OBJCOPY:= $(CROSS_COMPILE)objcopy -O binary -R .reginfo -R .note -R .comment -R .mdebug -S
+CFLAGS := -fno-builtin -Os -G 0 -ffunction-sections -mno-abicalls -fno-pic -mabi=32 -march=mips32 -Wa,-32 -Wa,-march=mips32 -Wa,-mips32 -Wa,--trap -Wall -DRAMSTART=${RAMSTART} -DRAMSIZE=${RAMSIZE} -DKERNEL_ENTRY=${KERNEL_ENTRY} -D_LZMA_IN_CB
+ifeq ($(IMAGE_COPY),1)
+CFLAGS += -DLOADADDR=${LOADADDR} -DIMAGE_COPY=1
+endif
+
+.S.s:
+ $(CPP) $(CFLAGS) $< -o $*.s
+.S.o:
+ $(CC) $(CFLAGS) -c $< -o $*.o
+.c.o:
+ $(CC) $(CFLAGS) -c $< -o $*.o
+
+CC = $(CROSS_COMPILE)gcc
+LD = $(CROSS_COMPILE)ld
+OBJDUMP = $(CROSS_COMPILE)objdump
+
+O_FORMAT = $(shell $(OBJDUMP) -i | head -2 | grep elf32)
+
+# Drop some uninteresting sections in the kernel.
+# This is only relevant for ELF kernels but doesn't hurt a.out
+drop-sections = .reginfo .mdebug .comment
+strip-flags = $(addprefix --remove-section=,$(drop-sections))
+
+all : lzma.elf lzma.bin
+
+lzma.lds: lzma.lds.in
+ sed -e 's,@LOADADDR@,$(LOADADDR),g' -e 's,@ENTRY@,_start,g' $< >$@
+
+kernel.o: vmlinux.lzma lzma.lds
+ $(LD) -r -b binary --oformat $(O_FORMAT) -o $@ $<
+
+lzma.bin: lzma.elf
+ $(OBJCOPY) $< $@
+
+ifeq ($(IMAGE_COPY),1)
+LOADER_ENTRY ?= $(KERNEL_ENTRY)
+lzma.o: decompress.o LzmaDecode.o kernel.o
+ sed -e 's,@LOADADDR@,$(LOADADDR),g' -e 's,@ENTRY@,entry,g' lzma.lds.in >lzma-stage2.lds
+ $(LD) -static --no-warn-mismatch -e entry -Tlzma-stage2.lds -o temp-$@ $^
+ $(OBJCOPY) temp-$@ lzma.tmp
+ @echo "SECTIONS { .data : { code_start = .; *(.data) code_stop = .; }}" > lzma-data.lds
+ $(LD) -no-warn-mismatch -T lzma-data.lds -r -o $@ -b binary lzma.tmp --oformat $(O_FORMAT)
+
+lzma.elf: start.o lzma.o
+ sed -e 's,@LOADADDR@,$(LOADER_ENTRY),g' lzma-copy.lds.in >lzma-copy.lds
+ $(LD) -s -Tlzma-copy.lds -o $@ $^
+else
+lzma.elf: start.o decompress.o LzmaDecode.o kernel.o
+ $(LD) -s -Tlzma.lds -o $@ $^
+endif
+
+clean:
+ rm -f *.o lzma.elf lzma.bin *.tmp *.lds
diff --git a/target/linux/generic/image/lzma-loader/src/decompress.c b/target/linux/generic/image/lzma-loader/src/decompress.c
new file mode 100644
index 000000000..45ac509cd
--- /dev/null
+++ b/target/linux/generic/image/lzma-loader/src/decompress.c
@@ -0,0 +1,157 @@
+/*
+ * LZMA compressed kernel decompressor for bcm947xx boards
+ *
+ * Copyright (C) 2005 by Oleg I. Vdovikin <oleg@cs.msu.su>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * Please note, this code was based on the bunzip2 decompressor code
+ * by Manuel Novoa III (mjn3@codepoet.org), although the only things
+ * left are the idea and part of the original vendor code
+ *
+ *
+ * 12-Mar-2005 Mineharu Takahara <mtakahar@yahoo.com>
+ * pass actual output size to decoder (stream mode
+ * compressed input is not a requirement anymore)
+ *
+ * 24-Apr-2005 Oleg I. Vdovikin
+ * reordered functions using lds script, removed forward decl
+ *
+ * ??-Nov-2005 Mike Baker
+ * reorder the script as an lzma wrapper; do not depend on flash access
+ */
+
+#include "LzmaDecode.h"
+
+#define KSEG0 0x80000000
+#define KSEG1 0xa0000000
+
+#define KSEG1ADDR(a) ((((unsigned)(a)) & 0x1fffffffU) | KSEG1)
+
+#define Index_Invalidate_I 0x00
+#define Index_Writeback_Inv_D 0x01
+
+#define cache_unroll(base,op) \
+ __asm__ __volatile__( \
+ ".set noreorder;\n" \
+ ".set mips3;\n" \
+ "cache %1, (%0);\n" \
+ ".set mips0;\n" \
+ ".set reorder\n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+
+static __inline__ void blast_icache(unsigned long size, unsigned long lsize)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = (start + size);
+
+ while(start < end) {
+ cache_unroll(start,Index_Invalidate_I);
+ start += lsize;
+ }
+}
+
+static __inline__ void blast_dcache(unsigned long size, unsigned long lsize)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = (start + size);
+
+ while(start < end) {
+ cache_unroll(start,Index_Writeback_Inv_D);
+ start += lsize;
+ }
+}
+
+unsigned char *data;
+
+static int read_byte(void *object, unsigned char **buffer, UInt32 *bufferSize)
+{
+ *bufferSize = 1;
+ *buffer = data;
+ ++data;
+ return LZMA_RESULT_OK;
+}
+
+static __inline__ unsigned char get_byte(void)
+{
+ unsigned char *buffer;
+ UInt32 fake;
+
+ return read_byte(0, &buffer, &fake), *buffer;
+}
+
+/* This puts the lzma workspace 128k below the end of RAM.
+ * That should be enough for both lzma and the stack.
+ */
+static char *buffer = (char *)(RAMSTART + RAMSIZE - 0x00020000);
+extern char lzma_start[];
+extern char lzma_end[];
+
+/* should be the first function */
+void entry(unsigned long icache_size, unsigned long icache_lsize,
+ unsigned long dcache_size, unsigned long dcache_lsize)
+{
+ unsigned int i; /* temp value */
+ unsigned int osize; /* uncompressed size */
+ volatile unsigned int arg0, arg1, arg2, arg3;
+
+ /* restore argument registers */
+ __asm__ __volatile__ ("ori %0, $12, 0":"=r"(arg0));
+ __asm__ __volatile__ ("ori %0, $13, 0":"=r"(arg1));
+ __asm__ __volatile__ ("ori %0, $14, 0":"=r"(arg2));
+ __asm__ __volatile__ ("ori %0, $15, 0":"=r"(arg3));
+
+ ILzmaInCallback callback;
+ CLzmaDecoderState vs;
+ callback.Read = read_byte;
+
+ data = lzma_start;
+
+ /* lzma args */
+ i = get_byte();
+ vs.Properties.lc = i % 9, i = i / 9;
+ vs.Properties.lp = i % 5, vs.Properties.pb = i / 5;
+
+ vs.Probs = (CProb *)buffer;
+
+ /* skip rest of the LZMA coder property */
+ for (i = 0; i < 4; i++)
+ get_byte();
+
+ /* read the lower half of uncompressed size in the header */
+ osize = ((unsigned int)get_byte()) +
+ ((unsigned int)get_byte() << 8) +
+ ((unsigned int)get_byte() << 16) +
+ ((unsigned int)get_byte() << 24);
+
+ /* skip rest of the header (upper half of uncompressed size) */
+ for (i = 0; i < 4; i++)
+ get_byte();
+
+ /* decompress kernel */
+ if ((i = LzmaDecode(&vs, &callback,
+ (unsigned char*)KERNEL_ENTRY, osize, &osize)) == LZMA_RESULT_OK)
+ {
+ blast_dcache(dcache_size, dcache_lsize);
+ blast_icache(icache_size, icache_lsize);
+
+ /* Jump to load address */
+ ((void (*)(int a0, int a1, int a2, int a3)) KERNEL_ENTRY)(arg0, arg1, arg2, arg3);
+ }
+}
diff --git a/target/linux/generic/image/lzma-loader/src/lzma-copy.lds.in b/target/linux/generic/image/lzma-loader/src/lzma-copy.lds.in
new file mode 100644
index 000000000..fbc87ab8e
--- /dev/null
+++ b/target/linux/generic/image/lzma-loader/src/lzma-copy.lds.in
@@ -0,0 +1,20 @@
+OUTPUT_ARCH(mips)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = @LOADADDR@;
+ .text :
+ {
+ _ftext = . ;
+ *(.text)
+ *(.rodata)
+ } =0
+
+ .reginfo : { *(.reginfo) }
+
+ .bss :
+ {
+ *(.bss)
+ }
+}
diff --git a/target/linux/generic/image/lzma-loader/src/lzma.lds.in b/target/linux/generic/image/lzma-loader/src/lzma.lds.in
new file mode 100644
index 000000000..6021cec01
--- /dev/null
+++ b/target/linux/generic/image/lzma-loader/src/lzma.lds.in
@@ -0,0 +1,24 @@
+OUTPUT_ARCH(mips)
+ENTRY(@ENTRY@)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = @LOADADDR@;
+ .text :
+ {
+ _ftext = . ;
+ *(.text.entry)
+ *(.text)
+ *(.rodata)
+ lzma_start = .;
+ kernel.o
+ lzma_end = .;
+ } =0
+
+ .reginfo : { *(.reginfo) }
+
+ .bss :
+ {
+ *(.bss)
+ }
+}
diff --git a/target/linux/generic/image/lzma-loader/src/print.c b/target/linux/generic/image/lzma-loader/src/print.c
new file mode 100644
index 000000000..950687bef
--- /dev/null
+++ b/target/linux/generic/image/lzma-loader/src/print.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2001 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "print.h"
+
+/* macros */
+#define IsDigit(x) ( ((x) >= '0') && ((x) <= '9') )
+#define Ctod(x) ( (x) - '0')
+
+/* forward declaration */
+extern int PrintChar(char *, char, int, int);
+extern int PrintString(char *, char *, int, int);
+extern int PrintNum(char *, unsigned long, int, int, int, int, char, int);
+
+/* private variable */
+static const char theFatalMsg[] = "fatal error in lp_Print!";
+
+/* -*-
+ * A low level printf() function.
+ */
+void
+lp_Print(void (*output)(void *, char *, int),
+ void * arg,
+ char *fmt,
+ va_list ap)
+{
+
+#define OUTPUT(arg, s, l) \
+ { if (((l) < 0) || ((l) > LP_MAX_BUF)) { \
+ (*output)(arg, (char*)theFatalMsg, sizeof(theFatalMsg)-1); for(;;); \
+ } else { \
+ (*output)(arg, s, l); \
+ } \
+ }
+
+ char buf[LP_MAX_BUF];
+
+ char c;
+ char *s;
+ long int num;
+
+ int longFlag;
+ int negFlag;
+ int width;
+ int prec;
+ int ladjust;
+ char padc;
+
+ int length;
+
+ for(;;) {
+ {
+ /* scan for the next '%' */
+ char *fmtStart = fmt;
+ while ( (*fmt != '\0') && (*fmt != '%')) {
+ fmt ++;
+ }
+
+ /* flush the string found so far */
+ OUTPUT(arg, fmtStart, fmt-fmtStart);
+
+ /* are we hitting the end? */
+ if (*fmt == '\0') break;
+ }
+
+ /* we found a '%' */
+ fmt ++;
+
+ /* check for long */
+ if (*fmt == 'l') {
+ longFlag = 1;
+ fmt ++;
+ } else {
+ longFlag = 0;
+ }
+
+ /* check for other prefixes */
+ width = 0;
+ prec = -1;
+ ladjust = 0;
+ padc = ' ';
+
+ if (*fmt == '-') {
+ ladjust = 1;
+ fmt ++;
+ }
+
+ if (*fmt == '0') {
+ padc = '0';
+ fmt++;
+ }
+
+ if (IsDigit(*fmt)) {
+ while (IsDigit(*fmt)) {
+ width = 10 * width + Ctod(*fmt++);
+ }
+ }
+
+ if (*fmt == '.') {
+ fmt ++;
+ if (IsDigit(*fmt)) {
+ prec = 0;
+ while (IsDigit(*fmt)) {
+ prec = prec*10 + Ctod(*fmt++);
+ }
+ }
+ }
+
+
+ /* check format flag */
+ negFlag = 0;
+ switch (*fmt) {
+ case 'b':
+ if (longFlag) {
+ num = va_arg(ap, long int);
+ } else {
+ num = va_arg(ap, int);
+ }
+ length = PrintNum(buf, num, 2, 0, width, ladjust, padc, 0);
+ OUTPUT(arg, buf, length);
+ break;
+
+ case 'd':
+ case 'D':
+ if (longFlag) {
+ num = va_arg(ap, long int);
+ } else {
+ num = va_arg(ap, int);
+ }
+ if (num < 0) {
+ num = - num;
+ negFlag = 1;
+ }
+ length = PrintNum(buf, num, 10, negFlag, width, ladjust, padc, 0);
+ OUTPUT(arg, buf, length);
+ break;
+
+ case 'o':
+ case 'O':
+ if (longFlag) {
+ num = va_arg(ap, long int);
+ } else {
+ num = va_arg(ap, int);
+ }
+ length = PrintNum(buf, num, 8, 0, width, ladjust, padc, 0);
+ OUTPUT(arg, buf, length);
+ break;
+
+ case 'u':
+ case 'U':
+ if (longFlag) {
+ num = va_arg(ap, long int);
+ } else {
+ num = va_arg(ap, int);
+ }
+ length = PrintNum(buf, num, 10, 0, width, ladjust, padc, 0);
+ OUTPUT(arg, buf, length);
+ break;
+
+ case 'x':
+ if (longFlag) {
+ num = va_arg(ap, long int);
+ } else {
+ num = va_arg(ap, int);
+ }
+ length = PrintNum(buf, num, 16, 0, width, ladjust, padc, 0);
+ OUTPUT(arg, buf, length);
+ break;
+
+ case 'X':
+ if (longFlag) {
+ num = va_arg(ap, long int);
+ } else {
+ num = va_arg(ap, int);
+ }
+ length = PrintNum(buf, num, 16, 0, width, ladjust, padc, 1);
+ OUTPUT(arg, buf, length);
+ break;
+
+ case 'c':
+ c = (char)va_arg(ap, int);
+ length = PrintChar(buf, c, width, ladjust);
+ OUTPUT(arg, buf, length);
+ break;
+
+ case 's':
+ s = (char*)va_arg(ap, char *);
+ length = PrintString(buf, s, width, ladjust);
+ OUTPUT(arg, buf, length);
+ break;
+
+ case '\0':
+ fmt --;
+ break;
+
+ default:
+ /* output this char as it is */
+ OUTPUT(arg, fmt, 1);
+ } /* switch (*fmt) */
+
+ fmt ++;
+ } /* for(;;) */
+
+ /* special termination call */
+ OUTPUT(arg, "\0", 1);
+}
+
+
+/* --------------- local help functions --------------------- */
+int
+PrintChar(char * buf, char c, int length, int ladjust)
+{
+ int i;
+
+ if (length < 1) length = 1;
+ if (ladjust) {
+ *buf = c;
+ for (i=1; i< length; i++) buf[i] = ' ';
+ } else {
+ for (i=0; i< length-1; i++) buf[i] = ' ';
+ buf[length - 1] = c;
+ }
+ return length;
+}
+
+int
+PrintString(char * buf, char* s, int length, int ladjust)
+{
+ int i;
+ int len=0;
+ char* s1 = s;
+ while (*s1++) len++;
+ if (length < len) length = len;
+
+ if (ladjust) {
+ for (i=0; i< len; i++) buf[i] = s[i];
+ for (i=len; i< length; i++) buf[i] = ' ';
+ } else {
+ for (i=0; i< length-len; i++) buf[i] = ' ';
+ for (i=length-len; i < length; i++) buf[i] = s[i-length+len];
+ }
+ return length;
+}
+
+int
+PrintNum(char * buf, unsigned long u, int base, int negFlag,
+ int length, int ladjust, char padc, int upcase)
+{
+ /* algorithm :
+ * 1. prints the number from left to right in reverse form.
+ * 2. fill the remaining spaces with padc if length is longer than
+ * the actual length
+ * TRICKY : if left adjusted, no "0" padding.
+ * if negative, insert "0" padding between "0" and number.
+ * 3. if (!ladjust) we reverse the whole string including paddings
+ * 4. otherwise we only reverse the actual string representing the num.
+ */
+
+ int actualLength =0;
+ char *p = buf;
+ int i;
+
+ do {
+ int tmp = u %base;
+ if (tmp <= 9) {
+ *p++ = '0' + tmp;
+ } else if (upcase) {
+ *p++ = 'A' + tmp - 10;
+ } else {
+ *p++ = 'a' + tmp - 10;
+ }
+ u /= base;
+ } while (u != 0);
+
+ if (negFlag) {
+ *p++ = '-';
+ }
+
+ /* figure out actual length and adjust the maximum length */
+ actualLength = p - buf;
+ if (length < actualLength) length = actualLength;
+
+ /* add padding */
+ if (ladjust) {
+ padc = ' ';
+ }
+ if (negFlag && !ladjust && (padc == '0')) {
+ for (i = actualLength-1; i< length-1; i++) buf[i] = padc;
+ buf[length -1] = '-';
+ } else {
+ for (i = actualLength; i< length; i++) buf[i] = padc;
+ }
+
+
+ /* prepare to reverse the string */
+ {
+ int begin = 0;
+ int end;
+ if (ladjust) {
+ end = actualLength - 1;
+ } else {
+ end = length -1;
+ }
+
+ while (end > begin) {
+ char tmp = buf[begin];
+ buf[begin] = buf[end];
+ buf[end] = tmp;
+ begin ++;
+ end --;
+ }
+ }
+
+ /* adjust the string pointer */
+ return length;
+}
diff --git a/target/linux/generic/image/lzma-loader/src/print.h b/target/linux/generic/image/lzma-loader/src/print.h
new file mode 100644
index 000000000..b05146390
--- /dev/null
+++ b/target/linux/generic/image/lzma-loader/src/print.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2001 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef _print_h_
+#define _print_h_
+
+#include <stdarg.h>
+
+/* this is the maximum width for a variable */
+#define LP_MAX_BUF 80
+
+/* -*-
+ * The output function takes a void pointer, which is passed in as the
+ * second argument to lp_Print(). This black-box argument gives the
+ * output function a way to track state.
+ *
+ * The second argument to the output function is a pointer to a char buffer.
+ * The third argument specifies the number of chars to be output.
+ *
+ * The output function cannot assume the buffer is null-terminated after
+ * that many chars.
+ */
+void lp_Print(void (*output)(void *, char *, int),
+ void * arg,
+ char *fmt,
+ va_list ap);
+
+#endif
diff --git a/target/linux/generic/image/lzma-loader/src/printf.c b/target/linux/generic/image/lzma-loader/src/printf.c
new file mode 100644
index 000000000..49bd50d7c
--- /dev/null
+++ b/target/linux/generic/image/lzma-loader/src/printf.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2001 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "printf.h"
+#include "print.h"
+#include "uart16550.h"
+
+static void myoutput(void *arg, char *s, int l)
+{
+ int i;
+
+ // special termination call
+ if ((l==1) && (s[0] == '\0')) return;
+
+ for (i=0; i< l; i++) {
+ Uart16550Put(s[i]);
+ if (s[i] == '\n') Uart16550Put('\r');
+ }
+}
+
+void printf(char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ lp_Print(myoutput, 0, fmt, ap);
+ va_end(ap);
+}
diff --git a/target/linux/generic/image/lzma-loader/src/printf.h b/target/linux/generic/image/lzma-loader/src/printf.h
new file mode 100644
index 000000000..9b1c1df23
--- /dev/null
+++ b/target/linux/generic/image/lzma-loader/src/printf.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2001 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef _printf_h_
+#define _printf_h_
+
+#include <stdarg.h>
+void printf(char *fmt, ...);
+
+#endif /* _printf_h_ */
diff --git a/target/linux/generic/image/lzma-loader/src/start.S b/target/linux/generic/image/lzma-loader/src/start.S
new file mode 100644
index 000000000..864293379
--- /dev/null
+++ b/target/linux/generic/image/lzma-loader/src/start.S
@@ -0,0 +1,160 @@
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+#define KSEG0 0x80000000
+
+#define C0_CONFIG $16
+#define C0_TAGLO $28
+#define C0_TAGHI $29
+
+#define CONF1_DA_SHIFT 7 /* D$ associativity */
+#define CONF1_DA_MASK 0x00000380
+#define CONF1_DA_BASE 1
+#define CONF1_DL_SHIFT 10 /* D$ line size */
+#define CONF1_DL_MASK 0x00001c00
+#define CONF1_DL_BASE 2
+#define CONF1_DS_SHIFT 13 /* D$ sets/way */
+#define CONF1_DS_MASK 0x0000e000
+#define CONF1_DS_BASE 64
+#define CONF1_IA_SHIFT 16 /* I$ associativity */
+#define CONF1_IA_MASK 0x00070000
+#define CONF1_IA_BASE 1
+#define CONF1_IL_SHIFT 19 /* I$ line size */
+#define CONF1_IL_MASK 0x00380000
+#define CONF1_IL_BASE 2
+#define CONF1_IS_SHIFT 22 /* Instruction cache sets/way */
+#define CONF1_IS_MASK 0x01c00000
+#define CONF1_IS_BASE 64
+
+#define Index_Invalidate_I 0x00
+#define Index_Writeback_Inv_D 0x01
+
+LEAF(_start)
+
+ .set mips32
+ .set noreorder
+
+ /* save argument registers */
+ move t4, a0
+ move t5, a1
+ move t6, a2
+ move t7, a3
+
+ /* set up stack */
+ li sp, RAMSTART + RAMSIZE - 16
+
+#ifdef IMAGE_COPY
+ /* Copy decompressor code to the right place */
+ li t2, LOADADDR
+ add a0, t2, 0
+ la a1, code_start
+ la a2, code_stop
+$L1:
+ lw t0, 0(a1)
+ sw t0, 0(a0)
+ add a1, 4
+ add a0, 4
+ blt a1, a2, $L1
+ nop
+#endif
+
+ /* At this point we need to invalidate dcache and */
+ /* icache before jumping to new code */
+
+1: /* Get cache sizes */
+ mfc0 s0,C0_CONFIG,1
+
+ li s1,CONF1_DL_MASK
+ and s1,s0
+ beq s1,zero,nodc
+ nop
+
+ srl s1,CONF1_DL_SHIFT
+ li t0,CONF1_DL_BASE
+ sll s1,t0,s1 /* s1 has D$ cache line size */
+
+ li s2,CONF1_DA_MASK
+ and s2,s0
+ srl s2,CONF1_DA_SHIFT
+ addiu s2,CONF1_DA_BASE /* s2 now has D$ associativity */
+
+ li t0,CONF1_DS_MASK
+ and t0,s0
+ srl t0,CONF1_DS_SHIFT
+ li s3,CONF1_DS_BASE
+ sll s3,s3,t0 /* s3 has D$ sets per way */
+
+ multu s2,s3 /* sets/way * associativity */
+ mflo t0 /* total cache lines */
+
+ multu s1,t0 /* D$ linesize * lines */
+ mflo s2 /* s2 is now D$ size in bytes */
+
+ /* Initialize the D$: */
+ mtc0 zero,C0_TAGLO
+ mtc0 zero,C0_TAGHI
+
+ li t0,KSEG0 /* Just an address for the first $ line */
+ addu t1,t0,s2 /* + size of cache == end */
+
+1: cache Index_Writeback_Inv_D,0(t0)
+ bne t0,t1,1b
+ addu t0,s1
+
+nodc:
+ /* Now we get to do it all again for the I$ */
+
+ move s3,zero /* just in case there is no icache */
+ move s4,zero
+
+ li t0,CONF1_IL_MASK
+ and t0,s0
+ beq t0,zero,noic
+ nop
+
+ srl t0,CONF1_IL_SHIFT
+ li s3,CONF1_IL_BASE
+ sll s3,t0 /* s3 has I$ cache line size */
+
+ li t0,CONF1_IA_MASK
+ and t0,s0
+ srl t0,CONF1_IA_SHIFT
+ addiu s4,t0,CONF1_IA_BASE /* s4 now has I$ associativity */
+
+ li t0,CONF1_IS_MASK
+ and t0,s0
+ srl t0,CONF1_IS_SHIFT
+ li s5,CONF1_IS_BASE
+ sll s5,t0 /* s5 has I$ sets per way */
+
+ multu s4,s5 /* sets/way * associativity */
+ mflo t0 /* t0 is now total cache lines */
+
+ multu s3,t0 /* I$ linesize * lines */
+ mflo s4 /* s4 is cache size in bytes */
+
+ /* Initialize the I$: */
+ mtc0 zero,C0_TAGLO
+ mtc0 zero,C0_TAGHI
+
+ li t0,KSEG0 /* Just an address for the first $ line */
+ addu t1,t0,s4 /* + size of cache == end */
+
+1: cache Index_Invalidate_I,0(t0)
+ bne t0,t1,1b
+ addu t0,s3
+noic:
+ /* jump to main */
+ move a0,s3 /* icache line size */
+ move a1,s4 /* icache size */
+ move a2,s1 /* dcache line size */
+#ifdef IMAGE_COPY
+ jal t2
+#else
+ jal entry
+#endif
+ move a3,s2 /* dcache size */
+
+ .set reorder
+END(_start)
+
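The cache probing above is fairly dense MIPS assembly. As a readability aid only (this function is not part of the loader), here is the D-cache half of the Config1 decoding expressed in C, using the same field layout as the CONF1_* defines at the top of the file:

/* Derive D-cache total size and line size from the CP0 Config1 value,
 * mirroring the assembly: lsize = 2 << DL, ways = DA + 1, sets = 64 << DS,
 * size = lsize * ways * sets.  DL == 0 means there is no D-cache. */
static void demo_dcache_geometry(unsigned int conf1,
				 unsigned long *size, unsigned long *lsize)
{
	unsigned long dl, ways, sets;

	dl = (conf1 & 0x00001c00) >> 10;		/* CONF1_DL field */
	if (dl == 0) {
		*size = *lsize = 0;
		return;
	}
	*lsize = 2UL << dl;				/* CONF1_DL_BASE << DL */
	ways = ((conf1 & 0x00000380) >> 7) + 1;		/* CONF1_DA_BASE + DA */
	sets = 64UL << ((conf1 & 0x0000e000) >> 13);	/* CONF1_DS_BASE << DS */
	*size = *lsize * ways * sets;
}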
diff --git a/target/linux/generic/image/lzma-loader/src/uart16550.c b/target/linux/generic/image/lzma-loader/src/uart16550.c
new file mode 100644
index 000000000..7df572760
--- /dev/null
+++ b/target/linux/generic/image/lzma-loader/src/uart16550.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2001 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+
+#include "uart16550.h"
+
+/* === CONFIG === */
+
+#define BASE 0xb8058000
+#define MAX_BAUD 1152000
+#define REG_OFFSET 4
+
+/* === END OF CONFIG === */
+
+/* register offset */
+#define OFS_RCV_BUFFER (0*REG_OFFSET)
+#define OFS_TRANS_HOLD (0*REG_OFFSET)
+#define OFS_SEND_BUFFER (0*REG_OFFSET)
+#define OFS_INTR_ENABLE (1*REG_OFFSET)
+#define OFS_INTR_ID (2*REG_OFFSET)
+#define OFS_DATA_FORMAT (3*REG_OFFSET)
+#define OFS_LINE_CONTROL (3*REG_OFFSET)
+#define OFS_MODEM_CONTROL (4*REG_OFFSET)
+#define OFS_RS232_OUTPUT (4*REG_OFFSET)
+#define OFS_LINE_STATUS (5*REG_OFFSET)
+#define OFS_MODEM_STATUS (6*REG_OFFSET)
+#define OFS_RS232_INPUT (6*REG_OFFSET)
+#define OFS_SCRATCH_PAD (7*REG_OFFSET)
+
+#define OFS_DIVISOR_LSB (0*REG_OFFSET)
+#define OFS_DIVISOR_MSB (1*REG_OFFSET)
+
+
+/* memory-mapped read/write of the port */
+#define UART16550_READ(y) (*((volatile uint32*)(BASE + y)))
+#define UART16550_WRITE(y, z) ((*((volatile uint32*)(BASE + y))) = z)
+
+#define DEBUG_LED (*(unsigned short*)0xb7ffffc0)
+#define OutputLED(x) (DEBUG_LED = x)
+
+void Uart16550Init(uint32 baud, uint8 data, uint8 parity, uint8 stop)
+{
+ /* disable interrupts */
+ UART16550_WRITE(OFS_INTR_ENABLE, 0);
+
+ /* set up baud rate */
+ {
+ uint32 divisor;
+
+ /* set DLAB bit */
+ UART16550_WRITE(OFS_LINE_CONTROL, 0x80);
+
+ /* set divisor */
+ divisor = MAX_BAUD / baud;
+ UART16550_WRITE(OFS_DIVISOR_LSB, divisor & 0xff);
+ UART16550_WRITE(OFS_DIVISOR_MSB, (divisor & 0xff00)>>8);
+
+ /* clear DLAB bit */
+ UART16550_WRITE(OFS_LINE_CONTROL, 0x0);
+ }
+
+ /* set data format */
+ UART16550_WRITE(OFS_DATA_FORMAT, data | parity | stop);
+}
+
+uint8 Uart16550GetPoll()
+{
+ while((UART16550_READ(OFS_LINE_STATUS) & 0x1) == 0);
+ return UART16550_READ(OFS_RCV_BUFFER);
+}
+
+
+void Uart16550Put(uint8 byte)
+{
+ while ((UART16550_READ(OFS_LINE_STATUS) &0x20) == 0);
+ UART16550_WRITE(OFS_SEND_BUFFER, byte);
+}
+
diff --git a/target/linux/generic/image/lzma-loader/src/uart16550.h b/target/linux/generic/image/lzma-loader/src/uart16550.h
new file mode 100644
index 000000000..b3fd6fdd7
--- /dev/null
+++ b/target/linux/generic/image/lzma-loader/src/uart16550.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2001 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef _uart16550_h_
+#define _uart16550_h_
+
+typedef unsigned char uint8;
+typedef unsigned int uint32;
+
+#define UART16550_BAUD_2400 2400
+#define UART16550_BAUD_4800 4800
+#define UART16550_BAUD_9600 9600
+#define UART16550_BAUD_19200 19200
+#define UART16550_BAUD_38400 38400
+#define UART16550_BAUD_57600 57600
+#define UART16550_BAUD_115200 115200
+
+#define UART16550_PARITY_NONE 0
+#define UART16550_PARITY_ODD 0x08
+#define UART16550_PARITY_EVEN 0x18
+#define UART16550_PARITY_MARK 0x28
+#define UART16550_PARITY_SPACE 0x38
+
+#define UART16550_DATA_5BIT 0x0
+#define UART16550_DATA_6BIT 0x1
+#define UART16550_DATA_7BIT 0x2
+#define UART16550_DATA_8BIT 0x3
+
+#define UART16550_STOP_1BIT 0x0
+#define UART16550_STOP_2BIT 0x4
+
+void Uart16550Init(uint32 baud, uint8 data, uint8 parity, uint8 stop);
+
+/* blocking call */
+uint8 Uart16550GetPoll();
+
+void Uart16550Put(uint8 byte);
+
+#endif
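A hedged sketch of how this UART interface and the loader's printf() fit together; the 115200/8N1 settings are only an example, and whether the loader needs to (re)initialise the UART at all depends on what the bootloader already did.

#include "printf.h"
#include "uart16550.h"

/* Bring the console UART up at 115200 8N1, then print through printf(),
 * which hands each character to Uart16550Put(). */
static void demo_console_hello(void)
{
	Uart16550Init(UART16550_BAUD_115200,
		      UART16550_DATA_8BIT,
		      UART16550_PARITY_NONE,
		      UART16550_STOP_1BIT);

	printf("loader: kernel at 0x%x, %d bytes\n", 0x80001000, 1048576);
}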
diff --git a/target/linux/generic/patches-3.3/006-arm_kernel_xz_support.patch b/target/linux/generic/patches-3.3/006-arm_kernel_xz_support.patch
new file mode 100644
index 000000000..bfb175759
--- /dev/null
+++ b/target/linux/generic/patches-3.3/006-arm_kernel_xz_support.patch
@@ -0,0 +1,96 @@
+From 2d303b4683145f7dbc918bd14d04e1396581b2ce Mon Sep 17 00:00:00 2001
+From: Imre Kaloz <kaloz@openwrt.org>
+Date: Thu, 7 Jul 2011 12:05:21 +0200
+Subject: [PATCH] ARM: support XZ compressed kernels
+
+Wire up support for the XZ decompressor
+
+Signed-off-by: Imre Kaloz <kaloz@openwrt.org>
+---
+ arch/arm/Kconfig | 1 +
+ arch/arm/boot/compressed/Makefile | 11 +++++++++--
+ arch/arm/boot/compressed/decompress.c | 4 ++++
+ arch/arm/boot/compressed/piggy.xzkern.S | 6 ++++++
+ lib/xz/xz_dec_stream.c | 1 +
+ 5 files changed, 21 insertions(+), 2 deletions(-)
+ create mode 100644 arch/arm/boot/compressed/piggy.xzkern.S
+
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -21,6 +21,7 @@ config ARM
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_LZMA
++ select HAVE_KERNEL_XZ
+ select HAVE_IRQ_WORK
+ select HAVE_PERF_EVENTS
+ select PERF_USE_VMALLOC
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -92,6 +92,7 @@ SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/
+ suffix_$(CONFIG_KERNEL_GZIP) = gzip
+ suffix_$(CONFIG_KERNEL_LZO) = lzo
+ suffix_$(CONFIG_KERNEL_LZMA) = lzma
++suffix_$(CONFIG_KERNEL_XZ) = xzkern
+
+ # Borrowed libfdt files for the ATAG compatibility mode
+
+@@ -115,7 +116,7 @@ targets := vmlinux vmlinux.lds \
+ lib1funcs.o lib1funcs.S font.o font.c head.o misc.o $(OBJS)
+
+ # Make sure files are removed during clean
+-extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S $(libfdt) $(libfdt_hdrs)
++extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs)
+
+ ifeq ($(CONFIG_FUNCTION_TRACER),y)
+ ORIG_CFLAGS := $(KBUILD_CFLAGS)
+@@ -171,8 +172,14 @@ if [ $(words $(ZRELADDR)) -gt 1 -a "$(CO
+ false; \
+ fi
+
++# For __aeabi_llsl
++ashldi3 = $(obj)/ashldi3.o
++
++$(obj)/ashldi3.S: $(srctree)/arch/$(SRCARCH)/lib/ashldi3.S FORCE
++ $(call cmd,shipped)
++
+ $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \
+- $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE
++ $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) FORCE
+ @$(check_for_multiple_zreladdr)
+ $(call if_changed,ld)
+ @$(check_for_bad_syms)
+--- a/arch/arm/boot/compressed/decompress.c
++++ b/arch/arm/boot/compressed/decompress.c
+@@ -44,6 +44,12 @@ extern void error(char *);
+ #include "../../../../lib/decompress_unlzma.c"
+ #endif
+
++#ifdef CONFIG_KERNEL_XZ
++#define memmove memmove
++#define memcpy memcpy
++#include "../../../../lib/decompress_unxz.c"
++#endif
++
+ int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
+ {
+ return decompress(input, len, NULL, NULL, output, NULL, error);
+--- /dev/null
++++ b/arch/arm/boot/compressed/piggy.xzkern.S
+@@ -0,0 +1,6 @@
++ .section .piggydata,#alloc
++ .globl input_data
++input_data:
++ .incbin "arch/arm/boot/compressed/piggy.xzkern"
++ .globl input_data_end
++input_data_end:
+--- a/lib/xz/xz_dec_stream.c
++++ b/lib/xz/xz_dec_stream.c
+@@ -9,6 +9,7 @@
+
+ #include "xz_private.h"
+ #include "xz_stream.h"
++#include <linux/kernel.h>
+
+ /* Hash used to validate the Index field */
+ struct xz_dec_hash {
diff --git a/target/linux/generic/patches-3.3/020-ssb_update.patch b/target/linux/generic/patches-3.3/020-ssb_update.patch
new file mode 100644
index 000000000..e427574f7
--- /dev/null
+++ b/target/linux/generic/patches-3.3/020-ssb_update.patch
@@ -0,0 +1,837 @@
+--- a/drivers/ssb/b43_pci_bridge.c
++++ b/drivers/ssb/b43_pci_bridge.c
+@@ -29,11 +29,14 @@ static const struct pci_device_id b43_pc
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4321) },
++ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4322) },
++ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43222) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4324) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4325) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4328) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4329) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) },
++ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) },
+ { 0, },
+ };
+ MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl);
+--- a/drivers/ssb/driver_chipcommon_pmu.c
++++ b/drivers/ssb/driver_chipcommon_pmu.c
+@@ -13,6 +13,9 @@
+ #include <linux/ssb/ssb_driver_chipcommon.h>
+ #include <linux/delay.h>
+ #include <linux/export.h>
++#ifdef CONFIG_BCM47XX
++#include <asm/mach-bcm47xx/nvram.h>
++#endif
+
+ #include "ssb_private.h"
+
+@@ -92,10 +95,6 @@ static void ssb_pmu0_pllinit_r0(struct s
+ u32 pmuctl, tmp, pllctl;
+ unsigned int i;
+
+- if ((bus->chip_id == 0x5354) && !crystalfreq) {
+- /* The 5354 crystal freq is 25MHz */
+- crystalfreq = 25000;
+- }
+ if (crystalfreq)
+ e = pmu0_plltab_find_entry(crystalfreq);
+ if (!e)
+@@ -321,7 +320,11 @@ static void ssb_pmu_pll_init(struct ssb_
+ u32 crystalfreq = 0; /* in kHz. 0 = keep default freq. */
+
+ if (bus->bustype == SSB_BUSTYPE_SSB) {
+- /* TODO: The user may override the crystal frequency. */
++#ifdef CONFIG_BCM47XX
++ char buf[20];
++ if (nvram_getenv("xtalfreq", buf, sizeof(buf)) >= 0)
++ crystalfreq = simple_strtoul(buf, NULL, 0);
++#endif
+ }
+
+ switch (bus->chip_id) {
+@@ -330,7 +333,11 @@ static void ssb_pmu_pll_init(struct ssb_
+ ssb_pmu1_pllinit_r0(cc, crystalfreq);
+ break;
+ case 0x4328:
++ ssb_pmu0_pllinit_r0(cc, crystalfreq);
++ break;
+ case 0x5354:
++ if (crystalfreq == 0)
++ crystalfreq = 25000;
+ ssb_pmu0_pllinit_r0(cc, crystalfreq);
+ break;
+ case 0x4322:
+@@ -607,3 +614,34 @@ void ssb_pmu_set_ldo_paref(struct ssb_ch
+
+ EXPORT_SYMBOL(ssb_pmu_set_ldo_voltage);
+ EXPORT_SYMBOL(ssb_pmu_set_ldo_paref);
++
++u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc)
++{
++ struct ssb_bus *bus = cc->dev->bus;
++
++ switch (bus->chip_id) {
++ case 0x5354:
++ /* 5354 chip uses a non programmable PLL of frequency 240MHz */
++ return 240000000;
++ default:
++ ssb_printk(KERN_ERR PFX
++ "ERROR: PMU cpu clock unknown for device %04X\n",
++ bus->chip_id);
++ return 0;
++ }
++}
++
++u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc)
++{
++ struct ssb_bus *bus = cc->dev->bus;
++
++ switch (bus->chip_id) {
++ case 0x5354:
++ return 120000000;
++ default:
++ ssb_printk(KERN_ERR PFX
++ "ERROR: PMU controlclock unknown for device %04X\n",
++ bus->chip_id);
++ return 0;
++ }
++}
+--- a/drivers/ssb/driver_mipscore.c
++++ b/drivers/ssb/driver_mipscore.c
+@@ -208,6 +208,9 @@ u32 ssb_cpu_clock(struct ssb_mipscore *m
+ struct ssb_bus *bus = mcore->dev->bus;
+ u32 pll_type, n, m, rate = 0;
+
++ if (bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU)
++ return ssb_pmu_get_cpu_clock(&bus->chipco);
++
+ if (bus->extif.dev) {
+ ssb_extif_get_clockcontrol(&bus->extif, &pll_type, &n, &m);
+ } else if (bus->chipco.dev) {
+--- a/drivers/ssb/main.c
++++ b/drivers/ssb/main.c
+@@ -140,19 +140,6 @@ static void ssb_device_put(struct ssb_de
+ put_device(dev->dev);
+ }
+
+-static inline struct ssb_driver *ssb_driver_get(struct ssb_driver *drv)
+-{
+- if (drv)
+- get_driver(&drv->drv);
+- return drv;
+-}
+-
+-static inline void ssb_driver_put(struct ssb_driver *drv)
+-{
+- if (drv)
+- put_driver(&drv->drv);
+-}
+-
+ static int ssb_device_resume(struct device *dev)
+ {
+ struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
+@@ -250,11 +237,9 @@ int ssb_devices_freeze(struct ssb_bus *b
+ ssb_device_put(sdev);
+ continue;
+ }
+- sdrv = ssb_driver_get(drv_to_ssb_drv(sdev->dev->driver));
+- if (!sdrv || SSB_WARN_ON(!sdrv->remove)) {
+- ssb_device_put(sdev);
++ sdrv = drv_to_ssb_drv(sdev->dev->driver);
++ if (SSB_WARN_ON(!sdrv->remove))
+ continue;
+- }
+ sdrv->remove(sdev);
+ ctx->device_frozen[i] = 1;
+ }
+@@ -293,7 +278,6 @@ int ssb_devices_thaw(struct ssb_freeze_c
+ dev_name(sdev->dev));
+ result = err;
+ }
+- ssb_driver_put(sdrv);
+ ssb_device_put(sdev);
+ }
+
+@@ -1094,6 +1078,9 @@ u32 ssb_clockspeed(struct ssb_bus *bus)
+ u32 plltype;
+ u32 clkctl_n, clkctl_m;
+
++ if (bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU)
++ return ssb_pmu_get_controlclock(&bus->chipco);
++
+ if (ssb_extif_available(&bus->extif))
+ ssb_extif_get_clockcontrol(&bus->extif, &plltype,
+ &clkctl_n, &clkctl_m);
+--- a/drivers/ssb/pci.c
++++ b/drivers/ssb/pci.c
+@@ -178,6 +178,18 @@ err_pci:
+ #define SPEX(_outvar, _offset, _mask, _shift) \
+ SPEX16(_outvar, _offset, _mask, _shift)
+
++#define SPEX_ARRAY8(_field, _offset, _mask, _shift) \
++ do { \
++ SPEX(_field[0], _offset + 0, _mask, _shift); \
++ SPEX(_field[1], _offset + 2, _mask, _shift); \
++ SPEX(_field[2], _offset + 4, _mask, _shift); \
++ SPEX(_field[3], _offset + 6, _mask, _shift); \
++ SPEX(_field[4], _offset + 8, _mask, _shift); \
++ SPEX(_field[5], _offset + 10, _mask, _shift); \
++ SPEX(_field[6], _offset + 12, _mask, _shift); \
++ SPEX(_field[7], _offset + 14, _mask, _shift); \
++ } while (0)
++
+
+ static inline u8 ssb_crc8(u8 crc, u8 data)
+ {
+@@ -331,7 +343,6 @@ static void sprom_extract_r123(struct ss
+ {
+ int i;
+ u16 v;
+- s8 gain;
+ u16 loc[3];
+
+ if (out->revision == 3) /* rev 3 moved MAC */
+@@ -361,8 +372,9 @@ static void sprom_extract_r123(struct ss
+ SPEX(et0mdcport, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0M, 14);
+ SPEX(et1mdcport, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1M, 15);
+ SPEX(board_rev, SSB_SPROM1_BINF, SSB_SPROM1_BINF_BREV, 0);
+- SPEX(country_code, SSB_SPROM1_BINF, SSB_SPROM1_BINF_CCODE,
+- SSB_SPROM1_BINF_CCODE_SHIFT);
++ if (out->revision == 1)
++ SPEX(country_code, SSB_SPROM1_BINF, SSB_SPROM1_BINF_CCODE,
++ SSB_SPROM1_BINF_CCODE_SHIFT);
+ SPEX(ant_available_a, SSB_SPROM1_BINF, SSB_SPROM1_BINF_ANTA,
+ SSB_SPROM1_BINF_ANTA_SHIFT);
+ SPEX(ant_available_bg, SSB_SPROM1_BINF, SSB_SPROM1_BINF_ANTBG,
+@@ -388,22 +400,16 @@ static void sprom_extract_r123(struct ss
+ SPEX(boardflags_lo, SSB_SPROM1_BFLLO, 0xFFFF, 0);
+ if (out->revision >= 2)
+ SPEX(boardflags_hi, SSB_SPROM2_BFLHI, 0xFFFF, 0);
++ SPEX(alpha2[0], SSB_SPROM1_CCODE, 0xff00, 8);
++ SPEX(alpha2[1], SSB_SPROM1_CCODE, 0x00ff, 0);
+
+ /* Extract the antenna gain values. */
+- gain = r123_extract_antgain(out->revision, in,
+- SSB_SPROM1_AGAIN_BG,
+- SSB_SPROM1_AGAIN_BG_SHIFT);
+- out->antenna_gain.ghz24.a0 = gain;
+- out->antenna_gain.ghz24.a1 = gain;
+- out->antenna_gain.ghz24.a2 = gain;
+- out->antenna_gain.ghz24.a3 = gain;
+- gain = r123_extract_antgain(out->revision, in,
+- SSB_SPROM1_AGAIN_A,
+- SSB_SPROM1_AGAIN_A_SHIFT);
+- out->antenna_gain.ghz5.a0 = gain;
+- out->antenna_gain.ghz5.a1 = gain;
+- out->antenna_gain.ghz5.a2 = gain;
+- out->antenna_gain.ghz5.a3 = gain;
++ out->antenna_gain.a0 = r123_extract_antgain(out->revision, in,
++ SSB_SPROM1_AGAIN_BG,
++ SSB_SPROM1_AGAIN_BG_SHIFT);
++ out->antenna_gain.a1 = r123_extract_antgain(out->revision, in,
++ SSB_SPROM1_AGAIN_A,
++ SSB_SPROM1_AGAIN_A_SHIFT);
+ }
+
+ /* Revs 4 5 and 8 have partially shared layout */
+@@ -464,14 +470,17 @@ static void sprom_extract_r45(struct ssb
+ SPEX(et0phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET0A, 0);
+ SPEX(et1phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET1A,
+ SSB_SPROM4_ETHPHY_ET1A_SHIFT);
++ SPEX(board_rev, SSB_SPROM4_BOARDREV, 0xFFFF, 0);
+ if (out->revision == 4) {
+- SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0);
++ SPEX(alpha2[0], SSB_SPROM4_CCODE, 0xff00, 8);
++ SPEX(alpha2[1], SSB_SPROM4_CCODE, 0x00ff, 0);
+ SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0);
+ SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0);
+ SPEX(boardflags2_lo, SSB_SPROM4_BFL2LO, 0xFFFF, 0);
+ SPEX(boardflags2_hi, SSB_SPROM4_BFL2HI, 0xFFFF, 0);
+ } else {
+- SPEX(country_code, SSB_SPROM5_CCODE, 0xFFFF, 0);
++ SPEX(alpha2[0], SSB_SPROM5_CCODE, 0xff00, 8);
++ SPEX(alpha2[1], SSB_SPROM5_CCODE, 0x00ff, 0);
+ SPEX(boardflags_lo, SSB_SPROM5_BFLLO, 0xFFFF, 0);
+ SPEX(boardflags_hi, SSB_SPROM5_BFLHI, 0xFFFF, 0);
+ SPEX(boardflags2_lo, SSB_SPROM5_BFL2LO, 0xFFFF, 0);
+@@ -504,16 +513,14 @@ static void sprom_extract_r45(struct ssb
+ }
+
+ /* Extract the antenna gain values. */
+- SPEX(antenna_gain.ghz24.a0, SSB_SPROM4_AGAIN01,
++ SPEX(antenna_gain.a0, SSB_SPROM4_AGAIN01,
+ SSB_SPROM4_AGAIN0, SSB_SPROM4_AGAIN0_SHIFT);
+- SPEX(antenna_gain.ghz24.a1, SSB_SPROM4_AGAIN01,
++ SPEX(antenna_gain.a1, SSB_SPROM4_AGAIN01,
+ SSB_SPROM4_AGAIN1, SSB_SPROM4_AGAIN1_SHIFT);
+- SPEX(antenna_gain.ghz24.a2, SSB_SPROM4_AGAIN23,
++ SPEX(antenna_gain.a2, SSB_SPROM4_AGAIN23,
+ SSB_SPROM4_AGAIN2, SSB_SPROM4_AGAIN2_SHIFT);
+- SPEX(antenna_gain.ghz24.a3, SSB_SPROM4_AGAIN23,
++ SPEX(antenna_gain.a3, SSB_SPROM4_AGAIN23,
+ SSB_SPROM4_AGAIN3, SSB_SPROM4_AGAIN3_SHIFT);
+- memcpy(&out->antenna_gain.ghz5, &out->antenna_gain.ghz24,
+- sizeof(out->antenna_gain.ghz5));
+
+ sprom_extract_r458(out, in);
+
+@@ -523,14 +530,22 @@ static void sprom_extract_r45(struct ssb
+ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
+ {
+ int i;
+- u16 v;
++ u16 v, o;
++ u16 pwr_info_offset[] = {
++ SSB_SROM8_PWR_INFO_CORE0, SSB_SROM8_PWR_INFO_CORE1,
++ SSB_SROM8_PWR_INFO_CORE2, SSB_SROM8_PWR_INFO_CORE3
++ };
++ BUILD_BUG_ON(ARRAY_SIZE(pwr_info_offset) !=
++ ARRAY_SIZE(out->core_pwr_info));
+
+ /* extract the MAC address */
+ for (i = 0; i < 3; i++) {
+ v = in[SPOFF(SSB_SPROM8_IL0MAC) + i];
+ *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
+ }
+- SPEX(country_code, SSB_SPROM8_CCODE, 0xFFFF, 0);
++ SPEX(board_rev, SSB_SPROM8_BOARDREV, 0xFFFF, 0);
++ SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8);
++ SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0);
+ SPEX(boardflags_lo, SSB_SPROM8_BFLLO, 0xFFFF, 0);
+ SPEX(boardflags_hi, SSB_SPROM8_BFLHI, 0xFFFF, 0);
+ SPEX(boardflags2_lo, SSB_SPROM8_BFL2LO, 0xFFFF, 0);
+@@ -596,16 +611,46 @@ static void sprom_extract_r8(struct ssb_
+ SPEX32(ofdm5ghpo, SSB_SPROM8_OFDM5GHPO, 0xFFFFFFFF, 0);
+
+ /* Extract the antenna gain values. */
+- SPEX(antenna_gain.ghz24.a0, SSB_SPROM8_AGAIN01,
++ SPEX(antenna_gain.a0, SSB_SPROM8_AGAIN01,
+ SSB_SPROM8_AGAIN0, SSB_SPROM8_AGAIN0_SHIFT);
+- SPEX(antenna_gain.ghz24.a1, SSB_SPROM8_AGAIN01,
++ SPEX(antenna_gain.a1, SSB_SPROM8_AGAIN01,
+ SSB_SPROM8_AGAIN1, SSB_SPROM8_AGAIN1_SHIFT);
+- SPEX(antenna_gain.ghz24.a2, SSB_SPROM8_AGAIN23,
++ SPEX(antenna_gain.a2, SSB_SPROM8_AGAIN23,
+ SSB_SPROM8_AGAIN2, SSB_SPROM8_AGAIN2_SHIFT);
+- SPEX(antenna_gain.ghz24.a3, SSB_SPROM8_AGAIN23,
++ SPEX(antenna_gain.a3, SSB_SPROM8_AGAIN23,
+ SSB_SPROM8_AGAIN3, SSB_SPROM8_AGAIN3_SHIFT);
+- memcpy(&out->antenna_gain.ghz5, &out->antenna_gain.ghz24,
+- sizeof(out->antenna_gain.ghz5));
++
++ /* Extract cores power info info */
++ for (i = 0; i < ARRAY_SIZE(pwr_info_offset); i++) {
++ o = pwr_info_offset[i];
++ SPEX(core_pwr_info[i].itssi_2g, o + SSB_SROM8_2G_MAXP_ITSSI,
++ SSB_SPROM8_2G_ITSSI, SSB_SPROM8_2G_ITSSI_SHIFT);
++ SPEX(core_pwr_info[i].maxpwr_2g, o + SSB_SROM8_2G_MAXP_ITSSI,
++ SSB_SPROM8_2G_MAXP, 0);
++
++ SPEX(core_pwr_info[i].pa_2g[0], o + SSB_SROM8_2G_PA_0, ~0, 0);
++ SPEX(core_pwr_info[i].pa_2g[1], o + SSB_SROM8_2G_PA_1, ~0, 0);
++ SPEX(core_pwr_info[i].pa_2g[2], o + SSB_SROM8_2G_PA_2, ~0, 0);
++
++ SPEX(core_pwr_info[i].itssi_5g, o + SSB_SROM8_5G_MAXP_ITSSI,
++ SSB_SPROM8_5G_ITSSI, SSB_SPROM8_5G_ITSSI_SHIFT);
++ SPEX(core_pwr_info[i].maxpwr_5g, o + SSB_SROM8_5G_MAXP_ITSSI,
++ SSB_SPROM8_5G_MAXP, 0);
++ SPEX(core_pwr_info[i].maxpwr_5gh, o + SSB_SPROM8_5GHL_MAXP,
++ SSB_SPROM8_5GH_MAXP, 0);
++ SPEX(core_pwr_info[i].maxpwr_5gl, o + SSB_SPROM8_5GHL_MAXP,
++ SSB_SPROM8_5GL_MAXP, SSB_SPROM8_5GL_MAXP_SHIFT);
++
++ SPEX(core_pwr_info[i].pa_5gl[0], o + SSB_SROM8_5GL_PA_0, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5gl[1], o + SSB_SROM8_5GL_PA_1, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5gl[2], o + SSB_SROM8_5GL_PA_2, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5g[0], o + SSB_SROM8_5G_PA_0, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5g[1], o + SSB_SROM8_5G_PA_1, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5g[2], o + SSB_SROM8_5G_PA_2, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5gh[0], o + SSB_SROM8_5GH_PA_0, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5gh[1], o + SSB_SROM8_5GH_PA_1, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5gh[2], o + SSB_SROM8_5GH_PA_2, ~0, 0);
++ }
+
+ /* Extract FEM info */
+ SPEX(fem.ghz2.tssipos, SSB_SPROM8_FEM2G,
+@@ -630,6 +675,63 @@ static void sprom_extract_r8(struct ssb_
+ SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
++ SPEX(leddc_on_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_ON,
++ SSB_SPROM8_LEDDC_ON_SHIFT);
++ SPEX(leddc_off_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_OFF,
++ SSB_SPROM8_LEDDC_OFF_SHIFT);
++
++ SPEX(txchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_TXCHAIN,
++ SSB_SPROM8_TXRXC_TXCHAIN_SHIFT);
++ SPEX(rxchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_RXCHAIN,
++ SSB_SPROM8_TXRXC_RXCHAIN_SHIFT);
++ SPEX(antswitch, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_SWITCH,
++ SSB_SPROM8_TXRXC_SWITCH_SHIFT);
++
++ SPEX(opo, SSB_SPROM8_OFDM2GPO, 0x00ff, 0);
++
++ SPEX_ARRAY8(mcs2gpo, SSB_SPROM8_2G_MCSPO, ~0, 0);
++ SPEX_ARRAY8(mcs5gpo, SSB_SPROM8_5G_MCSPO, ~0, 0);
++ SPEX_ARRAY8(mcs5glpo, SSB_SPROM8_5GL_MCSPO, ~0, 0);
++ SPEX_ARRAY8(mcs5ghpo, SSB_SPROM8_5GH_MCSPO, ~0, 0);
++
++ SPEX(rawtempsense, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_RAWTEMP,
++ SSB_SPROM8_RAWTS_RAWTEMP_SHIFT);
++ SPEX(measpower, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_MEASPOWER,
++ SSB_SPROM8_RAWTS_MEASPOWER_SHIFT);
++ SPEX(tempsense_slope, SSB_SPROM8_OPT_CORRX,
++ SSB_SPROM8_OPT_CORRX_TEMP_SLOPE,
++ SSB_SPROM8_OPT_CORRX_TEMP_SLOPE_SHIFT);
++ SPEX(tempcorrx, SSB_SPROM8_OPT_CORRX, SSB_SPROM8_OPT_CORRX_TEMPCORRX,
++ SSB_SPROM8_OPT_CORRX_TEMPCORRX_SHIFT);
++ SPEX(tempsense_option, SSB_SPROM8_OPT_CORRX,
++ SSB_SPROM8_OPT_CORRX_TEMP_OPTION,
++ SSB_SPROM8_OPT_CORRX_TEMP_OPTION_SHIFT);
++ SPEX(freqoffset_corr, SSB_SPROM8_HWIQ_IQSWP,
++ SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR,
++ SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR_SHIFT);
++ SPEX(iqcal_swp_dis, SSB_SPROM8_HWIQ_IQSWP,
++ SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP,
++ SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT);
++ SPEX(hw_iqcal_en, SSB_SPROM8_HWIQ_IQSWP, SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL,
++ SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT);
++
++ SPEX(bw40po, SSB_SPROM8_BW40PO, ~0, 0);
++ SPEX(cddpo, SSB_SPROM8_CDDPO, ~0, 0);
++ SPEX(stbcpo, SSB_SPROM8_STBCPO, ~0, 0);
++ SPEX(bwduppo, SSB_SPROM8_BWDUPPO, ~0, 0);
++
++ SPEX(tempthresh, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_TRESH,
++ SSB_SPROM8_THERMAL_TRESH_SHIFT);
++ SPEX(tempoffset, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_OFFSET,
++ SSB_SPROM8_THERMAL_OFFSET_SHIFT);
++ SPEX(phycal_tempdelta, SSB_SPROM8_TEMPDELTA,
++ SSB_SPROM8_TEMPDELTA_PHYCAL,
++ SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT);
++ SPEX(temps_period, SSB_SPROM8_TEMPDELTA, SSB_SPROM8_TEMPDELTA_PERIOD,
++ SSB_SPROM8_TEMPDELTA_PERIOD_SHIFT);
++ SPEX(temps_hysteresis, SSB_SPROM8_TEMPDELTA,
++ SSB_SPROM8_TEMPDELTA_HYSTERESIS,
++ SSB_SPROM8_TEMPDELTA_HYSTERESIS_SHIFT);
+ sprom_extract_r458(out, in);
+
+ /* TODO - get remaining rev 8 stuff needed */
+@@ -759,7 +861,6 @@ static void ssb_pci_get_boardinfo(struct
+ {
+ bi->vendor = bus->host_pci->subsystem_vendor;
+ bi->type = bus->host_pci->subsystem_device;
+- bi->rev = bus->host_pci->revision;
+ }
+
+ int ssb_pci_get_invariants(struct ssb_bus *bus,
+--- a/drivers/ssb/pcmcia.c
++++ b/drivers/ssb/pcmcia.c
+@@ -676,14 +676,10 @@ static int ssb_pcmcia_do_get_invariants(
+ case SSB_PCMCIA_CIS_ANTGAIN:
+ GOTO_ERROR_ON(tuple->TupleDataLen != 2,
+ "antg tpl size");
+- sprom->antenna_gain.ghz24.a0 = tuple->TupleData[1];
+- sprom->antenna_gain.ghz24.a1 = tuple->TupleData[1];
+- sprom->antenna_gain.ghz24.a2 = tuple->TupleData[1];
+- sprom->antenna_gain.ghz24.a3 = tuple->TupleData[1];
+- sprom->antenna_gain.ghz5.a0 = tuple->TupleData[1];
+- sprom->antenna_gain.ghz5.a1 = tuple->TupleData[1];
+- sprom->antenna_gain.ghz5.a2 = tuple->TupleData[1];
+- sprom->antenna_gain.ghz5.a3 = tuple->TupleData[1];
++ sprom->antenna_gain.a0 = tuple->TupleData[1];
++ sprom->antenna_gain.a1 = tuple->TupleData[1];
++ sprom->antenna_gain.a2 = tuple->TupleData[1];
++ sprom->antenna_gain.a3 = tuple->TupleData[1];
+ break;
+ case SSB_PCMCIA_CIS_BFLAGS:
+ GOTO_ERROR_ON((tuple->TupleDataLen != 3) &&
+--- a/drivers/ssb/scan.c
++++ b/drivers/ssb/scan.c
+@@ -90,6 +90,8 @@ const char *ssb_core_name(u16 coreid)
+ return "ARM 1176";
+ case SSB_DEV_ARM_7TDMI:
+ return "ARM 7TDMI";
++ case SSB_DEV_ARM_CM3:
++ return "ARM Cortex M3";
+ }
+ return "UNKNOWN";
+ }
+@@ -318,6 +320,9 @@ int ssb_bus_scan(struct ssb_bus *bus,
+ bus->chip_package = 0;
+ }
+ }
++ ssb_printk(KERN_INFO PFX "Found chip with id 0x%04X, rev 0x%02X and "
++ "package 0x%02X\n", bus->chip_id, bus->chip_rev,
++ bus->chip_package);
+ if (!bus->nr_devices)
+ bus->nr_devices = chipid_to_nrcores(bus->chip_id);
+ if (bus->nr_devices > ARRAY_SIZE(bus->devices)) {
+--- a/drivers/ssb/sdio.c
++++ b/drivers/ssb/sdio.c
+@@ -551,14 +551,10 @@ int ssb_sdio_get_invariants(struct ssb_b
+ case SSB_SDIO_CIS_ANTGAIN:
+ GOTO_ERROR_ON(tuple->size != 2,
+ "antg tpl size");
+- sprom->antenna_gain.ghz24.a0 = tuple->data[1];
+- sprom->antenna_gain.ghz24.a1 = tuple->data[1];
+- sprom->antenna_gain.ghz24.a2 = tuple->data[1];
+- sprom->antenna_gain.ghz24.a3 = tuple->data[1];
+- sprom->antenna_gain.ghz5.a0 = tuple->data[1];
+- sprom->antenna_gain.ghz5.a1 = tuple->data[1];
+- sprom->antenna_gain.ghz5.a2 = tuple->data[1];
+- sprom->antenna_gain.ghz5.a3 = tuple->data[1];
++ sprom->antenna_gain.a0 = tuple->data[1];
++ sprom->antenna_gain.a1 = tuple->data[1];
++ sprom->antenna_gain.a2 = tuple->data[1];
++ sprom->antenna_gain.a3 = tuple->data[1];
+ break;
+ case SSB_SDIO_CIS_BFLAGS:
+ GOTO_ERROR_ON((tuple->size != 3) &&
+--- a/drivers/ssb/ssb_private.h
++++ b/drivers/ssb/ssb_private.h
+@@ -207,4 +207,8 @@ static inline void b43_pci_ssb_bridge_ex
+ }
+ #endif /* CONFIG_SSB_B43_PCI_BRIDGE */
+
++/* driver_chipcommon_pmu.c */
++extern u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc);
++extern u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc);
++
+ #endif /* LINUX_SSB_PRIVATE_H_ */
+--- a/include/linux/ssb/ssb.h
++++ b/include/linux/ssb/ssb.h
+@@ -16,6 +16,12 @@ struct pcmcia_device;
+ struct ssb_bus;
+ struct ssb_driver;
+
++struct ssb_sprom_core_pwr_info {
++ u8 itssi_2g, itssi_5g;
++ u8 maxpwr_2g, maxpwr_5gl, maxpwr_5g, maxpwr_5gh;
++ u16 pa_2g[4], pa_5gl[4], pa_5g[4], pa_5gh[4];
++};
++
+ struct ssb_sprom {
+ u8 revision;
+ u8 il0mac[6]; /* MAC address for 802.11b/g */
+@@ -26,9 +32,12 @@ struct ssb_sprom {
+ u8 et0mdcport; /* MDIO for enet0 */
+ u8 et1mdcport; /* MDIO for enet1 */
+ u16 board_rev; /* Board revision number from SPROM. */
++ u16 board_num; /* Board number from SPROM. */
++ u16 board_type; /* Board type from SPROM. */
+ u8 country_code; /* Country Code */
+- u16 leddc_on_time; /* LED Powersave Duty Cycle On Count */
+- u16 leddc_off_time; /* LED Powersave Duty Cycle Off Count */
++ char alpha2[2]; /* Country Code as two chars like EU or US */
++ u8 leddc_on_time; /* LED Powersave Duty Cycle On Count */
++ u8 leddc_off_time; /* LED Powersave Duty Cycle Off Count */
+ u8 ant_available_a; /* 2GHz antenna available bits (up to 4) */
+ u8 ant_available_bg; /* 5GHz antenna available bits (up to 4) */
+ u16 pa0b0;
+@@ -47,10 +56,10 @@ struct ssb_sprom {
+ u8 gpio1; /* GPIO pin 1 */
+ u8 gpio2; /* GPIO pin 2 */
+ u8 gpio3; /* GPIO pin 3 */
+- u16 maxpwr_bg; /* 2.4GHz Amplifier Max Power (in dBm Q5.2) */
+- u16 maxpwr_al; /* 5.2GHz Amplifier Max Power (in dBm Q5.2) */
+- u16 maxpwr_a; /* 5.3GHz Amplifier Max Power (in dBm Q5.2) */
+- u16 maxpwr_ah; /* 5.8GHz Amplifier Max Power (in dBm Q5.2) */
++ u8 maxpwr_bg; /* 2.4GHz Amplifier Max Power (in dBm Q5.2) */
++ u8 maxpwr_al; /* 5.2GHz Amplifier Max Power (in dBm Q5.2) */
++ u8 maxpwr_a; /* 5.3GHz Amplifier Max Power (in dBm Q5.2) */
++ u8 maxpwr_ah; /* 5.8GHz Amplifier Max Power (in dBm Q5.2) */
+ u8 itssi_a; /* Idle TSSI Target for A-PHY */
+ u8 itssi_bg; /* Idle TSSI Target for B/G-PHY */
+ u8 tri2g; /* 2.4GHz TX isolation */
+@@ -61,8 +70,8 @@ struct ssb_sprom {
+ u8 txpid5gl[4]; /* 4.9 - 5.1GHz TX power index */
+ u8 txpid5g[4]; /* 5.1 - 5.5GHz TX power index */
+ u8 txpid5gh[4]; /* 5.5 - ...GHz TX power index */
+- u8 rxpo2g; /* 2GHz RX power offset */
+- u8 rxpo5g; /* 5GHz RX power offset */
++ s8 rxpo2g; /* 2GHz RX power offset */
++ s8 rxpo5g; /* 5GHz RX power offset */
+ u8 rssisav2g; /* 2GHz RSSI params */
+ u8 rssismc2g;
+ u8 rssismf2g;
+@@ -82,16 +91,13 @@ struct ssb_sprom {
+ u16 boardflags2_hi; /* Board flags (bits 48-63) */
+ /* TODO store board flags in a single u64 */
+
++ struct ssb_sprom_core_pwr_info core_pwr_info[4];
++
+ /* Antenna gain values for up to 4 antennas
+ * on each band. Values in dBm/4 (Q5.2). Negative gain means the
+ * loss in the connectors is bigger than the gain. */
+ struct {
+- struct {
+- s8 a0, a1, a2, a3;
+- } ghz24; /* 2.4GHz band */
+- struct {
+- s8 a0, a1, a2, a3;
+- } ghz5; /* 5GHz band */
++ s8 a0, a1, a2, a3;
+ } antenna_gain;
+
+ struct {
+@@ -103,14 +109,85 @@ struct ssb_sprom {
+ } ghz5;
+ } fem;
+
+- /* TODO - add any parameters needed from rev 2, 3, 4, 5 or 8 SPROMs */
++ u16 mcs2gpo[8];
++ u16 mcs5gpo[8];
++ u16 mcs5glpo[8];
++ u16 mcs5ghpo[8];
++ u8 opo;
++
++ u8 rxgainerr2ga[3];
++ u8 rxgainerr5gla[3];
++ u8 rxgainerr5gma[3];
++ u8 rxgainerr5gha[3];
++ u8 rxgainerr5gua[3];
++
++ u8 noiselvl2ga[3];
++ u8 noiselvl5gla[3];
++ u8 noiselvl5gma[3];
++ u8 noiselvl5gha[3];
++ u8 noiselvl5gua[3];
++
++ u8 regrev;
++ u8 txchain;
++ u8 rxchain;
++ u8 antswitch;
++ u16 cddpo;
++ u16 stbcpo;
++ u16 bw40po;
++ u16 bwduppo;
++
++ u8 tempthresh;
++ u8 tempoffset;
++ u16 rawtempsense;
++ u8 measpower;
++ u8 tempsense_slope;
++ u8 tempcorrx;
++ u8 tempsense_option;
++ u8 freqoffset_corr;
++ u8 iqcal_swp_dis;
++ u8 hw_iqcal_en;
++ u8 elna2g;
++ u8 elna5g;
++ u8 phycal_tempdelta;
++ u8 temps_period;
++ u8 temps_hysteresis;
++ u8 measpower1;
++ u8 measpower2;
++ u8 pcieingress_war;
++
++ /* power per rate from sromrev 9 */
++ u16 cckbw202gpo;
++ u16 cckbw20ul2gpo;
++ u32 legofdmbw202gpo;
++ u32 legofdmbw20ul2gpo;
++ u32 legofdmbw205glpo;
++ u32 legofdmbw20ul5glpo;
++ u32 legofdmbw205gmpo;
++ u32 legofdmbw20ul5gmpo;
++ u32 legofdmbw205ghpo;
++ u32 legofdmbw20ul5ghpo;
++ u32 mcsbw202gpo;
++ u32 mcsbw20ul2gpo;
++ u32 mcsbw402gpo;
++ u32 mcsbw205glpo;
++ u32 mcsbw20ul5glpo;
++ u32 mcsbw405glpo;
++ u32 mcsbw205gmpo;
++ u32 mcsbw20ul5gmpo;
++ u32 mcsbw405gmpo;
++ u32 mcsbw205ghpo;
++ u32 mcsbw20ul5ghpo;
++ u32 mcsbw405ghpo;
++ u16 mcs32po;
++ u16 legofdm40duppo;
++ u8 sar2g;
++ u8 sar5g;
+ };
+
+ /* Information about the PCB the circuitry is soldered on. */
+ struct ssb_boardinfo {
+ u16 vendor;
+ u16 type;
+- u8 rev;
+ };
+
+
+@@ -166,6 +243,7 @@ struct ssb_bus_ops {
+ #define SSB_DEV_MINI_MACPHY 0x823
+ #define SSB_DEV_ARM_1176 0x824
+ #define SSB_DEV_ARM_7TDMI 0x825
++#define SSB_DEV_ARM_CM3 0x82A
+
+ /* Vendor-ID values */
+ #define SSB_VENDOR_BROADCOM 0x4243
+--- a/include/linux/ssb/ssb_driver_gige.h
++++ b/include/linux/ssb/ssb_driver_gige.h
+@@ -2,6 +2,7 @@
+ #define LINUX_SSB_DRIVER_GIGE_H_
+
+ #include <linux/ssb/ssb.h>
++#include <linux/bug.h>
+ #include <linux/pci.h>
+ #include <linux/spinlock.h>
+
+--- a/include/linux/ssb/ssb_regs.h
++++ b/include/linux/ssb/ssb_regs.h
+@@ -228,6 +228,7 @@
+ #define SSB_SPROM1_AGAIN_BG_SHIFT 0
+ #define SSB_SPROM1_AGAIN_A 0xFF00 /* A-PHY */
+ #define SSB_SPROM1_AGAIN_A_SHIFT 8
++#define SSB_SPROM1_CCODE 0x0076
+
+ /* SPROM Revision 2 (inherits from rev 1) */
+ #define SSB_SPROM2_BFLHI 0x0038 /* Boardflags (high 16 bits) */
+@@ -267,6 +268,7 @@
+ #define SSB_SPROM3_OFDMGPO 0x107A /* G-PHY OFDM Power Offset (4 bytes, BigEndian) */
+
+ /* SPROM Revision 4 */
++#define SSB_SPROM4_BOARDREV 0x0042 /* Board revision */
+ #define SSB_SPROM4_BFLLO 0x0044 /* Boardflags (low 16 bits) */
+ #define SSB_SPROM4_BFLHI 0x0046 /* Board Flags Hi */
+ #define SSB_SPROM4_BFL2LO 0x0048 /* Board flags 2 (low 16 bits) */
+@@ -389,6 +391,11 @@
+ #define SSB_SPROM8_GPIOB_P2 0x00FF /* Pin 2 */
+ #define SSB_SPROM8_GPIOB_P3 0xFF00 /* Pin 3 */
+ #define SSB_SPROM8_GPIOB_P3_SHIFT 8
++#define SSB_SPROM8_LEDDC 0x009A
++#define SSB_SPROM8_LEDDC_ON 0xFF00 /* oncount */
++#define SSB_SPROM8_LEDDC_ON_SHIFT 8
++#define SSB_SPROM8_LEDDC_OFF 0x00FF /* offcount */
++#define SSB_SPROM8_LEDDC_OFF_SHIFT 0
+ #define SSB_SPROM8_ANTAVAIL 0x009C /* Antenna available bitfields*/
+ #define SSB_SPROM8_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */
+ #define SSB_SPROM8_ANTAVAIL_A_SHIFT 8
+@@ -404,6 +411,13 @@
+ #define SSB_SPROM8_AGAIN2_SHIFT 0
+ #define SSB_SPROM8_AGAIN3 0xFF00 /* Antenna 3 */
+ #define SSB_SPROM8_AGAIN3_SHIFT 8
++#define SSB_SPROM8_TXRXC 0x00A2
++#define SSB_SPROM8_TXRXC_TXCHAIN 0x000f
++#define SSB_SPROM8_TXRXC_TXCHAIN_SHIFT 0
++#define SSB_SPROM8_TXRXC_RXCHAIN 0x00f0
++#define SSB_SPROM8_TXRXC_RXCHAIN_SHIFT 4
++#define SSB_SPROM8_TXRXC_SWITCH 0xff00
++#define SSB_SPROM8_TXRXC_SWITCH_SHIFT 8
+ #define SSB_SPROM8_RSSIPARM2G 0x00A4 /* RSSI params for 2GHz */
+ #define SSB_SPROM8_RSSISMF2G 0x000F
+ #define SSB_SPROM8_RSSISMC2G 0x00F0
+@@ -430,6 +444,7 @@
+ #define SSB_SPROM8_TRI5GH_SHIFT 8
+ #define SSB_SPROM8_RXPO 0x00AC /* RX power offsets */
+ #define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */
++#define SSB_SPROM8_RXPO2G_SHIFT 0
+ #define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */
+ #define SSB_SPROM8_RXPO5G_SHIFT 8
+ #define SSB_SPROM8_FEM2G 0x00AE
+@@ -445,10 +460,71 @@
+ #define SSB_SROM8_FEM_ANTSWLUT 0xF800
+ #define SSB_SROM8_FEM_ANTSWLUT_SHIFT 11
+ #define SSB_SPROM8_THERMAL 0x00B2
+-#define SSB_SPROM8_MPWR_RAWTS 0x00B4
+-#define SSB_SPROM8_TS_SLP_OPT_CORRX 0x00B6
+-#define SSB_SPROM8_FOC_HWIQ_IQSWP 0x00B8
+-#define SSB_SPROM8_PHYCAL_TEMPDELTA 0x00BA
++#define SSB_SPROM8_THERMAL_OFFSET 0x00ff
++#define SSB_SPROM8_THERMAL_OFFSET_SHIFT 0
++#define SSB_SPROM8_THERMAL_TRESH 0xff00
++#define SSB_SPROM8_THERMAL_TRESH_SHIFT 8
++/* Temp sense related entries */
++#define SSB_SPROM8_RAWTS 0x00B4
++#define SSB_SPROM8_RAWTS_RAWTEMP 0x01ff
++#define SSB_SPROM8_RAWTS_RAWTEMP_SHIFT 0
++#define SSB_SPROM8_RAWTS_MEASPOWER 0xfe00
++#define SSB_SPROM8_RAWTS_MEASPOWER_SHIFT 9
++#define SSB_SPROM8_OPT_CORRX 0x00B6
++#define SSB_SPROM8_OPT_CORRX_TEMP_SLOPE 0x00ff
++#define SSB_SPROM8_OPT_CORRX_TEMP_SLOPE_SHIFT 0
++#define SSB_SPROM8_OPT_CORRX_TEMPCORRX 0xfc00
++#define SSB_SPROM8_OPT_CORRX_TEMPCORRX_SHIFT 10
++#define SSB_SPROM8_OPT_CORRX_TEMP_OPTION 0x0300
++#define SSB_SPROM8_OPT_CORRX_TEMP_OPTION_SHIFT 8
++/* FOC: freiquency offset correction, HWIQ: H/W IOCAL enable, IQSWP: IQ CAL swap disable */
++#define SSB_SPROM8_HWIQ_IQSWP 0x00B8
++#define SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR 0x000f
++#define SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR_SHIFT 0
++#define SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP 0x0010
++#define SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT 4
++#define SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL 0x0020
++#define SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT 5
++#define SSB_SPROM8_TEMPDELTA 0x00BA
++#define SSB_SPROM8_TEMPDELTA_PHYCAL 0x00ff
++#define SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT 0
++#define SSB_SPROM8_TEMPDELTA_PERIOD 0x0f00
++#define SSB_SPROM8_TEMPDELTA_PERIOD_SHIFT 8
++#define SSB_SPROM8_TEMPDELTA_HYSTERESIS 0xf000
++#define SSB_SPROM8_TEMPDELTA_HYSTERESIS_SHIFT 12
++
++/* There are 4 blocks with power info sharing the same layout */
++#define SSB_SROM8_PWR_INFO_CORE0 0x00C0
++#define SSB_SROM8_PWR_INFO_CORE1 0x00E0
++#define SSB_SROM8_PWR_INFO_CORE2 0x0100
++#define SSB_SROM8_PWR_INFO_CORE3 0x0120
++
++#define SSB_SROM8_2G_MAXP_ITSSI 0x00
++#define SSB_SPROM8_2G_MAXP 0x00FF
++#define SSB_SPROM8_2G_ITSSI 0xFF00
++#define SSB_SPROM8_2G_ITSSI_SHIFT 8
++#define SSB_SROM8_2G_PA_0 0x02 /* 2GHz power amp settings */
++#define SSB_SROM8_2G_PA_1 0x04
++#define SSB_SROM8_2G_PA_2 0x06
++#define SSB_SROM8_5G_MAXP_ITSSI 0x08 /* 5GHz ITSSI and 5.3GHz Max Power */
++#define SSB_SPROM8_5G_MAXP 0x00FF
++#define SSB_SPROM8_5G_ITSSI 0xFF00
++#define SSB_SPROM8_5G_ITSSI_SHIFT 8
++#define SSB_SPROM8_5GHL_MAXP 0x0A /* 5.2GHz and 5.8GHz Max Power */
++#define SSB_SPROM8_5GH_MAXP 0x00FF
++#define SSB_SPROM8_5GL_MAXP 0xFF00
++#define SSB_SPROM8_5GL_MAXP_SHIFT 8
++#define SSB_SROM8_5G_PA_0 0x0C /* 5.3GHz power amp settings */
++#define SSB_SROM8_5G_PA_1 0x0E
++#define SSB_SROM8_5G_PA_2 0x10
++#define SSB_SROM8_5GL_PA_0 0x12 /* 5.2GHz power amp settings */
++#define SSB_SROM8_5GL_PA_1 0x14
++#define SSB_SROM8_5GL_PA_2 0x16
++#define SSB_SROM8_5GH_PA_0 0x18 /* 5.8GHz power amp settings */
++#define SSB_SROM8_5GH_PA_1 0x1A
++#define SSB_SROM8_5GH_PA_2 0x1C
++
++/* TODO: Make it deprecated */
+ #define SSB_SPROM8_MAXP_BG 0x00C0 /* Max Power 2GHz in path 1 */
+ #define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */
+ #define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */
+@@ -473,12 +549,23 @@
+ #define SSB_SPROM8_PA1HIB0 0x00D8 /* 5.8GHz power amp settings */
+ #define SSB_SPROM8_PA1HIB1 0x00DA
+ #define SSB_SPROM8_PA1HIB2 0x00DC
++
+ #define SSB_SPROM8_CCK2GPO 0x0140 /* CCK power offset */
+ #define SSB_SPROM8_OFDM2GPO 0x0142 /* 2.4GHz OFDM power offset */
+ #define SSB_SPROM8_OFDM5GPO 0x0146 /* 5.3GHz OFDM power offset */
+ #define SSB_SPROM8_OFDM5GLPO 0x014A /* 5.2GHz OFDM power offset */
+ #define SSB_SPROM8_OFDM5GHPO 0x014E /* 5.8GHz OFDM power offset */
+
++#define SSB_SPROM8_2G_MCSPO 0x0152
++#define SSB_SPROM8_5G_MCSPO 0x0162
++#define SSB_SPROM8_5GL_MCSPO 0x0172
++#define SSB_SPROM8_5GH_MCSPO 0x0182
++
++#define SSB_SPROM8_CDDPO 0x0192
++#define SSB_SPROM8_STBCPO 0x0194
++#define SSB_SPROM8_BW40PO 0x0196
++#define SSB_SPROM8_BWDUPPO 0x0198
++
+ /* Values for boardflags_lo read from SPROM */
+ #define SSB_BFL_BTCOEXIST 0x0001 /* implements Bluetooth coexistance */
+ #define SSB_BFL_PACTRL 0x0002 /* GPIO 9 controlling the PA */
diff --git a/target/linux/generic/patches-3.3/025-bcma_backport.patch b/target/linux/generic/patches-3.3/025-bcma_backport.patch
new file mode 100644
index 000000000..29aaa2958
--- /dev/null
+++ b/target/linux/generic/patches-3.3/025-bcma_backport.patch
@@ -0,0 +1,3330 @@
+--- a/drivers/bcma/Kconfig
++++ b/drivers/bcma/Kconfig
+@@ -29,7 +29,7 @@ config BCMA_HOST_PCI
+
+ config BCMA_DRIVER_PCI_HOSTMODE
+ bool "Driver for PCI core working in hostmode"
+- depends on BCMA && MIPS
++ depends on BCMA && MIPS && BCMA_HOST_PCI
+ help
+ PCI core hostmode operation (external PCI bus).
+
+@@ -46,6 +46,15 @@ config BCMA_DRIVER_MIPS
+
+ If unsure, say N
+
++config BCMA_DRIVER_GMAC_CMN
++ bool "BCMA Broadcom GBIT MAC COMMON core driver"
++ depends on BCMA
++ help
++ Driver for the Broadcom GBIT MAC COMMON core attached to Broadcom
++ specific Advanced Microcontroller Bus.
++
++ If unsure, say N
++
+ config BCMA_DEBUG
+ bool "BCMA debugging"
+ depends on BCMA
+--- a/drivers/bcma/Makefile
++++ b/drivers/bcma/Makefile
+@@ -3,6 +3,7 @@ bcma-y += driver_chipcommon.o driver
+ bcma-y += driver_pci.o
+ bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o
+ bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o
++bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN) += driver_gmac_cmn.o
+ bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o
+ bcma-$(CONFIG_BCMA_HOST_SOC) += host_soc.o
+ obj-$(CONFIG_BCMA) += bcma.o
+--- a/drivers/bcma/bcma_private.h
++++ b/drivers/bcma/bcma_private.h
+@@ -10,10 +10,19 @@
+
+ #define BCMA_CORE_SIZE 0x1000
+
++#define bcma_err(bus, fmt, ...) \
++ pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
++#define bcma_warn(bus, fmt, ...) \
++ pr_warn("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
++#define bcma_info(bus, fmt, ...) \
++ pr_info("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
++#define bcma_debug(bus, fmt, ...) \
++ pr_debug("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
++
+ struct bcma_bus;
+
+ /* main.c */
+-int bcma_bus_register(struct bcma_bus *bus);
++int __devinit bcma_bus_register(struct bcma_bus *bus);
+ void bcma_bus_unregister(struct bcma_bus *bus);
+ int __init bcma_bus_early_register(struct bcma_bus *bus,
+ struct bcma_device *core_cc,
+@@ -48,8 +57,12 @@ extern int __init bcma_host_pci_init(voi
+ extern void __exit bcma_host_pci_exit(void);
+ #endif /* CONFIG_BCMA_HOST_PCI */
+
++/* driver_pci.c */
++u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address);
++
+ #ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
+-void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
++bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc);
++void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
+ #endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
+
+ #endif
+--- a/drivers/bcma/core.c
++++ b/drivers/bcma/core.c
+@@ -30,6 +30,7 @@ void bcma_core_disable(struct bcma_devic
+ udelay(10);
+
+ bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
++ bcma_aread32(core, BCMA_RESET_CTL);
+ udelay(1);
+ }
+ EXPORT_SYMBOL_GPL(bcma_core_disable);
+@@ -74,10 +75,10 @@ void bcma_core_set_clockmode(struct bcma
+ udelay(10);
+ }
+ if (i)
+- pr_err("HT force timeout\n");
++ bcma_err(core->bus, "HT force timeout\n");
+ break;
+ case BCMA_CLKMODE_DYNAMIC:
+- pr_warn("Dynamic clockmode not supported yet!\n");
++ bcma_set32(core, BCMA_CLKCTLST, ~BCMA_CLKCTLST_FORCEHT);
+ break;
+ }
+ }
+@@ -101,9 +102,9 @@ void bcma_core_pll_ctl(struct bcma_devic
+ udelay(10);
+ }
+ if (i)
+- pr_err("PLL enable timeout\n");
++ bcma_err(core->bus, "PLL enable timeout\n");
+ } else {
+- pr_warn("Disabling PLL not supported yet!\n");
++ bcma_warn(core->bus, "Disabling PLL not supported yet!\n");
+ }
+ }
+ EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
+@@ -119,8 +120,8 @@ u32 bcma_core_dma_translation(struct bcm
+ else
+ return BCMA_DMA_TRANSLATION_DMA32_CMT;
+ default:
+- pr_err("DMA translation unknown for host %d\n",
+- core->bus->hosttype);
++ bcma_err(core->bus, "DMA translation unknown for host %d\n",
++ core->bus->hosttype);
+ }
+ return BCMA_DMA_TRANSLATION_NONE;
+ }
+--- a/drivers/bcma/driver_chipcommon.c
++++ b/drivers/bcma/driver_chipcommon.c
+@@ -44,7 +44,7 @@ void bcma_core_chipcommon_init(struct bc
+ if (cc->capabilities & BCMA_CC_CAP_PMU)
+ bcma_pmu_init(cc);
+ if (cc->capabilities & BCMA_CC_CAP_PCTL)
+- pr_err("Power control not implemented!\n");
++ bcma_err(cc->core->bus, "Power control not implemented!\n");
+
+ if (cc->core->id.rev >= 16) {
+ if (cc->core->bus->sprom.leddc_on_time &&
+@@ -137,8 +137,7 @@ void bcma_chipco_serial_init(struct bcma
+ | BCMA_CC_CORECTL_UARTCLKEN);
+ }
+ } else {
+- pr_err("serial not supported on this device ccrev: 0x%x\n",
+- ccrev);
++ bcma_err(cc->core->bus, "serial not supported on this device ccrev: 0x%x\n", ccrev);
+ return;
+ }
+
+--- a/drivers/bcma/driver_chipcommon_pmu.c
++++ b/drivers/bcma/driver_chipcommon_pmu.c
+@@ -3,7 +3,8 @@
+ * ChipCommon Power Management Unit driver
+ *
+ * Copyright 2009, Michael Buesch <m@bues.ch>
+- * Copyright 2007, Broadcom Corporation
++ * Copyright 2007, 2011, Broadcom Corporation
++ * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+@@ -54,38 +55,19 @@ void bcma_chipco_regctl_maskset(struct b
+ }
+ EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
+
+-static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
+-{
+- struct bcma_bus *bus = cc->core->bus;
+-
+- switch (bus->chipinfo.id) {
+- case 0x4313:
+- case 0x4331:
+- case 43224:
+- case 43225:
+- break;
+- default:
+- pr_err("PLL init unknown for device 0x%04X\n",
+- bus->chipinfo.id);
+- }
+-}
+-
+ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
+ {
+ struct bcma_bus *bus = cc->core->bus;
+ u32 min_msk = 0, max_msk = 0;
+
+ switch (bus->chipinfo.id) {
+- case 0x4313:
++ case BCMA_CHIP_ID_BCM4313:
+ min_msk = 0x200D;
+ max_msk = 0xFFFF;
+ break;
+- case 43224:
+- case 43225:
+- break;
+ default:
+- pr_err("PMU resource config unknown for device 0x%04X\n",
+- bus->chipinfo.id);
++ bcma_debug(bus, "PMU resource config unknown or not needed for device 0x%04X\n",
++ bus->chipinfo.id);
+ }
+
+ /* Set the resource masks. */
+@@ -93,22 +75,9 @@ static void bcma_pmu_resources_init(stru
+ bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
+ if (max_msk)
+ bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
+-}
+-
+-void bcma_pmu_swreg_init(struct bcma_drv_cc *cc)
+-{
+- struct bcma_bus *bus = cc->core->bus;
+
+- switch (bus->chipinfo.id) {
+- case 0x4313:
+- case 0x4331:
+- case 43224:
+- case 43225:
+- break;
+- default:
+- pr_err("PMU switch/regulators init unknown for device "
+- "0x%04X\n", bus->chipinfo.id);
+- }
++ /* Add some delay; allow resources to come up and settle. */
++ mdelay(2);
+ }
+
+ /* Disable to allow reading SPROM. Don't know the adventages of enabling it. */
+@@ -122,8 +91,11 @@ void bcma_chipco_bcm4331_ext_pa_lines_ct
+ val |= BCMA_CHIPCTL_4331_EXTPA_EN;
+ if (bus->chipinfo.pkg == 9 || bus->chipinfo.pkg == 11)
+ val |= BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
++ else if (bus->chipinfo.rev > 0)
++ val |= BCMA_CHIPCTL_4331_EXTPA_EN2;
+ } else {
+ val &= ~BCMA_CHIPCTL_4331_EXTPA_EN;
++ val &= ~BCMA_CHIPCTL_4331_EXTPA_EN2;
+ val &= ~BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
+ }
+ bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
+@@ -134,26 +106,38 @@ void bcma_pmu_workarounds(struct bcma_dr
+ struct bcma_bus *bus = cc->core->bus;
+
+ switch (bus->chipinfo.id) {
+- case 0x4313:
+- bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
++ case BCMA_CHIP_ID_BCM4313:
++ /* enable 12 mA drive strenth for 4313 and set chipControl
++ register bit 1 */
++ bcma_chipco_chipctl_maskset(cc, 0,
++ BCMA_CCTRL_4313_12MA_LED_DRIVE,
++ BCMA_CCTRL_4313_12MA_LED_DRIVE);
+ break;
+- case 0x4331:
+- /* BCM4331 workaround is SPROM-related, we put it in sprom.c */
++ case BCMA_CHIP_ID_BCM4331:
++ case BCMA_CHIP_ID_BCM43431:
++ /* Ext PA lines must be enabled for tx on BCM4331 */
++ bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
+ break;
+- case 43224:
++ case BCMA_CHIP_ID_BCM43224:
++ case BCMA_CHIP_ID_BCM43421:
++ /* enable 12 mA drive strenth for 43224 and set chipControl
++ register bit 15 */
+ if (bus->chipinfo.rev == 0) {
+- pr_err("Workarounds for 43224 rev 0 not fully "
+- "implemented\n");
+- bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x00F000F0);
++ bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL,
++ BCMA_CCTRL_43224_GPIO_TOGGLE,
++ BCMA_CCTRL_43224_GPIO_TOGGLE);
++ bcma_chipco_chipctl_maskset(cc, 0,
++ BCMA_CCTRL_43224A0_12MA_LED_DRIVE,
++ BCMA_CCTRL_43224A0_12MA_LED_DRIVE);
+ } else {
+- bcma_chipco_chipctl_maskset(cc, 0, ~0, 0xF0);
++ bcma_chipco_chipctl_maskset(cc, 0,
++ BCMA_CCTRL_43224B0_12MA_LED_DRIVE,
++ BCMA_CCTRL_43224B0_12MA_LED_DRIVE);
+ }
+ break;
+- case 43225:
+- break;
+ default:
+- pr_err("Workarounds unknown for device 0x%04X\n",
+- bus->chipinfo.id);
++ bcma_debug(bus, "Workarounds unknown or not needed for device 0x%04X\n",
++ bus->chipinfo.id);
+ }
+ }
+
+@@ -164,8 +148,8 @@ void bcma_pmu_init(struct bcma_drv_cc *c
+ pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP);
+ cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION);
+
+- pr_debug("Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev,
+- pmucap);
++ bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n",
++ cc->pmu.rev, pmucap);
+
+ if (cc->pmu.rev == 1)
+ bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
+@@ -174,12 +158,7 @@ void bcma_pmu_init(struct bcma_drv_cc *c
+ bcma_cc_set32(cc, BCMA_CC_PMU_CTL,
+ BCMA_CC_PMU_CTL_NOILPONW);
+
+- if (cc->core->id.id == 0x4329 && cc->core->id.rev == 2)
+- pr_err("Fix for 4329b0 bad LPOM state not implemented!\n");
+-
+- bcma_pmu_pll_init(cc);
+ bcma_pmu_resources_init(cc);
+- bcma_pmu_swreg_init(cc);
+ bcma_pmu_workarounds(cc);
+ }
+
+@@ -188,23 +167,22 @@ u32 bcma_pmu_alp_clock(struct bcma_drv_c
+ struct bcma_bus *bus = cc->core->bus;
+
+ switch (bus->chipinfo.id) {
+- case 0x4716:
+- case 0x4748:
+- case 47162:
+- case 0x4313:
+- case 0x5357:
+- case 0x4749:
+- case 53572:
++ case BCMA_CHIP_ID_BCM4716:
++ case BCMA_CHIP_ID_BCM4748:
++ case BCMA_CHIP_ID_BCM47162:
++ case BCMA_CHIP_ID_BCM4313:
++ case BCMA_CHIP_ID_BCM5357:
++ case BCMA_CHIP_ID_BCM4749:
++ case BCMA_CHIP_ID_BCM53572:
+ /* always 20Mhz */
+ return 20000 * 1000;
+- case 0x5356:
+- case 0x5300:
++ case BCMA_CHIP_ID_BCM5356:
++ case BCMA_CHIP_ID_BCM4706:
+ /* always 25Mhz */
+ return 25000 * 1000;
+ default:
+- pr_warn("No ALP clock specified for %04X device, "
+- "pmu rev. %d, using default %d Hz\n",
+- bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
++ bcma_warn(bus, "No ALP clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
++ bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
+ }
+ return BCMA_CC_PMU_ALP_CLOCK;
+ }
+@@ -221,7 +199,8 @@ static u32 bcma_pmu_clock(struct bcma_dr
+
+ BUG_ON(!m || m > 4);
+
+- if (bus->chipinfo.id == 0x5357 || bus->chipinfo.id == 0x4749) {
++ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
++ bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) {
+ /* Detect failure in clock setting */
+ tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
+ if (tmp & 0x40000)
+@@ -247,33 +226,62 @@ static u32 bcma_pmu_clock(struct bcma_dr
+ return (fc / div) * 1000000;
+ }
+
++static u32 bcma_pmu_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
++{
++ u32 tmp, ndiv, p1div, p2div;
++ u32 clock;
++
++ BUG_ON(!m || m > 4);
++
++ /* Get N, P1 and P2 dividers to determine CPU clock */
++ tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PMU6_4706_PROCPLL_OFF);
++ ndiv = (tmp & BCMA_CC_PMU6_4706_PROC_NDIV_INT_MASK)
++ >> BCMA_CC_PMU6_4706_PROC_NDIV_INT_SHIFT;
++ p1div = (tmp & BCMA_CC_PMU6_4706_PROC_P1DIV_MASK)
++ >> BCMA_CC_PMU6_4706_PROC_P1DIV_SHIFT;
++ p2div = (tmp & BCMA_CC_PMU6_4706_PROC_P2DIV_MASK)
++ >> BCMA_CC_PMU6_4706_PROC_P2DIV_SHIFT;
++
++ tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
++ if (tmp & BCMA_CC_CHIPST_4706_PKG_OPTION)
++ /* Low cost bonding: Fixed reference clock 25MHz and m = 4 */
++ clock = (25000000 / 4) * ndiv * p2div / p1div;
++ else
++ /* Fixed reference clock 25MHz and m = 2 */
++ clock = (25000000 / 2) * ndiv * p2div / p1div;
++
++ if (m == BCMA_CC_PMU5_MAINPLL_SSB)
++ clock = clock / 4;
++
++ return clock;
++}
++
+ /* query bus clock frequency for PMU-enabled chipcommon */
+ u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
+ {
+ struct bcma_bus *bus = cc->core->bus;
+
+ switch (bus->chipinfo.id) {
+- case 0x4716:
+- case 0x4748:
+- case 47162:
++ case BCMA_CHIP_ID_BCM4716:
++ case BCMA_CHIP_ID_BCM4748:
++ case BCMA_CHIP_ID_BCM47162:
+ return bcma_pmu_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
+- case 0x5356:
++ case BCMA_CHIP_ID_BCM5356:
+ return bcma_pmu_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
+- case 0x5357:
+- case 0x4749:
++ case BCMA_CHIP_ID_BCM5357:
++ case BCMA_CHIP_ID_BCM4749:
+ return bcma_pmu_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
+- case 0x5300:
+- return bcma_pmu_clock(cc, BCMA_CC_PMU4706_MAINPLL_PLL0,
+- BCMA_CC_PMU5_MAINPLL_SSB);
+- case 53572:
++ case BCMA_CHIP_ID_BCM4706:
++ return bcma_pmu_clock_bcm4706(cc, BCMA_CC_PMU4706_MAINPLL_PLL0,
++ BCMA_CC_PMU5_MAINPLL_SSB);
++ case BCMA_CHIP_ID_BCM53572:
+ return 75000000;
+ default:
+- pr_warn("No backplane clock specified for %04X device, "
+- "pmu rev. %d, using default %d Hz\n",
+- bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
++ bcma_warn(bus, "No backplane clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
++ bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
+ }
+ return BCMA_CC_PMU_HT_CLOCK;
+ }
+@@ -283,17 +291,21 @@ u32 bcma_pmu_get_clockcpu(struct bcma_dr
+ {
+ struct bcma_bus *bus = cc->core->bus;
+
+- if (bus->chipinfo.id == 53572)
++ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53572)
+ return 300000000;
+
+ if (cc->pmu.rev >= 5) {
+ u32 pll;
+ switch (bus->chipinfo.id) {
+- case 0x5356:
++ case BCMA_CHIP_ID_BCM4706:
++ return bcma_pmu_clock_bcm4706(cc,
++ BCMA_CC_PMU4706_MAINPLL_PLL0,
++ BCMA_CC_PMU5_MAINPLL_CPU);
++ case BCMA_CHIP_ID_BCM5356:
+ pll = BCMA_CC_PMU5356_MAINPLL_PLL0;
+ break;
+- case 0x5357:
+- case 0x4749:
++ case BCMA_CHIP_ID_BCM5357:
++ case BCMA_CHIP_ID_BCM4749:
+ pll = BCMA_CC_PMU5357_MAINPLL_PLL0;
+ break;
+ default:
+@@ -301,10 +313,188 @@ u32 bcma_pmu_get_clockcpu(struct bcma_dr
+ break;
+ }
+
+- /* TODO: if (bus->chipinfo.id == 0x5300)
+- return si_4706_pmu_clock(sih, osh, cc, PMU4706_MAINPLL_PLL0, PMU5_MAINPLL_CPU); */
+ return bcma_pmu_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU);
+ }
+
+ return bcma_pmu_get_clockcontrol(cc);
+ }
++
++static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset,
++ u32 value)
++{
++ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
++ bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
++}
++
++void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
++{
++ u32 tmp = 0;
++ u8 phypll_offset = 0;
++ u8 bcm5357_bcm43236_p1div[] = {0x1, 0x5, 0x5};
++ u8 bcm5357_bcm43236_ndiv[] = {0x30, 0xf6, 0xfc};
++ struct bcma_bus *bus = cc->core->bus;
++
++ switch (bus->chipinfo.id) {
++ case BCMA_CHIP_ID_BCM5357:
++ case BCMA_CHIP_ID_BCM4749:
++ case BCMA_CHIP_ID_BCM53572:
++ /* 5357[ab]0, 43236[ab]0, and 6362b0 */
++
++ /* BCM5357 needs to touch PLL1_PLLCTL[02],
++ so offset PLL0_PLLCTL[02] by 6 */
++ phypll_offset = (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
++ bus->chipinfo.id == BCMA_CHIP_ID_BCM4749 ||
++ bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0;
++
++ /* RMW only the P1 divider */
++ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
++ BCMA_CC_PMU_PLL_CTL0 + phypll_offset);
++ tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
++ tmp &= (~(BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK));
++ tmp |= (bcm5357_bcm43236_p1div[spuravoid] << BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT);
++ bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
++
++ /* RMW only the int feedback divider */
++ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
++ BCMA_CC_PMU_PLL_CTL2 + phypll_offset);
++ tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
++ tmp &= ~(BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK);
++ tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT;
++ bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
++
++ tmp = 1 << 10;
++ break;
++
++ case BCMA_CHIP_ID_BCM4331:
++ case BCMA_CHIP_ID_BCM43431:
++ if (spuravoid == 2) {
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
++ 0x11500014);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
++ 0x0FC00a08);
++ } else if (spuravoid == 1) {
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
++ 0x11500014);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
++ 0x0F600a08);
++ } else {
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
++ 0x11100014);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
++ 0x03000a08);
++ }
++ tmp = 1 << 10;
++ break;
++
++ case BCMA_CHIP_ID_BCM43224:
++ case BCMA_CHIP_ID_BCM43225:
++ case BCMA_CHIP_ID_BCM43421:
++ if (spuravoid == 1) {
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
++ 0x11500010);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
++ 0x000C0C06);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
++ 0x0F600a08);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
++ 0x00000000);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
++ 0x2001E920);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
++ 0x88888815);
++ } else {
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
++ 0x11100010);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
++ 0x000c0c06);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
++ 0x03000a08);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
++ 0x00000000);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
++ 0x200005c0);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
++ 0x88888815);
++ }
++ tmp = 1 << 10;
++ break;
++
++ case BCMA_CHIP_ID_BCM4716:
++ case BCMA_CHIP_ID_BCM4748:
++ case BCMA_CHIP_ID_BCM47162:
++ if (spuravoid == 1) {
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
++ 0x11500060);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
++ 0x080C0C06);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
++ 0x0F600000);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
++ 0x00000000);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
++ 0x2001E924);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
++ 0x88888815);
++ } else {
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
++ 0x11100060);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
++ 0x080c0c06);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
++ 0x03000000);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
++ 0x00000000);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
++ 0x200005c0);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
++ 0x88888815);
++ }
++
++ tmp = 3 << 9;
++ break;
++
++ case BCMA_CHIP_ID_BCM43227:
++ case BCMA_CHIP_ID_BCM43228:
++ case BCMA_CHIP_ID_BCM43428:
++ /* LCNXN */
++ /* PLL Settings for spur avoidance on/off mode,
++ no on2 support for 43228A0 */
++ if (spuravoid == 1) {
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
++ 0x01100014);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
++ 0x040C0C06);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
++ 0x03140A08);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
++ 0x00333333);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
++ 0x202C2820);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
++ 0x88888815);
++ } else {
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
++ 0x11100014);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
++ 0x040c0c06);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
++ 0x03000a08);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
++ 0x00000000);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
++ 0x200005c0);
++ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
++ 0x88888815);
++ }
++ tmp = 1 << 10;
++ break;
++ default:
++ bcma_err(bus, "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
++ bus->chipinfo.id);
++ break;
++ }
++
++ tmp |= bcma_cc_read32(cc, BCMA_CC_PMU_CTL);
++ bcma_cc_write32(cc, BCMA_CC_PMU_CTL, tmp);
++}
++EXPORT_SYMBOL_GPL(bcma_pmu_spuravoid_pllupdate);
+--- /dev/null
++++ b/drivers/bcma/driver_gmac_cmn.c
+@@ -0,0 +1,14 @@
++/*
++ * Broadcom specific AMBA
++ * GBIT MAC COMMON Core
++ *
++ * Licensed under the GNU/GPL. See COPYING for details.
++ */
++
++#include "bcma_private.h"
++#include <linux/bcma/bcma.h>
++
++void __devinit bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
++{
++ mutex_init(&gc->phy_mutex);
++}
+--- a/drivers/bcma/driver_mips.c
++++ b/drivers/bcma/driver_mips.c
+@@ -22,15 +22,15 @@
+ /* The 47162a0 hangs when reading MIPS DMP registers registers */
+ static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
+ {
+- return dev->bus->chipinfo.id == 47162 && dev->bus->chipinfo.rev == 0 &&
+- dev->id.id == BCMA_CORE_MIPS_74K;
++ return dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM47162 &&
++ dev->bus->chipinfo.rev == 0 && dev->id.id == BCMA_CORE_MIPS_74K;
+ }
+
+ /* The 5357b0 hangs when reading USB20H DMP registers */
+ static inline bool bcma_core_mips_bcm5357b0_quirk(struct bcma_device *dev)
+ {
+- return (dev->bus->chipinfo.id == 0x5357 ||
+- dev->bus->chipinfo.id == 0x4749) &&
++ return (dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
++ dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) &&
+ dev->bus->chipinfo.pkg == 11 &&
+ dev->id.id == BCMA_CORE_USB20_HOST;
+ }
+@@ -143,8 +143,8 @@ static void bcma_core_mips_set_irq(struc
+ 1 << irqflag);
+ }
+
+- pr_info("set_irq: core 0x%04x, irq %d => %d\n",
+- dev->id.id, oldirq + 2, irq + 2);
++ bcma_info(bus, "set_irq: core 0x%04x, irq %d => %d\n",
++ dev->id.id, oldirq + 2, irq + 2);
+ }
+
+ static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
+@@ -173,7 +173,7 @@ u32 bcma_cpu_clock(struct bcma_drv_mips
+ if (bus->drv_cc.capabilities & BCMA_CC_CAP_PMU)
+ return bcma_pmu_get_clockcpu(&bus->drv_cc);
+
+- pr_err("No PMU available, need this to get the cpu clock\n");
++ bcma_err(bus, "No PMU available, need this to get the cpu clock\n");
+ return 0;
+ }
+ EXPORT_SYMBOL(bcma_cpu_clock);
+@@ -185,10 +185,10 @@ static void bcma_core_mips_flash_detect(
+ switch (bus->drv_cc.capabilities & BCMA_CC_CAP_FLASHT) {
+ case BCMA_CC_FLASHT_STSER:
+ case BCMA_CC_FLASHT_ATSER:
+- pr_err("Serial flash not supported.\n");
++ bcma_err(bus, "Serial flash not supported.\n");
+ break;
+ case BCMA_CC_FLASHT_PARA:
+- pr_info("found parallel flash.\n");
++ bcma_info(bus, "found parallel flash.\n");
+ bus->drv_cc.pflash.window = 0x1c000000;
+ bus->drv_cc.pflash.window_size = 0x02000000;
+
+@@ -199,7 +199,7 @@ static void bcma_core_mips_flash_detect(
+ bus->drv_cc.pflash.buswidth = 2;
+ break;
+ default:
+- pr_err("flash not supported.\n");
++ bcma_err(bus, "flash not supported.\n");
+ }
+ }
+
+@@ -209,7 +209,7 @@ void bcma_core_mips_init(struct bcma_drv
+ struct bcma_device *core;
+ bus = mcore->core->bus;
+
+- pr_info("Initializing MIPS core...\n");
++ bcma_info(bus, "Initializing MIPS core...\n");
+
+ if (!mcore->setup_done)
+ mcore->assigned_irqs = 1;
+@@ -244,7 +244,7 @@ void bcma_core_mips_init(struct bcma_drv
+ break;
+ }
+ }
+- pr_info("IRQ reconfiguration done\n");
++ bcma_info(bus, "IRQ reconfiguration done\n");
+ bcma_core_mips_dump_irq(bus);
+
+ if (mcore->setup_done)
+--- a/drivers/bcma/driver_pci.c
++++ b/drivers/bcma/driver_pci.c
+@@ -2,8 +2,9 @@
+ * Broadcom specific AMBA
+ * PCI Core
+ *
+- * Copyright 2005, Broadcom Corporation
++ * Copyright 2005, 2011, Broadcom Corporation
+ * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
++ * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+@@ -16,40 +17,39 @@
+ * R/W ops.
+ **************************************************/
+
+-static u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
++u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
+ {
+- pcicore_write32(pc, 0x130, address);
+- pcicore_read32(pc, 0x130);
+- return pcicore_read32(pc, 0x134);
++ pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
++ pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
++ return pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_DATA);
+ }
+
+-#if 0
+ static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
+ {
+- pcicore_write32(pc, 0x130, address);
+- pcicore_read32(pc, 0x130);
+- pcicore_write32(pc, 0x134, data);
++ pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
++ pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
++ pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
+ }
+-#endif
+
+ static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
+ {
+- const u16 mdio_control = 0x128;
+- const u16 mdio_data = 0x12C;
+ u32 v;
+ int i;
+
+- v = (1 << 30); /* Start of Transaction */
+- v |= (1 << 28); /* Write Transaction */
+- v |= (1 << 17); /* Turnaround */
+- v |= (0x1F << 18);
++ v = BCMA_CORE_PCI_MDIODATA_START;
++ v |= BCMA_CORE_PCI_MDIODATA_WRITE;
++ v |= (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
++ BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
++ v |= (BCMA_CORE_PCI_MDIODATA_BLK_ADDR <<
++ BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
++ v |= BCMA_CORE_PCI_MDIODATA_TA;
+ v |= (phy << 4);
+- pcicore_write32(pc, mdio_data, v);
++ pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
+
+ udelay(10);
+ for (i = 0; i < 200; i++) {
+- v = pcicore_read32(pc, mdio_control);
+- if (v & 0x100 /* Trans complete */)
++ v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
++ if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
+ break;
+ msleep(1);
+ }
+@@ -57,79 +57,84 @@ static void bcma_pcie_mdio_set_phy(struc
+
+ static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
+ {
+- const u16 mdio_control = 0x128;
+- const u16 mdio_data = 0x12C;
+ int max_retries = 10;
+ u16 ret = 0;
+ u32 v;
+ int i;
+
+- v = 0x80; /* Enable Preamble Sequence */
+- v |= 0x2; /* MDIO Clock Divisor */
+- pcicore_write32(pc, mdio_control, v);
++ /* enable mdio access to SERDES */
++ v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
++ v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
++ pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);
+
+ if (pc->core->id.rev >= 10) {
+ max_retries = 200;
+ bcma_pcie_mdio_set_phy(pc, device);
++ v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
++ BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
++ v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
++ } else {
++ v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
++ v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
+ }
+
+- v = (1 << 30); /* Start of Transaction */
+- v |= (1 << 29); /* Read Transaction */
+- v |= (1 << 17); /* Turnaround */
+- if (pc->core->id.rev < 10)
+- v |= (u32)device << 22;
+- v |= (u32)address << 18;
+- pcicore_write32(pc, mdio_data, v);
++ v = BCMA_CORE_PCI_MDIODATA_START;
++ v |= BCMA_CORE_PCI_MDIODATA_READ;
++ v |= BCMA_CORE_PCI_MDIODATA_TA;
++
++ pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
+ /* Wait for the device to complete the transaction */
+ udelay(10);
+ for (i = 0; i < max_retries; i++) {
+- v = pcicore_read32(pc, mdio_control);
+- if (v & 0x100 /* Trans complete */) {
++ v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
++ if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) {
+ udelay(10);
+- ret = pcicore_read32(pc, mdio_data);
++ ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
+ break;
+ }
+ msleep(1);
+ }
+- pcicore_write32(pc, mdio_control, 0);
++ pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
+ return ret;
+ }
+
+ static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
+ u8 address, u16 data)
+ {
+- const u16 mdio_control = 0x128;
+- const u16 mdio_data = 0x12C;
+ int max_retries = 10;
+ u32 v;
+ int i;
+
+- v = 0x80; /* Enable Preamble Sequence */
+- v |= 0x2; /* MDIO Clock Divisor */
+- pcicore_write32(pc, mdio_control, v);
++ /* enable mdio access to SERDES */
++ v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
++ v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
++ pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);
+
+ if (pc->core->id.rev >= 10) {
+ max_retries = 200;
+ bcma_pcie_mdio_set_phy(pc, device);
++ v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
++ BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
++ v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
++ } else {
++ v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
++ v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
+ }
+
+- v = (1 << 30); /* Start of Transaction */
+- v |= (1 << 28); /* Write Transaction */
+- v |= (1 << 17); /* Turnaround */
+- if (pc->core->id.rev < 10)
+- v |= (u32)device << 22;
+- v |= (u32)address << 18;
++ v = BCMA_CORE_PCI_MDIODATA_START;
++ v |= BCMA_CORE_PCI_MDIODATA_WRITE;
++ v |= BCMA_CORE_PCI_MDIODATA_TA;
+ v |= data;
+- pcicore_write32(pc, mdio_data, v);
++ pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
+ /* Wait for the device to complete the transaction */
+ udelay(10);
+ for (i = 0; i < max_retries; i++) {
+- v = pcicore_read32(pc, mdio_control);
+- if (v & 0x100 /* Trans complete */)
++ v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
++ if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
+ break;
+ msleep(1);
+ }
+- pcicore_write32(pc, mdio_control, 0);
++ pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
+ }
+
+ /**************************************************
+@@ -138,88 +143,108 @@ static void bcma_pcie_mdio_write(struct
+
+ static u8 bcma_pcicore_polarity_workaround(struct bcma_drv_pci *pc)
+ {
+- return (bcma_pcie_read(pc, 0x204) & 0x10) ? 0xC0 : 0x80;
++ u32 tmp;
++
++ tmp = bcma_pcie_read(pc, BCMA_CORE_PCI_PLP_STATUSREG);
++ if (tmp & BCMA_CORE_PCI_PLP_POLARITYINV_STAT)
++ return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE |
++ BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY;
++ else
++ return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE;
+ }
+
+ static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
+ {
+- const u8 serdes_pll_device = 0x1D;
+- const u8 serdes_rx_device = 0x1F;
+ u16 tmp;
+
+- bcma_pcie_mdio_write(pc, serdes_rx_device, 1 /* Control */,
+- bcma_pcicore_polarity_workaround(pc));
+- tmp = bcma_pcie_mdio_read(pc, serdes_pll_device, 1 /* Control */);
+- if (tmp & 0x4000)
+- bcma_pcie_mdio_write(pc, serdes_pll_device, 1, tmp & ~0x4000);
++ bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_RX,
++ BCMA_CORE_PCI_SERDES_RX_CTRL,
++ bcma_pcicore_polarity_workaround(pc));
++ tmp = bcma_pcie_mdio_read(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
++ BCMA_CORE_PCI_SERDES_PLL_CTRL);
++ if (tmp & BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN)
++ bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
++ BCMA_CORE_PCI_SERDES_PLL_CTRL,
++ tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
++}
++
++static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
++{
++ struct bcma_device *core = pc->core;
++ u16 val16, core_index;
++ uint regoff;
++
++ regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
++ core_index = (u16)core->core_index;
++
++ val16 = pcicore_read16(pc, regoff);
++ if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
++ != core_index) {
++ val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
++ (val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
++ pcicore_write16(pc, regoff, val16);
++ }
++}
++
++/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
++/* Needs to happen when coming out of 'standby'/'hibernate' */
++static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
++{
++ u16 val16;
++ uint regoff;
++
++ regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_MISC_CONFIG);
++
++ val16 = pcicore_read16(pc, regoff);
++
++ if (!(val16 & BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST)) {
++ val16 |= BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST;
++ pcicore_write16(pc, regoff, val16);
++ }
+ }
+
+ /**************************************************
+ * Init.
+ **************************************************/
+
+-static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
++static void __devinit bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
+ {
++ bcma_core_pci_fixcfg(pc);
+ bcma_pcicore_serdes_workaround(pc);
++ bcma_core_pci_config_fixup(pc);
+ }
+
+-static bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
+-{
+- struct bcma_bus *bus = pc->core->bus;
+- u16 chipid_top;
+-
+- chipid_top = (bus->chipinfo.id & 0xFF00);
+- if (chipid_top != 0x4700 &&
+- chipid_top != 0x5300)
+- return false;
+-
+-#ifdef CONFIG_SSB_DRIVER_PCICORE
+- if (bus->sprom.boardflags_lo & SSB_BFL_NOPCI)
+- return false;
+-#endif /* CONFIG_SSB_DRIVER_PCICORE */
+-
+-#if 0
+- /* TODO: on BCMA we use address from EROM instead of magic formula */
+- u32 tmp;
+- return !mips_busprobe32(tmp, (bus->mmio +
+- (pc->core->core_index * BCMA_CORE_SIZE)));
+-#endif
+-
+- return true;
+-}
+-
+-void bcma_core_pci_init(struct bcma_drv_pci *pc)
++void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
+ {
+ if (pc->setup_done)
+ return;
+
+- if (bcma_core_pci_is_in_hostmode(pc)) {
+ #ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
++ pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
++ if (pc->hostmode)
+ bcma_core_pci_hostmode_init(pc);
+-#else
+- pr_err("Driver compiled without support for hostmode PCI\n");
+ #endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
+- } else {
+- bcma_core_pci_clientmode_init(pc);
+- }
+
+- pc->setup_done = true;
++ if (!pc->hostmode)
++ bcma_core_pci_clientmode_init(pc);
+ }
+
+ int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
+ bool enable)
+ {
+- struct pci_dev *pdev = pc->core->bus->host_pci;
++ struct pci_dev *pdev;
+ u32 coremask, tmp;
+ int err = 0;
+
+- if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
++ if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
+ /* This bcma device is not on a PCI host-bus. So the IRQs are
+ * not routed through the PCI core.
+ * So we must not enable routing through the PCI core. */
+ goto out;
+ }
+
++ pdev = pc->core->bus->host_pci;
++
+ err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
+ if (err)
+ goto out;
+@@ -236,3 +261,17 @@ out:
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
++
++void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
++{
++ u32 w;
++
++ w = bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
++ if (extend)
++ w |= BCMA_CORE_PCI_ASPMTIMER_EXTEND;
++ else
++ w &= ~BCMA_CORE_PCI_ASPMTIMER_EXTEND;
++ bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w);
++ bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
++}
++EXPORT_SYMBOL_GPL(bcma_core_pci_extend_L1timer);
+--- a/drivers/bcma/driver_pci_host.c
++++ b/drivers/bcma/driver_pci_host.c
+@@ -2,13 +2,592 @@
+ * Broadcom specific AMBA
+ * PCI Core in hostmode
+ *
++ * Copyright 2005 - 2011, Broadcom Corporation
++ * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
++ * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
++ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+ #include "bcma_private.h"
++#include <linux/pci.h>
++#include <linux/export.h>
+ #include <linux/bcma/bcma.h>
++#include <asm/paccess.h>
++
++/* Probe a 32bit value on the bus and catch bus exceptions.
++ * Returns nonzero on a bus exception.
++ * This is MIPS specific */
++#define mips_busprobe32(val, addr) get_dbe((val), ((u32 *)(addr)))
++
++/* Assume one-hot slot wiring */
++#define BCMA_PCI_SLOT_MAX 16
++#define PCI_CONFIG_SPACE_SIZE 256
++
++bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
++{
++ struct bcma_bus *bus = pc->core->bus;
++ u16 chipid_top;
++ u32 tmp;
++
++ chipid_top = (bus->chipinfo.id & 0xFF00);
++ if (chipid_top != 0x4700 &&
++ chipid_top != 0x5300)
++ return false;
++
++ if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
++ bcma_info(bus, "This PCI core is disabled and not working\n");
++ return false;
++ }
++
++ bcma_core_enable(pc->core, 0);
++
++ return !mips_busprobe32(tmp, pc->core->io_addr);
++}
++
++static u32 bcma_pcie_read_config(struct bcma_drv_pci *pc, u32 address)
++{
++ pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
++ pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
++ return pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_DATA);
++}
++
++static void bcma_pcie_write_config(struct bcma_drv_pci *pc, u32 address,
++ u32 data)
++{
++ pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
++ pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
++ pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_DATA, data);
++}
++
++static u32 bcma_get_cfgspace_addr(struct bcma_drv_pci *pc, unsigned int dev,
++ unsigned int func, unsigned int off)
++{
++ u32 addr = 0;
++
++ /* Issue config commands only when the data link is up (at least
++ * one external PCIe device is present).
++ */
++ if (dev >= 2 || !(bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_LSREG)
++ & BCMA_CORE_PCI_DLLP_LSREG_LINKUP))
++ goto out;
++
++ /* Type 0 transaction */
++ /* Slide the PCI window to the appropriate slot */
++ pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);
++ /* Calculate the address */
++ addr = pc->host_controller->host_cfg_addr;
++ addr |= (dev << BCMA_CORE_PCI_CFG_SLOT_SHIFT);
++ addr |= (func << BCMA_CORE_PCI_CFG_FUN_SHIFT);
++ addr |= (off & ~3);
++
++out:
++ return addr;
++}
+
+-void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
++static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
++ unsigned int func, unsigned int off,
++ void *buf, int len)
+ {
+- pr_err("No support for PCI core in hostmode yet\n");
++ int err = -EINVAL;
++ u32 addr, val;
++ void __iomem *mmio = 0;
++
++ WARN_ON(!pc->hostmode);
++ if (unlikely(len != 1 && len != 2 && len != 4))
++ goto out;
++ if (dev == 0) {
++ /* we support only two functions on device 0 */
++ if (func > 1)
++ return -EINVAL;
++
++ /* accesses to config registers with offsets >= 256
++ * require indirect access.
++ */
++ if (off >= PCI_CONFIG_SPACE_SIZE) {
++ addr = (func << 12);
++ addr |= (off & 0x0FFF);
++ val = bcma_pcie_read_config(pc, addr);
++ } else {
++ addr = BCMA_CORE_PCI_PCICFG0;
++ addr |= (func << 8);
++ addr |= (off & 0xfc);
++ val = pcicore_read32(pc, addr);
++ }
++ } else {
++ addr = bcma_get_cfgspace_addr(pc, dev, func, off);
++ if (unlikely(!addr))
++ goto out;
++ err = -ENOMEM;
++ mmio = ioremap_nocache(addr, sizeof(val));
++ if (!mmio)
++ goto out;
++
++ if (mips_busprobe32(val, mmio)) {
++ val = 0xffffffff;
++ goto unmap;
++ }
++
++ val = readl(mmio);
++ }
++ val >>= (8 * (off & 3));
++
++ switch (len) {
++ case 1:
++ *((u8 *)buf) = (u8)val;
++ break;
++ case 2:
++ *((u16 *)buf) = (u16)val;
++ break;
++ case 4:
++ *((u32 *)buf) = (u32)val;
++ break;
++ }
++ err = 0;
++unmap:
++ if (mmio)
++ iounmap(mmio);
++out:
++ return err;
++}
++
++static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
++ unsigned int func, unsigned int off,
++ const void *buf, int len)
++{
++ int err = -EINVAL;
++ u32 addr = 0, val = 0;
++ void __iomem *mmio = 0;
++ u16 chipid = pc->core->bus->chipinfo.id;
++
++ WARN_ON(!pc->hostmode);
++ if (unlikely(len != 1 && len != 2 && len != 4))
++ goto out;
++ if (dev == 0) {
++ /* accesses to config registers with offsets >= 256
++ * require indirect access.
++ */
++ if (off < PCI_CONFIG_SPACE_SIZE) {
++ addr = pc->core->addr + BCMA_CORE_PCI_PCICFG0;
++ addr |= (func << 8);
++ addr |= (off & 0xfc);
++ mmio = ioremap_nocache(addr, sizeof(val));
++ if (!mmio)
++ goto out;
++ }
++ } else {
++ addr = bcma_get_cfgspace_addr(pc, dev, func, off);
++ if (unlikely(!addr))
++ goto out;
++ err = -ENOMEM;
++ mmio = ioremap_nocache(addr, sizeof(val));
++ if (!mmio)
++ goto out;
++
++ if (mips_busprobe32(val, mmio)) {
++ val = 0xffffffff;
++ goto unmap;
++ }
++ }
++
++ switch (len) {
++ case 1:
++ val = readl(mmio);
++ val &= ~(0xFF << (8 * (off & 3)));
++ val |= *((const u8 *)buf) << (8 * (off & 3));
++ break;
++ case 2:
++ val = readl(mmio);
++ val &= ~(0xFFFF << (8 * (off & 3)));
++ val |= *((const u16 *)buf) << (8 * (off & 3));
++ break;
++ case 4:
++ val = *((const u32 *)buf);
++ break;
++ }
++ if (dev == 0 && !addr) {
++ /* accesses to config registers with offsets >= 256
++ * require indirect access.
++ */
++ addr = (func << 12);
++ addr |= (off & 0x0FFF);
++ bcma_pcie_write_config(pc, addr, val);
++ } else {
++ writel(val, mmio);
++
++ if (chipid == BCMA_CHIP_ID_BCM4716 ||
++ chipid == BCMA_CHIP_ID_BCM4748)
++ readl(mmio);
++ }
++
++ err = 0;
++unmap:
++ if (mmio)
++ iounmap(mmio);
++out:
++ return err;
++}
++
++static int bcma_core_pci_hostmode_read_config(struct pci_bus *bus,
++ unsigned int devfn,
++ int reg, int size, u32 *val)
++{
++ unsigned long flags;
++ int err;
++ struct bcma_drv_pci *pc;
++ struct bcma_drv_pci_host *pc_host;
++
++ pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
++ pc = pc_host->pdev;
++
++ spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
++ err = bcma_extpci_read_config(pc, PCI_SLOT(devfn),
++ PCI_FUNC(devfn), reg, val, size);
++ spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);
++
++ return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
++}
++
++static int bcma_core_pci_hostmode_write_config(struct pci_bus *bus,
++ unsigned int devfn,
++ int reg, int size, u32 val)
++{
++ unsigned long flags;
++ int err;
++ struct bcma_drv_pci *pc;
++ struct bcma_drv_pci_host *pc_host;
++
++ pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
++ pc = pc_host->pdev;
++
++ spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
++ err = bcma_extpci_write_config(pc, PCI_SLOT(devfn),
++ PCI_FUNC(devfn), reg, &val, size);
++ spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);
++
++ return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
++}
++
++/* return cap_offset if requested capability exists in the PCI config space */
++static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
++ unsigned int dev,
++ unsigned int func, u8 req_cap_id,
++ unsigned char *buf, u32 *buflen)
++{
++ u8 cap_id;
++ u8 cap_ptr = 0;
++ u32 bufsize;
++ u8 byte_val;
++
++ /* check for Header type 0 */
++ bcma_extpci_read_config(pc, dev, func, PCI_HEADER_TYPE, &byte_val,
++ sizeof(u8));
++ if ((byte_val & 0x7f) != PCI_HEADER_TYPE_NORMAL)
++ return cap_ptr;
++
++ /* check if the capability pointer field exists */
++ bcma_extpci_read_config(pc, dev, func, PCI_STATUS, &byte_val,
++ sizeof(u8));
++ if (!(byte_val & PCI_STATUS_CAP_LIST))
++ return cap_ptr;
++
++ /* check if the capability pointer is 0x00 */
++ bcma_extpci_read_config(pc, dev, func, PCI_CAPABILITY_LIST, &cap_ptr,
++ sizeof(u8));
++ if (cap_ptr == 0x00)
++ return cap_ptr;
++
++ /* loop through the capability list and see if the requested capability
++ * exists */
++ bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id, sizeof(u8));
++ while (cap_id != req_cap_id) {
++ bcma_extpci_read_config(pc, dev, func, cap_ptr + 1, &cap_ptr,
++ sizeof(u8));
++ if (cap_ptr == 0x00)
++ return cap_ptr;
++ bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id,
++ sizeof(u8));
++ }
++
++ /* found the caller requested capability */
++ if ((buf != NULL) && (buflen != NULL)) {
++ u8 cap_data;
++
++ bufsize = *buflen;
++ if (!bufsize)
++ return cap_ptr;
++
++ *buflen = 0;
++
++ /* copy the capability data excluding cap ID and next ptr */
++ cap_data = cap_ptr + 2;
++ if ((bufsize + cap_data) > PCI_CONFIG_SPACE_SIZE)
++ bufsize = PCI_CONFIG_SPACE_SIZE - cap_data;
++ *buflen = bufsize;
++ while (bufsize--) {
++ bcma_extpci_read_config(pc, dev, func, cap_data, buf,
++ sizeof(u8));
++ cap_data++;
++ buf++;
++ }
++ }
++
++ return cap_ptr;
++}
++
++/* If the root port is capable of returning Config Request
++ * Retry Status (CRS) Completion Status to software then
++ * enable the feature.
++ */
++static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
++{
++ struct bcma_bus *bus = pc->core->bus;
++ u8 cap_ptr, root_ctrl, root_cap, dev;
++ u16 val16;
++ int i;
++
++ cap_ptr = bcma_find_pci_capability(pc, 0, 0, PCI_CAP_ID_EXP, NULL,
++ NULL);
++ root_cap = cap_ptr + PCI_EXP_RTCAP;
++ bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16));
++ if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) {
++ /* Enable CRS software visibility */
++ root_ctrl = cap_ptr + PCI_EXP_RTCTL;
++ val16 = PCI_EXP_RTCTL_CRSSVE;
++ bcma_extpci_read_config(pc, 0, 0, root_ctrl, &val16,
++ sizeof(u16));
++
++ /* Initiate a configuration request to read the vendor id
++ * field of the device function's config space header after
++ * 100 ms wait time from the end of Reset. If the device is
++ * not done with its internal initialization, it must at
++ * least return a completion TLP, with a completion status
++ * of "Configuration Request Retry Status (CRS)". The root
++ * complex must complete the request to the host by returning
++ * a read-data value of 0001h for the Vendor ID field and
++ * all 1s for any additional bytes included in the request.
++ * Poll using the config reads for max wait time of 1 sec or
++ * until we receive the successful completion status. Repeat
++ * the procedure for all the devices.
++ */
++ for (dev = 1; dev < BCMA_PCI_SLOT_MAX; dev++) {
++ for (i = 0; i < 100000; i++) {
++ bcma_extpci_read_config(pc, dev, 0,
++ PCI_VENDOR_ID, &val16,
++ sizeof(val16));
++ if (val16 != 0x1)
++ break;
++ udelay(10);
++ }
++ if (val16 == 0x1)
++ bcma_err(bus, "PCI: Broken device in slot %d\n",
++ dev);
++ }
++ }
++}
++
++void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
++{
++ struct bcma_bus *bus = pc->core->bus;
++ struct bcma_drv_pci_host *pc_host;
++ u32 tmp;
++ u32 pci_membase_1G;
++ unsigned long io_map_base;
++
++ bcma_info(bus, "PCIEcore in host mode found\n");
++
++ pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
++ if (!pc_host) {
++ bcma_err(bus, "can not allocate memory");
++ return;
++ }
++
++ pc->host_controller = pc_host;
++ pc_host->pci_controller.io_resource = &pc_host->io_resource;
++ pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
++ pc_host->pci_controller.pci_ops = &pc_host->pci_ops;
++ pc_host->pdev = pc;
++
++ pci_membase_1G = BCMA_SOC_PCI_DMA;
++ pc_host->host_cfg_addr = BCMA_SOC_PCI_CFG;
++
++ pc_host->pci_ops.read = bcma_core_pci_hostmode_read_config;
++ pc_host->pci_ops.write = bcma_core_pci_hostmode_write_config;
++
++ pc_host->mem_resource.name = "BCMA PCIcore external memory",
++ pc_host->mem_resource.start = BCMA_SOC_PCI_DMA;
++ pc_host->mem_resource.end = BCMA_SOC_PCI_DMA + BCMA_SOC_PCI_DMA_SZ - 1;
++ pc_host->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED;
++
++ pc_host->io_resource.name = "BCMA PCIcore external I/O",
++ pc_host->io_resource.start = 0x100;
++ pc_host->io_resource.end = 0x7FF;
++ pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;
++
++ /* Reset RC */
++ udelay(3000);
++ pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
++ udelay(1000);
++ pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
++ BCMA_CORE_PCI_CTL_RST_OE);
++
++ /* 64 MB I/O access window. On 4716, use
++ * sbtopcie0 to access the device registers. We
++ * can't use address match 2 (1 GB window) region
++ * as mips can't generate 64-bit address on the
++ * backplane.
++ */
++ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4716 ||
++ bus->chipinfo.id == BCMA_CHIP_ID_BCM4748) {
++ pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
++ pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
++ BCMA_SOC_PCI_MEM_SZ - 1;
++ pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
++ BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
++ } else if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
++ tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
++ tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
++ tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
++ if (pc->core->core_unit == 0) {
++ pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
++ pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
++ BCMA_SOC_PCI_MEM_SZ - 1;
++ pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
++ pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
++ tmp | BCMA_SOC_PCI_MEM);
++ } else if (pc->core->core_unit == 1) {
++ pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
++ pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
++ BCMA_SOC_PCI_MEM_SZ - 1;
++ pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
++ pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
++ pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
++ tmp | BCMA_SOC_PCI1_MEM);
++ }
++ } else
++ pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
++ BCMA_CORE_PCI_SBTOPCI_IO);
++
++ /* 64 MB configuration access window */
++ pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);
++
++ /* 1 GB memory access window */
++ pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI2,
++ BCMA_CORE_PCI_SBTOPCI_MEM | pci_membase_1G);
++
++
++ /* As per PCI Express Base Spec 1.1 we need to wait for
++ * at least 100 ms from the end of a reset (cold/warm/hot)
++ * before issuing configuration requests to PCI Express
++ * devices.
++ */
++ udelay(100000);
++
++ bcma_core_pci_enable_crs(pc);
++
++ /* Enable PCI bridge BAR0 memory & master access */
++ tmp = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
++ bcma_extpci_write_config(pc, 0, 0, PCI_COMMAND, &tmp, sizeof(tmp));
++
++ /* Enable PCI interrupts */
++ pcicore_write32(pc, BCMA_CORE_PCI_IMASK, BCMA_CORE_PCI_IMASK_INTA);
++
++ /* Ok, ready to run, register it to the system.
++ * The following needs to change if we want to port hostmode
++ * to a non-MIPS platform. */
++ io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start,
++ resource_size(&pc_host->mem_resource));
++ pc_host->pci_controller.io_map_base = io_map_base;
++ set_io_port_base(pc_host->pci_controller.io_map_base);
++ /* Give some time to the PCI controller to configure itself with the new
++ * values. Not waiting at this point causes crashes of the machine. */
++ mdelay(10);
++ register_pci_controller(&pc_host->pci_controller);
++ return;
++}
++
++/* Early PCI fixup for a device on the PCI-core bridge. */
++static void bcma_core_pci_fixup_pcibridge(struct pci_dev *dev)
++{
++ if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
++ /* This is not a device on the PCI-core bridge. */
++ return;
++ }
++ if (PCI_SLOT(dev->devfn) != 0)
++ return;
++
++ pr_info("PCI: Fixing up bridge %s\n", pci_name(dev));
++
++ /* Enable PCI bridge bus mastering and memory space */
++ pci_set_master(dev);
++ if (pcibios_enable_device(dev, ~0) < 0) {
++ pr_err("PCI: BCMA bridge enable failed\n");
++ return;
++ }
++
++ /* Enable PCI bridge BAR1 prefetch and burst */
++ pci_write_config_dword(dev, BCMA_PCI_BAR1_CONTROL, 3);
++}
++DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_pcibridge);
++
++/* Early PCI fixup for all PCI-cores to set the correct memory address. */
++static void bcma_core_pci_fixup_addresses(struct pci_dev *dev)
++{
++ struct resource *res;
++ int pos;
++
++ if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
++ /* This is not a device on the PCI-core bridge. */
++ return;
++ }
++ if (PCI_SLOT(dev->devfn) == 0)
++ return;
++
++ pr_info("PCI: Fixing up addresses %s\n", pci_name(dev));
++
++ for (pos = 0; pos < 6; pos++) {
++ res = &dev->resource[pos];
++ if (res->flags & (IORESOURCE_IO | IORESOURCE_MEM))
++ pci_assign_resource(dev, pos);
++ }
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);
++
++/* This function is called when doing a pci_enable_device().
++ * We must first check if the device is a device on the PCI-core bridge. */
++int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
++{
++ struct bcma_drv_pci_host *pc_host;
++
++ if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
++ /* This is not a device on the PCI-core bridge. */
++ return -ENODEV;
++ }
++ pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
++ pci_ops);
++
++ pr_info("PCI: Fixing up device %s\n", pci_name(dev));
++
++ /* Fix up interrupt lines */
++ dev->irq = bcma_core_mips_irq(pc_host->pdev->core) + 2;
++ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
++
++ return 0;
++}
++EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);
++
++/* PCI device IRQ mapping. */
++int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
++{
++ struct bcma_drv_pci_host *pc_host;
++
++ if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
++ /* This is not a device on the PCI-core bridge. */
++ return -ENODEV;
++ }
++
++ pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
++ pci_ops);
++ return bcma_core_mips_irq(pc_host->pdev->core) + 2;
+ }
++EXPORT_SYMBOL(bcma_core_pci_pcibios_map_irq);
+--- a/drivers/bcma/host_pci.c
++++ b/drivers/bcma/host_pci.c
+@@ -18,7 +18,7 @@ static void bcma_host_pci_switch_core(st
+ pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2,
+ core->wrap);
+ core->bus->mapped_core = core;
+- pr_debug("Switched to core: 0x%X\n", core->id.id);
++ bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
+ }
+
+ /* Provides access to the requested core. Returns base offset that has to be
+@@ -154,8 +154,8 @@ const struct bcma_host_ops bcma_host_pci
+ .awrite32 = bcma_host_pci_awrite32,
+ };
+
+-static int bcma_host_pci_probe(struct pci_dev *dev,
+- const struct pci_device_id *id)
++static int __devinit bcma_host_pci_probe(struct pci_dev *dev,
++ const struct pci_device_id *id)
+ {
+ struct bcma_bus *bus;
+ int err = -ENOMEM;
+@@ -188,7 +188,7 @@ static int bcma_host_pci_probe(struct pc
+
+ /* SSB needed additional powering up, do we have any AMBA PCI cards? */
+ if (!pci_is_pcie(dev))
+- pr_err("PCI card detected, report problems.\n");
++ bcma_err(bus, "PCI card detected, report problems.\n");
+
+ /* Map MMIO */
+ err = -ENOMEM;
+@@ -201,6 +201,9 @@ static int bcma_host_pci_probe(struct pc
+ bus->hosttype = BCMA_HOSTTYPE_PCI;
+ bus->ops = &bcma_host_pci_ops;
+
++ bus->boardinfo.vendor = bus->host_pci->subsystem_vendor;
++ bus->boardinfo.type = bus->host_pci->subsystem_device;
++
+ /* Register */
+ err = bcma_bus_register(bus);
+ if (err)
+@@ -222,7 +225,7 @@ err_kfree_bus:
+ return err;
+ }
+
+-static void bcma_host_pci_remove(struct pci_dev *dev)
++static void __devexit bcma_host_pci_remove(struct pci_dev *dev)
+ {
+ struct bcma_bus *bus = pci_get_drvdata(dev);
+
+@@ -265,6 +268,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bc
+
+ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
++ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
+@@ -277,7 +281,7 @@ static struct pci_driver bcma_pci_bridge
+ .name = "bcma-pci-bridge",
+ .id_table = bcma_pci_bridge_tbl,
+ .probe = bcma_host_pci_probe,
+- .remove = bcma_host_pci_remove,
++ .remove = __devexit_p(bcma_host_pci_remove),
+ .driver.pm = BCMA_PM_OPS,
+ };
+
+--- a/drivers/bcma/main.c
++++ b/drivers/bcma/main.c
+@@ -13,6 +13,12 @@
+ MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
+ MODULE_LICENSE("GPL");
+
++/* contains the number the next bus should get. */
++static unsigned int bcma_bus_next_num = 0;
++
++/* bcma_buses_mutex locks the bcma_bus_next_num */
++static DEFINE_MUTEX(bcma_buses_mutex);
++
+ static int bcma_bus_match(struct device *dev, struct device_driver *drv);
+ static int bcma_device_probe(struct device *dev);
+ static int bcma_device_remove(struct device *dev);
+@@ -55,7 +61,14 @@ static struct bus_type bcma_bus_type = {
+ .dev_attrs = bcma_device_attrs,
+ };
+
+-static struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
++static u16 bcma_cc_core_id(struct bcma_bus *bus)
++{
++ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
++ return BCMA_CORE_4706_CHIPCOMMON;
++ return BCMA_CORE_CHIPCOMMON;
++}
++
++struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
+ {
+ struct bcma_device *core;
+
+@@ -65,6 +78,7 @@ static struct bcma_device *bcma_find_cor
+ }
+ return NULL;
+ }
++EXPORT_SYMBOL_GPL(bcma_find_core);
+
+ static void bcma_release_core_dev(struct device *dev)
+ {
+@@ -84,16 +98,18 @@ static int bcma_register_cores(struct bc
+ list_for_each_entry(core, &bus->cores, list) {
+ /* We support that cores ourself */
+ switch (core->id.id) {
++ case BCMA_CORE_4706_CHIPCOMMON:
+ case BCMA_CORE_CHIPCOMMON:
+ case BCMA_CORE_PCI:
+ case BCMA_CORE_PCIE:
+ case BCMA_CORE_MIPS_74K:
++ case BCMA_CORE_4706_MAC_GBIT_COMMON:
+ continue;
+ }
+
+ core->dev.release = bcma_release_core_dev;
+ core->dev.bus = &bcma_bus_type;
+- dev_set_name(&core->dev, "bcma%d:%d", 0/*bus->num*/, dev_id);
++ dev_set_name(&core->dev, "bcma%d:%d", bus->num, dev_id);
+
+ switch (bus->hosttype) {
+ case BCMA_HOSTTYPE_PCI:
+@@ -111,8 +127,9 @@ static int bcma_register_cores(struct bc
+
+ err = device_register(&core->dev);
+ if (err) {
+- pr_err("Could not register dev for core 0x%03X\n",
+- core->id.id);
++ bcma_err(bus,
++ "Could not register dev for core 0x%03X\n",
++ core->id.id);
+ continue;
+ }
+ core->dev_registered = true;
+@@ -132,20 +149,24 @@ static void bcma_unregister_cores(struct
+ }
+ }
+
+-int bcma_bus_register(struct bcma_bus *bus)
++int __devinit bcma_bus_register(struct bcma_bus *bus)
+ {
+ int err;
+ struct bcma_device *core;
+
++ mutex_lock(&bcma_buses_mutex);
++ bus->num = bcma_bus_next_num++;
++ mutex_unlock(&bcma_buses_mutex);
++
+ /* Scan for devices (cores) */
+ err = bcma_bus_scan(bus);
+ if (err) {
+- pr_err("Failed to scan: %d\n", err);
++ bcma_err(bus, "Failed to scan: %d\n", err);
+ return -1;
+ }
+
+ /* Init CC core */
+- core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
++ core = bcma_find_core(bus, bcma_cc_core_id(bus));
+ if (core) {
+ bus->drv_cc.core = core;
+ bcma_core_chipcommon_init(&bus->drv_cc);
+@@ -165,17 +186,24 @@ int bcma_bus_register(struct bcma_bus *b
+ bcma_core_pci_init(&bus->drv_pci);
+ }
+
++ /* Init GBIT MAC COMMON core */
++ core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
++ if (core) {
++ bus->drv_gmac_cmn.core = core;
++ bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
++ }
++
+ /* Try to get SPROM */
+ err = bcma_sprom_get(bus);
+ if (err == -ENOENT) {
+- pr_err("No SPROM available\n");
++ bcma_err(bus, "No SPROM available\n");
+ } else if (err)
+- pr_err("Failed to get SPROM: %d\n", err);
++ bcma_err(bus, "Failed to get SPROM: %d\n", err);
+
+ /* Register found cores */
+ bcma_register_cores(bus);
+
+- pr_info("Bus registered\n");
++ bcma_info(bus, "Bus registered\n");
+
+ return 0;
+ }
+@@ -196,14 +224,14 @@ int __init bcma_bus_early_register(struc
+ bcma_init_bus(bus);
+
+ match.manuf = BCMA_MANUF_BCM;
+- match.id = BCMA_CORE_CHIPCOMMON;
++ match.id = bcma_cc_core_id(bus);
+ match.class = BCMA_CL_SIM;
+ match.rev = BCMA_ANY_REV;
+
+ /* Scan for chip common core */
+ err = bcma_bus_scan_early(bus, &match, core_cc);
+ if (err) {
+- pr_err("Failed to scan for common core: %d\n", err);
++ bcma_err(bus, "Failed to scan for common core: %d\n", err);
+ return -1;
+ }
+
+@@ -215,12 +243,12 @@ int __init bcma_bus_early_register(struc
+ /* Scan for mips core */
+ err = bcma_bus_scan_early(bus, &match, core_mips);
+ if (err) {
+- pr_err("Failed to scan for mips core: %d\n", err);
++ bcma_err(bus, "Failed to scan for mips core: %d\n", err);
+ return -1;
+ }
+
+ /* Init CC core */
+- core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
++ core = bcma_find_core(bus, bcma_cc_core_id(bus));
+ if (core) {
+ bus->drv_cc.core = core;
+ bcma_core_chipcommon_init(&bus->drv_cc);
+@@ -233,7 +261,7 @@ int __init bcma_bus_early_register(struc
+ bcma_core_mips_init(&bus->drv_mips);
+ }
+
+- pr_info("Early bus registered\n");
++ bcma_info(bus, "Early bus registered\n");
+
+ return 0;
+ }
+@@ -259,8 +287,7 @@ int bcma_bus_resume(struct bcma_bus *bus
+ struct bcma_device *core;
+
+ /* Init CC core */
+- core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
+- if (core) {
++ if (bus->drv_cc.core) {
+ bus->drv_cc.setup_done = false;
+ bcma_core_chipcommon_init(&bus->drv_cc);
+ }
+--- a/drivers/bcma/scan.c
++++ b/drivers/bcma/scan.c
+@@ -19,15 +19,27 @@ struct bcma_device_id_name {
+ u16 id;
+ const char *name;
+ };
+-struct bcma_device_id_name bcma_device_names[] = {
++
++static const struct bcma_device_id_name bcma_arm_device_names[] = {
++ { BCMA_CORE_4706_MAC_GBIT_COMMON, "BCM4706 GBit MAC Common" },
++ { BCMA_CORE_ARM_1176, "ARM 1176" },
++ { BCMA_CORE_ARM_7TDMI, "ARM 7TDMI" },
++ { BCMA_CORE_ARM_CM3, "ARM CM3" },
++};
++
++static const struct bcma_device_id_name bcma_bcm_device_names[] = {
+ { BCMA_CORE_OOB_ROUTER, "OOB Router" },
++ { BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" },
++ { BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" },
++ { BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" },
++ { BCMA_CORE_AMEMC, "AMEMC (DDR)" },
++ { BCMA_CORE_ALTA, "ALTA (I2S)" },
+ { BCMA_CORE_INVALID, "Invalid" },
+ { BCMA_CORE_CHIPCOMMON, "ChipCommon" },
+ { BCMA_CORE_ILINE20, "ILine 20" },
+ { BCMA_CORE_SRAM, "SRAM" },
+ { BCMA_CORE_SDRAM, "SDRAM" },
+ { BCMA_CORE_PCI, "PCI" },
+- { BCMA_CORE_MIPS, "MIPS" },
+ { BCMA_CORE_ETHERNET, "Fast Ethernet" },
+ { BCMA_CORE_V90, "V90" },
+ { BCMA_CORE_USB11_HOSTDEV, "USB 1.1 Hostdev" },
+@@ -44,7 +56,6 @@ struct bcma_device_id_name bcma_device_n
+ { BCMA_CORE_PHY_A, "PHY A" },
+ { BCMA_CORE_PHY_B, "PHY B" },
+ { BCMA_CORE_PHY_G, "PHY G" },
+- { BCMA_CORE_MIPS_3302, "MIPS 3302" },
+ { BCMA_CORE_USB11_HOST, "USB 1.1 Host" },
+ { BCMA_CORE_USB11_DEV, "USB 1.1 Device" },
+ { BCMA_CORE_USB20_HOST, "USB 2.0 Host" },
+@@ -58,15 +69,11 @@ struct bcma_device_id_name bcma_device_n
+ { BCMA_CORE_PHY_N, "PHY N" },
+ { BCMA_CORE_SRAM_CTL, "SRAM Controller" },
+ { BCMA_CORE_MINI_MACPHY, "Mini MACPHY" },
+- { BCMA_CORE_ARM_1176, "ARM 1176" },
+- { BCMA_CORE_ARM_7TDMI, "ARM 7TDMI" },
+ { BCMA_CORE_PHY_LP, "PHY LP" },
+ { BCMA_CORE_PMU, "PMU" },
+ { BCMA_CORE_PHY_SSN, "PHY SSN" },
+ { BCMA_CORE_SDIO_DEV, "SDIO Device" },
+- { BCMA_CORE_ARM_CM3, "ARM CM3" },
+ { BCMA_CORE_PHY_HT, "PHY HT" },
+- { BCMA_CORE_MIPS_74K, "MIPS 74K" },
+ { BCMA_CORE_MAC_GBIT, "GBit MAC" },
+ { BCMA_CORE_DDR12_MEM_CTL, "DDR1/DDR2 Memory Controller" },
+ { BCMA_CORE_PCIE_RC, "PCIe Root Complex" },
+@@ -79,16 +86,41 @@ struct bcma_device_id_name bcma_device_n
+ { BCMA_CORE_SHIM, "SHIM" },
+ { BCMA_CORE_DEFAULT, "Default" },
+ };
+-const char *bcma_device_name(struct bcma_device_id *id)
++
++static const struct bcma_device_id_name bcma_mips_device_names[] = {
++ { BCMA_CORE_MIPS, "MIPS" },
++ { BCMA_CORE_MIPS_3302, "MIPS 3302" },
++ { BCMA_CORE_MIPS_74K, "MIPS 74K" },
++};
++
++static const char *bcma_device_name(const struct bcma_device_id *id)
+ {
+- int i;
++ const struct bcma_device_id_name *names;
++ int size, i;
+
+- if (id->manuf == BCMA_MANUF_BCM) {
+- for (i = 0; i < ARRAY_SIZE(bcma_device_names); i++) {
+- if (bcma_device_names[i].id == id->id)
+- return bcma_device_names[i].name;
+- }
++ /* search manufacturer specific names */
++ switch (id->manuf) {
++ case BCMA_MANUF_ARM:
++ names = bcma_arm_device_names;
++ size = ARRAY_SIZE(bcma_arm_device_names);
++ break;
++ case BCMA_MANUF_BCM:
++ names = bcma_bcm_device_names;
++ size = ARRAY_SIZE(bcma_bcm_device_names);
++ break;
++ case BCMA_MANUF_MIPS:
++ names = bcma_mips_device_names;
++ size = ARRAY_SIZE(bcma_mips_device_names);
++ break;
++ default:
++ return "UNKNOWN";
++ }
++
++ for (i = 0; i < size; i++) {
++ if (names[i].id == id->id)
++ return names[i].name;
+ }
++
+ return "UNKNOWN";
+ }
+
+@@ -212,6 +244,17 @@ static struct bcma_device *bcma_find_cor
+ return NULL;
+ }
+
++static struct bcma_device *bcma_find_core_reverse(struct bcma_bus *bus, u16 coreid)
++{
++ struct bcma_device *core;
++
++ list_for_each_entry_reverse(core, &bus->cores, list) {
++ if (core->id.id == coreid)
++ return core;
++ }
++ return NULL;
++}
++
+ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
+ struct bcma_device_id *match, int core_num,
+ struct bcma_device *core)
+@@ -252,11 +295,15 @@ static int bcma_get_next_core(struct bcm
+
+ /* check if component is a core at all */
+ if (wrappers[0] + wrappers[1] == 0) {
+- /* we could save addrl of the router
+- if (cid == BCMA_CORE_OOB_ROUTER)
+- */
+- bcma_erom_skip_component(bus, eromptr);
+- return -ENXIO;
++ /* Some specific cores don't need wrappers */
++ switch (core->id.id) {
++ case BCMA_CORE_4706_MAC_GBIT_COMMON:
++ /* Not used yet: case BCMA_CORE_OOB_ROUTER: */
++ break;
++ default:
++ bcma_erom_skip_component(bus, eromptr);
++ return -ENXIO;
++ }
+ }
+
+ if (bcma_erom_is_bridge(bus, eromptr)) {
+@@ -286,6 +333,23 @@ static int bcma_get_next_core(struct bcm
+ return -EILSEQ;
+ }
+
++ /* First Slave Address Descriptor should be port 0:
++ * the main register space for the core
++ */
++ tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0);
++ if (tmp <= 0) {
++ /* Try again to see if it is a bridge */
++ tmp = bcma_erom_get_addr_desc(bus, eromptr,
++ SCAN_ADDR_TYPE_BRIDGE, 0);
++ if (tmp <= 0) {
++ return -EILSEQ;
++ } else {
++ bcma_info(bus, "Bridge found\n");
++ return -ENXIO;
++ }
++ }
++ core->addr = tmp;
++
+ /* get & parse slave ports */
+ for (i = 0; i < ports[1]; i++) {
+ for (j = 0; ; j++) {
+@@ -298,7 +362,7 @@ static int bcma_get_next_core(struct bcm
+ break;
+ } else {
+ if (i == 0 && j == 0)
+- core->addr = tmp;
++ core->addr1 = tmp;
+ }
+ }
+ }
+@@ -353,6 +417,7 @@ static int bcma_get_next_core(struct bcm
+ void bcma_init_bus(struct bcma_bus *bus)
+ {
+ s32 tmp;
++ struct bcma_chipinfo *chipinfo = &(bus->chipinfo);
+
+ if (bus->init_done)
+ return;
+@@ -363,9 +428,12 @@ void bcma_init_bus(struct bcma_bus *bus)
+ bcma_scan_switch_core(bus, BCMA_ADDR_BASE);
+
+ tmp = bcma_scan_read32(bus, 0, BCMA_CC_ID);
+- bus->chipinfo.id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT;
+- bus->chipinfo.rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT;
+- bus->chipinfo.pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT;
++ chipinfo->id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT;
++ chipinfo->rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT;
++ chipinfo->pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT;
++ bcma_info(bus, "Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
++ chipinfo->id, chipinfo->rev, chipinfo->pkg);
++
+ bus->init_done = true;
+ }
+
+@@ -392,6 +460,7 @@ int bcma_bus_scan(struct bcma_bus *bus)
+ bcma_scan_switch_core(bus, erombase);
+
+ while (eromptr < eromend) {
++ struct bcma_device *other_core;
+ struct bcma_device *core = kzalloc(sizeof(*core), GFP_KERNEL);
+ if (!core)
+ return -ENOMEM;
+@@ -414,14 +483,15 @@ int bcma_bus_scan(struct bcma_bus *bus)
+
+ core->core_index = core_num++;
+ bus->nr_cores++;
++ other_core = bcma_find_core_reverse(bus, core->id.id);
++ core->core_unit = (other_core == NULL) ? 0 : other_core->core_unit + 1;
+
+- pr_info("Core %d found: %s "
+- "(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
+- core->core_index, bcma_device_name(&core->id),
+- core->id.manuf, core->id.id, core->id.rev,
+- core->id.class);
++ bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
++ core->core_index, bcma_device_name(&core->id),
++ core->id.manuf, core->id.id, core->id.rev,
++ core->id.class);
+
+- list_add(&core->list, &bus->cores);
++ list_add_tail(&core->list, &bus->cores);
+ }
+
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+@@ -471,13 +541,12 @@ int __init bcma_bus_scan_early(struct bc
+
+ core->core_index = core_num++;
+ bus->nr_cores++;
+- pr_info("Core %d found: %s "
+- "(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
+- core->core_index, bcma_device_name(&core->id),
+- core->id.manuf, core->id.id, core->id.rev,
+- core->id.class);
++ bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
++ core->core_index, bcma_device_name(&core->id),
++ core->id.manuf, core->id.id, core->id.rev,
++ core->id.class);
+
+- list_add(&core->list, &bus->cores);
++ list_add_tail(&core->list, &bus->cores);
+ err = 0;
+ break;
+ }
+--- a/drivers/bcma/scan.h
++++ b/drivers/bcma/scan.h
+@@ -27,7 +27,7 @@
+ #define SCAN_CIB_NMW 0x0007C000
+ #define SCAN_CIB_NMW_SHIFT 14
+ #define SCAN_CIB_NSW 0x00F80000
+-#define SCAN_CIB_NSW_SHIFT 17
++#define SCAN_CIB_NSW_SHIFT 19
+ #define SCAN_CIB_REV 0xFF000000
+ #define SCAN_CIB_REV_SHIFT 24
+
+--- a/drivers/bcma/sprom.c
++++ b/drivers/bcma/sprom.c
+@@ -2,6 +2,8 @@
+ * Broadcom specific AMBA
+ * SPROM reading
+ *
++ * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
++ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+@@ -14,7 +16,57 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/slab.h>
+
+-#define SPOFF(offset) ((offset) / sizeof(u16))
++static int(*get_fallback_sprom)(struct bcma_bus *dev, struct ssb_sprom *out);
++
++/**
++ * bcma_arch_register_fallback_sprom - Registers a method providing a
++ * fallback SPROM if no SPROM is found.
++ *
++ * @sprom_callback: The callback function.
++ *
++ * With this function the architecture implementation may register a
++ * callback handler which fills the SPROM data structure. The fallback is
++ * used for PCI based BCMA devices, where no valid SPROM can be found
++ * in the shadow registers, and to provide the SPROM for SoCs where BCMA is
++ * used to control the system bus.
++ *
++ * This function is useful for weird architectures that have a half-assed
++ * BCMA device hardwired to their PCI bus.
++ *
++ * This function is available for architecture code, only. So it is not
++ * exported.
++ */
++int bcma_arch_register_fallback_sprom(int (*sprom_callback)(struct bcma_bus *bus,
++ struct ssb_sprom *out))
++{
++ if (get_fallback_sprom)
++ return -EEXIST;
++ get_fallback_sprom = sprom_callback;
++
++ return 0;
++}
++
++static int bcma_fill_sprom_with_fallback(struct bcma_bus *bus,
++ struct ssb_sprom *out)
++{
++ int err;
++
++ if (!get_fallback_sprom) {
++ err = -ENOENT;
++ goto fail;
++ }
++
++ err = get_fallback_sprom(bus, out);
++ if (err)
++ goto fail;
++
++ bcma_debug(bus, "Using SPROM revision %d provided by platform.\n",
++ bus->sprom.revision);
++ return 0;
++fail:
++ bcma_warn(bus, "Using fallback SPROM failed (err %d)\n", err);
++ return err;
++}
+
+ /**************************************************
+ * R/W ops.
+@@ -124,10 +176,37 @@ static int bcma_sprom_valid(const u16 *s
+ * SPROM extraction.
+ **************************************************/
+
++#define SPOFF(offset) ((offset) / sizeof(u16))
++
++#define SPEX(_field, _offset, _mask, _shift) \
++ bus->sprom._field = ((sprom[SPOFF(_offset)] & (_mask)) >> (_shift))
++
++#define SPEX32(_field, _offset, _mask, _shift) \
++ bus->sprom._field = ((((u32)sprom[SPOFF((_offset)+2)] << 16 | \
++ sprom[SPOFF(_offset)]) & (_mask)) >> (_shift))
++
++#define SPEX_ARRAY8(_field, _offset, _mask, _shift) \
++ do { \
++ SPEX(_field[0], _offset + 0, _mask, _shift); \
++ SPEX(_field[1], _offset + 2, _mask, _shift); \
++ SPEX(_field[2], _offset + 4, _mask, _shift); \
++ SPEX(_field[3], _offset + 6, _mask, _shift); \
++ SPEX(_field[4], _offset + 8, _mask, _shift); \
++ SPEX(_field[5], _offset + 10, _mask, _shift); \
++ SPEX(_field[6], _offset + 12, _mask, _shift); \
++ SPEX(_field[7], _offset + 14, _mask, _shift); \
++ } while (0)
++
+ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
+ {
+- u16 v;
++ u16 v, o;
+ int i;
++ u16 pwr_info_offset[] = {
++ SSB_SROM8_PWR_INFO_CORE0, SSB_SROM8_PWR_INFO_CORE1,
++ SSB_SROM8_PWR_INFO_CORE2, SSB_SROM8_PWR_INFO_CORE3
++ };
++ BUILD_BUG_ON(ARRAY_SIZE(pwr_info_offset) !=
++ ARRAY_SIZE(bus->sprom.core_pwr_info));
+
+ bus->sprom.revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] &
+ SSB_SPROM_REVISION_REV;
+@@ -137,102 +216,378 @@ static void bcma_sprom_extract_r8(struct
+ *(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v);
+ }
+
+- bus->sprom.board_rev = sprom[SPOFF(SSB_SPROM8_BOARDREV)];
++ SPEX(board_rev, SSB_SPROM8_BOARDREV, ~0, 0);
+
+- bus->sprom.txpid2g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
+- SSB_SPROM4_TXPID2G0) >> SSB_SPROM4_TXPID2G0_SHIFT;
+- bus->sprom.txpid2g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
+- SSB_SPROM4_TXPID2G1) >> SSB_SPROM4_TXPID2G1_SHIFT;
+- bus->sprom.txpid2g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
+- SSB_SPROM4_TXPID2G2) >> SSB_SPROM4_TXPID2G2_SHIFT;
+- bus->sprom.txpid2g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
+- SSB_SPROM4_TXPID2G3) >> SSB_SPROM4_TXPID2G3_SHIFT;
+-
+- bus->sprom.txpid5gl[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
+- SSB_SPROM4_TXPID5GL0) >> SSB_SPROM4_TXPID5GL0_SHIFT;
+- bus->sprom.txpid5gl[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
+- SSB_SPROM4_TXPID5GL1) >> SSB_SPROM4_TXPID5GL1_SHIFT;
+- bus->sprom.txpid5gl[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
+- SSB_SPROM4_TXPID5GL2) >> SSB_SPROM4_TXPID5GL2_SHIFT;
+- bus->sprom.txpid5gl[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
+- SSB_SPROM4_TXPID5GL3) >> SSB_SPROM4_TXPID5GL3_SHIFT;
+-
+- bus->sprom.txpid5g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
+- SSB_SPROM4_TXPID5G0) >> SSB_SPROM4_TXPID5G0_SHIFT;
+- bus->sprom.txpid5g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
+- SSB_SPROM4_TXPID5G1) >> SSB_SPROM4_TXPID5G1_SHIFT;
+- bus->sprom.txpid5g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
+- SSB_SPROM4_TXPID5G2) >> SSB_SPROM4_TXPID5G2_SHIFT;
+- bus->sprom.txpid5g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
+- SSB_SPROM4_TXPID5G3) >> SSB_SPROM4_TXPID5G3_SHIFT;
+-
+- bus->sprom.txpid5gh[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
+- SSB_SPROM4_TXPID5GH0) >> SSB_SPROM4_TXPID5GH0_SHIFT;
+- bus->sprom.txpid5gh[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
+- SSB_SPROM4_TXPID5GH1) >> SSB_SPROM4_TXPID5GH1_SHIFT;
+- bus->sprom.txpid5gh[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
+- SSB_SPROM4_TXPID5GH2) >> SSB_SPROM4_TXPID5GH2_SHIFT;
+- bus->sprom.txpid5gh[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
+- SSB_SPROM4_TXPID5GH3) >> SSB_SPROM4_TXPID5GH3_SHIFT;
+-
+- bus->sprom.boardflags_lo = sprom[SPOFF(SSB_SPROM8_BFLLO)];
+- bus->sprom.boardflags_hi = sprom[SPOFF(SSB_SPROM8_BFLHI)];
+- bus->sprom.boardflags2_lo = sprom[SPOFF(SSB_SPROM8_BFL2LO)];
+- bus->sprom.boardflags2_hi = sprom[SPOFF(SSB_SPROM8_BFL2HI)];
+-
+- bus->sprom.country_code = sprom[SPOFF(SSB_SPROM8_CCODE)];
+-
+- bus->sprom.fem.ghz2.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+- SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
+- bus->sprom.fem.ghz2.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+- SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
+- bus->sprom.fem.ghz2.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+- SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
+- bus->sprom.fem.ghz2.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+- SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
+- bus->sprom.fem.ghz2.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+- SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
+-
+- bus->sprom.fem.ghz5.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+- SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
+- bus->sprom.fem.ghz5.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+- SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
+- bus->sprom.fem.ghz5.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+- SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
+- bus->sprom.fem.ghz5.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+- SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
+- bus->sprom.fem.ghz5.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+- SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
++ SPEX(txpid2g[0], SSB_SPROM4_TXPID2G01, SSB_SPROM4_TXPID2G0,
++ SSB_SPROM4_TXPID2G0_SHIFT);
++ SPEX(txpid2g[1], SSB_SPROM4_TXPID2G01, SSB_SPROM4_TXPID2G1,
++ SSB_SPROM4_TXPID2G1_SHIFT);
++ SPEX(txpid2g[2], SSB_SPROM4_TXPID2G23, SSB_SPROM4_TXPID2G2,
++ SSB_SPROM4_TXPID2G2_SHIFT);
++ SPEX(txpid2g[3], SSB_SPROM4_TXPID2G23, SSB_SPROM4_TXPID2G3,
++ SSB_SPROM4_TXPID2G3_SHIFT);
++
++ SPEX(txpid5gl[0], SSB_SPROM4_TXPID5GL01, SSB_SPROM4_TXPID5GL0,
++ SSB_SPROM4_TXPID5GL0_SHIFT);
++ SPEX(txpid5gl[1], SSB_SPROM4_TXPID5GL01, SSB_SPROM4_TXPID5GL1,
++ SSB_SPROM4_TXPID5GL1_SHIFT);
++ SPEX(txpid5gl[2], SSB_SPROM4_TXPID5GL23, SSB_SPROM4_TXPID5GL2,
++ SSB_SPROM4_TXPID5GL2_SHIFT);
++ SPEX(txpid5gl[3], SSB_SPROM4_TXPID5GL23, SSB_SPROM4_TXPID5GL3,
++ SSB_SPROM4_TXPID5GL3_SHIFT);
++
++ SPEX(txpid5g[0], SSB_SPROM4_TXPID5G01, SSB_SPROM4_TXPID5G0,
++ SSB_SPROM4_TXPID5G0_SHIFT);
++ SPEX(txpid5g[1], SSB_SPROM4_TXPID5G01, SSB_SPROM4_TXPID5G1,
++ SSB_SPROM4_TXPID5G1_SHIFT);
++ SPEX(txpid5g[2], SSB_SPROM4_TXPID5G23, SSB_SPROM4_TXPID5G2,
++ SSB_SPROM4_TXPID5G2_SHIFT);
++ SPEX(txpid5g[3], SSB_SPROM4_TXPID5G23, SSB_SPROM4_TXPID5G3,
++ SSB_SPROM4_TXPID5G3_SHIFT);
++
++ SPEX(txpid5gh[0], SSB_SPROM4_TXPID5GH01, SSB_SPROM4_TXPID5GH0,
++ SSB_SPROM4_TXPID5GH0_SHIFT);
++ SPEX(txpid5gh[1], SSB_SPROM4_TXPID5GH01, SSB_SPROM4_TXPID5GH1,
++ SSB_SPROM4_TXPID5GH1_SHIFT);
++ SPEX(txpid5gh[2], SSB_SPROM4_TXPID5GH23, SSB_SPROM4_TXPID5GH2,
++ SSB_SPROM4_TXPID5GH2_SHIFT);
++ SPEX(txpid5gh[3], SSB_SPROM4_TXPID5GH23, SSB_SPROM4_TXPID5GH3,
++ SSB_SPROM4_TXPID5GH3_SHIFT);
++
++ SPEX(boardflags_lo, SSB_SPROM8_BFLLO, ~0, 0);
++ SPEX(boardflags_hi, SSB_SPROM8_BFLHI, ~0, 0);
++ SPEX(boardflags2_lo, SSB_SPROM8_BFL2LO, ~0, 0);
++ SPEX(boardflags2_hi, SSB_SPROM8_BFL2HI, ~0, 0);
++
++ SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8);
++ SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0);
++
++ /* Extract the cores' power info */
++ for (i = 0; i < ARRAY_SIZE(pwr_info_offset); i++) {
++ o = pwr_info_offset[i];
++ SPEX(core_pwr_info[i].itssi_2g, o + SSB_SROM8_2G_MAXP_ITSSI,
++ SSB_SPROM8_2G_ITSSI, SSB_SPROM8_2G_ITSSI_SHIFT);
++ SPEX(core_pwr_info[i].maxpwr_2g, o + SSB_SROM8_2G_MAXP_ITSSI,
++ SSB_SPROM8_2G_MAXP, 0);
++
++ SPEX(core_pwr_info[i].pa_2g[0], o + SSB_SROM8_2G_PA_0, ~0, 0);
++ SPEX(core_pwr_info[i].pa_2g[1], o + SSB_SROM8_2G_PA_1, ~0, 0);
++ SPEX(core_pwr_info[i].pa_2g[2], o + SSB_SROM8_2G_PA_2, ~0, 0);
++
++ SPEX(core_pwr_info[i].itssi_5g, o + SSB_SROM8_5G_MAXP_ITSSI,
++ SSB_SPROM8_5G_ITSSI, SSB_SPROM8_5G_ITSSI_SHIFT);
++ SPEX(core_pwr_info[i].maxpwr_5g, o + SSB_SROM8_5G_MAXP_ITSSI,
++ SSB_SPROM8_5G_MAXP, 0);
++ SPEX(core_pwr_info[i].maxpwr_5gh, o + SSB_SPROM8_5GHL_MAXP,
++ SSB_SPROM8_5GH_MAXP, 0);
++ SPEX(core_pwr_info[i].maxpwr_5gl, o + SSB_SPROM8_5GHL_MAXP,
++ SSB_SPROM8_5GL_MAXP, SSB_SPROM8_5GL_MAXP_SHIFT);
++
++ SPEX(core_pwr_info[i].pa_5gl[0], o + SSB_SROM8_5GL_PA_0, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5gl[1], o + SSB_SROM8_5GL_PA_1, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5gl[2], o + SSB_SROM8_5GL_PA_2, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5g[0], o + SSB_SROM8_5G_PA_0, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5g[1], o + SSB_SROM8_5G_PA_1, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5g[2], o + SSB_SROM8_5G_PA_2, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5gh[0], o + SSB_SROM8_5GH_PA_0, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5gh[1], o + SSB_SROM8_5GH_PA_1, ~0, 0);
++ SPEX(core_pwr_info[i].pa_5gh[2], o + SSB_SROM8_5GH_PA_2, ~0, 0);
++ }
++
++ SPEX(fem.ghz2.tssipos, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_TSSIPOS,
++ SSB_SROM8_FEM_TSSIPOS_SHIFT);
++ SPEX(fem.ghz2.extpa_gain, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_EXTPA_GAIN,
++ SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
++ SPEX(fem.ghz2.pdet_range, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_PDET_RANGE,
++ SSB_SROM8_FEM_PDET_RANGE_SHIFT);
++ SPEX(fem.ghz2.tr_iso, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_TR_ISO,
++ SSB_SROM8_FEM_TR_ISO_SHIFT);
++ SPEX(fem.ghz2.antswlut, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_ANTSWLUT,
++ SSB_SROM8_FEM_ANTSWLUT_SHIFT);
++
++ SPEX(fem.ghz5.tssipos, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_TSSIPOS,
++ SSB_SROM8_FEM_TSSIPOS_SHIFT);
++ SPEX(fem.ghz5.extpa_gain, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_EXTPA_GAIN,
++ SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
++ SPEX(fem.ghz5.pdet_range, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_PDET_RANGE,
++ SSB_SROM8_FEM_PDET_RANGE_SHIFT);
++ SPEX(fem.ghz5.tr_iso, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_TR_ISO,
++ SSB_SROM8_FEM_TR_ISO_SHIFT);
++ SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_ANTSWLUT,
++ SSB_SROM8_FEM_ANTSWLUT_SHIFT);
++
++ SPEX(ant_available_a, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_A,
++ SSB_SPROM8_ANTAVAIL_A_SHIFT);
++ SPEX(ant_available_bg, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_BG,
++ SSB_SPROM8_ANTAVAIL_BG_SHIFT);
++ SPEX(maxpwr_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_MAXP_BG_MASK, 0);
++ SPEX(itssi_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_ITSSI_BG,
++ SSB_SPROM8_ITSSI_BG_SHIFT);
++ SPEX(maxpwr_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_MAXP_A_MASK, 0);
++ SPEX(itssi_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_ITSSI_A,
++ SSB_SPROM8_ITSSI_A_SHIFT);
++ SPEX(maxpwr_ah, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AH_MASK, 0);
++ SPEX(maxpwr_al, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AL_MASK,
++ SSB_SPROM8_MAXP_AL_SHIFT);
++ SPEX(gpio0, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P0, 0);
++ SPEX(gpio1, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P1,
++ SSB_SPROM8_GPIOA_P1_SHIFT);
++ SPEX(gpio2, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P2, 0);
++ SPEX(gpio3, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P3,
++ SSB_SPROM8_GPIOB_P3_SHIFT);
++ SPEX(tri2g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI2G, 0);
++ SPEX(tri5g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI5G,
++ SSB_SPROM8_TRI5G_SHIFT);
++ SPEX(tri5gl, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GL, 0);
++ SPEX(tri5gh, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GH,
++ SSB_SPROM8_TRI5GH_SHIFT);
++ SPEX(rxpo2g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO2G,
++ SSB_SPROM8_RXPO2G_SHIFT);
++ SPEX(rxpo5g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO5G,
++ SSB_SPROM8_RXPO5G_SHIFT);
++ SPEX(rssismf2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMF2G, 0);
++ SPEX(rssismc2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMC2G,
++ SSB_SPROM8_RSSISMC2G_SHIFT);
++ SPEX(rssisav2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISAV2G,
++ SSB_SPROM8_RSSISAV2G_SHIFT);
++ SPEX(bxa2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_BXA2G,
++ SSB_SPROM8_BXA2G_SHIFT);
++ SPEX(rssismf5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMF5G, 0);
++ SPEX(rssismc5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMC5G,
++ SSB_SPROM8_RSSISMC5G_SHIFT);
++ SPEX(rssisav5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISAV5G,
++ SSB_SPROM8_RSSISAV5G_SHIFT);
++ SPEX(bxa5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_BXA5G,
++ SSB_SPROM8_BXA5G_SHIFT);
++
++ SPEX(pa0b0, SSB_SPROM8_PA0B0, ~0, 0);
++ SPEX(pa0b1, SSB_SPROM8_PA0B1, ~0, 0);
++ SPEX(pa0b2, SSB_SPROM8_PA0B2, ~0, 0);
++ SPEX(pa1b0, SSB_SPROM8_PA1B0, ~0, 0);
++ SPEX(pa1b1, SSB_SPROM8_PA1B1, ~0, 0);
++ SPEX(pa1b2, SSB_SPROM8_PA1B2, ~0, 0);
++ SPEX(pa1lob0, SSB_SPROM8_PA1LOB0, ~0, 0);
++ SPEX(pa1lob1, SSB_SPROM8_PA1LOB1, ~0, 0);
++ SPEX(pa1lob2, SSB_SPROM8_PA1LOB2, ~0, 0);
++ SPEX(pa1hib0, SSB_SPROM8_PA1HIB0, ~0, 0);
++ SPEX(pa1hib1, SSB_SPROM8_PA1HIB1, ~0, 0);
++ SPEX(pa1hib2, SSB_SPROM8_PA1HIB2, ~0, 0);
++ SPEX(cck2gpo, SSB_SPROM8_CCK2GPO, ~0, 0);
++ SPEX32(ofdm2gpo, SSB_SPROM8_OFDM2GPO, ~0, 0);
++ SPEX32(ofdm5glpo, SSB_SPROM8_OFDM5GLPO, ~0, 0);
++ SPEX32(ofdm5gpo, SSB_SPROM8_OFDM5GPO, ~0, 0);
++ SPEX32(ofdm5ghpo, SSB_SPROM8_OFDM5GHPO, ~0, 0);
++
++ /* Extract the antenna gain values. */
++ SPEX(antenna_gain.a0, SSB_SPROM8_AGAIN01,
++ SSB_SPROM8_AGAIN0, SSB_SPROM8_AGAIN0_SHIFT);
++ SPEX(antenna_gain.a1, SSB_SPROM8_AGAIN01,
++ SSB_SPROM8_AGAIN1, SSB_SPROM8_AGAIN1_SHIFT);
++ SPEX(antenna_gain.a2, SSB_SPROM8_AGAIN23,
++ SSB_SPROM8_AGAIN2, SSB_SPROM8_AGAIN2_SHIFT);
++ SPEX(antenna_gain.a3, SSB_SPROM8_AGAIN23,
++ SSB_SPROM8_AGAIN3, SSB_SPROM8_AGAIN3_SHIFT);
++
++ SPEX(leddc_on_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_ON,
++ SSB_SPROM8_LEDDC_ON_SHIFT);
++ SPEX(leddc_off_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_OFF,
++ SSB_SPROM8_LEDDC_OFF_SHIFT);
++
++ SPEX(txchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_TXCHAIN,
++ SSB_SPROM8_TXRXC_TXCHAIN_SHIFT);
++ SPEX(rxchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_RXCHAIN,
++ SSB_SPROM8_TXRXC_RXCHAIN_SHIFT);
++ SPEX(antswitch, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_SWITCH,
++ SSB_SPROM8_TXRXC_SWITCH_SHIFT);
++
++ SPEX(opo, SSB_SPROM8_OFDM2GPO, 0x00ff, 0);
++
++ SPEX_ARRAY8(mcs2gpo, SSB_SPROM8_2G_MCSPO, ~0, 0);
++ SPEX_ARRAY8(mcs5gpo, SSB_SPROM8_5G_MCSPO, ~0, 0);
++ SPEX_ARRAY8(mcs5glpo, SSB_SPROM8_5GL_MCSPO, ~0, 0);
++ SPEX_ARRAY8(mcs5ghpo, SSB_SPROM8_5GH_MCSPO, ~0, 0);
++
++ SPEX(rawtempsense, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_RAWTEMP,
++ SSB_SPROM8_RAWTS_RAWTEMP_SHIFT);
++ SPEX(measpower, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_MEASPOWER,
++ SSB_SPROM8_RAWTS_MEASPOWER_SHIFT);
++ SPEX(tempsense_slope, SSB_SPROM8_OPT_CORRX,
++ SSB_SPROM8_OPT_CORRX_TEMP_SLOPE,
++ SSB_SPROM8_OPT_CORRX_TEMP_SLOPE_SHIFT);
++ SPEX(tempcorrx, SSB_SPROM8_OPT_CORRX, SSB_SPROM8_OPT_CORRX_TEMPCORRX,
++ SSB_SPROM8_OPT_CORRX_TEMPCORRX_SHIFT);
++ SPEX(tempsense_option, SSB_SPROM8_OPT_CORRX,
++ SSB_SPROM8_OPT_CORRX_TEMP_OPTION,
++ SSB_SPROM8_OPT_CORRX_TEMP_OPTION_SHIFT);
++ SPEX(freqoffset_corr, SSB_SPROM8_HWIQ_IQSWP,
++ SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR,
++ SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR_SHIFT);
++ SPEX(iqcal_swp_dis, SSB_SPROM8_HWIQ_IQSWP,
++ SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP,
++ SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT);
++ SPEX(hw_iqcal_en, SSB_SPROM8_HWIQ_IQSWP, SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL,
++ SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT);
++
++ SPEX(bw40po, SSB_SPROM8_BW40PO, ~0, 0);
++ SPEX(cddpo, SSB_SPROM8_CDDPO, ~0, 0);
++ SPEX(stbcpo, SSB_SPROM8_STBCPO, ~0, 0);
++ SPEX(bwduppo, SSB_SPROM8_BWDUPPO, ~0, 0);
++
++ SPEX(tempthresh, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_TRESH,
++ SSB_SPROM8_THERMAL_TRESH_SHIFT);
++ SPEX(tempoffset, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_OFFSET,
++ SSB_SPROM8_THERMAL_OFFSET_SHIFT);
++ SPEX(phycal_tempdelta, SSB_SPROM8_TEMPDELTA,
++ SSB_SPROM8_TEMPDELTA_PHYCAL,
++ SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT);
++ SPEX(temps_period, SSB_SPROM8_TEMPDELTA, SSB_SPROM8_TEMPDELTA_PERIOD,
++ SSB_SPROM8_TEMPDELTA_PERIOD_SHIFT);
++ SPEX(temps_hysteresis, SSB_SPROM8_TEMPDELTA,
++ SSB_SPROM8_TEMPDELTA_HYSTERESIS,
++ SSB_SPROM8_TEMPDELTA_HYSTERESIS_SHIFT);
++}
++
++/*
++ * Indicates the presence of external SPROM.
++ */
++static bool bcma_sprom_ext_available(struct bcma_bus *bus)
++{
++ u32 chip_status;
++ u32 srom_control;
++ u32 present_mask;
++
++ if (bus->drv_cc.core->id.rev >= 31) {
++ if (!(bus->drv_cc.capabilities & BCMA_CC_CAP_SPROM))
++ return false;
++
++ srom_control = bcma_read32(bus->drv_cc.core,
++ BCMA_CC_SROM_CONTROL);
++ return srom_control & BCMA_CC_SROM_CONTROL_PRESENT;
++ }
++
++ /* older chipcommon revisions use chip status register */
++ chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
++ switch (bus->chipinfo.id) {
++ case BCMA_CHIP_ID_BCM4313:
++ present_mask = BCMA_CC_CHIPST_4313_SPROM_PRESENT;
++ break;
++
++ case BCMA_CHIP_ID_BCM4331:
++ present_mask = BCMA_CC_CHIPST_4331_SPROM_PRESENT;
++ break;
++
++ default:
++ return true;
++ }
++
++ return chip_status & present_mask;
++}
++
++/*
++ * Indicates that on-chip OTP memory is present and enabled.
++ */
++static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
++{
++ u32 chip_status;
++ u32 otpsize = 0;
++ bool present;
++
++ chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
++ switch (bus->chipinfo.id) {
++ case BCMA_CHIP_ID_BCM4313:
++ present = chip_status & BCMA_CC_CHIPST_4313_OTP_PRESENT;
++ break;
++
++ case BCMA_CHIP_ID_BCM4331:
++ present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT;
++ break;
++
++ case BCMA_CHIP_ID_BCM43224:
++ case BCMA_CHIP_ID_BCM43225:
++ /* for these chips OTP is always available */
++ present = true;
++ break;
++
++ default:
++ present = false;
++ break;
++ }
++
++ if (present) {
++ otpsize = bus->drv_cc.capabilities & BCMA_CC_CAP_OTPS;
++ otpsize >>= BCMA_CC_CAP_OTPS_SHIFT;
++ }
++
++ return otpsize != 0;
++}
++
++/*
++ * Verify OTP is filled and determine the byte
++ * offset where SPROM data is located.
++ *
++ * On error, returns 0; byte offset otherwise.
++ */
++static int bcma_sprom_onchip_offset(struct bcma_bus *bus)
++{
++ struct bcma_device *cc = bus->drv_cc.core;
++ u32 offset;
++
++ /* verify OTP status */
++ if ((bcma_read32(cc, BCMA_CC_OTPS) & BCMA_CC_OTPS_GU_PROG_HW) == 0)
++ return 0;
++
++ /* obtain bit offset from otplayout register */
++ offset = (bcma_read32(cc, BCMA_CC_OTPL) & BCMA_CC_OTPL_GURGN_OFFSET);
++ return BCMA_CC_SPROM + (offset >> 3);
+ }
+
+ int bcma_sprom_get(struct bcma_bus *bus)
+ {
+- u16 offset;
++ u16 offset = BCMA_CC_SPROM;
+ u16 *sprom;
+ int err = 0;
+
+ if (!bus->drv_cc.core)
+ return -EOPNOTSUPP;
+
+- if (!(bus->drv_cc.capabilities & BCMA_CC_CAP_SPROM))
+- return -ENOENT;
++ if (!bcma_sprom_ext_available(bus)) {
++ bool sprom_onchip;
++
++ /*
++ * External SPROM takes precedence so check
++ * on-chip OTP only when no external SPROM
++ * is present.
++ */
++ sprom_onchip = bcma_sprom_onchip_available(bus);
++ if (sprom_onchip) {
++ /* determine offset */
++ offset = bcma_sprom_onchip_offset(bus);
++ }
++ if (!offset || !sprom_onchip) {
++ /*
++ * Maybe there is no SPROM on the device?
++ * Now we ask the arch code if there is some sprom
++ * available for this device in some other storage.
++ */
++ err = bcma_fill_sprom_with_fallback(bus, &bus->sprom);
++ return err;
++ }
++ }
+
+ sprom = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16),
+ GFP_KERNEL);
+ if (!sprom)
+ return -ENOMEM;
+
+- if (bus->chipinfo.id == 0x4331)
++ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
++ bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
+ bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
+
+- /* Most cards have SPROM moved by additional offset 0x30 (48 dwords).
+- * According to brcm80211 this applies to cards with PCIe rev >= 6
+- * TODO: understand this condition and use it */
+- offset = (bus->chipinfo.id == 0x4331) ? BCMA_CC_SPROM :
+- BCMA_CC_SPROM_PCIE6;
++ bcma_debug(bus, "SPROM offset 0x%x\n", offset);
+ bcma_sprom_read(bus, offset, sprom);
+
+- if (bus->chipinfo.id == 0x4331)
++ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
++ bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
+ bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
+
+ err = bcma_sprom_valid(sprom);
+--- a/include/linux/bcma/bcma.h
++++ b/include/linux/bcma/bcma.h
+@@ -7,6 +7,7 @@
+ #include <linux/bcma/bcma_driver_chipcommon.h>
+ #include <linux/bcma/bcma_driver_pci.h>
+ #include <linux/bcma/bcma_driver_mips.h>
++#include <linux/bcma/bcma_driver_gmac_cmn.h>
+ #include <linux/ssb/ssb.h> /* SPROM sharing */
+
+ #include "bcma_regs.h"
+@@ -26,6 +27,11 @@ struct bcma_chipinfo {
+ u8 pkg;
+ };
+
++struct bcma_boardinfo {
++ u16 vendor;
++ u16 type;
++};
++
+ enum bcma_clkmode {
+ BCMA_CLKMODE_FAST,
+ BCMA_CLKMODE_DYNAMIC,
+@@ -65,6 +71,13 @@ struct bcma_host_ops {
+
+ /* Core-ID values. */
+ #define BCMA_CORE_OOB_ROUTER 0x367 /* Out of band */
++#define BCMA_CORE_4706_CHIPCOMMON 0x500
++#define BCMA_CORE_4706_SOC_RAM 0x50E
++#define BCMA_CORE_4706_MAC_GBIT 0x52D
++#define BCMA_CORE_AMEMC 0x52E /* DDR1/2 memory controller core */
++#define BCMA_CORE_ALTA 0x534 /* I2S core */
++#define BCMA_CORE_4706_MAC_GBIT_COMMON 0x5DC
++#define BCMA_CORE_DDR23_PHY 0x5DD
+ #define BCMA_CORE_INVALID 0x700
+ #define BCMA_CORE_CHIPCOMMON 0x800
+ #define BCMA_CORE_ILINE20 0x801
+@@ -125,6 +138,36 @@ struct bcma_host_ops {
+
+ #define BCMA_MAX_NR_CORES 16
+
++/* Chip IDs of PCIe devices */
++#define BCMA_CHIP_ID_BCM4313 0x4313
++#define BCMA_CHIP_ID_BCM43224 43224
++#define BCMA_PKG_ID_BCM43224_FAB_CSM 0x8
++#define BCMA_PKG_ID_BCM43224_FAB_SMIC 0xa
++#define BCMA_CHIP_ID_BCM43225 43225
++#define BCMA_CHIP_ID_BCM43227 43227
++#define BCMA_CHIP_ID_BCM43228 43228
++#define BCMA_CHIP_ID_BCM43421 43421
++#define BCMA_CHIP_ID_BCM43428 43428
++#define BCMA_CHIP_ID_BCM43431 43431
++#define BCMA_CHIP_ID_BCM43460 43460
++#define BCMA_CHIP_ID_BCM4331 0x4331
++#define BCMA_CHIP_ID_BCM6362 0x6362
++#define BCMA_CHIP_ID_BCM4360 0x4360
++#define BCMA_CHIP_ID_BCM4352 0x4352
++
++/* Chip IDs of SoCs */
++#define BCMA_CHIP_ID_BCM4706 0x5300
++#define BCMA_CHIP_ID_BCM4716 0x4716
++#define BCMA_PKG_ID_BCM4716 8
++#define BCMA_PKG_ID_BCM4717 9
++#define BCMA_PKG_ID_BCM4718 10
++#define BCMA_CHIP_ID_BCM47162 47162
++#define BCMA_CHIP_ID_BCM4748 0x4748
++#define BCMA_CHIP_ID_BCM4749 0x4749
++#define BCMA_CHIP_ID_BCM5356 0x5356
++#define BCMA_CHIP_ID_BCM5357 0x5357
++#define BCMA_CHIP_ID_BCM53572 53572
++
+ struct bcma_device {
+ struct bcma_bus *bus;
+ struct bcma_device_id id;
+@@ -136,8 +179,10 @@ struct bcma_device {
+ bool dev_registered;
+
+ u8 core_index;
++ u8 core_unit;
+
+ u32 addr;
++ u32 addr1;
+ u32 wrap;
+
+ void __iomem *io_addr;
+@@ -175,6 +220,12 @@ int __bcma_driver_register(struct bcma_d
+
+ extern void bcma_driver_unregister(struct bcma_driver *drv);
+
++/* Set a fallback SPROM.
++ * See kdoc at the function definition for complete documentation. */
++extern int bcma_arch_register_fallback_sprom(
++ int (*sprom_callback)(struct bcma_bus *bus,
++ struct ssb_sprom *out));
++
+ struct bcma_bus {
+ /* The MMIO area. */
+ void __iomem *mmio;
+@@ -191,14 +242,18 @@ struct bcma_bus {
+
+ struct bcma_chipinfo chipinfo;
+
++ struct bcma_boardinfo boardinfo;
++
+ struct bcma_device *mapped_core;
+ struct list_head cores;
+ u8 nr_cores;
+ u8 init_done:1;
++ u8 num;
+
+ struct bcma_drv_cc drv_cc;
+ struct bcma_drv_pci drv_pci;
+ struct bcma_drv_mips drv_mips;
++ struct bcma_drv_gmac_cmn drv_gmac_cmn;
+
+ /* We decided to share SPROM struct with SSB as long as we do not need
+ * any hacks for BCMA. This simplifies drivers code. */
+@@ -282,6 +337,7 @@ static inline void bcma_maskset16(struct
+ bcma_write16(cc, offset, (bcma_read16(cc, offset) & mask) | set);
+ }
+
++extern struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid);
+ extern bool bcma_core_is_enabled(struct bcma_device *core);
+ extern void bcma_core_disable(struct bcma_device *core, u32 flags);
+ extern int bcma_core_enable(struct bcma_device *core, u32 flags);
+--- a/include/linux/bcma/bcma_driver_chipcommon.h
++++ b/include/linux/bcma/bcma_driver_chipcommon.h
+@@ -56,6 +56,9 @@
+ #define BCMA_CC_OTPS_HW_PROTECT 0x00000001
+ #define BCMA_CC_OTPS_SW_PROTECT 0x00000002
+ #define BCMA_CC_OTPS_CID_PROTECT 0x00000004
++#define BCMA_CC_OTPS_GU_PROG_IND 0x00000F00 /* General Use programmed indication */
++#define BCMA_CC_OTPS_GU_PROG_IND_SHIFT 8
++#define BCMA_CC_OTPS_GU_PROG_HW 0x00000100 /* HW region programmed */
+ #define BCMA_CC_OTPC 0x0014 /* OTP control */
+ #define BCMA_CC_OTPC_RECWAIT 0xFF000000
+ #define BCMA_CC_OTPC_PROGWAIT 0x00FFFF00
+@@ -72,6 +75,8 @@
+ #define BCMA_CC_OTPP_READ 0x40000000
+ #define BCMA_CC_OTPP_START 0x80000000
+ #define BCMA_CC_OTPP_BUSY 0x80000000
++#define BCMA_CC_OTPL 0x001C /* OTP layout */
++#define BCMA_CC_OTPL_GURGN_OFFSET 0x00000FFF /* offset of general use region */
+ #define BCMA_CC_IRQSTAT 0x0020
+ #define BCMA_CC_IRQMASK 0x0024
+ #define BCMA_CC_IRQ_GPIO 0x00000001 /* gpio intr */
+@@ -79,6 +84,15 @@
+ #define BCMA_CC_IRQ_WDRESET 0x80000000 /* watchdog reset occurred */
+ #define BCMA_CC_CHIPCTL 0x0028 /* Rev >= 11 only */
+ #define BCMA_CC_CHIPSTAT 0x002C /* Rev >= 11 only */
++#define BCMA_CC_CHIPST_4313_SPROM_PRESENT 1
++#define BCMA_CC_CHIPST_4313_OTP_PRESENT 2
++#define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2
++#define BCMA_CC_CHIPST_4331_OTP_PRESENT 4
++#define BCMA_CC_CHIPST_4706_PKG_OPTION BIT(0) /* 0: full-featured package 1: low-cost package */
++#define BCMA_CC_CHIPST_4706_SFLASH_PRESENT BIT(1) /* 0: parallel, 1: serial flash is present */
++#define BCMA_CC_CHIPST_4706_SFLASH_TYPE BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmel-s flash */
++#define BCMA_CC_CHIPST_4706_MIPS_BENDIAN BIT(3) /* 0: little, 1: big endian */
++#define BCMA_CC_CHIPST_4706_PCIE1_DISABLE BIT(5) /* PCIE1 enable strap pin */
+ #define BCMA_CC_JCMD 0x0030 /* Rev >= 10 only */
+ #define BCMA_CC_JCMD_START 0x80000000
+ #define BCMA_CC_JCMD_BUSY 0x80000000
+@@ -181,6 +195,22 @@
+ #define BCMA_CC_FLASH_CFG 0x0128
+ #define BCMA_CC_FLASH_CFG_DS 0x0010 /* Data size, 0=8bit, 1=16bit */
+ #define BCMA_CC_FLASH_WAITCNT 0x012C
++#define BCMA_CC_SROM_CONTROL 0x0190
++#define BCMA_CC_SROM_CONTROL_START 0x80000000
++#define BCMA_CC_SROM_CONTROL_BUSY 0x80000000
++#define BCMA_CC_SROM_CONTROL_OPCODE 0x60000000
++#define BCMA_CC_SROM_CONTROL_OP_READ 0x00000000
++#define BCMA_CC_SROM_CONTROL_OP_WRITE 0x20000000
++#define BCMA_CC_SROM_CONTROL_OP_WRDIS 0x40000000
++#define BCMA_CC_SROM_CONTROL_OP_WREN 0x60000000
++#define BCMA_CC_SROM_CONTROL_OTPSEL 0x00000010
++#define BCMA_CC_SROM_CONTROL_LOCK 0x00000008
++#define BCMA_CC_SROM_CONTROL_SIZE_MASK 0x00000006
++#define BCMA_CC_SROM_CONTROL_SIZE_1K 0x00000000
++#define BCMA_CC_SROM_CONTROL_SIZE_4K 0x00000002
++#define BCMA_CC_SROM_CONTROL_SIZE_16K 0x00000004
++#define BCMA_CC_SROM_CONTROL_SIZE_SHIFT 1
++#define BCMA_CC_SROM_CONTROL_PRESENT 0x00000001
+ /* 0x1E0 is defined as shared BCMA_CLKCTLST */
+ #define BCMA_CC_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */
+ #define BCMA_CC_UART0_DATA 0x0300
+@@ -240,7 +270,6 @@
+ #define BCMA_CC_PLLCTL_ADDR 0x0660
+ #define BCMA_CC_PLLCTL_DATA 0x0664
+ #define BCMA_CC_SPROM 0x0800 /* SPROM beginning */
+-#define BCMA_CC_SPROM_PCIE6 0x0830 /* SPROM beginning on PCIe rev >= 6 */
+
+ /* Divider allocation in 4716/47162/5356 */
+ #define BCMA_CC_PMU5_MAINPLL_CPU 1
+@@ -256,6 +285,15 @@
+
+ /* 4706 PMU */
+ #define BCMA_CC_PMU4706_MAINPLL_PLL0 0
++#define BCMA_CC_PMU6_4706_PROCPLL_OFF 4 /* The CPU PLL */
++#define BCMA_CC_PMU6_4706_PROC_P2DIV_MASK 0x000f0000
++#define BCMA_CC_PMU6_4706_PROC_P2DIV_SHIFT 16
++#define BCMA_CC_PMU6_4706_PROC_P1DIV_MASK 0x0000f000
++#define BCMA_CC_PMU6_4706_PROC_P1DIV_SHIFT 12
++#define BCMA_CC_PMU6_4706_PROC_NDIV_INT_MASK 0x00000ff8
++#define BCMA_CC_PMU6_4706_PROC_NDIV_INT_SHIFT 3
++#define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_MASK 0x00000007
++#define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_SHIFT 0
+
+ /* ALP clock on pre-PMU chips */
+ #define BCMA_CC_PMU_ALP_CLOCK 20000000
+@@ -284,6 +322,19 @@
+ #define BCMA_CC_PPL_PCHI_OFF 5
+ #define BCMA_CC_PPL_PCHI_MASK 0x0000003f
+
++#define BCMA_CC_PMU_PLL_CTL0 0
++#define BCMA_CC_PMU_PLL_CTL1 1
++#define BCMA_CC_PMU_PLL_CTL2 2
++#define BCMA_CC_PMU_PLL_CTL3 3
++#define BCMA_CC_PMU_PLL_CTL4 4
++#define BCMA_CC_PMU_PLL_CTL5 5
++
++#define BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000
++#define BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT 20
++
++#define BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000
++#define BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT 20
++
+ /* BCM4331 ChipControl numbers. */
+ #define BCMA_CHIPCTL_4331_BT_COEXIST BIT(0) /* 0 disable */
+ #define BCMA_CHIPCTL_4331_SECI BIT(1) /* 0 SECI is disabled (JATG functional) */
+@@ -297,9 +348,18 @@
+ #define BCMA_CHIPCTL_4331_OVR_PIPEAUXPWRDOWN BIT(9) /* override core control on pipe_AuxPowerDown */
+ #define BCMA_CHIPCTL_4331_PCIE_AUXCLKEN BIT(10) /* pcie_auxclkenable */
+ #define BCMA_CHIPCTL_4331_PCIE_PIPE_PLLDOWN BIT(11) /* pcie_pipe_pllpowerdown */
++#define BCMA_CHIPCTL_4331_EXTPA_EN2 BIT(12) /* 0 ext pa disable, 1 ext pa enabled */
+ #define BCMA_CHIPCTL_4331_BT_SHD0_ON_GPIO4 BIT(16) /* enable bt_shd0 at gpio4 */
+ #define BCMA_CHIPCTL_4331_BT_SHD1_ON_GPIO5 BIT(17) /* enable bt_shd1 at gpio5 */
+
++/* 43224 chip-specific ChipControl register bits */
++#define BCMA_CCTRL_43224_GPIO_TOGGLE 0x8000 /* gpio[3:0] pins as btcoex or s/w gpio */
++#define BCMA_CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0 /* 12 mA drive strength */
++#define BCMA_CCTRL_43224B0_12MA_LED_DRIVE 0xF0 /* 12 mA drive strength for later 43224s */
++
++/* 4313 Chip specific ChipControl register bits */
++#define BCMA_CCTRL_4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strength for later 4313 */
++
+ /* Data for the PMU, if available.
+ * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU)
+ */
+@@ -387,5 +447,6 @@ extern void bcma_chipco_chipctl_maskset(
+ u32 offset, u32 mask, u32 set);
+ extern void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc,
+ u32 offset, u32 mask, u32 set);
++extern void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid);
+
+ #endif /* LINUX_BCMA_DRIVER_CC_H_ */
+--- /dev/null
++++ b/include/linux/bcma/bcma_driver_gmac_cmn.h
+@@ -0,0 +1,100 @@
++#ifndef LINUX_BCMA_DRIVER_GMAC_CMN_H_
++#define LINUX_BCMA_DRIVER_GMAC_CMN_H_
++
++#include <linux/types.h>
++
++#define BCMA_GMAC_CMN_STAG0 0x000
++#define BCMA_GMAC_CMN_STAG1 0x004
++#define BCMA_GMAC_CMN_STAG2 0x008
++#define BCMA_GMAC_CMN_STAG3 0x00C
++#define BCMA_GMAC_CMN_PARSER_CTL 0x020
++#define BCMA_GMAC_CMN_MIB_MAX_LEN 0x024
++#define BCMA_GMAC_CMN_PHY_ACCESS 0x100
++#define BCMA_GMAC_CMN_PA_DATA_MASK 0x0000ffff
++#define BCMA_GMAC_CMN_PA_ADDR_MASK 0x001f0000
++#define BCMA_GMAC_CMN_PA_ADDR_SHIFT 16
++#define BCMA_GMAC_CMN_PA_REG_MASK 0x1f000000
++#define BCMA_GMAC_CMN_PA_REG_SHIFT 24
++#define BCMA_GMAC_CMN_PA_WRITE 0x20000000
++#define BCMA_GMAC_CMN_PA_START 0x40000000
++#define BCMA_GMAC_CMN_PHY_CTL 0x104
++#define BCMA_GMAC_CMN_PC_EPA_MASK 0x0000001f
++#define BCMA_GMAC_CMN_PC_MCT_MASK 0x007f0000
++#define BCMA_GMAC_CMN_PC_MCT_SHIFT 16
++#define BCMA_GMAC_CMN_PC_MTE 0x00800000
++#define BCMA_GMAC_CMN_GMAC0_RGMII_CTL 0x110
++#define BCMA_GMAC_CMN_CFP_ACCESS 0x200
++#define BCMA_GMAC_CMN_CFP_TCAM_DATA0 0x210
++#define BCMA_GMAC_CMN_CFP_TCAM_DATA1 0x214
++#define BCMA_GMAC_CMN_CFP_TCAM_DATA2 0x218
++#define BCMA_GMAC_CMN_CFP_TCAM_DATA3 0x21C
++#define BCMA_GMAC_CMN_CFP_TCAM_DATA4 0x220
++#define BCMA_GMAC_CMN_CFP_TCAM_DATA5 0x224
++#define BCMA_GMAC_CMN_CFP_TCAM_DATA6 0x228
++#define BCMA_GMAC_CMN_CFP_TCAM_DATA7 0x22C
++#define BCMA_GMAC_CMN_CFP_TCAM_MASK0 0x230
++#define BCMA_GMAC_CMN_CFP_TCAM_MASK1 0x234
++#define BCMA_GMAC_CMN_CFP_TCAM_MASK2 0x238
++#define BCMA_GMAC_CMN_CFP_TCAM_MASK3 0x23C
++#define BCMA_GMAC_CMN_CFP_TCAM_MASK4 0x240
++#define BCMA_GMAC_CMN_CFP_TCAM_MASK5 0x244
++#define BCMA_GMAC_CMN_CFP_TCAM_MASK6 0x248
++#define BCMA_GMAC_CMN_CFP_TCAM_MASK7 0x24C
++#define BCMA_GMAC_CMN_CFP_ACTION_DATA 0x250
++#define BCMA_GMAC_CMN_TCAM_BIST_CTL 0x2A0
++#define BCMA_GMAC_CMN_TCAM_BIST_STATUS 0x2A4
++#define BCMA_GMAC_CMN_TCAM_CMP_STATUS 0x2A8
++#define BCMA_GMAC_CMN_TCAM_DISABLE 0x2AC
++#define BCMA_GMAC_CMN_TCAM_TEST_CTL 0x2F0
++#define BCMA_GMAC_CMN_UDF_0_A3_A0 0x300
++#define BCMA_GMAC_CMN_UDF_0_A7_A4 0x304
++#define BCMA_GMAC_CMN_UDF_0_A8 0x308
++#define BCMA_GMAC_CMN_UDF_1_A3_A0 0x310
++#define BCMA_GMAC_CMN_UDF_1_A7_A4 0x314
++#define BCMA_GMAC_CMN_UDF_1_A8 0x318
++#define BCMA_GMAC_CMN_UDF_2_A3_A0 0x320
++#define BCMA_GMAC_CMN_UDF_2_A7_A4 0x324
++#define BCMA_GMAC_CMN_UDF_2_A8 0x328
++#define BCMA_GMAC_CMN_UDF_0_B3_B0 0x330
++#define BCMA_GMAC_CMN_UDF_0_B7_B4 0x334
++#define BCMA_GMAC_CMN_UDF_0_B8 0x338
++#define BCMA_GMAC_CMN_UDF_1_B3_B0 0x340
++#define BCMA_GMAC_CMN_UDF_1_B7_B4 0x344
++#define BCMA_GMAC_CMN_UDF_1_B8 0x348
++#define BCMA_GMAC_CMN_UDF_2_B3_B0 0x350
++#define BCMA_GMAC_CMN_UDF_2_B7_B4 0x354
++#define BCMA_GMAC_CMN_UDF_2_B8 0x358
++#define BCMA_GMAC_CMN_UDF_0_C3_C0 0x360
++#define BCMA_GMAC_CMN_UDF_0_C7_C4 0x364
++#define BCMA_GMAC_CMN_UDF_0_C8 0x368
++#define BCMA_GMAC_CMN_UDF_1_C3_C0 0x370
++#define BCMA_GMAC_CMN_UDF_1_C7_C4 0x374
++#define BCMA_GMAC_CMN_UDF_1_C8 0x378
++#define BCMA_GMAC_CMN_UDF_2_C3_C0 0x380
++#define BCMA_GMAC_CMN_UDF_2_C7_C4 0x384
++#define BCMA_GMAC_CMN_UDF_2_C8 0x388
++#define BCMA_GMAC_CMN_UDF_0_D3_D0 0x390
++#define BCMA_GMAC_CMN_UDF_0_D7_D4 0x394
++#define BCMA_GMAC_CMN_UDF_0_D11_D8 0x394
++
++struct bcma_drv_gmac_cmn {
++ struct bcma_device *core;
++
++ /* Drivers accessing BCMA_GMAC_CMN_PHY_ACCESS and
++ * BCMA_GMAC_CMN_PHY_CTL need to take that mutex first. */
++ struct mutex phy_mutex;
++};
++
++/* Register access */
++#define gmac_cmn_read16(gc, offset) bcma_read16((gc)->core, offset)
++#define gmac_cmn_read32(gc, offset) bcma_read32((gc)->core, offset)
++#define gmac_cmn_write16(gc, offset, val) bcma_write16((gc)->core, offset, val)
++#define gmac_cmn_write32(gc, offset, val) bcma_write32((gc)->core, offset, val)
++
++#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN
++extern void __devinit bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc);
++#else
++static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) { }
++#endif
++
++#endif /* LINUX_BCMA_DRIVER_GMAC_CMN_H_ */
+--- a/include/linux/bcma/bcma_driver_pci.h
++++ b/include/linux/bcma/bcma_driver_pci.h
+@@ -53,11 +53,47 @@ struct pci_dev;
+ #define BCMA_CORE_PCI_SBTOPCI1_MASK 0xFC000000
+ #define BCMA_CORE_PCI_SBTOPCI2 0x0108 /* Backplane to PCI translation 2 (sbtopci2) */
+ #define BCMA_CORE_PCI_SBTOPCI2_MASK 0xC0000000
++#define BCMA_CORE_PCI_CONFIG_ADDR 0x0120 /* pcie config space access */
++#define BCMA_CORE_PCI_CONFIG_DATA 0x0124 /* pcie config space access */
++#define BCMA_CORE_PCI_MDIO_CONTROL 0x0128 /* controls the mdio access */
++#define BCMA_CORE_PCI_MDIOCTL_DIVISOR_MASK 0x7f /* clock to be used on MDIO */
++#define BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL 0x2
++#define BCMA_CORE_PCI_MDIOCTL_PREAM_EN 0x80 /* Enable preamble sequence */
++#define BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE 0x100 /* Transaction complete */
++#define BCMA_CORE_PCI_MDIO_DATA 0x012c /* Data to the mdio access */
++#define BCMA_CORE_PCI_MDIODATA_MASK 0x0000ffff /* data 2 bytes */
++#define BCMA_CORE_PCI_MDIODATA_TA 0x00020000 /* Turnaround */
++#define BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift (rev < 10) */
++#define BCMA_CORE_PCI_MDIODATA_REGADDR_MASK_OLD 0x003c0000 /* Regaddr Mask (rev < 10) */
++#define BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift (rev < 10) */
++#define BCMA_CORE_PCI_MDIODATA_DEVADDR_MASK_OLD 0x0fc00000 /* Physmedia devaddr Mask (rev < 10) */
++#define BCMA_CORE_PCI_MDIODATA_REGADDR_SHF 18 /* Regaddr shift */
++#define BCMA_CORE_PCI_MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */
++#define BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */
++#define BCMA_CORE_PCI_MDIODATA_DEVADDR_MASK 0x0f800000 /* Physmedia devaddr Mask */
++#define BCMA_CORE_PCI_MDIODATA_WRITE 0x10000000 /* write Transaction */
++#define BCMA_CORE_PCI_MDIODATA_READ 0x20000000 /* Read Transaction */
++#define BCMA_CORE_PCI_MDIODATA_START 0x40000000 /* start of Transaction */
++#define BCMA_CORE_PCI_MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */
++#define BCMA_CORE_PCI_MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */
++#define BCMA_CORE_PCI_MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */
++#define BCMA_CORE_PCI_MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
++#define BCMA_CORE_PCI_MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */
++#define BCMA_CORE_PCI_PCIEIND_ADDR 0x0130 /* indirect access to the internal register */
++#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal register */
++#define BCMA_CORE_PCI_CLKREQENCTRL 0x0138 /* >= rev 6, Clkreq rdma control */
+ #define BCMA_CORE_PCI_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */
+ #define BCMA_CORE_PCI_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */
+ #define BCMA_CORE_PCI_PCICFG2 0x0600 /* PCI config space 2 (rev >= 8) */
+ #define BCMA_CORE_PCI_PCICFG3 0x0700 /* PCI config space 3 (rev >= 8) */
+ #define BCMA_CORE_PCI_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2)) /* SPROM shadow area (72 bytes) */
++#define BCMA_CORE_PCI_SPROM_PI_OFFSET 0 /* first word */
++#define BCMA_CORE_PCI_SPROM_PI_MASK 0xf000 /* bit 15:12 */
++#define BCMA_CORE_PCI_SPROM_PI_SHIFT 12 /* bit 15:12 */
++#define BCMA_CORE_PCI_SPROM_MISC_CONFIG 5 /* word 5 */
++#define BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST 0x8000 /* bit 15 */
++#define BCMA_CORE_PCI_SPROM_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */
++#define BCMA_CORE_PCI_SPROM_CLKREQ_ENB 0x0800 /* bit 11 */
+
+ /* SBtoPCIx */
+ #define BCMA_CORE_PCI_SBTOPCI_MEM 0x00000000
+@@ -72,20 +108,118 @@ struct pci_dev;
+ #define BCMA_CORE_PCI_SBTOPCI_RC_READL 0x00000010 /* Memory read line */
+ #define BCMA_CORE_PCI_SBTOPCI_RC_READM 0x00000020 /* Memory read multiple */
+
++/* PCIE protocol PHY diagnostic registers */
++#define BCMA_CORE_PCI_PLP_MODEREG 0x200 /* Mode */
++#define BCMA_CORE_PCI_PLP_STATUSREG 0x204 /* Status */
++#define BCMA_CORE_PCI_PLP_POLARITYINV_STAT 0x10 /* Status reg PCIE_PLP_STATUSREG */
++#define BCMA_CORE_PCI_PLP_LTSSMCTRLREG 0x208 /* LTSSM control */
++#define BCMA_CORE_PCI_PLP_LTLINKNUMREG 0x20c /* Link Training Link number */
++#define BCMA_CORE_PCI_PLP_LTLANENUMREG 0x210 /* Link Training Lane number */
++#define BCMA_CORE_PCI_PLP_LTNFTSREG 0x214 /* Link Training N_FTS */
++#define BCMA_CORE_PCI_PLP_ATTNREG 0x218 /* Attention */
++#define BCMA_CORE_PCI_PLP_ATTNMASKREG 0x21C /* Attention Mask */
++#define BCMA_CORE_PCI_PLP_RXERRCTR 0x220 /* Rx Error */
++#define BCMA_CORE_PCI_PLP_RXFRMERRCTR 0x224 /* Rx Framing Error */
++#define BCMA_CORE_PCI_PLP_RXERRTHRESHREG 0x228 /* Rx Error threshold */
++#define BCMA_CORE_PCI_PLP_TESTCTRLREG 0x22C /* Test Control reg */
++#define BCMA_CORE_PCI_PLP_SERDESCTRLOVRDREG 0x230 /* SERDES Control Override */
++#define BCMA_CORE_PCI_PLP_TIMINGOVRDREG 0x234 /* Timing param override */
++#define BCMA_CORE_PCI_PLP_RXTXSMDIAGREG 0x238 /* RXTX State Machine Diag */
++#define BCMA_CORE_PCI_PLP_LTSSMDIAGREG 0x23C /* LTSSM State Machine Diag */
++
++/* PCIE protocol DLLP diagnostic registers */
++#define BCMA_CORE_PCI_DLLP_LCREG 0x100 /* Link Control */
++#define BCMA_CORE_PCI_DLLP_LSREG 0x104 /* Link Status */
++#define BCMA_CORE_PCI_DLLP_LAREG 0x108 /* Link Attention */
++#define BCMA_CORE_PCI_DLLP_LSREG_LINKUP (1 << 16)
++#define BCMA_CORE_PCI_DLLP_LAMASKREG 0x10C /* Link Attention Mask */
++#define BCMA_CORE_PCI_DLLP_NEXTTXSEQNUMREG 0x110 /* Next Tx Seq Num */
++#define BCMA_CORE_PCI_DLLP_ACKEDTXSEQNUMREG 0x114 /* Acked Tx Seq Num */
++#define BCMA_CORE_PCI_DLLP_PURGEDTXSEQNUMREG 0x118 /* Purged Tx Seq Num */
++#define BCMA_CORE_PCI_DLLP_RXSEQNUMREG 0x11C /* Rx Sequence Number */
++#define BCMA_CORE_PCI_DLLP_LRREG 0x120 /* Link Replay */
++#define BCMA_CORE_PCI_DLLP_LACKTOREG 0x124 /* Link Ack Timeout */
++#define BCMA_CORE_PCI_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */
++#define BCMA_CORE_PCI_ASPMTIMER_EXTEND 0x01000000 /* > rev7: enable extend ASPM timer */
++#define BCMA_CORE_PCI_DLLP_RTRYWPREG 0x12C /* Retry buffer write ptr */
++#define BCMA_CORE_PCI_DLLP_RTRYRPREG 0x130 /* Retry buffer Read ptr */
++#define BCMA_CORE_PCI_DLLP_RTRYPPREG 0x134 /* Retry buffer Purged ptr */
++#define BCMA_CORE_PCI_DLLP_RTRRWREG 0x138 /* Retry buffer Read/Write */
++#define BCMA_CORE_PCI_DLLP_ECTHRESHREG 0x13C /* Error Count Threshold */
++#define BCMA_CORE_PCI_DLLP_TLPERRCTRREG 0x140 /* TLP Error Counter */
++#define BCMA_CORE_PCI_DLLP_ERRCTRREG 0x144 /* Error Counter */
++#define BCMA_CORE_PCI_DLLP_NAKRXCTRREG 0x148 /* NAK Received Counter */
++#define BCMA_CORE_PCI_DLLP_TESTREG 0x14C /* Test */
++#define BCMA_CORE_PCI_DLLP_PKTBIST 0x150 /* Packet BIST */
++#define BCMA_CORE_PCI_DLLP_PCIE11 0x154 /* DLLP PCIE 1.1 reg */
++
++/* SERDES RX registers */
++#define BCMA_CORE_PCI_SERDES_RX_CTRL 1 /* Rx cntrl */
++#define BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */
++#define BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */
++#define BCMA_CORE_PCI_SERDES_RX_TIMER1 2 /* Rx Timer1 */
++#define BCMA_CORE_PCI_SERDES_RX_CDR 6 /* CDR */
++#define BCMA_CORE_PCI_SERDES_RX_CDRBW 7 /* CDR BW */
++
++/* SERDES PLL registers */
++#define BCMA_CORE_PCI_SERDES_PLL_CTRL 1 /* PLL control reg */
++#define BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */
++
+ /* PCIcore specific boardflags */
+ #define BCMA_CORE_PCI_BFL_NOPCI 0x00000400 /* Board leaves PCI floating */
+
++/* PCIE Config space accessing MACROS */
++#define BCMA_CORE_PCI_CFG_BUS_SHIFT 24 /* Bus shift */
++#define BCMA_CORE_PCI_CFG_SLOT_SHIFT 19 /* Slot/Device shift */
++#define BCMA_CORE_PCI_CFG_FUN_SHIFT 16 /* Function shift */
++#define BCMA_CORE_PCI_CFG_OFF_SHIFT 0 /* Register shift */
++
++#define BCMA_CORE_PCI_CFG_BUS_MASK 0xff /* Bus mask */
++#define BCMA_CORE_PCI_CFG_SLOT_MASK 0x1f /* Slot/Device mask */
++#define BCMA_CORE_PCI_CFG_FUN_MASK 7 /* Function mask */
++#define BCMA_CORE_PCI_CFG_OFF_MASK 0xfff /* Register mask */
++
++/* PCIE Root Capability Register bits (Host mode only) */
++#define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001
++
++struct bcma_drv_pci;
++
++#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
++struct bcma_drv_pci_host {
++ struct bcma_drv_pci *pdev;
++
++ u32 host_cfg_addr;
++ spinlock_t cfgspace_lock;
++
++ struct pci_controller pci_controller;
++ struct pci_ops pci_ops;
++ struct resource mem_resource;
++ struct resource io_resource;
++};
++#endif
++
+ struct bcma_drv_pci {
+ struct bcma_device *core;
+ u8 setup_done:1;
++ u8 hostmode:1;
++
++#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
++ struct bcma_drv_pci_host *host_controller;
++#endif
+ };
+
+ /* Register access */
++#define pcicore_read16(pc, offset) bcma_read16((pc)->core, offset)
+ #define pcicore_read32(pc, offset) bcma_read32((pc)->core, offset)
++#define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val)
+ #define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val)
+
+-extern void bcma_core_pci_init(struct bcma_drv_pci *pc);
++extern void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc);
+ extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
+ struct bcma_device *core, bool enable);
++extern void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend);
++
++extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
++extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
+
+ #endif /* LINUX_BCMA_DRIVER_PCI_H_ */
+--- a/include/linux/bcma/bcma_regs.h
++++ b/include/linux/bcma/bcma_regs.h
+@@ -56,4 +56,31 @@
+ #define BCMA_PCI_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */
+ #define BCMA_PCI_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */
+
++/* SiliconBackplane Address Map.
++ * All regions may not exist on all chips.
++ */
++#define BCMA_SOC_SDRAM_BASE 0x00000000U /* Physical SDRAM */
++#define BCMA_SOC_PCI_MEM 0x08000000U /* Host Mode sb2pcitranslation0 (64 MB) */
++#define BCMA_SOC_PCI_MEM_SZ (64 * 1024 * 1024)
++#define BCMA_SOC_PCI_CFG 0x0c000000U /* Host Mode sb2pcitranslation1 (64 MB) */
++#define BCMA_SOC_SDRAM_SWAPPED 0x10000000U /* Byteswapped Physical SDRAM */
++#define BCMA_SOC_SDRAM_R2 0x80000000U /* Region 2 for sdram (512 MB) */
++
++
++#define BCMA_SOC_PCI_DMA 0x40000000U /* Client Mode sb2pcitranslation2 (1 GB) */
++#define BCMA_SOC_PCI_DMA2 0x80000000U /* Client Mode sb2pcitranslation2 (1 GB) */
++#define BCMA_SOC_PCI_DMA_SZ 0x40000000U /* Client Mode sb2pcitranslation2 size in bytes */
++#define BCMA_SOC_PCIE_DMA_L32 0x00000000U /* PCIE Client Mode sb2pcitranslation2
++ * (2 ZettaBytes), low 32 bits
++ */
++#define BCMA_SOC_PCIE_DMA_H32 0x80000000U /* PCIE Client Mode sb2pcitranslation2
++ * (2 ZettaBytes), high 32 bits
++ */
++
++#define BCMA_SOC_PCI1_MEM 0x40000000U /* Host Mode sb2pcitranslation0 (64 MB) */
++#define BCMA_SOC_PCI1_CFG 0x44000000U /* Host Mode sb2pcitranslation1 (64 MB) */
++#define BCMA_SOC_PCIE1_DMA_H32 0xc0000000U /* PCIE Client Mode sb2pcitranslation2
++ * (2 ZettaBytes), high 32 bits
++ */
++
+ #endif /* LINUX_BCMA_REGS_H_ */
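The hunks above teach bcma_sprom_get() to fall back from an external SPROM to
on-chip OTP, and finally to an arch-registered fallback. When OTP is used, the
SPROM words are read from the chipcommon SPROM window plus the general-use
region's bit offset converted to bytes. A minimal user-space sketch of that
offset arithmetic, reusing the register masks defined above (the otplayout
value below is purely hypothetical):

#include <stdio.h>

#define BCMA_CC_SPROM             0x0800     /* SPROM window start in chipcommon */
#define BCMA_CC_OTPL_GURGN_OFFSET 0x00000FFF /* bit offset of the general-use region */

int main(void)
{
	unsigned int otpl = 0x00000240;  /* hypothetical OTP layout register value */
	unsigned int bit_off = otpl & BCMA_CC_OTPL_GURGN_OFFSET;

	/* bcma_sprom_onchip_offset(): byte offset = window start + bit offset / 8 */
	printf("SPROM data starts at chipcommon offset 0x%x\n",
	       BCMA_CC_SPROM + (bit_off >> 3));
	return 0;
}

With the value above this prints 0x848, i.e. 0x48 bytes into the SPROM window,
which bcma_sprom_get() then passes to bcma_sprom_read() as the read offset.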
diff --git a/target/linux/generic/patches-3.3/026-bcma_pmu_regression.patch b/target/linux/generic/patches-3.3/026-bcma_pmu_regression.patch
new file mode 100644
index 000000000..35ca6b81e
--- /dev/null
+++ b/target/linux/generic/patches-3.3/026-bcma_pmu_regression.patch
@@ -0,0 +1,29 @@
+--- a/drivers/bcma/driver_chipcommon_pmu.c
++++ b/drivers/bcma/driver_chipcommon_pmu.c
+@@ -110,7 +110,7 @@ void bcma_pmu_workarounds(struct bcma_dr
+ /* enable 12 mA drive strenth for 4313 and set chipControl
+ register bit 1 */
+ bcma_chipco_chipctl_maskset(cc, 0,
+- BCMA_CCTRL_4313_12MA_LED_DRIVE,
++ ~BCMA_CCTRL_4313_12MA_LED_DRIVE,
+ BCMA_CCTRL_4313_12MA_LED_DRIVE);
+ break;
+ case BCMA_CHIP_ID_BCM4331:
+@@ -124,14 +124,14 @@ void bcma_pmu_workarounds(struct bcma_dr
+ register bit 15 */
+ if (bus->chipinfo.rev == 0) {
+ bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL,
+- BCMA_CCTRL_43224_GPIO_TOGGLE,
++ ~BCMA_CCTRL_43224_GPIO_TOGGLE,
+ BCMA_CCTRL_43224_GPIO_TOGGLE);
+ bcma_chipco_chipctl_maskset(cc, 0,
+- BCMA_CCTRL_43224A0_12MA_LED_DRIVE,
++ ~BCMA_CCTRL_43224A0_12MA_LED_DRIVE,
+ BCMA_CCTRL_43224A0_12MA_LED_DRIVE);
+ } else {
+ bcma_chipco_chipctl_maskset(cc, 0,
+- BCMA_CCTRL_43224B0_12MA_LED_DRIVE,
++ ~BCMA_CCTRL_43224B0_12MA_LED_DRIVE,
+ BCMA_CCTRL_43224B0_12MA_LED_DRIVE);
+ }
+ break;
diff --git a/target/linux/generic/patches-3.3/027-bcma-add-missing-iounmap-on-error-path.patch b/target/linux/generic/patches-3.3/027-bcma-add-missing-iounmap-on-error-path.patch
new file mode 100644
index 000000000..dc8367b6c
--- /dev/null
+++ b/target/linux/generic/patches-3.3/027-bcma-add-missing-iounmap-on-error-path.patch
@@ -0,0 +1,55 @@
+--- a/drivers/bcma/scan.c
++++ b/drivers/bcma/scan.c
+@@ -462,8 +462,10 @@ int bcma_bus_scan(struct bcma_bus *bus)
+ while (eromptr < eromend) {
+ struct bcma_device *other_core;
+ struct bcma_device *core = kzalloc(sizeof(*core), GFP_KERNEL);
+- if (!core)
+- return -ENOMEM;
++ if (!core) {
++ err = -ENOMEM;
++ goto out;
++ }
+ INIT_LIST_HEAD(&core->list);
+ core->bus = bus;
+
+@@ -478,7 +480,7 @@ int bcma_bus_scan(struct bcma_bus *bus)
+ } else if (err == -ESPIPE) {
+ break;
+ }
+- return err;
++ goto out;
+ }
+
+ core->core_index = core_num++;
+@@ -494,10 +496,12 @@ int bcma_bus_scan(struct bcma_bus *bus)
+ list_add_tail(&core->list, &bus->cores);
+ }
+
++ err = 0;
++out:
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+ iounmap(eromptr);
+
+- return 0;
++ return err;
+ }
+
+ int __init bcma_bus_scan_early(struct bcma_bus *bus,
+@@ -537,7 +541,7 @@ int __init bcma_bus_scan_early(struct bc
+ else if (err == -ESPIPE)
+ break;
+ else if (err < 0)
+- return err;
++ goto out;
+
+ core->core_index = core_num++;
+ bus->nr_cores++;
+@@ -551,6 +555,7 @@ int __init bcma_bus_scan_early(struct bc
+ break;
+ }
+
++out:
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+ iounmap(eromptr);
+
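The change above funnels every failure path in bcma_bus_scan() through a
single "out" label so the ioremap()'d EROM pointer is always unmapped on SoC
buses instead of leaking on early returns. A generic user-space sketch of the
same single-exit cleanup idiom (the names and the malloc()/free() stand-ins
are placeholders, not bcma code):

#include <stdlib.h>

static int scan(int fail_at)
{
	void *mapped = malloc(64);   /* stand-in for ioremap() */
	int err = 0;
	int i;

	if (!mapped)
		return -12;          /* -ENOMEM, nothing mapped yet */

	for (i = 0; i < 4; i++) {
		if (i == fail_at) {
			err = -12;   /* -ENOMEM */
			goto out;    /* do not leak 'mapped' */
		}
	}
out:
	free(mapped);                /* counterpart of iounmap() */
	return err;
}

int main(void)
{
	return scan(2) == -12 ? 0 : 1;
}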
diff --git a/target/linux/generic/patches-3.3/028-bcma-fix-regression-in-interrupt-assignment-on-mips.patch b/target/linux/generic/patches-3.3/028-bcma-fix-regression-in-interrupt-assignment-on-mips.patch
new file mode 100644
index 000000000..9386af29c
--- /dev/null
+++ b/target/linux/generic/patches-3.3/028-bcma-fix-regression-in-interrupt-assignment-on-mips.patch
@@ -0,0 +1,29 @@
+--- a/drivers/bcma/driver_mips.c
++++ b/drivers/bcma/driver_mips.c
+@@ -131,7 +131,7 @@ static void bcma_core_mips_set_irq(struc
+ /* backplane irq line is in use, find out who uses
+ * it and set user to irq 0
+ */
+- list_for_each_entry_reverse(core, &bus->cores, list) {
++ list_for_each_entry(core, &bus->cores, list) {
+ if ((1 << bcma_core_mips_irqflag(core)) ==
+ oldirqflag) {
+ bcma_core_mips_set_irq(core, 0);
+@@ -161,7 +161,7 @@ static void bcma_core_mips_dump_irq(stru
+ {
+ struct bcma_device *core;
+
+- list_for_each_entry_reverse(core, &bus->cores, list) {
++ list_for_each_entry(core, &bus->cores, list) {
+ bcma_core_mips_print_irq(core, bcma_core_mips_irq(core));
+ }
+ }
+@@ -215,7 +215,7 @@ void bcma_core_mips_init(struct bcma_drv
+ mcore->assigned_irqs = 1;
+
+ /* Assign IRQs to all cores on the bus */
+- list_for_each_entry_reverse(core, &bus->cores, list) {
++ list_for_each_entry(core, &bus->cores, list) {
+ int mips_irq;
+ if (core->irq)
+ continue;
diff --git a/target/linux/generic/patches-3.3/040-Controlled-Delay-AQM.patch b/target/linux/generic/patches-3.3/040-Controlled-Delay-AQM.patch
new file mode 100644
index 000000000..85f563b9f
--- /dev/null
+++ b/target/linux/generic/patches-3.3/040-Controlled-Delay-AQM.patch
@@ -0,0 +1,757 @@
+From a93fd80d261f1dc2788442dba8dd5701363d3d6e Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 10 May 2012 07:51:25 +0000
+Subject: [PATCH] codel: Controlled Delay AQM
+
+commit 76e3cc126bb223013a6b9a0e2a51238d1ef2e409 upstream.
+
+An implementation of CoDel AQM, from Kathleen Nichols and Van Jacobson.
+
+http://queue.acm.org/detail.cfm?id=2209336
+
+The main input to this AQM is no longer the queue size in bytes or packets,
+but the time packets spend in the (FIFO) queue.
+
+As we don't have infinite memory, we can still drop packets in enqueue()
+in case of massive load, but the intent of CoDel is to drop packets in
+dequeue(), using a control law based on two simple parameters:
+
+target : target sojourn time (default 5ms)
+interval : width of moving time window (default 100ms)
+
+Based on initial work from Dave Taht.
+
+Refactored to help future codel inclusion as a plugin for other linux
+qdisc (FQ_CODEL, ...), like RED.
+
+include/net/codel.h contains the codel algorithm, kept as close as possible
+to Kathleen's reference implementation.
+
+net/sched/sch_codel.c contains the linux qdisc specific glue.
+
+Separate structures permit a memory-efficient implementation of fq_codel
+(to be sent as separate work): each flow has its own struct
+codel_vars.
+
+Timestamps are taken at enqueue() time with 1024 ns precision, allowing
+a range of 2199 seconds in queue and support for 100Gb links. iproute2 uses
+usec as its base unit.
+
+Selected packets are dropped, unless ECN is enabled and packets can get
+ECN mark instead.
+
+Tested from 2Mb to 10Gb speeds with no particular problems, on ixgbe and
+tg3 drivers (BQL enabled).
+
+Usage: tc qdisc ... codel [ limit PACKETS ] [ target TIME ]
+ [ interval TIME ] [ ecn ]
+
+qdisc codel 10: parent 1:1 limit 2000p target 3.0ms interval 60.0ms ecn
+ Sent 13347099587 bytes 8815805 pkt (dropped 0, overlimits 0 requeues 0)
+ rate 202365Kbit 16708pps backlog 113550b 75p requeues 0
+ count 116 lastcount 98 ldelay 4.3ms dropping drop_next 816us
+ maxpacket 1514 ecn_mark 84399 drop_overlimit 0
+
+CoDel must be seen as a base module, and should be used keeping in mind
+there is still a FIFO queue. So a typical setup will probably need a
+hierarchy of several qdiscs and packet classifiers to be able to meet
+whatever constraints a user might have.
+
+One possible example would be to use fq_codel, which combines Fair
+Queueing and CoDel, in replacement of sfq / sfq_red.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Dave Taht <dave.taht@bufferbloat.net>
+Cc: Kathleen Nichols <nichols@pollere.com>
+Cc: Van Jacobson <van@pollere.net>
+Cc: Tom Herbert <therbert@google.com>
+Cc: Matt Mathis <mattmathis@google.com>
+Cc: Yuchung Cheng <ycheng@google.com>
+Cc: Stephen Hemminger <shemminger@vyatta.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ include/linux/pkt_sched.h | 26 ++++
+ include/net/codel.h | 332 +++++++++++++++++++++++++++++++++++++++++++++
+ net/sched/Kconfig | 11 ++
+ net/sched/Makefile | 1 +
+ net/sched/sch_codel.c | 275 +++++++++++++++++++++++++++++++++++++
+ 5 files changed, 645 insertions(+)
+ create mode 100644 include/net/codel.h
+ create mode 100644 net/sched/sch_codel.c
+
+--- a/include/linux/pkt_sched.h
++++ b/include/linux/pkt_sched.h
+@@ -633,4 +633,30 @@ struct tc_qfq_stats {
+ __u32 lmax;
+ };
+
++/* CODEL */
++
++enum {
++ TCA_CODEL_UNSPEC,
++ TCA_CODEL_TARGET,
++ TCA_CODEL_LIMIT,
++ TCA_CODEL_INTERVAL,
++ TCA_CODEL_ECN,
++ __TCA_CODEL_MAX
++};
++
++#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
++
++struct tc_codel_xstats {
++ __u32 maxpacket; /* largest packet we've seen so far */
++ __u32 count; /* how many drops we've done since the last time we
++ * entered dropping state
++ */
++ __u32 lastcount; /* count at entry to dropping state */
++ __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */
++ __s32 drop_next; /* time to drop next packet */
++ __u32 drop_overlimit; /* number of time max qdisc packet limit was hit */
++ __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */
++ __u32 dropping; /* are we in dropping state ? */
++};
++
+ #endif
+--- /dev/null
++++ b/include/net/codel.h
+@@ -0,0 +1,332 @@
++#ifndef __NET_SCHED_CODEL_H
++#define __NET_SCHED_CODEL_H
++
++/*
++ * Codel - The Controlled-Delay Active Queue Management algorithm
++ *
++ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
++ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
++ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
++ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions, and the following disclaimer,
++ * without modification.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * 3. The names of the authors may not be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * Alternatively, provided that this notice is retained in full, this
++ * software may be distributed under the terms of the GNU General
++ * Public License ("GPL") version 2, in which case the provisions of the
++ * GPL apply INSTEAD OF those given above.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ *
++ */
++
++#include <linux/types.h>
++#include <linux/ktime.h>
++#include <linux/skbuff.h>
++#include <net/pkt_sched.h>
++#include <net/inet_ecn.h>
++
++/* Controlling Queue Delay (CoDel) algorithm
++ * =========================================
++ * Source : Kathleen Nichols and Van Jacobson
++ * http://queue.acm.org/detail.cfm?id=2209336
++ *
++ * Implemented on linux by Dave Taht and Eric Dumazet
++ */
++
++
++/* CoDel uses a 1024 nsec clock, encoded in u32
++ * This gives a range of 2199 seconds, because of signed compares
++ */
++typedef u32 codel_time_t;
++typedef s32 codel_tdiff_t;
++#define CODEL_SHIFT 10
++#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT)
++
++static inline codel_time_t codel_get_time(void)
++{
++ u64 ns = ktime_to_ns(ktime_get());
++
++ return ns >> CODEL_SHIFT;
++}
++
++#define codel_time_after(a, b) ((s32)(a) - (s32)(b) > 0)
++#define codel_time_after_eq(a, b) ((s32)(a) - (s32)(b) >= 0)
++#define codel_time_before(a, b) ((s32)(a) - (s32)(b) < 0)
++#define codel_time_before_eq(a, b) ((s32)(a) - (s32)(b) <= 0)
++
++/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
++struct codel_skb_cb {
++ codel_time_t enqueue_time;
++};
++
++static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
++{
++ qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
++ return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
++}
++
++static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
++{
++ return get_codel_cb(skb)->enqueue_time;
++}
++
++static void codel_set_enqueue_time(struct sk_buff *skb)
++{
++ get_codel_cb(skb)->enqueue_time = codel_get_time();
++}
++
++static inline u32 codel_time_to_us(codel_time_t val)
++{
++ u64 valns = ((u64)val << CODEL_SHIFT);
++
++ do_div(valns, NSEC_PER_USEC);
++ return (u32)valns;
++}
++
++/**
++ * struct codel_params - contains codel parameters
++ * @target: target queue size (in time units)
++ * @interval: width of moving time window
++ * @ecn: is Explicit Congestion Notification enabled
++ */
++struct codel_params {
++ codel_time_t target;
++ codel_time_t interval;
++ bool ecn;
++};
++
++/**
++ * struct codel_vars - contains codel variables
++ * @count: how many drops we've done since the last time we
++ * entered dropping state
++ * @lastcount: count at entry to dropping state
++ * @dropping: set to true if in dropping state
++ * @first_above_time: when we went (or will go) continuously above target
++ * for interval
++ * @drop_next: time to drop next packet, or when we dropped last
++ * @ldelay: sojourn time of last dequeued packet
++ */
++struct codel_vars {
++ u32 count;
++ u32 lastcount;
++ bool dropping;
++ codel_time_t first_above_time;
++ codel_time_t drop_next;
++ codel_time_t ldelay;
++};
++
++/**
++ * struct codel_stats - contains codel shared variables and stats
++ * @maxpacket: largest packet we've seen so far
++ * @drop_count: temp count of dropped packets in dequeue()
++ * @ecn_mark: number of packets we ECN marked instead of dropping
++ */
++struct codel_stats {
++ u32 maxpacket;
++ u32 drop_count;
++ u32 ecn_mark;
++};
++
++static void codel_params_init(struct codel_params *params)
++{
++ params->interval = MS2TIME(100);
++ params->target = MS2TIME(5);
++ params->ecn = false;
++}
++
++static void codel_vars_init(struct codel_vars *vars)
++{
++ vars->drop_next = 0;
++ vars->first_above_time = 0;
++ vars->dropping = false; /* exit dropping state */
++ vars->count = 0;
++ vars->lastcount = 0;
++}
++
++static void codel_stats_init(struct codel_stats *stats)
++{
++ stats->maxpacket = 256;
++}
++
++/* return interval/sqrt(x) with good precision
++ * relies on int_sqrt(unsigned long x) kernel implementation
++ */
++static u32 codel_inv_sqrt(u32 _interval, u32 _x)
++{
++ u64 interval = _interval;
++ unsigned long x = _x;
++
++ /* Scale operands for max precision */
++
++#if BITS_PER_LONG == 64
++ x <<= 32; /* On 64bit arches, we can prescale x by 32bits */
++ interval <<= 16;
++#endif
++
++ while (x < (1UL << (BITS_PER_LONG - 2))) {
++ x <<= 2;
++ interval <<= 1;
++ }
++ do_div(interval, int_sqrt(x));
++ return (u32)interval;
++}
++
++static codel_time_t codel_control_law(codel_time_t t,
++ codel_time_t interval,
++ u32 count)
++{
++ return t + codel_inv_sqrt(interval, count);
++}
++
++
++static bool codel_should_drop(struct sk_buff *skb,
++ unsigned int *backlog,
++ struct codel_vars *vars,
++ struct codel_params *params,
++ struct codel_stats *stats,
++ codel_time_t now)
++{
++ bool ok_to_drop;
++
++ if (!skb) {
++ vars->first_above_time = 0;
++ return false;
++ }
++
++ vars->ldelay = now - codel_get_enqueue_time(skb);
++ *backlog -= qdisc_pkt_len(skb);
++
++ if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
++ stats->maxpacket = qdisc_pkt_len(skb);
++
++ if (codel_time_before(vars->ldelay, params->target) ||
++ *backlog <= stats->maxpacket) {
++ /* went below - stay below for at least interval */
++ vars->first_above_time = 0;
++ return false;
++ }
++ ok_to_drop = false;
++ if (vars->first_above_time == 0) {
++ /* just went above from below. If we stay above
++ * for at least interval we'll say it's ok to drop
++ */
++ vars->first_above_time = now + params->interval;
++ } else if (codel_time_after(now, vars->first_above_time)) {
++ ok_to_drop = true;
++ }
++ return ok_to_drop;
++}
++
++typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
++ struct Qdisc *sch);
++
++static struct sk_buff *codel_dequeue(struct Qdisc *sch,
++ struct codel_params *params,
++ struct codel_vars *vars,
++ struct codel_stats *stats,
++ codel_skb_dequeue_t dequeue_func,
++ u32 *backlog)
++{
++ struct sk_buff *skb = dequeue_func(vars, sch);
++ codel_time_t now;
++ bool drop;
++
++ if (!skb) {
++ vars->dropping = false;
++ return skb;
++ }
++ now = codel_get_time();
++ drop = codel_should_drop(skb, backlog, vars, params, stats, now);
++ if (vars->dropping) {
++ if (!drop) {
++ /* sojourn time below target - leave dropping state */
++ vars->dropping = false;
++ } else if (codel_time_after_eq(now, vars->drop_next)) {
++ /* It's time for the next drop. Drop the current
++ * packet and dequeue the next. The dequeue might
++ * take us out of dropping state.
++ * If not, schedule the next drop.
++ * A large backlog might result in drop rates so high
++ * that the next drop should happen now,
++ * hence the while loop.
++ */
++ while (vars->dropping &&
++ codel_time_after_eq(now, vars->drop_next)) {
++ if (++vars->count == 0) /* avoid zero divides */
++ vars->count = ~0U;
++ if (params->ecn && INET_ECN_set_ce(skb)) {
++ stats->ecn_mark++;
++ vars->drop_next =
++ codel_control_law(vars->drop_next,
++ params->interval,
++ vars->count);
++ goto end;
++ }
++ qdisc_drop(skb, sch);
++ stats->drop_count++;
++ skb = dequeue_func(vars, sch);
++ if (!codel_should_drop(skb, backlog,
++ vars, params, stats, now)) {
++ /* leave dropping state */
++ vars->dropping = false;
++ } else {
++ /* and schedule the next drop */
++ vars->drop_next =
++ codel_control_law(vars->drop_next,
++ params->interval,
++ vars->count);
++ }
++ }
++ }
++ } else if (drop) {
++ if (params->ecn && INET_ECN_set_ce(skb)) {
++ stats->ecn_mark++;
++ } else {
++ qdisc_drop(skb, sch);
++ stats->drop_count++;
++
++ skb = dequeue_func(vars, sch);
++ drop = codel_should_drop(skb, backlog, vars, params,
++ stats, now);
++ }
++ vars->dropping = true;
++ /* if min went above target close to when we last went below it
++ * assume that the drop rate that controlled the queue on the
++ * last cycle is a good starting point to control it now.
++ */
++ if (codel_time_before(now - vars->drop_next,
++ 16 * params->interval)) {
++ vars->count = (vars->count - vars->lastcount) | 1;
++ } else {
++ vars->count = 1;
++ }
++ vars->lastcount = vars->count;
++ vars->drop_next = codel_control_law(now, params->interval,
++ vars->count);
++ }
++end:
++ return skb;
++}
++#endif
+--- a/net/sched/Kconfig
++++ b/net/sched/Kconfig
+@@ -250,6 +250,17 @@ config NET_SCH_QFQ
+
+ If unsure, say N.
+
++config NET_SCH_CODEL
++ tristate "Controlled Delay AQM (CODEL)"
++ help
++ Say Y here if you want to use the Controlled Delay (CODEL)
++ packet scheduling algorithm.
++
++ To compile this driver as a module, choose M here: the module
++ will be called sch_codel.
++
++ If unsure, say N.
++
+ config NET_SCH_INGRESS
+ tristate "Ingress Qdisc"
+ depends on NET_CLS_ACT
+--- a/net/sched/Makefile
++++ b/net/sched/Makefile
+@@ -36,6 +36,7 @@ obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
+ obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
+ obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
+ obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
++obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
+
+ obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
+ obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
+--- /dev/null
++++ b/net/sched/sch_codel.c
+@@ -0,0 +1,275 @@
++/*
++ * Codel - The Controlled-Delay Active Queue Management algorithm
++ *
++ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
++ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
++ *
++ * Implemented on linux by :
++ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
++ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions, and the following disclaimer,
++ * without modification.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * 3. The names of the authors may not be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * Alternatively, provided that this notice is retained in full, this
++ * software may be distributed under the terms of the GNU General
++ * Public License ("GPL") version 2, in which case the provisions of the
++ * GPL apply INSTEAD OF those given above.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/skbuff.h>
++#include <net/pkt_sched.h>
++#include <net/codel.h>
++
++
++#define DEFAULT_CODEL_LIMIT 1000
++
++struct codel_sched_data {
++ struct codel_params params;
++ struct codel_vars vars;
++ struct codel_stats stats;
++ u32 drop_overlimit;
++};
++
++/* This is the specific function called from codel_dequeue()
++ * to dequeue a packet from queue. Note: backlog is handled in
++ * codel, we don't need to reduce it here.
++ */
++static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
++{
++ struct sk_buff *skb = __skb_dequeue(&sch->q);
++
++ prefetch(&skb->end); /* we'll need skb_shinfo() */
++ return skb;
++}
++
++static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
++{
++ struct codel_sched_data *q = qdisc_priv(sch);
++ struct sk_buff *skb;
++
++ skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
++ dequeue, &sch->qstats.backlog);
++ /* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
++ * or HTB crashes. Defer it for next round.
++ */
++ if (q->stats.drop_count && sch->q.qlen) {
++ qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
++ q->stats.drop_count = 0;
++ }
++ if (skb)
++ qdisc_bstats_update(sch, skb);
++ return skb;
++}
++
++static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
++{
++ struct codel_sched_data *q;
++
++ if (likely(qdisc_qlen(sch) < sch->limit)) {
++ codel_set_enqueue_time(skb);
++ return qdisc_enqueue_tail(skb, sch);
++ }
++ q = qdisc_priv(sch);
++ q->drop_overlimit++;
++ return qdisc_drop(skb, sch);
++}
++
++static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
++ [TCA_CODEL_TARGET] = { .type = NLA_U32 },
++ [TCA_CODEL_LIMIT] = { .type = NLA_U32 },
++ [TCA_CODEL_INTERVAL] = { .type = NLA_U32 },
++ [TCA_CODEL_ECN] = { .type = NLA_U32 },
++};
++
++static int codel_change(struct Qdisc *sch, struct nlattr *opt)
++{
++ struct codel_sched_data *q = qdisc_priv(sch);
++ struct nlattr *tb[TCA_CODEL_MAX + 1];
++ unsigned int qlen;
++ int err;
++
++ if (!opt)
++ return -EINVAL;
++
++ err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
++ if (err < 0)
++ return err;
++
++ sch_tree_lock(sch);
++
++ if (tb[TCA_CODEL_TARGET]) {
++ u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
++
++ q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
++ }
++
++ if (tb[TCA_CODEL_INTERVAL]) {
++ u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
++
++ q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
++ }
++
++ if (tb[TCA_CODEL_LIMIT])
++ sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
++
++ if (tb[TCA_CODEL_ECN])
++ q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
++
++ qlen = sch->q.qlen;
++ while (sch->q.qlen > sch->limit) {
++ struct sk_buff *skb = __skb_dequeue(&sch->q);
++
++ sch->qstats.backlog -= qdisc_pkt_len(skb);
++ qdisc_drop(skb, sch);
++ }
++ qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
++
++ sch_tree_unlock(sch);
++ return 0;
++}
++
++static int codel_init(struct Qdisc *sch, struct nlattr *opt)
++{
++ struct codel_sched_data *q = qdisc_priv(sch);
++
++ sch->limit = DEFAULT_CODEL_LIMIT;
++
++ codel_params_init(&q->params);
++ codel_vars_init(&q->vars);
++ codel_stats_init(&q->stats);
++
++ if (opt) {
++ int err = codel_change(sch, opt);
++
++ if (err)
++ return err;
++ }
++
++ if (sch->limit >= 1)
++ sch->flags |= TCQ_F_CAN_BYPASS;
++ else
++ sch->flags &= ~TCQ_F_CAN_BYPASS;
++
++ return 0;
++}
++
++static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
++{
++ struct codel_sched_data *q = qdisc_priv(sch);
++ struct nlattr *opts;
++
++ opts = nla_nest_start(skb, TCA_OPTIONS);
++ if (opts == NULL)
++ goto nla_put_failure;
++
++ if (nla_put_u32(skb, TCA_CODEL_TARGET,
++ codel_time_to_us(q->params.target)) ||
++ nla_put_u32(skb, TCA_CODEL_LIMIT,
++ sch->limit) ||
++ nla_put_u32(skb, TCA_CODEL_INTERVAL,
++ codel_time_to_us(q->params.interval)) ||
++ nla_put_u32(skb, TCA_CODEL_ECN,
++ q->params.ecn))
++ goto nla_put_failure;
++
++ return nla_nest_end(skb, opts);
++
++nla_put_failure:
++ nla_nest_cancel(skb, opts);
++ return -1;
++}
++
++static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
++{
++ const struct codel_sched_data *q = qdisc_priv(sch);
++ struct tc_codel_xstats st = {
++ .maxpacket = q->stats.maxpacket,
++ .count = q->vars.count,
++ .lastcount = q->vars.lastcount,
++ .drop_overlimit = q->drop_overlimit,
++ .ldelay = codel_time_to_us(q->vars.ldelay),
++ .dropping = q->vars.dropping,
++ .ecn_mark = q->stats.ecn_mark,
++ };
++
++ if (q->vars.dropping) {
++ codel_tdiff_t delta = q->vars.drop_next - codel_get_time();
++
++ if (delta >= 0)
++ st.drop_next = codel_time_to_us(delta);
++ else
++ st.drop_next = -codel_time_to_us(-delta);
++ }
++
++ return gnet_stats_copy_app(d, &st, sizeof(st));
++}
++
++static void codel_reset(struct Qdisc *sch)
++{
++ struct codel_sched_data *q = qdisc_priv(sch);
++
++ qdisc_reset_queue(sch);
++ codel_vars_init(&q->vars);
++}
++
++static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
++ .id = "codel",
++ .priv_size = sizeof(struct codel_sched_data),
++
++ .enqueue = codel_qdisc_enqueue,
++ .dequeue = codel_qdisc_dequeue,
++ .peek = qdisc_peek_dequeued,
++ .init = codel_init,
++ .reset = codel_reset,
++ .change = codel_change,
++ .dump = codel_dump,
++ .dump_stats = codel_dump_stats,
++ .owner = THIS_MODULE,
++};
++
++static int __init codel_module_init(void)
++{
++ return register_qdisc(&codel_qdisc_ops);
++}
++
++static void __exit codel_module_exit(void)
++{
++ unregister_qdisc(&codel_qdisc_ops);
++}
++
++module_init(codel_module_init)
++module_exit(codel_module_exit)
++
++MODULE_DESCRIPTION("Controlled Delay queue discipline");
++MODULE_AUTHOR("Dave Taht");
++MODULE_AUTHOR("Eric Dumazet");
++MODULE_LICENSE("Dual BSD/GPL");
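The qdisc above implements the control law described in the commit message:
while in dropping state, the next drop is scheduled at
drop_next + interval/sqrt(count), so drops come progressively closer together
as long as the sojourn time stays above target. A small stand-alone sketch of
that schedule with the default 100 ms interval (plain floating point purely
for illustration; the kernel code works in 1024 ns fixed-point time and, in
the follow-up patch below, replaces the square root with a Newton-method
reciprocal):

#include <math.h>
#include <stdio.h>

int main(void)
{
	double interval_ms = 100.0;  /* default interval */
	double t = 0.0;              /* time of the first drop */
	unsigned int count;

	/* accumulate drop_next = drop_next + interval / sqrt(count),
	 * the recurrence used by codel_control_law() */
	for (count = 1; count <= 6; count++) {
		t += interval_ms / sqrt((double)count);
		printf("count=%u: next drop scheduled at t=%6.1f ms\n", count, t);
	}
	return 0;
}

Compile with -lm; the successive gaps are 100, 70.7, 57.7, 50, 44.7 and 40.8 ms,
which is the gently increasing drop rate the algorithm relies on.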
diff --git a/target/linux/generic/patches-3.3/041-codel-use-Newton-method-instead-of-sqrt-and-divides.patch b/target/linux/generic/patches-3.3/041-codel-use-Newton-method-instead-of-sqrt-and-divides.patch
new file mode 100644
index 000000000..f91b42eda
--- /dev/null
+++ b/target/linux/generic/patches-3.3/041-codel-use-Newton-method-instead-of-sqrt-and-divides.patch
@@ -0,0 +1,185 @@
+From 4a8056dfeef49b306ad6af24a5563d7d6867aae0 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Sat, 12 May 2012 03:32:13 +0000
+Subject: [PATCH] codel: use Newton method instead of sqrt() and divides
+
+commit 536edd67109df5e0cdb2c4ee759e9bade7976367 upstream.
+
+As Van pointed out, interval/sqrt(count) can be implemented using
+multiplies only.
+
+http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
+
+This patch implements the Newton method and reciprocal divide.
+
+Total cost is 15 cycles instead of 120 on my Corei5 machine (64bit
+kernel).
+
+There is a small 'error' for count values < 5, but we don't really care.
+
+I reuse a hole in struct codel_vars :
+ - pack the dropping boolean into one bit
+ - use 31bit to store the reciprocal value of sqrt(count).
+
+Suggested-by: Van Jacobson <van@pollere.net>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Dave Taht <dave.taht@bufferbloat.net>
+Cc: Kathleen Nichols <nichols@pollere.com>
+Cc: Tom Herbert <therbert@google.com>
+Cc: Matt Mathis <mattmathis@google.com>
+Cc: Yuchung Cheng <ycheng@google.com>
+Cc: Nandita Dukkipati <nanditad@google.com>
+Cc: Stephen Hemminger <shemminger@vyatta.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ include/net/codel.h | 68 ++++++++++++++++++++++++++++-----------------------
+ 1 file changed, 37 insertions(+), 31 deletions(-)
+
+--- a/include/net/codel.h
++++ b/include/net/codel.h
+@@ -46,6 +46,7 @@
+ #include <linux/skbuff.h>
+ #include <net/pkt_sched.h>
+ #include <net/inet_ecn.h>
++#include <linux/reciprocal_div.h>
+
+ /* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+@@ -123,6 +124,7 @@ struct codel_params {
+ * entered dropping state
+ * @lastcount: count at entry to dropping state
+ * @dropping: set to true if in dropping state
++ * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1
+ * @first_above_time: when we went (or will go) continuously above target
+ * for interval
+ * @drop_next: time to drop next packet, or when we dropped last
+@@ -131,7 +133,8 @@ struct codel_params {
+ struct codel_vars {
+ u32 count;
+ u32 lastcount;
+- bool dropping;
++ bool dropping:1;
++ u32 rec_inv_sqrt:31;
+ codel_time_t first_above_time;
+ codel_time_t drop_next;
+ codel_time_t ldelay;
+@@ -158,11 +161,7 @@ static void codel_params_init(struct cod
+
+ static void codel_vars_init(struct codel_vars *vars)
+ {
+- vars->drop_next = 0;
+- vars->first_above_time = 0;
+- vars->dropping = false; /* exit dropping state */
+- vars->count = 0;
+- vars->lastcount = 0;
++ memset(vars, 0, sizeof(*vars));
+ }
+
+ static void codel_stats_init(struct codel_stats *stats)
+@@ -170,38 +169,37 @@ static void codel_stats_init(struct code
+ stats->maxpacket = 256;
+ }
+
+-/* return interval/sqrt(x) with good precision
+- * relies on int_sqrt(unsigned long x) kernel implementation
++/*
++ * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
++ * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
++ *
++ * Here, invsqrt is a fixed point number (< 1.0), 31bit mantissa)
+ */
+-static u32 codel_inv_sqrt(u32 _interval, u32 _x)
++static void codel_Newton_step(struct codel_vars *vars)
+ {
+- u64 interval = _interval;
+- unsigned long x = _x;
+-
+- /* Scale operands for max precision */
++ u32 invsqrt = vars->rec_inv_sqrt;
++ u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 31;
++ u64 val = (3LL << 31) - ((u64)vars->count * invsqrt2);
+
+-#if BITS_PER_LONG == 64
+- x <<= 32; /* On 64bit arches, we can prescale x by 32bits */
+- interval <<= 16;
+-#endif
++ val = (val * invsqrt) >> 32;
+
+- while (x < (1UL << (BITS_PER_LONG - 2))) {
+- x <<= 2;
+- interval <<= 1;
+- }
+- do_div(interval, int_sqrt(x));
+- return (u32)interval;
++ vars->rec_inv_sqrt = val;
+ }
+
++/*
++ * CoDel control_law is t + interval/sqrt(count)
++ * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
++ * both sqrt() and divide operation.
++ */
+ static codel_time_t codel_control_law(codel_time_t t,
+ codel_time_t interval,
+- u32 count)
++ u32 rec_inv_sqrt)
+ {
+- return t + codel_inv_sqrt(interval, count);
++ return t + reciprocal_divide(interval, rec_inv_sqrt << 1);
+ }
+
+
+-static bool codel_should_drop(struct sk_buff *skb,
++static bool codel_should_drop(const struct sk_buff *skb,
+ unsigned int *backlog,
+ struct codel_vars *vars,
+ struct codel_params *params,
+@@ -274,14 +272,16 @@ static struct sk_buff *codel_dequeue(str
+ */
+ while (vars->dropping &&
+ codel_time_after_eq(now, vars->drop_next)) {
+- if (++vars->count == 0) /* avoid zero divides */
+- vars->count = ~0U;
++ vars->count++; /* dont care of possible wrap
++ * since there is no more divide
++ */
++ codel_Newton_step(vars);
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+- vars->count);
++ vars->rec_inv_sqrt);
+ goto end;
+ }
+ qdisc_drop(skb, sch);
+@@ -296,7 +296,7 @@ static struct sk_buff *codel_dequeue(str
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+- vars->count);
++ vars->rec_inv_sqrt);
+ }
+ }
+ }
+@@ -319,12 +319,18 @@ static struct sk_buff *codel_dequeue(str
+ if (codel_time_before(now - vars->drop_next,
+ 16 * params->interval)) {
+ vars->count = (vars->count - vars->lastcount) | 1;
++ /* we dont care if rec_inv_sqrt approximation
++ * is not very precise :
++ * Next Newton steps will correct it quadratically.
++ */
++ codel_Newton_step(vars);
+ } else {
+ vars->count = 1;
++ vars->rec_inv_sqrt = 0x7fffffff;
+ }
+ vars->lastcount = vars->count;
+ vars->drop_next = codel_control_law(now, params->interval,
+- vars->count);
++ vars->rec_inv_sqrt);
+ }
+ end:
+ return skb;
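The iteration above can be checked outside the kernel. A minimal userspace sketch of codel_Newton_step()'s Q0.31 arithmetic, driven the way codel_dequeue() drives it (count grows by one per step, so each step only has to correct a small error); the harness and printout are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <math.h>

/* One refinement of y ~= 1/sqrt(count) in Q0.31 fixed point:
 *   y' = (y / 2) * (3 - count * y^2)
 */
static uint32_t newton_step(uint32_t y, uint32_t count)
{
    uint32_t y2 = (uint32_t)(((uint64_t)y * y) >> 31);    /* y^2, Q0.31 */
    uint64_t val = (3ULL << 31) - (uint64_t)count * y2;   /* 3 - count*y^2 */

    return (uint32_t)((val * y) >> 32);                   /* * y / 2, Q0.31 */
}

int main(void)
{
    uint32_t y = 0x7fffffff;   /* ~1.0, exact for count == 1 */
    uint32_t count;

    for (count = 2; count <= 16; count++) {
        y = newton_step(y, count);
        printf("count=%2u  estimate=%.4f  1/sqrt=%.4f\n",
               count, ldexp((double)y, -31), 1.0 / sqrt((double)count));
    }
    return 0;
}

Compiled with -lm, the estimate is rough for the very first steps (the small error for count < 5 noted in the changelog) and then tracks 1/sqrt(count) to within a couple of percent.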
diff --git a/target/linux/generic/patches-3.3/042-fq_codel-Fair-Queue-Codel-AQM.patch b/target/linux/generic/patches-3.3/042-fq_codel-Fair-Queue-Codel-AQM.patch
new file mode 100644
index 000000000..26bbbcb49
--- /dev/null
+++ b/target/linux/generic/patches-3.3/042-fq_codel-Fair-Queue-Codel-AQM.patch
@@ -0,0 +1,839 @@
+From f8cf19c19528a468cc0b9846c0328a94cccdc605 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 11 May 2012 09:30:50 +0000
+Subject: [PATCH] fq_codel: Fair Queue Codel AQM
+
+commit 4b549a2ef4bef9965d97cbd992ba67930cd3e0fe upstream.
+
+Fair Queue Codel packet scheduler
+
+Principles :
+
+- Packets are classified (internal classifier or external) on flows.
+- This is a Stochastic model (as we use a hash, several flows might
+ be hashed on same slot)
+- Each flow has a CoDel managed queue.
+- Flows are linked onto two (Round Robin) lists,
+ so that new flows have priority on old ones.
+
+- For a given flow, packets are not reordered (CoDel uses a FIFO)
+- head drops only.
+- ECN capability is on by default.
+- Very low memory footprint (64 bytes per flow)
+
+tc qdisc ... fq_codel [ limit PACKETS ] [ flows number ]
+ [ target TIME ] [ interval TIME ] [ noecn ]
+ [ quantum BYTES ]
+
+defaults : 1024 flows, 10240 packets limit, quantum : device MTU
+ target : 5ms (CoDel default)
+ interval : 100ms (CoDel default)
+
+Impressive results on load :
+
+class htb 1:1 root leaf 10: prio 0 quantum 1514 rate 200000Kbit ceil 200000Kbit burst 1475b/8 mpu 0b overhead 0b cburst 1475b/8 mpu 0b overhead 0b level 0
+ Sent 43304920109 bytes 33063109 pkt (dropped 0, overlimits 0 requeues 0)
+ rate 201691Kbit 28595pps backlog 0b 312p requeues 0
+ lended: 33063109 borrowed: 0 giants: 0
+ tokens: -912 ctokens: -912
+
+class fq_codel 10:1735 parent 10:
+ (dropped 1292, overlimits 0 requeues 0)
+ backlog 15140b 10p requeues 0
+ deficit 1514 count 1 lastcount 1 ldelay 7.1ms
+class fq_codel 10:4524 parent 10:
+ (dropped 1291, overlimits 0 requeues 0)
+ backlog 16654b 11p requeues 0
+ deficit 1514 count 1 lastcount 1 ldelay 7.1ms
+class fq_codel 10:4e74 parent 10:
+ (dropped 1290, overlimits 0 requeues 0)
+ backlog 6056b 4p requeues 0
+ deficit 1514 count 1 lastcount 1 ldelay 6.4ms dropping drop_next 92.0ms
+class fq_codel 10:628a parent 10:
+ (dropped 1289, overlimits 0 requeues 0)
+ backlog 7570b 5p requeues 0
+ deficit 1514 count 1 lastcount 1 ldelay 5.4ms dropping drop_next 90.9ms
+class fq_codel 10:a4b3 parent 10:
+ (dropped 302, overlimits 0 requeues 0)
+ backlog 16654b 11p requeues 0
+ deficit 1514 count 1 lastcount 1 ldelay 7.1ms
+class fq_codel 10:c3c2 parent 10:
+ (dropped 1284, overlimits 0 requeues 0)
+ backlog 13626b 9p requeues 0
+ deficit 1514 count 1 lastcount 1 ldelay 5.9ms
+class fq_codel 10:d331 parent 10:
+ (dropped 299, overlimits 0 requeues 0)
+ backlog 15140b 10p requeues 0
+ deficit 1514 count 1 lastcount 1 ldelay 7.0ms
+class fq_codel 10:d526 parent 10:
+ (dropped 12160, overlimits 0 requeues 0)
+ backlog 35870b 211p requeues 0
+ deficit 1508 count 12160 lastcount 1 ldelay 15.3ms dropping drop_next 247us
+class fq_codel 10:e2c6 parent 10:
+ (dropped 1288, overlimits 0 requeues 0)
+ backlog 15140b 10p requeues 0
+ deficit 1514 count 1 lastcount 1 ldelay 7.1ms
+class fq_codel 10:eab5 parent 10:
+ (dropped 1285, overlimits 0 requeues 0)
+ backlog 16654b 11p requeues 0
+ deficit 1514 count 1 lastcount 1 ldelay 5.9ms
+class fq_codel 10:f220 parent 10:
+ (dropped 1289, overlimits 0 requeues 0)
+ backlog 15140b 10p requeues 0
+ deficit 1514 count 1 lastcount 1 ldelay 7.1ms
+
+qdisc htb 1: root refcnt 6 r2q 10 default 1 direct_packets_stat 0 ver 3.17
+ Sent 43331086547 bytes 33092812 pkt (dropped 0, overlimits 66063544 requeues 71)
+ rate 201697Kbit 28602pps backlog 0b 260p requeues 71
+qdisc fq_codel 10: parent 1:1 limit 10240p flows 65536 target 5.0ms interval 100.0ms ecn
+ Sent 43331086547 bytes 33092812 pkt (dropped 949359, overlimits 0 requeues 0)
+ rate 201697Kbit 28602pps backlog 189352b 260p requeues 0
+ maxpacket 1514 drop_overlimit 0 new_flow_count 5582 ecn_mark 125593
+ new_flows_len 0 old_flows_len 11
+
+PING 172.30.42.18 (172.30.42.18) 56(84) bytes of data.
+64 bytes from 172.30.42.18: icmp_req=1 ttl=64 time=0.227 ms
+64 bytes from 172.30.42.18: icmp_req=2 ttl=64 time=0.165 ms
+64 bytes from 172.30.42.18: icmp_req=3 ttl=64 time=0.166 ms
+64 bytes from 172.30.42.18: icmp_req=4 ttl=64 time=0.151 ms
+64 bytes from 172.30.42.18: icmp_req=5 ttl=64 time=0.164 ms
+64 bytes from 172.30.42.18: icmp_req=6 ttl=64 time=0.172 ms
+64 bytes from 172.30.42.18: icmp_req=7 ttl=64 time=0.175 ms
+64 bytes from 172.30.42.18: icmp_req=8 ttl=64 time=0.183 ms
+64 bytes from 172.30.42.18: icmp_req=9 ttl=64 time=0.158 ms
+64 bytes from 172.30.42.18: icmp_req=10 ttl=64 time=0.200 ms
+
+10 packets transmitted, 10 received, 0% packet loss, time 8999ms
+rtt min/avg/max/mdev = 0.151/0.176/0.227/0.022 ms
+
+Much better than SFQ because of priority given to new flows, and fast
+path dirtying less cache lines.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ include/linux/pkt_sched.h | 54 ++++
+ net/sched/Kconfig | 11 +
+ net/sched/Makefile | 1 +
+ net/sched/sch_fq_codel.c | 624 +++++++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 690 insertions(+)
+ create mode 100644 net/sched/sch_fq_codel.c
+
+--- a/include/linux/pkt_sched.h
++++ b/include/linux/pkt_sched.h
+@@ -659,4 +659,58 @@ struct tc_codel_xstats {
+ __u32 dropping; /* are we in dropping state ? */
+ };
+
++/* FQ_CODEL */
++
++enum {
++ TCA_FQ_CODEL_UNSPEC,
++ TCA_FQ_CODEL_TARGET,
++ TCA_FQ_CODEL_LIMIT,
++ TCA_FQ_CODEL_INTERVAL,
++ TCA_FQ_CODEL_ECN,
++ TCA_FQ_CODEL_FLOWS,
++ TCA_FQ_CODEL_QUANTUM,
++ __TCA_FQ_CODEL_MAX
++};
++
++#define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1)
++
++enum {
++ TCA_FQ_CODEL_XSTATS_QDISC,
++ TCA_FQ_CODEL_XSTATS_CLASS,
++};
++
++struct tc_fq_codel_qd_stats {
++ __u32 maxpacket; /* largest packet we've seen so far */
++ __u32 drop_overlimit; /* number of time max qdisc
++ * packet limit was hit
++ */
++ __u32 ecn_mark; /* number of packets we ECN marked
++ * instead of being dropped
++ */
++ __u32 new_flow_count; /* number of time packets
++ * created a 'new flow'
++ */
++ __u32 new_flows_len; /* count of flows in new list */
++ __u32 old_flows_len; /* count of flows in old list */
++};
++
++struct tc_fq_codel_cl_stats {
++ __s32 deficit;
++ __u32 ldelay; /* in-queue delay seen by most recently
++ * dequeued packet
++ */
++ __u32 count;
++ __u32 lastcount;
++ __u32 dropping;
++ __s32 drop_next;
++};
++
++struct tc_fq_codel_xstats {
++ __u32 type;
++ union {
++ struct tc_fq_codel_qd_stats qdisc_stats;
++ struct tc_fq_codel_cl_stats class_stats;
++ };
++};
++
+ #endif
+--- a/net/sched/Kconfig
++++ b/net/sched/Kconfig
+@@ -261,6 +261,17 @@ config NET_SCH_CODEL
+
+ If unsure, say N.
+
++config NET_SCH_FQ_CODEL
++ tristate "Fair Queue Controlled Delay AQM (FQ_CODEL)"
++ help
++ Say Y here if you want to use the FQ Controlled Delay (FQ_CODEL)
++ packet scheduling algorithm.
++
++ To compile this driver as a module, choose M here: the module
++ will be called sch_fq_codel.
++
++ If unsure, say N.
++
+ config NET_SCH_INGRESS
+ tristate "Ingress Qdisc"
+ depends on NET_CLS_ACT
+--- a/net/sched/Makefile
++++ b/net/sched/Makefile
+@@ -37,6 +37,7 @@ obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqpr
+ obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
+ obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
+ obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
++obj-$(CONFIG_NET_SCH_FQ_CODEL) += sch_fq_codel.o
+
+ obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
+ obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
+--- /dev/null
++++ b/net/sched/sch_fq_codel.c
+@@ -0,0 +1,624 @@
++/*
++ * Fair Queue CoDel discipline
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/jiffies.h>
++#include <linux/string.h>
++#include <linux/in.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/skbuff.h>
++#include <linux/jhash.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <net/netlink.h>
++#include <net/pkt_sched.h>
++#include <net/flow_keys.h>
++#include <net/codel.h>
++
++/* Fair Queue CoDel.
++ *
++ * Principles :
++ * Packets are classified (internal classifier or external) on flows.
++ * This is a Stochastic model (as we use a hash, several flows
++ * might be hashed on same slot)
++ * Each flow has a CoDel managed queue.
++ * Flows are linked onto two (Round Robin) lists,
++ * so that new flows have priority on old ones.
++ *
++ * For a given flow, packets are not reordered (CoDel uses a FIFO)
++ * head drops only.
++ * ECN capability is on by default.
++ * Low memory footprint (64 bytes per flow)
++ */
++
++struct fq_codel_flow {
++ struct sk_buff *head;
++ struct sk_buff *tail;
++ struct list_head flowchain;
++ int deficit;
++ u32 dropped; /* number of drops (or ECN marks) on this flow */
++ struct codel_vars cvars;
++}; /* please try to keep this structure <= 64 bytes */
++
++struct fq_codel_sched_data {
++ struct tcf_proto *filter_list; /* optional external classifier */
++ struct fq_codel_flow *flows; /* Flows table [flows_cnt] */
++ u32 *backlogs; /* backlog table [flows_cnt] */
++ u32 flows_cnt; /* number of flows */
++ u32 perturbation; /* hash perturbation */
++ u32 quantum; /* psched_mtu(qdisc_dev(sch)); */
++ struct codel_params cparams;
++ struct codel_stats cstats;
++ u32 drop_overlimit;
++ u32 new_flow_count;
++
++ struct list_head new_flows; /* list of new flows */
++ struct list_head old_flows; /* list of old flows */
++};
++
++static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
++ const struct sk_buff *skb)
++{
++ struct flow_keys keys;
++ unsigned int hash;
++
++ skb_flow_dissect(skb, &keys);
++ hash = jhash_3words((__force u32)keys.dst,
++ (__force u32)keys.src ^ keys.ip_proto,
++ (__force u32)keys.ports, q->perturbation);
++ return ((u64)hash * q->flows_cnt) >> 32;
++}
++
++static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
++ int *qerr)
++{
++ struct fq_codel_sched_data *q = qdisc_priv(sch);
++ struct tcf_result res;
++ int result;
++
++ if (TC_H_MAJ(skb->priority) == sch->handle &&
++ TC_H_MIN(skb->priority) > 0 &&
++ TC_H_MIN(skb->priority) <= q->flows_cnt)
++ return TC_H_MIN(skb->priority);
++
++ if (!q->filter_list)
++ return fq_codel_hash(q, skb) + 1;
++
++ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
++ result = tc_classify(skb, q->filter_list, &res);
++ if (result >= 0) {
++#ifdef CONFIG_NET_CLS_ACT
++ switch (result) {
++ case TC_ACT_STOLEN:
++ case TC_ACT_QUEUED:
++ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
++ case TC_ACT_SHOT:
++ return 0;
++ }
++#endif
++ if (TC_H_MIN(res.classid) <= q->flows_cnt)
++ return TC_H_MIN(res.classid);
++ }
++ return 0;
++}
++
++/* helper functions : might be changed when/if skb use a standard list_head */
++
++/* remove one skb from head of slot queue */
++static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
++{
++ struct sk_buff *skb = flow->head;
++
++ flow->head = skb->next;
++ skb->next = NULL;
++ return skb;
++}
++
++/* add skb to flow queue (tail add) */
++static inline void flow_queue_add(struct fq_codel_flow *flow,
++ struct sk_buff *skb)
++{
++ if (flow->head == NULL)
++ flow->head = skb;
++ else
++ flow->tail->next = skb;
++ flow->tail = skb;
++ skb->next = NULL;
++}
++
++static unsigned int fq_codel_drop(struct Qdisc *sch)
++{
++ struct fq_codel_sched_data *q = qdisc_priv(sch);
++ struct sk_buff *skb;
++ unsigned int maxbacklog = 0, idx = 0, i, len;
++ struct fq_codel_flow *flow;
++
++ /* Queue is full! Find the fat flow and drop packet from it.
++ * This might sound expensive, but with 1024 flows, we scan
++ * 4KB of memory, and we dont need to handle a complex tree
++ * in fast path (packet queue/enqueue) with many cache misses.
++ */
++ for (i = 0; i < q->flows_cnt; i++) {
++ if (q->backlogs[i] > maxbacklog) {
++ maxbacklog = q->backlogs[i];
++ idx = i;
++ }
++ }
++ flow = &q->flows[idx];
++ skb = dequeue_head(flow);
++ len = qdisc_pkt_len(skb);
++ q->backlogs[idx] -= len;
++ kfree_skb(skb);
++ sch->q.qlen--;
++ sch->qstats.drops++;
++ sch->qstats.backlog -= len;
++ flow->dropped++;
++ return idx;
++}
++
++static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
++{
++ struct fq_codel_sched_data *q = qdisc_priv(sch);
++ unsigned int idx;
++ struct fq_codel_flow *flow;
++ int uninitialized_var(ret);
++
++ idx = fq_codel_classify(skb, sch, &ret);
++ if (idx == 0) {
++ if (ret & __NET_XMIT_BYPASS)
++ sch->qstats.drops++;
++ kfree_skb(skb);
++ return ret;
++ }
++ idx--;
++
++ codel_set_enqueue_time(skb);
++ flow = &q->flows[idx];
++ flow_queue_add(flow, skb);
++ q->backlogs[idx] += qdisc_pkt_len(skb);
++ sch->qstats.backlog += qdisc_pkt_len(skb);
++
++ if (list_empty(&flow->flowchain)) {
++ list_add_tail(&flow->flowchain, &q->new_flows);
++ codel_vars_init(&flow->cvars);
++ q->new_flow_count++;
++ flow->deficit = q->quantum;
++ flow->dropped = 0;
++ }
++ if (++sch->q.qlen < sch->limit)
++ return NET_XMIT_SUCCESS;
++
++ q->drop_overlimit++;
++ /* Return Congestion Notification only if we dropped a packet
++ * from this flow.
++ */
++ if (fq_codel_drop(sch) == idx)
++ return NET_XMIT_CN;
++
++ /* As we dropped a packet, better let upper stack know this */
++ qdisc_tree_decrease_qlen(sch, 1);
++ return NET_XMIT_SUCCESS;
++}
++
++/* This is the specific function called from codel_dequeue()
++ * to dequeue a packet from queue. Note: backlog is handled in
++ * codel, we dont need to reduce it here.
++ */
++static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
++{
++ struct fq_codel_flow *flow;
++ struct sk_buff *skb = NULL;
++
++ flow = container_of(vars, struct fq_codel_flow, cvars);
++ if (flow->head) {
++ skb = dequeue_head(flow);
++ sch->qstats.backlog -= qdisc_pkt_len(skb);
++ sch->q.qlen--;
++ }
++ return skb;
++}
++
++static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
++{
++ struct fq_codel_sched_data *q = qdisc_priv(sch);
++ struct sk_buff *skb;
++ struct fq_codel_flow *flow;
++ struct list_head *head;
++ u32 prev_drop_count, prev_ecn_mark;
++
++begin:
++ head = &q->new_flows;
++ if (list_empty(head)) {
++ head = &q->old_flows;
++ if (list_empty(head))
++ return NULL;
++ }
++ flow = list_first_entry(head, struct fq_codel_flow, flowchain);
++
++ if (flow->deficit <= 0) {
++ flow->deficit += q->quantum;
++ list_move_tail(&flow->flowchain, &q->old_flows);
++ goto begin;
++ }
++
++ prev_drop_count = q->cstats.drop_count;
++ prev_ecn_mark = q->cstats.ecn_mark;
++
++ skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
++ dequeue, &q->backlogs[flow - q->flows]);
++
++ flow->dropped += q->cstats.drop_count - prev_drop_count;
++ flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
++
++ if (!skb) {
++ /* force a pass through old_flows to prevent starvation */
++ if ((head == &q->new_flows) && !list_empty(&q->old_flows))
++ list_move_tail(&flow->flowchain, &q->old_flows);
++ else
++ list_del_init(&flow->flowchain);
++ goto begin;
++ }
++ qdisc_bstats_update(sch, skb);
++ flow->deficit -= qdisc_pkt_len(skb);
++ /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
++ * or HTB crashes. Defer it for next round.
++ */
++ if (q->cstats.drop_count && sch->q.qlen) {
++ qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
++ q->cstats.drop_count = 0;
++ }
++ return skb;
++}
++
++static void fq_codel_reset(struct Qdisc *sch)
++{
++ struct sk_buff *skb;
++
++ while ((skb = fq_codel_dequeue(sch)) != NULL)
++ kfree_skb(skb);
++}
++
++static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
++ [TCA_FQ_CODEL_TARGET] = { .type = NLA_U32 },
++ [TCA_FQ_CODEL_LIMIT] = { .type = NLA_U32 },
++ [TCA_FQ_CODEL_INTERVAL] = { .type = NLA_U32 },
++ [TCA_FQ_CODEL_ECN] = { .type = NLA_U32 },
++ [TCA_FQ_CODEL_FLOWS] = { .type = NLA_U32 },
++ [TCA_FQ_CODEL_QUANTUM] = { .type = NLA_U32 },
++};
++
++static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
++{
++ struct fq_codel_sched_data *q = qdisc_priv(sch);
++ struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
++ int err;
++
++ if (!opt)
++ return -EINVAL;
++
++ err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
++ if (err < 0)
++ return err;
++ if (tb[TCA_FQ_CODEL_FLOWS]) {
++ if (q->flows)
++ return -EINVAL;
++ q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
++ if (!q->flows_cnt ||
++ q->flows_cnt > 65536)
++ return -EINVAL;
++ }
++ sch_tree_lock(sch);
++
++ if (tb[TCA_FQ_CODEL_TARGET]) {
++ u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);
++
++ q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
++ }
++
++ if (tb[TCA_FQ_CODEL_INTERVAL]) {
++ u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
++
++ q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
++ }
++
++ if (tb[TCA_FQ_CODEL_LIMIT])
++ sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
++
++ if (tb[TCA_FQ_CODEL_ECN])
++ q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
++
++ if (tb[TCA_FQ_CODEL_QUANTUM])
++ q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
++
++ while (sch->q.qlen > sch->limit) {
++ struct sk_buff *skb = fq_codel_dequeue(sch);
++
++ kfree_skb(skb);
++ q->cstats.drop_count++;
++ }
++ qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
++ q->cstats.drop_count = 0;
++
++ sch_tree_unlock(sch);
++ return 0;
++}
++
++static void *fq_codel_zalloc(size_t sz)
++{
++ void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
++
++ if (!ptr)
++ ptr = vzalloc(sz);
++ return ptr;
++}
++
++static void fq_codel_free(void *addr)
++{
++ if (addr) {
++ if (is_vmalloc_addr(addr))
++ vfree(addr);
++ else
++ kfree(addr);
++ }
++}
++
++static void fq_codel_destroy(struct Qdisc *sch)
++{
++ struct fq_codel_sched_data *q = qdisc_priv(sch);
++
++ tcf_destroy_chain(&q->filter_list);
++ fq_codel_free(q->backlogs);
++ fq_codel_free(q->flows);
++}
++
++static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
++{
++ struct fq_codel_sched_data *q = qdisc_priv(sch);
++ int i;
++
++ sch->limit = 10*1024;
++ q->flows_cnt = 1024;
++ q->quantum = psched_mtu(qdisc_dev(sch));
++ q->perturbation = net_random();
++ INIT_LIST_HEAD(&q->new_flows);
++ INIT_LIST_HEAD(&q->old_flows);
++ codel_params_init(&q->cparams);
++ codel_stats_init(&q->cstats);
++ q->cparams.ecn = true;
++
++ if (opt) {
++ int err = fq_codel_change(sch, opt);
++ if (err)
++ return err;
++ }
++
++ if (!q->flows) {
++ q->flows = fq_codel_zalloc(q->flows_cnt *
++ sizeof(struct fq_codel_flow));
++ if (!q->flows)
++ return -ENOMEM;
++ q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
++ if (!q->backlogs) {
++ fq_codel_free(q->flows);
++ return -ENOMEM;
++ }
++ for (i = 0; i < q->flows_cnt; i++) {
++ struct fq_codel_flow *flow = q->flows + i;
++
++ INIT_LIST_HEAD(&flow->flowchain);
++ }
++ }
++ if (sch->limit >= 1)
++ sch->flags |= TCQ_F_CAN_BYPASS;
++ else
++ sch->flags &= ~TCQ_F_CAN_BYPASS;
++ return 0;
++}
++
++static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
++{
++ struct fq_codel_sched_data *q = qdisc_priv(sch);
++ struct nlattr *opts;
++
++ opts = nla_nest_start(skb, TCA_OPTIONS);
++ if (opts == NULL)
++ goto nla_put_failure;
++
++ if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
++ codel_time_to_us(q->cparams.target)) ||
++ nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
++ sch->limit) ||
++ nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
++ codel_time_to_us(q->cparams.interval)) ||
++ nla_put_u32(skb, TCA_FQ_CODEL_ECN,
++ q->cparams.ecn) ||
++ nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
++ q->quantum) ||
++ nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
++ q->flows_cnt))
++ goto nla_put_failure;
++
++ nla_nest_end(skb, opts);
++ return skb->len;
++
++nla_put_failure:
++ return -1;
++}
++
++static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
++{
++ struct fq_codel_sched_data *q = qdisc_priv(sch);
++ struct tc_fq_codel_xstats st = {
++ .type = TCA_FQ_CODEL_XSTATS_QDISC,
++ .qdisc_stats.maxpacket = q->cstats.maxpacket,
++ .qdisc_stats.drop_overlimit = q->drop_overlimit,
++ .qdisc_stats.ecn_mark = q->cstats.ecn_mark,
++ .qdisc_stats.new_flow_count = q->new_flow_count,
++ };
++ struct list_head *pos;
++
++ list_for_each(pos, &q->new_flows)
++ st.qdisc_stats.new_flows_len++;
++
++ list_for_each(pos, &q->old_flows)
++ st.qdisc_stats.old_flows_len++;
++
++ return gnet_stats_copy_app(d, &st, sizeof(st));
++}
++
++static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
++{
++ return NULL;
++}
++
++static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
++{
++ return 0;
++}
++
++static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
++ u32 classid)
++{
++ /* we cannot bypass queue discipline anymore */
++ sch->flags &= ~TCQ_F_CAN_BYPASS;
++ return 0;
++}
++
++static void fq_codel_put(struct Qdisc *q, unsigned long cl)
++{
++}
++
++static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl)
++{
++ struct fq_codel_sched_data *q = qdisc_priv(sch);
++
++ if (cl)
++ return NULL;
++ return &q->filter_list;
++}
++
++static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
++ struct sk_buff *skb, struct tcmsg *tcm)
++{
++ tcm->tcm_handle |= TC_H_MIN(cl);
++ return 0;
++}
++
++static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
++ struct gnet_dump *d)
++{
++ struct fq_codel_sched_data *q = qdisc_priv(sch);
++ u32 idx = cl - 1;
++ struct gnet_stats_queue qs = { 0 };
++ struct tc_fq_codel_xstats xstats;
++
++ if (idx < q->flows_cnt) {
++ const struct fq_codel_flow *flow = &q->flows[idx];
++ const struct sk_buff *skb = flow->head;
++
++ memset(&xstats, 0, sizeof(xstats));
++ xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
++ xstats.class_stats.deficit = flow->deficit;
++ xstats.class_stats.ldelay =
++ codel_time_to_us(flow->cvars.ldelay);
++ xstats.class_stats.count = flow->cvars.count;
++ xstats.class_stats.lastcount = flow->cvars.lastcount;
++ xstats.class_stats.dropping = flow->cvars.dropping;
++ if (flow->cvars.dropping) {
++ codel_tdiff_t delta = flow->cvars.drop_next -
++ codel_get_time();
++
++ xstats.class_stats.drop_next = (delta >= 0) ?
++ codel_time_to_us(delta) :
++ -codel_time_to_us(-delta);
++ }
++ while (skb) {
++ qs.qlen++;
++ skb = skb->next;
++ }
++ qs.backlog = q->backlogs[idx];
++ qs.drops = flow->dropped;
++ }
++ if (gnet_stats_copy_queue(d, &qs) < 0)
++ return -1;
++ if (idx < q->flows_cnt)
++ return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
++ return 0;
++}
++
++static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
++{
++ struct fq_codel_sched_data *q = qdisc_priv(sch);
++ unsigned int i;
++
++ if (arg->stop)
++ return;
++
++ for (i = 0; i < q->flows_cnt; i++) {
++ if (list_empty(&q->flows[i].flowchain) ||
++ arg->count < arg->skip) {
++ arg->count++;
++ continue;
++ }
++ if (arg->fn(sch, i + 1, arg) < 0) {
++ arg->stop = 1;
++ break;
++ }
++ arg->count++;
++ }
++}
++
++static const struct Qdisc_class_ops fq_codel_class_ops = {
++ .leaf = fq_codel_leaf,
++ .get = fq_codel_get,
++ .put = fq_codel_put,
++ .tcf_chain = fq_codel_find_tcf,
++ .bind_tcf = fq_codel_bind,
++ .unbind_tcf = fq_codel_put,
++ .dump = fq_codel_dump_class,
++ .dump_stats = fq_codel_dump_class_stats,
++ .walk = fq_codel_walk,
++};
++
++static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
++ .cl_ops = &fq_codel_class_ops,
++ .id = "fq_codel",
++ .priv_size = sizeof(struct fq_codel_sched_data),
++ .enqueue = fq_codel_enqueue,
++ .dequeue = fq_codel_dequeue,
++ .peek = qdisc_peek_dequeued,
++ .drop = fq_codel_drop,
++ .init = fq_codel_init,
++ .reset = fq_codel_reset,
++ .destroy = fq_codel_destroy,
++ .change = fq_codel_change,
++ .dump = fq_codel_dump,
++ .dump_stats = fq_codel_dump_stats,
++ .owner = THIS_MODULE,
++};
++
++static int __init fq_codel_module_init(void)
++{
++ return register_qdisc(&fq_codel_qdisc_ops);
++}
++
++static void __exit fq_codel_module_exit(void)
++{
++ unregister_qdisc(&fq_codel_qdisc_ops);
++}
++
++module_init(fq_codel_module_init)
++module_exit(fq_codel_module_exit)
++MODULE_AUTHOR("Eric Dumazet");
++MODULE_LICENSE("GPL");
diff --git a/target/linux/generic/patches-3.3/043-net-codel-Add-missing-include-linux-prefetch.h.patch b/target/linux/generic/patches-3.3/043-net-codel-Add-missing-include-linux-prefetch.h.patch
new file mode 100644
index 000000000..3510ee041
--- /dev/null
+++ b/target/linux/generic/patches-3.3/043-net-codel-Add-missing-include-linux-prefetch.h.patch
@@ -0,0 +1,33 @@
+From 18c12e496ead3306de00a82d0bc73d71b34d7c24 Mon Sep 17 00:00:00 2001
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+Date: Mon, 14 May 2012 09:47:05 +0000
+Subject: [PATCH] net/codel: Add missing #include <linux/prefetch.h>
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+commit ce5b4b977127ee20c3f9c3fd3637cd3796f649f5 upstream.
+
+m68k allmodconfig:
+
+net/sched/sch_codel.c: In function ‘dequeue’:
+net/sched/sch_codel.c:70: error: implicit declaration of function ‘prefetch’
+make[1]: *** [net/sched/sch_codel.o] Error 1
+
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ net/sched/sch_codel.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -46,6 +46,7 @@
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/skbuff.h>
++#include <linux/prefetch.h>
+ #include <net/pkt_sched.h>
+ #include <net/codel.h>
+
diff --git a/target/linux/generic/patches-3.3/044-net-codel-fix-build-errors.patch b/target/linux/generic/patches-3.3/044-net-codel-fix-build-errors.patch
new file mode 100644
index 000000000..219285d78
--- /dev/null
+++ b/target/linux/generic/patches-3.3/044-net-codel-fix-build-errors.patch
@@ -0,0 +1,51 @@
+From b49ab5f6bb7e609190065cb9a605de809e50ab60 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <levinsasha928@gmail.com>
+Date: Mon, 14 May 2012 11:57:06 +0000
+Subject: [PATCH] net: codel: fix build errors
+
+commit 669d67bf777def468970f2dcba1537edf3b2d329 upstream.
+
+Fix the following build error:
+
+net/sched/sch_fq_codel.c: In function 'fq_codel_dump_stats':
+net/sched/sch_fq_codel.c:464:3: error: unknown field 'qdisc_stats' specified in initializer
+net/sched/sch_fq_codel.c:464:3: warning: missing braces around initializer
+net/sched/sch_fq_codel.c:464:3: warning: (near initialization for 'st.<anonymous>')
+net/sched/sch_fq_codel.c:465:3: error: unknown field 'qdisc_stats' specified in initializer
+net/sched/sch_fq_codel.c:465:3: warning: excess elements in struct initializer
+net/sched/sch_fq_codel.c:465:3: warning: (near initialization for 'st')
+net/sched/sch_fq_codel.c:466:3: error: unknown field 'qdisc_stats' specified in initializer
+net/sched/sch_fq_codel.c:466:3: warning: excess elements in struct initializer
+net/sched/sch_fq_codel.c:466:3: warning: (near initialization for 'st')
+net/sched/sch_fq_codel.c:467:3: error: unknown field 'qdisc_stats' specified in initializer
+net/sched/sch_fq_codel.c:467:3: warning: excess elements in struct initializer
+net/sched/sch_fq_codel.c:467:3: warning: (near initialization for 'st')
+make[1]: *** [net/sched/sch_fq_codel.o] Error 1
+
+Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ net/sched/sch_fq_codel.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -461,13 +461,14 @@ static int fq_codel_dump_stats(struct Qd
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct tc_fq_codel_xstats st = {
+ .type = TCA_FQ_CODEL_XSTATS_QDISC,
+- .qdisc_stats.maxpacket = q->cstats.maxpacket,
+- .qdisc_stats.drop_overlimit = q->drop_overlimit,
+- .qdisc_stats.ecn_mark = q->cstats.ecn_mark,
+- .qdisc_stats.new_flow_count = q->new_flow_count,
+ };
+ struct list_head *pos;
+
++ st.qdisc_stats.maxpacket = q->cstats.maxpacket;
++ st.qdisc_stats.drop_overlimit = q->drop_overlimit;
++ st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
++ st.qdisc_stats.new_flow_count = q->new_flow_count;
++
+ list_for_each(pos, &q->new_flows)
+ st.qdisc_stats.new_flows_len++;
+
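The errors above come from designated initializers that name members of the anonymous union inside struct tc_fq_codel_xstats, which the gcc versions targeted here reject; the fix keeps only the type tag in the initializer and fills the union by plain assignment. A stripped-down sketch of the same pattern, with the stats structs reduced to a hypothetical couple of fields:

#include <stdio.h>

struct qd_stats { unsigned int maxpacket, drop_overlimit; };
struct cl_stats { int deficit; unsigned int count; };

struct xstats {
    unsigned int type;
    union {                      /* anonymous union, as in pkt_sched.h */
        struct qd_stats qdisc_stats;
        struct cl_stats class_stats;
    };
};

int main(void)
{
    /* older gcc rejects ".qdisc_stats.maxpacket = ..." inside this
     * initializer, so only the tag goes here and the union members are
     * assigned afterwards, as the patch does
     */
    struct xstats st = { .type = 0 /* TCA_FQ_CODEL_XSTATS_QDISC */ };

    st.qdisc_stats.maxpacket = 1514;
    st.qdisc_stats.drop_overlimit = 0;

    printf("type=%u maxpacket=%u\n", st.type, st.qdisc_stats.maxpacket);
    return 0;
}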
diff --git a/target/linux/generic/patches-3.3/045-codel-use-u16-field-instead-of-31bits-for-rec_inv_sq.patch b/target/linux/generic/patches-3.3/045-codel-use-u16-field-instead-of-31bits-for-rec_inv_sq.patch
new file mode 100644
index 000000000..933a3dcda
--- /dev/null
+++ b/target/linux/generic/patches-3.3/045-codel-use-u16-field-instead-of-31bits-for-rec_inv_sq.patch
@@ -0,0 +1,86 @@
+From 03333931c17d9c62ba4063d4e4fec1578c0729a7 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <eric.dumazet@gmail.com>
+Date: Sat, 12 May 2012 21:23:23 +0000
+Subject: [PATCH] codel: use u16 field instead of 31bits for rec_inv_sqrt
+
+commit 6ff272c9ad65eda219cd975b9da2dbc31cc812ee upstream.
+
+David pointed out gcc might generate poor code with 31bit fields.
+
+Using u16 is more than enough and permits a better code output.
+
+Also make the code intent more readable using constants, fixed point arithmetic
+not being trivial for everybody.
+
+Suggested-by: David Miller <davem@davemloft.net>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ include/net/codel.h | 25 +++++++++++++++----------
+ 1 file changed, 15 insertions(+), 10 deletions(-)
+
+--- a/include/net/codel.h
++++ b/include/net/codel.h
+@@ -133,13 +133,17 @@ struct codel_params {
+ struct codel_vars {
+ u32 count;
+ u32 lastcount;
+- bool dropping:1;
+- u32 rec_inv_sqrt:31;
++ bool dropping;
++ u16 rec_inv_sqrt;
+ codel_time_t first_above_time;
+ codel_time_t drop_next;
+ codel_time_t ldelay;
+ };
+
++#define REC_INV_SQRT_BITS (8 * sizeof(u16)) /* or sizeof_in_bits(rec_inv_sqrt) */
++/* needed shift to get a Q0.32 number from rec_inv_sqrt */
++#define REC_INV_SQRT_SHIFT (32 - REC_INV_SQRT_BITS)
++
+ /**
+ * struct codel_stats - contains codel shared variables and stats
+ * @maxpacket: largest packet we've seen so far
+@@ -173,17 +177,18 @@ static void codel_stats_init(struct code
+ * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
+ * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
+ *
+- * Here, invsqrt is a fixed point number (< 1.0), 31bit mantissa)
++ * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
+ */
+ static void codel_Newton_step(struct codel_vars *vars)
+ {
+- u32 invsqrt = vars->rec_inv_sqrt;
+- u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 31;
+- u64 val = (3LL << 31) - ((u64)vars->count * invsqrt2);
++ u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
++ u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
++ u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
+
+- val = (val * invsqrt) >> 32;
++ val >>= 2; /* avoid overflow in following multiply */
++ val = (val * invsqrt) >> (32 - 2 + 1);
+
+- vars->rec_inv_sqrt = val;
++ vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
+ }
+
+ /*
+@@ -195,7 +200,7 @@ static codel_time_t codel_control_law(co
+ codel_time_t interval,
+ u32 rec_inv_sqrt)
+ {
+- return t + reciprocal_divide(interval, rec_inv_sqrt << 1);
++ return t + reciprocal_divide(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
+ }
+
+
+@@ -326,7 +331,7 @@ static struct sk_buff *codel_dequeue(str
+ codel_Newton_step(vars);
+ } else {
+ vars->count = 1;
+- vars->rec_inv_sqrt = 0x7fffffff;
++ vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
+ }
+ vars->lastcount = vars->count;
+ vars->drop_next = codel_control_law(now, params->interval,
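After this change the Newton step widens the stored 16 bits to a Q0.32 working value, refines it, and keeps only the top 16 bits again; the extra shifts keep the 64-bit multiply from overflowing. A userspace sketch mirroring the reworked codel_Newton_step(); the test harness around it is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <math.h>

#define REC_INV_SQRT_BITS  16
#define REC_INV_SQRT_SHIFT (32 - REC_INV_SQRT_BITS)

static uint16_t newton_step_u16(uint16_t rec_inv_sqrt, uint32_t count)
{
    uint32_t invsqrt  = (uint32_t)rec_inv_sqrt << REC_INV_SQRT_SHIFT;  /* Q0.32 */
    uint32_t invsqrt2 = (uint32_t)(((uint64_t)invsqrt * invsqrt) >> 32);
    uint64_t val = (3ULL << 32) - (uint64_t)count * invsqrt2;

    val >>= 2;                              /* keep the next multiply in 64 bits */
    val = (val * invsqrt) >> (32 - 2 + 1);

    return (uint16_t)(val >> REC_INV_SQRT_SHIFT);
}

int main(void)
{
    uint16_t y = 0xffff;        /* ~0U >> REC_INV_SQRT_SHIFT, i.e. ~1.0 */
    uint32_t count;

    for (count = 2; count <= 16; count++) {
        y = newton_step_u16(y, count);
        printf("count=%2u  estimate=%.4f  1/sqrt=%.4f\n",
               count, y / 65536.0, 1.0 / sqrt((double)count));
    }
    return 0;
}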
diff --git a/target/linux/generic/patches-3.3/046-fq_codel-qdisc-backlog.patch b/target/linux/generic/patches-3.3/046-fq_codel-qdisc-backlog.patch
new file mode 100644
index 000000000..f9bcbbd08
--- /dev/null
+++ b/target/linux/generic/patches-3.3/046-fq_codel-qdisc-backlog.patch
@@ -0,0 +1,132 @@
+From 7bd90773f89001ea4960ed47676b550137f3facb Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 16 May 2012 04:39:09 +0000
+Subject: [PATCH] fq_codel: should use qdisc backlog as threshold
+
+commit 865ec5523dadbedefbc5710a68969f686a28d928 upstream.
+
+codel_should_drop() logic allows a packet being not dropped if queue
+size is under max packet size.
+
+In fq_codel, we have two possible backlogs : The qdisc global one, and
+the flow local one.
+
+The meaningful one for codel_should_drop() should be the global backlog,
+not the per flow one, so that thin flows can have a non zero drop/mark
+probability.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Dave Taht <dave.taht@bufferbloat.net>
+Cc: Kathleen Nichols <nichols@pollere.com>
+Cc: Van Jacobson <van@pollere.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ include/net/codel.h | 15 +++++++--------
+ net/sched/sch_codel.c | 4 ++--
+ net/sched/sch_fq_codel.c | 5 +++--
+ 3 files changed, 12 insertions(+), 12 deletions(-)
+
+--- a/include/net/codel.h
++++ b/include/net/codel.h
+@@ -205,7 +205,7 @@ static codel_time_t codel_control_law(co
+
+
+ static bool codel_should_drop(const struct sk_buff *skb,
+- unsigned int *backlog,
++ struct Qdisc *sch,
+ struct codel_vars *vars,
+ struct codel_params *params,
+ struct codel_stats *stats,
+@@ -219,13 +219,13 @@ static bool codel_should_drop(const stru
+ }
+
+ vars->ldelay = now - codel_get_enqueue_time(skb);
+- *backlog -= qdisc_pkt_len(skb);
++ sch->qstats.backlog -= qdisc_pkt_len(skb);
+
+ if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
+ stats->maxpacket = qdisc_pkt_len(skb);
+
+ if (codel_time_before(vars->ldelay, params->target) ||
+- *backlog <= stats->maxpacket) {
++ sch->qstats.backlog <= stats->maxpacket) {
+ /* went below - stay below for at least interval */
+ vars->first_above_time = 0;
+ return false;
+@@ -249,8 +249,7 @@ static struct sk_buff *codel_dequeue(str
+ struct codel_params *params,
+ struct codel_vars *vars,
+ struct codel_stats *stats,
+- codel_skb_dequeue_t dequeue_func,
+- u32 *backlog)
++ codel_skb_dequeue_t dequeue_func)
+ {
+ struct sk_buff *skb = dequeue_func(vars, sch);
+ codel_time_t now;
+@@ -261,7 +260,7 @@ static struct sk_buff *codel_dequeue(str
+ return skb;
+ }
+ now = codel_get_time();
+- drop = codel_should_drop(skb, backlog, vars, params, stats, now);
++ drop = codel_should_drop(skb, sch, vars, params, stats, now);
+ if (vars->dropping) {
+ if (!drop) {
+ /* sojourn time below target - leave dropping state */
+@@ -292,7 +291,7 @@ static struct sk_buff *codel_dequeue(str
+ qdisc_drop(skb, sch);
+ stats->drop_count++;
+ skb = dequeue_func(vars, sch);
+- if (!codel_should_drop(skb, backlog,
++ if (!codel_should_drop(skb, sch,
+ vars, params, stats, now)) {
+ /* leave dropping state */
+ vars->dropping = false;
+@@ -313,7 +312,7 @@ static struct sk_buff *codel_dequeue(str
+ stats->drop_count++;
+
+ skb = dequeue_func(vars, sch);
+- drop = codel_should_drop(skb, backlog, vars, params,
++ drop = codel_should_drop(skb, sch, vars, params,
+ stats, now);
+ }
+ vars->dropping = true;
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -77,8 +77,8 @@ static struct sk_buff *codel_qdisc_deque
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+- skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
+- dequeue, &sch->qstats.backlog);
++ skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
++
+ /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+ * or HTB crashes. Defer it for next round.
+ */
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -217,13 +217,14 @@ static int fq_codel_enqueue(struct sk_bu
+ */
+ static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+ {
++ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct fq_codel_flow *flow;
+ struct sk_buff *skb = NULL;
+
+ flow = container_of(vars, struct fq_codel_flow, cvars);
+ if (flow->head) {
+ skb = dequeue_head(flow);
+- sch->qstats.backlog -= qdisc_pkt_len(skb);
++ q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
+ sch->q.qlen--;
+ }
+ return skb;
+@@ -256,7 +257,7 @@ begin:
+ prev_ecn_mark = q->cstats.ecn_mark;
+
+ skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
+- dequeue, &q->backlogs[flow - q->flows]);
++ dequeue);
+
+ flow->dropped += q->cstats.drop_count - prev_drop_count;
+ flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
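The decisive comparison is the "stay below" test in codel_should_drop(): a sojourn time below target, or a backlog no larger than one max-size packet, means no drop. With a per-flow backlog a thin flow can never exceed maxpacket and so could never be marked; with the qdisc-wide backlog it can. A small sketch of that test only, with made-up numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool stays_below(uint32_t sojourn_us, uint32_t target_us,
                        uint32_t backlog_bytes, uint32_t maxpacket)
{
    /* mirrors: codel_time_before(ldelay, target) || backlog <= maxpacket */
    return sojourn_us < target_us || backlog_bytes <= maxpacket;
}

int main(void)
{
    uint32_t sojourn = 7000, target = 5000, maxpacket = 1514;

    /* one 100-byte packet in this flow, ~200 KB queued in the whole qdisc */
    printf("per-flow backlog : %s\n",
           stays_below(sojourn, target,    100, maxpacket) ? "never drops" : "may drop");
    printf("qdisc backlog    : %s\n",
           stays_below(sojourn, target, 200000, maxpacket) ? "never drops" : "may drop");
    return 0;
}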
diff --git a/target/linux/generic/patches-3.3/047-spi_message_queue.patch b/target/linux/generic/patches-3.3/047-spi_message_queue.patch
new file mode 100644
index 000000000..9aff0be2a
--- /dev/null
+++ b/target/linux/generic/patches-3.3/047-spi_message_queue.patch
@@ -0,0 +1,603 @@
+commit ffbbdd21329f3e15eeca6df2d4bc11c04d9d91c0
+Author: Linus Walleij <linus.walleij@linaro.org>
+Date: Wed Feb 22 10:05:38 2012 +0100
+
+ spi: create a message queueing infrastructure
+
+ This rips the message queue in the PL022 driver out and pushes
+ it into (optional) common infrastructure. Drivers that want to
+ use the message pumping thread will need to define the new
+ per-messags transfer methods and leave the deprecated transfer()
+ method as NULL.
+
+ Most of the design is described in the documentation changes that
+ are included in this patch.
+
+ Since there is a queue that need to be stopped when the system
+ is suspending/resuming, two new calls are implemented for the
+ device drivers to call in their suspend()/resume() functions:
+ spi_master_suspend() and spi_master_resume().
+
+ ChangeLog v1->v2:
+ - Remove Kconfig entry and do not make the queue support optional
+ at all, instead be more agressive and have it as part of the
+ compulsory infrastructure.
+ - If the .transfer() method is implemented, delete print a small
+ deprecation notice and do not start the transfer pump.
+ - Fix a bitrotted comment.
+ ChangeLog v2->v3:
+ - Fix up a problematic sequence courtesy of Chris Blair.
+ - Stop rather than destroy the queue on suspend() courtesy of
+ Chris Blair.
+
+ Signed-off-by: Chris Blair <chris.blair@stericsson.com>
+ Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+ Tested-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ Reviewed-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
+
+[Florian: dropped the changes on drivers/spi/spi-pl022.c, removed
+the dev_info() about unqueued drivers still using the master function]
+
+--- a/Documentation/spi/spi-summary
++++ b/Documentation/spi/spi-summary
+@@ -1,7 +1,7 @@
+ Overview of Linux kernel SPI support
+ ====================================
+
+-21-May-2007
++02-Feb-2012
+
+ What is SPI?
+ ------------
+@@ -483,9 +483,9 @@ also initialize its own internal state.
+ and those methods.)
+
+ After you initialize the spi_master, then use spi_register_master() to
+-publish it to the rest of the system. At that time, device nodes for
+-the controller and any predeclared spi devices will be made available,
+-and the driver model core will take care of binding them to drivers.
++publish it to the rest of the system. At that time, device nodes for the
++controller and any predeclared spi devices will be made available, and
++the driver model core will take care of binding them to drivers.
+
+ If you need to remove your SPI controller driver, spi_unregister_master()
+ will reverse the effect of spi_register_master().
+@@ -521,21 +521,53 @@ SPI MASTER METHODS
+ ** When you code setup(), ASSUME that the controller
+ ** is actively processing transfers for another device.
+
+- master->transfer(struct spi_device *spi, struct spi_message *message)
+- This must not sleep. Its responsibility is arrange that the
+- transfer happens and its complete() callback is issued. The two
+- will normally happen later, after other transfers complete, and
+- if the controller is idle it will need to be kickstarted.
+-
+ master->cleanup(struct spi_device *spi)
+ Your controller driver may use spi_device.controller_state to hold
+ state it dynamically associates with that device. If you do that,
+ be sure to provide the cleanup() method to free that state.
+
++ master->prepare_transfer_hardware(struct spi_master *master)
++ This will be called by the queue mechanism to signal to the driver
++ that a message is coming in soon, so the subsystem requests the
++ driver to prepare the transfer hardware by issuing this call.
++ This may sleep.
++
++ master->unprepare_transfer_hardware(struct spi_master *master)
++ This will be called by the queue mechanism to signal to the driver
++ that there are no more messages pending in the queue and it may
++ relax the hardware (e.g. by power management calls). This may sleep.
++
++ master->transfer_one_message(struct spi_master *master,
++ struct spi_message *mesg)
++ The subsystem calls the driver to transfer a single message while
++ queuing transfers that arrive in the meantime. When the driver is
++ finished with this message, it must call
++ spi_finalize_current_message() so the subsystem can issue the next
++ transfer. This may sleep.
++
++ DEPRECATED METHODS
++
++ master->transfer(struct spi_device *spi, struct spi_message *message)
++ This must not sleep. Its responsibility is arrange that the
++ transfer happens and its complete() callback is issued. The two
++ will normally happen later, after other transfers complete, and
++ if the controller is idle it will need to be kickstarted. This
++ method is not used on queued controllers and must be NULL if
++ transfer_one_message() and (un)prepare_transfer_hardware() are
++ implemented.
++
+
+ SPI MESSAGE QUEUE
+
+-The bulk of the driver will be managing the I/O queue fed by transfer().
++If you are happy with the standard queueing mechanism provided by the
++SPI subsystem, just implement the queued methods specified above. Using
++the message queue has the upside of centralizing a lot of code and
++providing pure process-context execution of methods. The message queue
++can also be elevated to realtime priority on high-priority SPI traffic.
++
++Unless the queueing mechanism in the SPI subsystem is selected, the bulk
++of the driver will be managing the I/O queue fed by the now deprecated
++function transfer().
+
+ That queue could be purely conceptual. For example, a driver used only
+ for low-frequency sensor access might be fine using synchronous PIO.
+@@ -561,4 +593,6 @@ Stephen Street
+ Mark Underwood
+ Andrew Victor
+ Vitaly Wool
+-
++Grant Likely
++Mark Brown
++Linus Walleij
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -30,6 +30,9 @@
+ #include <linux/of_spi.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/export.h>
++#include <linux/sched.h>
++#include <linux/delay.h>
++#include <linux/kthread.h>
+
+ static void spidev_release(struct device *dev)
+ {
+@@ -507,6 +510,293 @@ spi_register_board_info(struct spi_board
+
+ /*-------------------------------------------------------------------------*/
+
++/**
++ * spi_pump_messages - kthread work function which processes spi message queue
++ * @work: pointer to kthread work struct contained in the master struct
++ *
++ * This function checks if there is any spi message in the queue that
++ * needs processing and if so call out to the driver to initialize hardware
++ * and transfer each message.
++ *
++ */
++static void spi_pump_messages(struct kthread_work *work)
++{
++ struct spi_master *master =
++ container_of(work, struct spi_master, pump_messages);
++ unsigned long flags;
++ bool was_busy = false;
++ int ret;
++
++ /* Lock queue and check for queue work */
++ spin_lock_irqsave(&master->queue_lock, flags);
++ if (list_empty(&master->queue) || !master->running) {
++ if (master->busy) {
++ ret = master->unprepare_transfer_hardware(master);
++ if (ret) {
++ dev_err(&master->dev,
++ "failed to unprepare transfer hardware\n");
++ return;
++ }
++ }
++ master->busy = false;
++ spin_unlock_irqrestore(&master->queue_lock, flags);
++ return;
++ }
++
++ /* Make sure we are not already running a message */
++ if (master->cur_msg) {
++ spin_unlock_irqrestore(&master->queue_lock, flags);
++ return;
++ }
++ /* Extract head of queue */
++ master->cur_msg =
++ list_entry(master->queue.next, struct spi_message, queue);
++
++ list_del_init(&master->cur_msg->queue);
++ if (master->busy)
++ was_busy = true;
++ else
++ master->busy = true;
++ spin_unlock_irqrestore(&master->queue_lock, flags);
++
++ if (!was_busy) {
++ ret = master->prepare_transfer_hardware(master);
++ if (ret) {
++ dev_err(&master->dev,
++ "failed to prepare transfer hardware\n");
++ return;
++ }
++ }
++
++ ret = master->transfer_one_message(master, master->cur_msg);
++ if (ret) {
++ dev_err(&master->dev,
++ "failed to transfer one message from queue\n");
++ return;
++ }
++}
++
++static int spi_init_queue(struct spi_master *master)
++{
++ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
++
++ INIT_LIST_HEAD(&master->queue);
++ spin_lock_init(&master->queue_lock);
++
++ master->running = false;
++ master->busy = false;
++
++ init_kthread_worker(&master->kworker);
++ master->kworker_task = kthread_run(kthread_worker_fn,
++ &master->kworker,
++ dev_name(&master->dev));
++ if (IS_ERR(master->kworker_task)) {
++ dev_err(&master->dev, "failed to create message pump task\n");
++ return -ENOMEM;
++ }
++ init_kthread_work(&master->pump_messages, spi_pump_messages);
++
++ /*
++ * Master config will indicate if this controller should run the
++ * message pump with high (realtime) priority to reduce the transfer
++ * latency on the bus by minimising the delay between a transfer
++ * request and the scheduling of the message pump thread. Without this
++ * setting the message pump thread will remain at default priority.
++ */
++ if (master->rt) {
++ dev_info(&master->dev,
++ "will run message pump with realtime priority\n");
++ sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
++ }
++
++ return 0;
++}
++
++/**
++ * spi_get_next_queued_message() - called by driver to check for queued
++ * messages
++ * @master: the master to check for queued messages
++ *
++ * If there are more messages in the queue, the next message is returned from
++ * this call.
++ */
++struct spi_message *spi_get_next_queued_message(struct spi_master *master)
++{
++ struct spi_message *next;
++ unsigned long flags;
++
++ /* get a pointer to the next message, if any */
++ spin_lock_irqsave(&master->queue_lock, flags);
++ if (list_empty(&master->queue))
++ next = NULL;
++ else
++ next = list_entry(master->queue.next,
++ struct spi_message, queue);
++ spin_unlock_irqrestore(&master->queue_lock, flags);
++
++ return next;
++}
++EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
++
++/**
++ * spi_finalize_current_message() - the current message is complete
++ * @master: the master to return the message to
++ *
++ * Called by the driver to notify the core that the message in the front of the
++ * queue is complete and can be removed from the queue.
++ */
++void spi_finalize_current_message(struct spi_master *master)
++{
++ struct spi_message *mesg;
++ unsigned long flags;
++
++ spin_lock_irqsave(&master->queue_lock, flags);
++ mesg = master->cur_msg;
++ master->cur_msg = NULL;
++
++ queue_kthread_work(&master->kworker, &master->pump_messages);
++ spin_unlock_irqrestore(&master->queue_lock, flags);
++
++ mesg->state = NULL;
++ if (mesg->complete)
++ mesg->complete(mesg->context);
++}
++EXPORT_SYMBOL_GPL(spi_finalize_current_message);
++
++static int spi_start_queue(struct spi_master *master)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&master->queue_lock, flags);
++
++ if (master->running || master->busy) {
++ spin_unlock_irqrestore(&master->queue_lock, flags);
++ return -EBUSY;
++ }
++
++ master->running = true;
++ master->cur_msg = NULL;
++ spin_unlock_irqrestore(&master->queue_lock, flags);
++
++ queue_kthread_work(&master->kworker, &master->pump_messages);
++
++ return 0;
++}
++
++static int spi_stop_queue(struct spi_master *master)
++{
++ unsigned long flags;
++ unsigned limit = 500;
++ int ret = 0;
++
++ spin_lock_irqsave(&master->queue_lock, flags);
++
++ /*
++ * This is a bit lame, but is optimized for the common execution path.
++ * A wait_queue on the master->busy could be used, but then the common
++ * execution path (pump_messages) would be required to call wake_up or
++ * friends on every SPI message. Do this instead.
++ */
++ while ((!list_empty(&master->queue) || master->busy) && limit--) {
++ spin_unlock_irqrestore(&master->queue_lock, flags);
++ msleep(10);
++ spin_lock_irqsave(&master->queue_lock, flags);
++ }
++
++ if (!list_empty(&master->queue) || master->busy)
++ ret = -EBUSY;
++ else
++ master->running = false;
++
++ spin_unlock_irqrestore(&master->queue_lock, flags);
++
++ if (ret) {
++ dev_warn(&master->dev,
++ "could not stop message queue\n");
++ return ret;
++ }
++ return ret;
++}
++
++static int spi_destroy_queue(struct spi_master *master)
++{
++ int ret;
++
++ ret = spi_stop_queue(master);
++
++ /*
++ * flush_kthread_worker will block until all work is done.
++ * If the reason that stop_queue timed out is that the work will never
++ * finish, then it does no good to call flush/stop thread, so
++ * return anyway.
++ */
++ if (ret) {
++ dev_err(&master->dev, "problem destroying queue\n");
++ return ret;
++ }
++
++ flush_kthread_worker(&master->kworker);
++ kthread_stop(master->kworker_task);
++
++ return 0;
++}
++
++/**
++ * spi_queued_transfer - transfer function for queued transfers
++ * @spi: spi device which is requesting transfer
++ * @msg: spi message which is to handled is queued to driver queue
++ */
++static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
++{
++ struct spi_master *master = spi->master;
++ unsigned long flags;
++
++ spin_lock_irqsave(&master->queue_lock, flags);
++
++ if (!master->running) {
++ spin_unlock_irqrestore(&master->queue_lock, flags);
++ return -ESHUTDOWN;
++ }
++ msg->actual_length = 0;
++ msg->status = -EINPROGRESS;
++
++ list_add_tail(&msg->queue, &master->queue);
++ if (master->running && !master->busy)
++ queue_kthread_work(&master->kworker, &master->pump_messages);
++
++ spin_unlock_irqrestore(&master->queue_lock, flags);
++ return 0;
++}
++
++static int spi_master_initialize_queue(struct spi_master *master)
++{
++ int ret;
++
++ master->queued = true;
++ master->transfer = spi_queued_transfer;
++
++ /* Initialize and start queue */
++ ret = spi_init_queue(master);
++ if (ret) {
++ dev_err(&master->dev, "problem initializing queue\n");
++ goto err_init_queue;
++ }
++ ret = spi_start_queue(master);
++ if (ret) {
++ dev_err(&master->dev, "problem starting queue\n");
++ goto err_start_queue;
++ }
++
++ return 0;
++
++err_start_queue:
++err_init_queue:
++ spi_destroy_queue(master);
++ return ret;
++}
++
++/*-------------------------------------------------------------------------*/
++
+ static void spi_master_release(struct device *dev)
+ {
+ struct spi_master *master;
+@@ -522,6 +812,7 @@ static struct class spi_master_class = {
+ };
+
+
++
+ /**
+ * spi_alloc_master - allocate SPI master controller
+ * @dev: the controller, possibly using the platform_bus
+@@ -621,6 +912,15 @@ int spi_register_master(struct spi_maste
+ dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
+ dynamic ? " (dynamic)" : "");
+
++ /* If we're using a queued driver, start the queue */
++ if (!master->transfer) {
++ status = spi_master_initialize_queue(master);
++ if (status) {
++ device_unregister(&master->dev);
++ goto done;
++ }
++ }
++
+ mutex_lock(&board_lock);
+ list_add_tail(&master->list, &spi_master_list);
+ list_for_each_entry(bi, &board_list, list)
+@@ -636,7 +936,6 @@ done:
+ }
+ EXPORT_SYMBOL_GPL(spi_register_master);
+
+-
+ static int __unregister(struct device *dev, void *null)
+ {
+ spi_unregister_device(to_spi_device(dev));
+@@ -657,6 +956,11 @@ void spi_unregister_master(struct spi_ma
+ {
+ int dummy;
+
++ if (master->queued) {
++ if (spi_destroy_queue(master))
++ dev_err(&master->dev, "queue remove failed\n");
++ }
++
+ mutex_lock(&board_lock);
+ list_del(&master->list);
+ mutex_unlock(&board_lock);
+@@ -666,6 +970,37 @@ void spi_unregister_master(struct spi_ma
+ }
+ EXPORT_SYMBOL_GPL(spi_unregister_master);
+
++int spi_master_suspend(struct spi_master *master)
++{
++ int ret;
++
++ /* Basically no-ops for non-queued masters */
++ if (!master->queued)
++ return 0;
++
++ ret = spi_stop_queue(master);
++ if (ret)
++ dev_err(&master->dev, "queue stop failed\n");
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(spi_master_suspend);
++
++int spi_master_resume(struct spi_master *master)
++{
++ int ret;
++
++ if (!master->queued)
++ return 0;
++
++ ret = spi_start_queue(master);
++ if (ret)
++ dev_err(&master->dev, "queue restart failed\n");
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(spi_master_resume);
++
+ static int __spi_master_match(struct device *dev, void *data)
+ {
+ struct spi_master *m;
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -22,6 +22,7 @@
+ #include <linux/device.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/slab.h>
++#include <linux/kthread.h>
+
+ /*
+ * INTERFACES between SPI master-side drivers and SPI infrastructure.
+@@ -235,6 +236,27 @@ static inline void spi_unregister_driver
+ * the device whose settings are being modified.
+ * @transfer: adds a message to the controller's transfer queue.
+ * @cleanup: frees controller-specific state
++ * @queued: whether this master is providing an internal message queue
++ * @kworker: thread struct for message pump
++ * @kworker_task: pointer to task for message pump kworker thread
++ * @pump_messages: work struct for scheduling work to the message pump
++ * @queue_lock: spinlock to synchronise access to message queue
++ * @queue: message queue
++ * @cur_msg: the currently in-flight message
++ * @busy: message pump is busy
++ * @running: message pump is running
++ * @rt: whether this queue is set to run as a realtime task
++ * @prepare_transfer_hardware: a message will soon arrive from the queue
++ * so the subsystem requests the driver to prepare the transfer hardware
++ * by issuing this call
++ * @transfer_one_message: the subsystem calls the driver to transfer a single
++ * message while queuing transfers that arrive in the meantime. When the
++ * driver is finished with this message, it must call
++ * spi_finalize_current_message() so the subsystem can issue the next
++ * transfer
++ * @unprepare_transfer_hardware: there are currently no more messages on the
++ * queue so the subsystem notifies the driver that it may relax the
++ * hardware by issuing this call
+ *
+ * Each SPI master controller can communicate with one or more @spi_device
+ * children. These make a small bus, sharing MOSI, MISO and SCK signals
+@@ -318,6 +340,28 @@ struct spi_master {
+
+ /* called on release() to free memory provided by spi_master */
+ void (*cleanup)(struct spi_device *spi);
++
++ /*
++ * These hooks are for drivers that want to use the generic
++ * master transfer queueing mechanism. If these are used, the
++ * transfer() function above must NOT be specified by the driver.
++ * Over time we expect SPI drivers to be phased over to this API.
++ */
++ bool queued;
++ struct kthread_worker kworker;
++ struct task_struct *kworker_task;
++ struct kthread_work pump_messages;
++ spinlock_t queue_lock;
++ struct list_head queue;
++ struct spi_message *cur_msg;
++ bool busy;
++ bool running;
++ bool rt;
++
++ int (*prepare_transfer_hardware)(struct spi_master *master);
++ int (*transfer_one_message)(struct spi_master *master,
++ struct spi_message *mesg);
++ int (*unprepare_transfer_hardware)(struct spi_master *master);
+ };
+
+ static inline void *spi_master_get_devdata(struct spi_master *master)
+@@ -343,6 +387,13 @@ static inline void spi_master_put(struct
+ put_device(&master->dev);
+ }
+
++/* PM calls that need to be issued by the driver */
++extern int spi_master_suspend(struct spi_master *master);
++extern int spi_master_resume(struct spi_master *master);
++
++/* Calls the driver makes to interact with the message queue */
++extern struct spi_message *spi_get_next_queued_message(struct spi_master *master);
++extern void spi_finalize_current_message(struct spi_master *master);
+
+ /* the spi driver core manages memory for the spi_master classdev */
+ extern struct spi_master *
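A minimal sketch of how a controller driver would sit on top of the queued message pump added above. The foo_* names are hypothetical and this is only an outline against the 3.3-era API, not a complete driver; with the follow-up 048 patch applied the prepare/unprepare_transfer_hardware hooks can simply be left unset, as here.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

/* Sketch only: walk the transfers of one message, then hand it back so the
 * core's kthread worker can pump the next one. */
static int foo_spi_transfer_one_message(struct spi_master *master,
					struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* clock xfer->tx_buf out / xfer->rx_buf in here */
		msg->actual_length += xfer->len;
	}

	msg->status = 0;
	spi_finalize_current_message(master);
	return 0;
}

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master = spi_alloc_master(&pdev->dev, 0);

	if (!master)
		return -ENOMEM;

	master->bus_num = pdev->id;
	master->num_chipselect = 1;
	/* leaving master->transfer NULL makes spi_register_master() set up
	 * and start the internal queue via spi_master_initialize_queue() */
	master->transfer_one_message = foo_spi_transfer_one_message;

	return spi_register_master(master);
}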
diff --git a/target/linux/generic/patches-3.3/048-spi-Dont-call-prepare-unprepare-transfer-if-not-popu.patch b/target/linux/generic/patches-3.3/048-spi-Dont-call-prepare-unprepare-transfer-if-not-popu.patch
new file mode 100644
index 000000000..deef32e45
--- /dev/null
+++ b/target/linux/generic/patches-3.3/048-spi-Dont-call-prepare-unprepare-transfer-if-not-popu.patch
@@ -0,0 +1,39 @@
+From 7dfd2bd70228d1f8d468d58cb3d12ecd618479ed Mon Sep 17 00:00:00 2001
+From: Shubhrajyoti D <shubhrajyoti@ti.com>
+Date: Thu, 10 May 2012 19:20:41 +0530
+Subject: [PATCH] spi: Dont call prepare/unprepare transfer if not populated
+
+Currently the prepare/unprepare transfer are called unconditionally.
+The assumption is that every driver using the spi core queue infrastructure
+has to populate the prepare and unprepare functions. This encourages
+drivers to populate empty functions to prevent crashing.
+This patch prevents the call to prepare/unprepare if not populated.
+
+Signed-off-by: Shubhrajyoti D <shubhrajyoti@ti.com>
+Acked-by: Linus Walleij <linus.walleij@linaro.org>
+[grant.likely: fix whitespace defect]
+Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
+---
+ drivers/spi/spi.c | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -530,7 +530,7 @@ static void spi_pump_messages(struct kth
+ /* Lock queue and check for queue work */
+ spin_lock_irqsave(&master->queue_lock, flags);
+ if (list_empty(&master->queue) || !master->running) {
+- if (master->busy) {
++ if (master->busy && master->unprepare_transfer_hardware) {
+ ret = master->unprepare_transfer_hardware(master);
+ if (ret) {
+ dev_err(&master->dev,
+@@ -559,7 +559,7 @@ static void spi_pump_messages(struct kth
+ master->busy = true;
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+- if (!was_busy) {
++ if (!was_busy && master->prepare_transfer_hardware) {
+ ret = master->prepare_transfer_hardware(master);
+ if (ret) {
+ dev_err(&master->dev,
diff --git a/target/linux/generic/patches-3.3/049-codel-refine-one-condition-to-avoid-a-nul-rec_inv_sqrt.patch b/target/linux/generic/patches-3.3/049-codel-refine-one-condition-to-avoid-a-nul-rec_inv_sqrt.patch
new file mode 100644
index 000000000..bfb6ae205
--- /dev/null
+++ b/target/linux/generic/patches-3.3/049-codel-refine-one-condition-to-avoid-a-nul-rec_inv_sqrt.patch
@@ -0,0 +1,52 @@
+From b8fc328668a74e1314a19266755a54abd875e5a6 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 29 Jul 2012 20:52:21 +0000
+Subject: [PATCH] codel: refine one condition to avoid a nul rec_inv_sqrt
+
+commit 2359a47671fc4fb0fe5e9945f76c2cb10792c0f8 upstream.
+
+One condition before codel_Newton_step() was not good if
+we never left the dropping state for a flow. As a result
+rec_inv_sqrt was 0, instead of the ~0 initial value.
+
+codel control law was then set to a very aggressive mode, dropping
+many packets before reaching 'target' and recovering from this problem.
+
+To keep codel_vars_init() as efficient as possible, refine
+the condition to make sure rec_inv_sqrt initial value is correct
+
+Many thanks to Anton Mich for discovering the issue and suggesting
+a fix.
+
+Reported-by: Anton Mich <lp2s1h@gmail.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ include/net/codel.h | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/include/net/codel.h
++++ b/include/net/codel.h
+@@ -305,6 +305,8 @@ static struct sk_buff *codel_dequeue(str
+ }
+ }
+ } else if (drop) {
++ u32 delta;
++
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ } else {
+@@ -320,9 +322,11 @@ static struct sk_buff *codel_dequeue(str
+ * assume that the drop rate that controlled the queue on the
+ * last cycle is a good starting point to control it now.
+ */
+- if (codel_time_before(now - vars->drop_next,
++ delta = vars->count - vars->lastcount;
++ if (delta > 1 &&
++ codel_time_before(now - vars->drop_next,
+ 16 * params->interval)) {
+- vars->count = (vars->count - vars->lastcount) | 1;
++ vars->count = delta;
+ /* we dont care if rec_inv_sqrt approximation
+ * is not very precise :
+ * Next Newton steps will correct it quadratically.
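The "Newton steps" referenced in the comment above maintain rec_inv_sqrt as a 16-bit fixed-point estimate of 1/sqrt(count), from which CoDel derives its next drop time (roughly interval/sqrt(count)). A rough floating-point model of that iteration, only to illustrate the failure mode the changelog describes; this is not the kernel's fixed-point code.

#include <stdio.h>

/* One Newton-Raphson step toward y = 1/sqrt(count):
 *   y' = y * (3 - count * y * y) / 2
 * It converges quadratically when y starts near the true value. */
static double newton_step(double y, unsigned int count)
{
	return y * (3.0 - count * y * y) / 2.0;
}

int main(void)
{
	double y = 1.0;	/* the ~0 fixed-point seed corresponds to roughly 1.0 */
	unsigned int count;

	/* Normal operation: count grows one drop at a time, so every step
	 * starts close to the answer and tracks 1/sqrt(count) well. */
	for (count = 1; count <= 16; count++)
		y = newton_step(y, count);
	printf("count=16: y=%f (exact 0.25)\n", y);

	/* The problem being fixed: if rec_inv_sqrt ever collapses to 0, the
	 * iteration is stuck at 0 forever, the computed drop interval becomes
	 * 0, and CoDel drops far too aggressively. */
	y = newton_step(0.0, 16);
	printf("seeded with 0: y=%f\n", y);
	return 0;
}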
diff --git a/target/linux/generic/patches-3.3/050-rng_git_backport.patch b/target/linux/generic/patches-3.3/050-rng_git_backport.patch
new file mode 100644
index 000000000..825eb73a4
--- /dev/null
+++ b/target/linux/generic/patches-3.3/050-rng_git_backport.patch
@@ -0,0 +1,783 @@
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -125,21 +125,26 @@
+ * The current exported interfaces for gathering environmental noise
+ * from the devices are:
+ *
++ * void add_device_randomness(const void *buf, unsigned int size);
+ * void add_input_randomness(unsigned int type, unsigned int code,
+ * unsigned int value);
+- * void add_interrupt_randomness(int irq);
++ * void add_interrupt_randomness(int irq, int irq_flags);
+ * void add_disk_randomness(struct gendisk *disk);
+ *
+ * add_input_randomness() uses the input layer interrupt timing, as well as
+ * the event type information from the hardware.
+ *
+- * add_interrupt_randomness() uses the inter-interrupt timing as random
+- * inputs to the entropy pool. Note that not all interrupts are good
+- * sources of randomness! For example, the timer interrupts is not a
+- * good choice, because the periodicity of the interrupts is too
+- * regular, and hence predictable to an attacker. Network Interface
+- * Controller interrupts are a better measure, since the timing of the
+- * NIC interrupts are more unpredictable.
++ * add_interrupt_randomness() uses the interrupt timing as random
++ * inputs to the entropy pool. Using the cycle counters and the irq source
++ * as inputs, it feeds the randomness roughly once a second.
++ *
++ * add_device_randomness() is for adding data to the random pool that
++ * is likely to differ between two devices (or possibly even per boot).
++ * This would be things like MAC addresses or serial numbers, or the
++ * read-out of the RTC. This does *not* add any actual entropy to the
++ * pool, but it initializes the pool to different values for devices
++ * that might otherwise be identical and have very little entropy
++ * available to them (particularly common in the embedded world).
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
+@@ -248,6 +253,7 @@
+ #include <linux/percpu.h>
+ #include <linux/cryptohash.h>
+ #include <linux/fips.h>
++#include <linux/ptrace.h>
+
+ #ifdef CONFIG_GENERIC_HARDIRQS
+ # include <linux/irq.h>
+@@ -256,8 +262,12 @@
+ #include <asm/processor.h>
+ #include <asm/uaccess.h>
+ #include <asm/irq.h>
++#include <asm/irq_regs.h>
+ #include <asm/io.h>
+
++#define CREATE_TRACE_POINTS
++#include <trace/events/random.h>
++
+ /*
+ * Configuration information
+ */
+@@ -420,8 +430,10 @@ struct entropy_store {
+ /* read-write data: */
+ spinlock_t lock;
+ unsigned add_ptr;
++ unsigned input_rotate;
+ int entropy_count;
+- int input_rotate;
++ int entropy_total;
++ unsigned int initialized:1;
+ __u8 last_data[EXTRACT_SIZE];
+ };
+
+@@ -454,6 +466,10 @@ static struct entropy_store nonblocking_
+ .pool = nonblocking_pool_data
+ };
+
++static __u32 const twist_table[8] = {
++ 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
++ 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
++
+ /*
+ * This function adds bytes into the entropy "pool". It does not
+ * update the entropy estimate. The caller should call
+@@ -464,29 +480,24 @@ static struct entropy_store nonblocking_
+ * it's cheap to do so and helps slightly in the expected case where
+ * the entropy is concentrated in the low-order bits.
+ */
+-static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+- int nbytes, __u8 out[64])
++static void _mix_pool_bytes(struct entropy_store *r, const void *in,
++ int nbytes, __u8 out[64])
+ {
+- static __u32 const twist_table[8] = {
+- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+ unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+ int input_rotate;
+ int wordmask = r->poolinfo->poolwords - 1;
+ const char *bytes = in;
+ __u32 w;
+- unsigned long flags;
+
+- /* Taps are constant, so we can load them without holding r->lock. */
+ tap1 = r->poolinfo->tap1;
+ tap2 = r->poolinfo->tap2;
+ tap3 = r->poolinfo->tap3;
+ tap4 = r->poolinfo->tap4;
+ tap5 = r->poolinfo->tap5;
+
+- spin_lock_irqsave(&r->lock, flags);
+- input_rotate = r->input_rotate;
+- i = r->add_ptr;
++ smp_rmb();
++ input_rotate = ACCESS_ONCE(r->input_rotate);
++ i = ACCESS_ONCE(r->add_ptr);
+
+ /* mix one byte at a time to simplify size handling and churn faster */
+ while (nbytes--) {
+@@ -513,19 +524,61 @@ static void mix_pool_bytes_extract(struc
+ input_rotate += i ? 7 : 14;
+ }
+
+- r->input_rotate = input_rotate;
+- r->add_ptr = i;
++ ACCESS_ONCE(r->input_rotate) = input_rotate;
++ ACCESS_ONCE(r->add_ptr) = i;
++ smp_wmb();
+
+ if (out)
+ for (j = 0; j < 16; j++)
+ ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
++}
++
++static void __mix_pool_bytes(struct entropy_store *r, const void *in,
++ int nbytes, __u8 out[64])
++{
++ trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
++ _mix_pool_bytes(r, in, nbytes, out);
++}
+
++static void mix_pool_bytes(struct entropy_store *r, const void *in,
++ int nbytes, __u8 out[64])
++{
++ unsigned long flags;
++
++ trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
++ spin_lock_irqsave(&r->lock, flags);
++ _mix_pool_bytes(r, in, nbytes, out);
+ spin_unlock_irqrestore(&r->lock, flags);
+ }
+
+-static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
++struct fast_pool {
++ __u32 pool[4];
++ unsigned long last;
++ unsigned short count;
++ unsigned char rotate;
++ unsigned char last_timer_intr;
++};
++
++/*
++ * This is a fast mixing routine used by the interrupt randomness
++ * collector. It's hardcoded for a 128 bit pool and assumes that any
++ * locks that might be needed are taken by the caller.
++ */
++static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
+ {
+- mix_pool_bytes_extract(r, in, bytes, NULL);
++ const char *bytes = in;
++ __u32 w;
++ unsigned i = f->count;
++ unsigned input_rotate = f->rotate;
++
++ while (nbytes--) {
++ w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
++ f->pool[(i + 1) & 3];
++ f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
++ input_rotate += (i++ & 3) ? 7 : 14;
++ }
++ f->count = i;
++ f->rotate = input_rotate;
+ }
+
+ /*
+@@ -533,30 +586,38 @@ static void mix_pool_bytes(struct entrop
+ */
+ static void credit_entropy_bits(struct entropy_store *r, int nbits)
+ {
+- unsigned long flags;
+- int entropy_count;
++ int entropy_count, orig;
+
+ if (!nbits)
+ return;
+
+- spin_lock_irqsave(&r->lock, flags);
+-
+ DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
+- entropy_count = r->entropy_count;
++retry:
++ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ entropy_count += nbits;
++
+ if (entropy_count < 0) {
+ DEBUG_ENT("negative entropy/overflow\n");
+ entropy_count = 0;
+ } else if (entropy_count > r->poolinfo->POOLBITS)
+ entropy_count = r->poolinfo->POOLBITS;
+- r->entropy_count = entropy_count;
++ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
++ goto retry;
++
++ if (!r->initialized && nbits > 0) {
++ r->entropy_total += nbits;
++ if (r->entropy_total > 128)
++ r->initialized = 1;
++ }
++
++ trace_credit_entropy_bits(r->name, nbits, entropy_count,
++ r->entropy_total, _RET_IP_);
+
+ /* should we wake readers? */
+ if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
+ wake_up_interruptible(&random_read_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ }
+- spin_unlock_irqrestore(&r->lock, flags);
+ }
+
+ /*********************************************************************
+@@ -609,6 +670,25 @@ static void set_timer_rand_state(unsigne
+ }
+ #endif
+
++/*
++ * Add device- or boot-specific data to the input and nonblocking
++ * pools to help initialize them to unique values.
++ *
++ * None of this adds any entropy, it is meant to avoid the
++ * problem of the nonblocking pool having similar initial state
++ * across largely identical devices.
++ */
++void add_device_randomness(const void *buf, unsigned int size)
++{
++ unsigned long time = get_cycles() ^ jiffies;
++
++ mix_pool_bytes(&input_pool, buf, size, NULL);
++ mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
++ mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
++ mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
++}
++EXPORT_SYMBOL(add_device_randomness);
++
+ static struct timer_rand_state input_timer_state;
+
+ /*
+@@ -637,13 +717,9 @@ static void add_timer_randomness(struct
+ goto out;
+
+ sample.jiffies = jiffies;
+-
+- /* Use arch random value, fall back to cycles */
+- if (!arch_get_random_int(&sample.cycles))
+- sample.cycles = get_cycles();
+-
++ sample.cycles = get_cycles();
+ sample.num = num;
+- mix_pool_bytes(&input_pool, &sample, sizeof(sample));
++ mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
+
+ /*
+ * Calculate number of bits of randomness we probably added.
+@@ -700,17 +776,48 @@ void add_input_randomness(unsigned int t
+ }
+ EXPORT_SYMBOL_GPL(add_input_randomness);
+
+-void add_interrupt_randomness(int irq)
++static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
++
++void add_interrupt_randomness(int irq, int irq_flags)
+ {
+- struct timer_rand_state *state;
++ struct entropy_store *r;
++ struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
++ struct pt_regs *regs = get_irq_regs();
++ unsigned long now = jiffies;
++ __u32 input[4], cycles = get_cycles();
++
++ input[0] = cycles ^ jiffies;
++ input[1] = irq;
++ if (regs) {
++ __u64 ip = instruction_pointer(regs);
++ input[2] = ip;
++ input[3] = ip >> 32;
++ }
+
+- state = get_timer_rand_state(irq);
++ fast_mix(fast_pool, input, sizeof(input));
+
+- if (state == NULL)
++ if ((fast_pool->count & 1023) &&
++ !time_after(now, fast_pool->last + HZ))
+ return;
+
+- DEBUG_ENT("irq event %d\n", irq);
+- add_timer_randomness(state, 0x100 + irq);
++ fast_pool->last = now;
++
++ r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
++ __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
++ /*
++ * If we don't have a valid cycle counter, and we see
++ * back-to-back timer interrupts, then skip giving credit for
++ * any entropy.
++ */
++ if (cycles == 0) {
++ if (irq_flags & __IRQF_TIMER) {
++ if (fast_pool->last_timer_intr)
++ return;
++ fast_pool->last_timer_intr = 1;
++ } else
++ fast_pool->last_timer_intr = 0;
++ }
++ credit_entropy_bits(r, 1);
+ }
+
+ #ifdef CONFIG_BLOCK
+@@ -742,7 +849,11 @@ static ssize_t extract_entropy(struct en
+ */
+ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+ {
+- __u32 tmp[OUTPUT_POOL_WORDS];
++ union {
++ __u32 tmp[OUTPUT_POOL_WORDS];
++ long hwrand[4];
++ } u;
++ int i;
+
+ if (r->pull && r->entropy_count < nbytes * 8 &&
+ r->entropy_count < r->poolinfo->POOLBITS) {
+@@ -753,17 +864,22 @@ static void xfer_secondary_pool(struct e
+ /* pull at least as many as BYTES as wakeup BITS */
+ bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
+ /* but never more than the buffer size */
+- bytes = min_t(int, bytes, sizeof(tmp));
++ bytes = min_t(int, bytes, sizeof(u.tmp));
+
+ DEBUG_ENT("going to reseed %s with %d bits "
+ "(%d of %d requested)\n",
+ r->name, bytes * 8, nbytes * 8, r->entropy_count);
+
+- bytes = extract_entropy(r->pull, tmp, bytes,
++ bytes = extract_entropy(r->pull, u.tmp, bytes,
+ random_read_wakeup_thresh / 8, rsvd);
+- mix_pool_bytes(r, tmp, bytes);
++ mix_pool_bytes(r, u.tmp, bytes, NULL);
+ credit_entropy_bits(r, bytes*8);
+ }
++ for (i = 0; i < 4; i++)
++ if (arch_get_random_long(&u.hwrand[i]))
++ break;
++ if (i)
++ mix_pool_bytes(r, &u.hwrand, i * sizeof(u.hwrand[0]), 0);
+ }
+
+ /*
+@@ -822,9 +938,11 @@ static void extract_buf(struct entropy_s
+ int i;
+ __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+ __u8 extract[64];
++ unsigned long flags;
+
+ /* Generate a hash across the pool, 16 words (512 bits) at a time */
+ sha_init(hash);
++ spin_lock_irqsave(&r->lock, flags);
+ for (i = 0; i < r->poolinfo->poolwords; i += 16)
+ sha_transform(hash, (__u8 *)(r->pool + i), workspace);
+
+@@ -837,7 +955,8 @@ static void extract_buf(struct entropy_s
+ * brute-forcing the feedback as hard as brute-forcing the
+ * hash.
+ */
+- mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
++ __mix_pool_bytes(r, hash, sizeof(hash), extract);
++ spin_unlock_irqrestore(&r->lock, flags);
+
+ /*
+ * To avoid duplicates, we atomically extract a portion of the
+@@ -860,12 +979,12 @@ static void extract_buf(struct entropy_s
+ }
+
+ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+- size_t nbytes, int min, int reserved)
++ size_t nbytes, int min, int reserved)
+ {
+ ssize_t ret = 0, i;
+ __u8 tmp[EXTRACT_SIZE];
+- unsigned long flags;
+
++ trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
+ xfer_secondary_pool(r, nbytes);
+ nbytes = account(r, nbytes, min, reserved);
+
+@@ -873,6 +992,8 @@ static ssize_t extract_entropy(struct en
+ extract_buf(r, tmp);
+
+ if (fips_enabled) {
++ unsigned long flags;
++
+ spin_lock_irqsave(&r->lock, flags);
+ if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
+ panic("Hardware RNG duplicated output!\n");
+@@ -898,6 +1019,7 @@ static ssize_t extract_entropy_user(stru
+ ssize_t ret = 0, i;
+ __u8 tmp[EXTRACT_SIZE];
+
++ trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
+ xfer_secondary_pool(r, nbytes);
+ nbytes = account(r, nbytes, 0, 0);
+
+@@ -931,17 +1053,35 @@ static ssize_t extract_entropy_user(stru
+
+ /*
+ * This function is the exported kernel interface. It returns some
+- * number of good random numbers, suitable for seeding TCP sequence
+- * numbers, etc.
++ * number of good random numbers, suitable for key generation, seeding
++ * TCP sequence numbers, etc. It does not use the hw random number
++ * generator, if available; use get_random_bytes_arch() for that.
+ */
+ void get_random_bytes(void *buf, int nbytes)
+ {
++ extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
++}
++EXPORT_SYMBOL(get_random_bytes);
++
++/*
++ * This function will use the architecture-specific hardware random
++ * number generator if it is available. The arch-specific hw RNG will
++ * almost certainly be faster than what we can do in software, but it
++ * is impossible to verify that it is implemented securely (as
++ * opposed to, say, the AES encryption of a sequence number using a
++ * key known by the NSA). So it's useful if we need the speed, but
++ * only if we're willing to trust the hardware manufacturer not to
++ * have put in a back door.
++ */
++void get_random_bytes_arch(void *buf, int nbytes)
++{
+ char *p = buf;
+
++ trace_get_random_bytes(nbytes, _RET_IP_);
+ while (nbytes) {
+ unsigned long v;
+ int chunk = min(nbytes, (int)sizeof(unsigned long));
+-
++
+ if (!arch_get_random_long(&v))
+ break;
+
+@@ -950,9 +1090,11 @@ void get_random_bytes(void *buf, int nby
+ nbytes -= chunk;
+ }
+
+- extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
++ if (nbytes)
++ extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
+ }
+-EXPORT_SYMBOL(get_random_bytes);
++EXPORT_SYMBOL(get_random_bytes_arch);
++
+
+ /*
+ * init_std_data - initialize pool with system data
+@@ -966,21 +1108,18 @@ EXPORT_SYMBOL(get_random_bytes);
+ static void init_std_data(struct entropy_store *r)
+ {
+ int i;
+- ktime_t now;
+- unsigned long flags;
++ ktime_t now = ktime_get_real();
++ unsigned long rv;
+
+- spin_lock_irqsave(&r->lock, flags);
+ r->entropy_count = 0;
+- spin_unlock_irqrestore(&r->lock, flags);
+-
+- now = ktime_get_real();
+- mix_pool_bytes(r, &now, sizeof(now));
+- for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
+- if (!arch_get_random_long(&flags))
++ r->entropy_total = 0;
++ mix_pool_bytes(r, &now, sizeof(now), NULL);
++ for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
++ if (!arch_get_random_long(&rv))
+ break;
+- mix_pool_bytes(r, &flags, sizeof(flags));
++ mix_pool_bytes(r, &rv, sizeof(rv), NULL);
+ }
+- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
++ mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
+ }
+
+ static int rand_initialize(void)
+@@ -1117,7 +1256,7 @@ write_pool(struct entropy_store *r, cons
+ count -= bytes;
+ p += bytes;
+
+- mix_pool_bytes(r, buf, bytes);
++ mix_pool_bytes(r, buf, bytes, NULL);
+ cond_resched();
+ }
+
+@@ -1274,6 +1413,7 @@ static int proc_do_uuid(ctl_table *table
+ }
+
+ static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
++extern ctl_table random_table[];
+ ctl_table random_table[] = {
+ {
+ .procname = "poolsize",
+@@ -1339,7 +1479,7 @@ late_initcall(random_int_secret_init);
+ * value is not cryptographically secure but for several uses the cost of
+ * depleting entropy is too high
+ */
+-DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
++static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
+ unsigned int get_random_int(void)
+ {
+ __u32 *hash;
+--- a/drivers/mfd/ab3100-core.c
++++ b/drivers/mfd/ab3100-core.c
+@@ -409,8 +409,6 @@ static irqreturn_t ab3100_irq_handler(in
+ u32 fatevent;
+ int err;
+
+- add_interrupt_randomness(irq);
+-
+ err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
+ event_regs, 3);
+ if (err)
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -24,6 +24,7 @@
+ #include <linux/kthread.h>
+ #include <linux/mutex.h>
+ #include <linux/freezer.h>
++#include <linux/random.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/byteorder.h>
+@@ -1896,6 +1897,14 @@ int usb_new_device(struct usb_device *ud
+ /* Tell the world! */
+ announce_device(udev);
+
++ if (udev->serial)
++ add_device_randomness(udev->serial, strlen(udev->serial));
++ if (udev->product)
++ add_device_randomness(udev->product, strlen(udev->product));
++ if (udev->manufacturer)
++ add_device_randomness(udev->manufacturer,
++ strlen(udev->manufacturer));
++
+ device_enable_async_suspend(&udev->dev);
+ /* Register the device. The device driver is responsible
+ * for configuring the device and invoking the add-device
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -50,11 +50,13 @@ struct rnd_state {
+
+ extern void rand_initialize_irq(int irq);
+
++extern void add_device_randomness(const void *, unsigned int);
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value);
+-extern void add_interrupt_randomness(int irq);
++extern void add_interrupt_randomness(int irq, int irq_flags);
+
+ extern void get_random_bytes(void *buf, int nbytes);
++extern void get_random_bytes_arch(void *buf, int nbytes);
+ void generate_random_uuid(unsigned char uuid_out[16]);
+
+ #ifndef MODULE
+--- /dev/null
++++ b/include/trace/events/random.h
+@@ -0,0 +1,134 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM random
++
++#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_RANDOM_H
++
++#include <linux/writeback.h>
++#include <linux/tracepoint.h>
++
++DECLARE_EVENT_CLASS(random__mix_pool_bytes,
++ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
++
++ TP_ARGS(pool_name, bytes, IP),
++
++ TP_STRUCT__entry(
++ __field( const char *, pool_name )
++ __field( int, bytes )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ __entry->pool_name = pool_name;
++ __entry->bytes = bytes;
++ __entry->IP = IP;
++ ),
++
++ TP_printk("%s pool: bytes %d caller %pF",
++ __entry->pool_name, __entry->bytes, (void *)__entry->IP)
++);
++
++DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
++ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
++
++ TP_ARGS(pool_name, bytes, IP)
++);
++
++DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
++ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
++
++ TP_ARGS(pool_name, bytes, IP)
++);
++
++TRACE_EVENT(credit_entropy_bits,
++ TP_PROTO(const char *pool_name, int bits, int entropy_count,
++ int entropy_total, unsigned long IP),
++
++ TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
++
++ TP_STRUCT__entry(
++ __field( const char *, pool_name )
++ __field( int, bits )
++ __field( int, entropy_count )
++ __field( int, entropy_total )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ __entry->pool_name = pool_name;
++ __entry->bits = bits;
++ __entry->entropy_count = entropy_count;
++ __entry->entropy_total = entropy_total;
++ __entry->IP = IP;
++ ),
++
++ TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
++ "caller %pF", __entry->pool_name, __entry->bits,
++ __entry->entropy_count, __entry->entropy_total,
++ (void *)__entry->IP)
++);
++
++TRACE_EVENT(get_random_bytes,
++ TP_PROTO(int nbytes, unsigned long IP),
++
++ TP_ARGS(nbytes, IP),
++
++ TP_STRUCT__entry(
++ __field( int, nbytes )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ __entry->nbytes = nbytes;
++ __entry->IP = IP;
++ ),
++
++ TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
++);
++
++DECLARE_EVENT_CLASS(random__extract_entropy,
++ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
++ unsigned long IP),
++
++ TP_ARGS(pool_name, nbytes, entropy_count, IP),
++
++ TP_STRUCT__entry(
++ __field( const char *, pool_name )
++ __field( int, nbytes )
++ __field( int, entropy_count )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ __entry->pool_name = pool_name;
++ __entry->nbytes = nbytes;
++ __entry->entropy_count = entropy_count;
++ __entry->IP = IP;
++ ),
++
++ TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
++ __entry->pool_name, __entry->nbytes, __entry->entropy_count,
++ (void *)__entry->IP)
++);
++
++
++DEFINE_EVENT(random__extract_entropy, extract_entropy,
++ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
++ unsigned long IP),
++
++ TP_ARGS(pool_name, nbytes, entropy_count, IP)
++);
++
++DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
++ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
++ unsigned long IP),
++
++ TP_ARGS(pool_name, nbytes, entropy_count, IP)
++);
++
++
++
++#endif /* _TRACE_RANDOM_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- a/kernel/irq/handle.c
++++ b/kernel/irq/handle.c
+@@ -117,7 +117,7 @@ irqreturn_t
+ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+ {
+ irqreturn_t retval = IRQ_NONE;
+- unsigned int random = 0, irq = desc->irq_data.irq;
++ unsigned int flags = 0, irq = desc->irq_data.irq;
+
+ do {
+ irqreturn_t res;
+@@ -145,7 +145,7 @@ handle_irq_event_percpu(struct irq_desc
+
+ /* Fall through to add to randomness */
+ case IRQ_HANDLED:
+- random |= action->flags;
++ flags |= action->flags;
+ break;
+
+ default:
+@@ -156,8 +156,7 @@ handle_irq_event_percpu(struct irq_desc
+ action = action->next;
+ } while (action);
+
+- if (random & IRQF_SAMPLE_RANDOM)
+- add_interrupt_randomness(irq);
++ add_interrupt_randomness(irq, flags);
+
+ if (!noirqdebug)
+ note_interrupt(irq, desc, retval);
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1176,6 +1176,7 @@ static int __dev_open(struct net_device
+ net_dmaengine_get();
+ dev_set_rx_mode(dev);
+ dev_activate(dev);
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+ }
+
+ return ret;
+@@ -4823,6 +4824,7 @@ int dev_set_mac_address(struct net_devic
+ err = ops->ndo_set_mac_address(dev, sa);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+ return err;
+ }
+ EXPORT_SYMBOL(dev_set_mac_address);
+@@ -5602,6 +5604,7 @@ int register_netdevice(struct net_device
+ dev_init_scheduler(dev);
+ dev_hold(dev);
+ list_netdevice(dev);
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+
+ /* Notify protocols, that a new device appeared. */
+ ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1371,6 +1371,7 @@ static int do_setlink(struct net_device
+ goto errout;
+ send_addr_notify = 1;
+ modified = 1;
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+ }
+
+ if (tb[IFLA_MTU]) {
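The driver-facing interface this backport adds, add_device_randomness() for device-unique but public data, is what the USB hub and net/core hunks above rely on. A short sketch of the same pattern in a hypothetical driver (the foo_* name and parameters are made up):

#include <linux/netdevice.h>
#include <linux/random.h>

/* Sketch only: mix device-unique, non-secret identifiers into the pools.
 * add_device_randomness() credits no entropy; it just keeps the initial
 * pool state from being identical across otherwise identical boards. */
static void foo_seed_pools(struct net_device *dev,
			   const void *serial, unsigned int serial_len)
{
	add_device_randomness(dev->dev_addr, dev->addr_len);
	if (serial)
		add_device_randomness(serial, serial_len);
}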
diff --git a/target/linux/generic/patches-3.3/051-rng_git_backport-remove_irqf_sample_random.patch b/target/linux/generic/patches-3.3/051-rng_git_backport-remove_irqf_sample_random.patch
new file mode 100644
index 000000000..93706ed3f
--- /dev/null
+++ b/target/linux/generic/patches-3.3/051-rng_git_backport-remove_irqf_sample_random.patch
@@ -0,0 +1,543 @@
+--- a/arch/arm/mach-omap1/board-palmz71.c
++++ b/arch/arm/mach-omap1/board-palmz71.c
+@@ -291,8 +291,7 @@ palmz71_gpio_setup(int early)
+ }
+ gpio_direction_input(PALMZ71_USBDETECT_GPIO);
+ if (request_irq(gpio_to_irq(PALMZ71_USBDETECT_GPIO),
+- palmz71_powercable, IRQF_SAMPLE_RANDOM,
+- "palmz71-cable", 0))
++ palmz71_powercable, 0, "palmz71-cable", 0))
+ printk(KERN_ERR
+ "IRQ request for power cable failed!\n");
+ palmz71_powercable(gpio_to_irq(PALMZ71_USBDETECT_GPIO), 0);
+--- a/arch/arm/mach-pxa/lubbock.c
++++ b/arch/arm/mach-pxa/lubbock.c
+@@ -455,7 +455,7 @@ static int lubbock_mci_init(struct devic
+ init_timer(&mmc_timer);
+ mmc_timer.data = (unsigned long) data;
+ return request_irq(LUBBOCK_SD_IRQ, lubbock_detect_int,
+- IRQF_SAMPLE_RANDOM, "lubbock-sd-detect", data);
++ 0, "lubbock-sd-detect", data);
+ }
+
+ static int lubbock_mci_get_ro(struct device *dev)
+--- a/arch/arm/mach-pxa/magician.c
++++ b/arch/arm/mach-pxa/magician.c
+@@ -617,9 +617,8 @@ static struct platform_device bq24022 =
+ static int magician_mci_init(struct device *dev,
+ irq_handler_t detect_irq, void *data)
+ {
+- return request_irq(IRQ_MAGICIAN_SD, detect_irq,
+- IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
+- "mmc card detect", data);
++ return request_irq(IRQ_MAGICIAN_SD, detect_irq, IRQF_DISABLED,
++ "mmc card detect", data);
+ }
+
+ static void magician_mci_exit(struct device *dev, void *data)
+--- a/arch/arm/mach-pxa/trizeps4.c
++++ b/arch/arm/mach-pxa/trizeps4.c
+@@ -332,8 +332,8 @@ static int trizeps4_mci_init(struct devi
+ int err;
+
+ err = request_irq(TRIZEPS4_MMC_IRQ, mci_detect_int,
+- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_SAMPLE_RANDOM,
+- "MMC card detect", data);
++ IRQF_DISABLED | IRQF_TRIGGER_RISING,
++ "MMC card detect", data);
+ if (err) {
+ printk(KERN_ERR "trizeps4_mci_init: MMC/SD: can't request"
+ "MMC card detect IRQ\n");
+--- a/arch/ia64/kernel/irq_ia64.c
++++ b/arch/ia64/kernel/irq_ia64.c
+@@ -23,7 +23,6 @@
+ #include <linux/ioport.h>
+ #include <linux/kernel_stat.h>
+ #include <linux/ptrace.h>
+-#include <linux/random.h> /* for rand_initialize_irq() */
+ #include <linux/signal.h>
+ #include <linux/smp.h>
+ #include <linux/threads.h>
+--- a/arch/sparc/kernel/ldc.c
++++ b/arch/sparc/kernel/ldc.c
+@@ -1250,14 +1250,12 @@ int ldc_bind(struct ldc_channel *lp, con
+ snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
+ snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
+
+- err = request_irq(lp->cfg.rx_irq, ldc_rx,
+- IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
++ err = request_irq(lp->cfg.rx_irq, ldc_rx, IRQF_DISABLED,
+ lp->rx_irq_name, lp);
+ if (err)
+ return err;
+
+- err = request_irq(lp->cfg.tx_irq, ldc_tx,
+- IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
++ err = request_irq(lp->cfg.tx_irq, ldc_tx, IRQF_DISABLED,
+ lp->tx_irq_name, lp);
+ if (err) {
+ free_irq(lp->cfg.rx_irq, lp);
+--- a/arch/um/drivers/line.c
++++ b/arch/um/drivers/line.c
+@@ -371,7 +371,7 @@ static irqreturn_t line_write_interrupt(
+ int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
+ {
+ const struct line_driver *driver = line->driver;
+- int err = 0, flags = IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM;
++ int err = 0, flags = IRQF_DISABLED | IRQF_SHARED;
+
+ if (input)
+ err = um_request_irq(driver->read_irq, fd, IRQ_READ,
+@@ -807,7 +807,7 @@ void register_winch_irq(int fd, int tty_
+ .stack = stack });
+
+ if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
+- IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
++ IRQF_DISABLED | IRQF_SHARED,
+ "winch", winch) < 0) {
+ printk(KERN_ERR "register_winch_irq - failed to register "
+ "IRQ\n");
+--- a/arch/um/drivers/mconsole_kern.c
++++ b/arch/um/drivers/mconsole_kern.c
+@@ -773,7 +773,7 @@ static int __init mconsole_init(void)
+ register_reboot_notifier(&reboot_notifier);
+
+ err = um_request_irq(MCONSOLE_IRQ, sock, IRQ_READ, mconsole_interrupt,
+- IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
++ IRQF_DISABLED | IRQF_SHARED,
+ "mconsole", (void *)sock);
+ if (err) {
+ printk(KERN_ERR "Failed to get IRQ for management console\n");
+--- a/arch/um/drivers/port_kern.c
++++ b/arch/um/drivers/port_kern.c
+@@ -100,7 +100,7 @@ static int port_accept(struct port_list
+ .port = port });
+
+ if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
+- IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
++ IRQF_DISABLED | IRQF_SHARED,
+ "telnetd", conn)) {
+ printk(KERN_ERR "port_accept : failed to get IRQ for "
+ "telnetd\n");
+@@ -184,7 +184,7 @@ void *port_data(int port_num)
+ }
+
+ if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
+- IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
++ IRQF_DISABLED | IRQF_SHARED,
+ "port", port)) {
+ printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
+ goto out_close;
+--- a/arch/um/drivers/random.c
++++ b/arch/um/drivers/random.c
+@@ -131,8 +131,7 @@ static int __init rng_init (void)
+ random_fd = err;
+
+ err = um_request_irq(RANDOM_IRQ, random_fd, IRQ_READ, random_interrupt,
+- IRQF_DISABLED | IRQF_SAMPLE_RANDOM, "random",
+- NULL);
++ IRQF_DISABLED, "random", NULL);
+ if (err)
+ goto err_out_cleanup_hw;
+
+--- a/arch/um/drivers/xterm_kern.c
++++ b/arch/um/drivers/xterm_kern.c
+@@ -50,8 +50,7 @@ int xterm_fd(int socket, int *pid_out)
+ init_completion(&data->ready);
+
+ err = um_request_irq(XTERM_IRQ, socket, IRQ_READ, xterm_interrupt,
+- IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
+- "xterm", data);
++ IRQF_DISABLED | IRQF_SHARED, "xterm", data);
+ if (err) {
+ printk(KERN_ERR "xterm_fd : failed to get IRQ for xterm, "
+ "err = %d\n", err);
+--- a/arch/um/kernel/sigio.c
++++ b/arch/um/kernel/sigio.c
+@@ -25,8 +25,7 @@ int write_sigio_irq(int fd)
+ int err;
+
+ err = um_request_irq(SIGIO_WRITE_IRQ, fd, IRQ_READ, sigio_interrupt,
+- IRQF_DISABLED|IRQF_SAMPLE_RANDOM, "write sigio",
+- NULL);
++ IRQF_DISABLED, "write sigio", NULL);
+ if (err) {
+ printk(KERN_ERR "write_sigio_irq : um_request_irq failed, "
+ "err = %d\n", err);
+--- a/Documentation/feature-removal-schedule.txt
++++ b/Documentation/feature-removal-schedule.txt
+@@ -71,20 +71,6 @@ Who: Luis R. Rodriguez <lrodriguez@ather
+
+ ---------------------------
+
+-What: IRQF_SAMPLE_RANDOM
+-Check: IRQF_SAMPLE_RANDOM
+-When: July 2009
+-
+-Why: Many of IRQF_SAMPLE_RANDOM users are technically bogus as entropy
+- sources in the kernel's current entropy model. To resolve this, every
+- input point to the kernel's entropy pool needs to better document the
+- type of entropy source it actually is. This will be replaced with
+- additional add_*_randomness functions in drivers/char/random.c
+-
+-Who: Robin Getz <rgetz@blackfin.uclinux.org> & Matt Mackall <mpm@selenic.com>
+-
+----------------------------
+-
+ What: The ieee80211_regdom module parameter
+ When: March 2010 / desktop catchup
+
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -852,9 +852,8 @@ static int setup_blkring(struct xenbus_d
+ if (err)
+ goto fail;
+
+- err = bind_evtchn_to_irqhandler(info->evtchn,
+- blkif_interrupt,
+- IRQF_SAMPLE_RANDOM, "blkif", info);
++ err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, 0,
++ "blkif", info);
+ if (err <= 0) {
+ xenbus_dev_fatal(dev, err,
+ "bind_evtchn_to_irqhandler failed");
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -633,43 +633,6 @@ struct timer_rand_state {
+ unsigned dont_count_entropy:1;
+ };
+
+-#ifndef CONFIG_GENERIC_HARDIRQS
+-
+-static struct timer_rand_state *irq_timer_state[NR_IRQS];
+-
+-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+-{
+- return irq_timer_state[irq];
+-}
+-
+-static void set_timer_rand_state(unsigned int irq,
+- struct timer_rand_state *state)
+-{
+- irq_timer_state[irq] = state;
+-}
+-
+-#else
+-
+-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+-{
+- struct irq_desc *desc;
+-
+- desc = irq_to_desc(irq);
+-
+- return desc->timer_rand_state;
+-}
+-
+-static void set_timer_rand_state(unsigned int irq,
+- struct timer_rand_state *state)
+-{
+- struct irq_desc *desc;
+-
+- desc = irq_to_desc(irq);
+-
+- desc->timer_rand_state = state;
+-}
+-#endif
+-
+ /*
+ * Add device- or boot-specific data to the input and nonblocking
+ * pools to help initialize them to unique values.
+@@ -1131,24 +1094,6 @@ static int rand_initialize(void)
+ }
+ module_init(rand_initialize);
+
+-void rand_initialize_irq(int irq)
+-{
+- struct timer_rand_state *state;
+-
+- state = get_timer_rand_state(irq);
+-
+- if (state)
+- return;
+-
+- /*
+- * If kzalloc returns null, we just won't use that entropy
+- * source.
+- */
+- state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+- if (state)
+- set_timer_rand_state(irq, state);
+-}
+-
+ #ifdef CONFIG_BLOCK
+ void rand_initialize_disk(struct gendisk *disk)
+ {
+--- a/drivers/crypto/n2_core.c
++++ b/drivers/crypto/n2_core.c
+@@ -1607,8 +1607,7 @@ static int spu_map_ino(struct platform_d
+
+ sprintf(p->irq_name, "%s-%d", irq_name, index);
+
+- return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
+- p->irq_name, p);
++ return request_irq(p->irq, handler, 0, p->irq_name, p);
+ }
+
+ static struct kmem_cache *queue_cache[2];
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -545,8 +545,7 @@ static int vmbus_bus_init(int irq)
+ if (ret)
+ goto err_cleanup;
+
+- ret = request_irq(irq, vmbus_isr, IRQF_SAMPLE_RANDOM,
+- driver_name, hv_acpi_dev);
++ ret = request_irq(irq, vmbus_isr, 0, driver_name, hv_acpi_dev);
+
+ if (ret != 0) {
+ pr_err("Unable to request IRQ %d\n",
+--- a/drivers/i2c/busses/i2c-pmcmsp.c
++++ b/drivers/i2c/busses/i2c-pmcmsp.c
+@@ -306,8 +306,7 @@ static int __devinit pmcmsptwi_probe(str
+ pmcmsptwi_data.irq = platform_get_irq(pldev, 0);
+ if (pmcmsptwi_data.irq) {
+ rc = request_irq(pmcmsptwi_data.irq, &pmcmsptwi_interrupt,
+- IRQF_SHARED | IRQF_SAMPLE_RANDOM,
+- pldev->name, &pmcmsptwi_data);
++ IRQF_SHARED, pldev->name, &pmcmsptwi_data);
+ if (rc == 0) {
+ /*
+ * Enable 'DONE' interrupt only.
+--- a/drivers/input/serio/hp_sdc.c
++++ b/drivers/input/serio/hp_sdc.c
+@@ -879,7 +879,7 @@ static int __init hp_sdc_init(void)
+ #endif
+
+ errstr = "IRQ not available for";
+- if (request_irq(hp_sdc.irq, &hp_sdc_isr, IRQF_SHARED|IRQF_SAMPLE_RANDOM,
++ if (request_irq(hp_sdc.irq, &hp_sdc_isr, IRQF_SHARED,
+ "HP SDC", &hp_sdc))
+ goto err1;
+
+--- a/drivers/mfd/ab3100-core.c
++++ b/drivers/mfd/ab3100-core.c
+@@ -937,9 +937,6 @@ static int __devinit ab3100_probe(struct
+
+ err = request_threaded_irq(client->irq, NULL, ab3100_irq_handler,
+ IRQF_ONESHOT, "ab3100-core", ab3100);
+- /* This real unpredictable IRQ is of course sampled for entropy */
+- rand_initialize_irq(client->irq);
+-
+ if (err)
+ goto exit_no_irq;
+
+--- a/drivers/mfd/tps65010.c
++++ b/drivers/mfd/tps65010.c
+@@ -563,8 +563,7 @@ static int tps65010_probe(struct i2c_cli
+ */
+ if (client->irq > 0) {
+ status = request_irq(client->irq, tps65010_irq,
+- IRQF_SAMPLE_RANDOM | IRQF_TRIGGER_FALLING,
+- DRIVER_NAME, tps);
++ IRQF_TRIGGER_FALLING, DRIVER_NAME, tps);
+ if (status < 0) {
+ dev_dbg(&client->dev, "can't get IRQ %d, err %d\n",
+ client->irq, status);
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -9415,7 +9415,7 @@ static int tg3_test_interrupt(struct tg3
+ }
+
+ err = request_irq(tnapi->irq_vec, tg3_test_isr,
+- IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
++ IRQF_SHARED, dev->name, tnapi);
+ if (err)
+ return err;
+
+--- a/drivers/power/pda_power.c
++++ b/drivers/power/pda_power.c
+@@ -24,11 +24,7 @@
+
+ static inline unsigned int get_irq_flags(struct resource *res)
+ {
+- unsigned int flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
+-
+- flags |= res->flags & IRQF_TRIGGER_MASK;
+-
+- return flags;
++ return IRQF_SHARED | (res->flags & IRQF_TRIGGER_MASK);
+ }
+
+ static struct device *dev;
+--- a/drivers/tty/serial/uartlite.c
++++ b/drivers/tty/serial/uartlite.c
+@@ -216,8 +216,7 @@ static int ulite_startup(struct uart_por
+ {
+ int ret;
+
+- ret = request_irq(port->irq, ulite_isr,
+- IRQF_SHARED | IRQF_SAMPLE_RANDOM, "uartlite", port);
++ ret = request_irq(port->irq, ulite_isr, IRQF_SHARED, "uartlite", port);
+ if (ret)
+ return ret;
+
+--- a/drivers/usb/gadget/goku_udc.c
++++ b/drivers/usb/gadget/goku_udc.c
+@@ -1839,7 +1839,7 @@ static int goku_probe(struct pci_dev *pd
+ /* init to known state, then setup irqs */
+ udc_reset(dev);
+ udc_reinit (dev);
+- if (request_irq(pdev->irq, goku_irq, IRQF_SHARED/*|IRQF_SAMPLE_RANDOM*/,
++ if (request_irq(pdev->irq, goku_irq, IRQF_SHARED,
+ driver_name, dev) != 0) {
+ DBG(dev, "request interrupt %d failed\n", pdev->irq);
+ retval = -EBUSY;
+--- a/drivers/usb/gadget/omap_udc.c
++++ b/drivers/usb/gadget/omap_udc.c
+@@ -2943,7 +2943,7 @@ known:
+
+ /* USB general purpose IRQ: ep0, state changes, dma, etc */
+ status = request_irq(pdev->resource[1].start, omap_udc_irq,
+- IRQF_SAMPLE_RANDOM, driver_name, udc);
++ 0, driver_name, udc);
+ if (status != 0) {
+ ERR("can't get irq %d, err %d\n",
+ (int) pdev->resource[1].start, status);
+@@ -2952,7 +2952,7 @@ known:
+
+ /* USB "non-iso" IRQ (PIO for all but ep0) */
+ status = request_irq(pdev->resource[2].start, omap_udc_pio_irq,
+- IRQF_SAMPLE_RANDOM, "omap_udc pio", udc);
++ 0, "omap_udc pio", udc);
+ if (status != 0) {
+ ERR("can't get irq %d, err %d\n",
+ (int) pdev->resource[2].start, status);
+--- a/drivers/usb/gadget/pxa25x_udc.c
++++ b/drivers/usb/gadget/pxa25x_udc.c
+@@ -2202,19 +2202,15 @@ static int __init pxa25x_udc_probe(struc
+
+ #ifdef CONFIG_ARCH_LUBBOCK
+ if (machine_is_lubbock()) {
+- retval = request_irq(LUBBOCK_USB_DISC_IRQ,
+- lubbock_vbus_irq,
+- IRQF_SAMPLE_RANDOM,
+- driver_name, dev);
++ retval = request_irq(LUBBOCK_USB_DISC_IRQ, lubbock_vbus_irq,
++ 0, driver_name, dev);
+ if (retval != 0) {
+ pr_err("%s: can't get irq %i, err %d\n",
+ driver_name, LUBBOCK_USB_DISC_IRQ, retval);
+ goto err_irq_lub;
+ }
+- retval = request_irq(LUBBOCK_USB_IRQ,
+- lubbock_vbus_irq,
+- IRQF_SAMPLE_RANDOM,
+- driver_name, dev);
++ retval = request_irq(LUBBOCK_USB_IRQ, lubbock_vbus_irq,
++ 0, driver_name, dev);
+ if (retval != 0) {
+ pr_err("%s: can't get irq %i, err %d\n",
+ driver_name, LUBBOCK_USB_IRQ, retval);
+--- a/drivers/usb/otg/gpio_vbus.c
++++ b/drivers/usb/otg/gpio_vbus.c
+@@ -51,8 +51,7 @@ struct gpio_vbus_data {
+ * edges might be workable.
+ */
+ #define VBUS_IRQ_FLAGS \
+- ( IRQF_SAMPLE_RANDOM | IRQF_SHARED \
+- | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING )
++ ( IRQF_SHARED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING )
+
+
+ /* interface to regulator framework */
+@@ -253,7 +252,7 @@ static int __init gpio_vbus_probe(struct
+ if (res) {
+ irq = res->start;
+ res->flags &= IRQF_TRIGGER_MASK;
+- res->flags |= IRQF_SAMPLE_RANDOM | IRQF_SHARED;
++ res->flags |= IRQF_SHARED;
+ } else
+ irq = gpio_to_irq(gpio);
+
+--- a/drivers/usb/otg/isp1301_omap.c
++++ b/drivers/usb/otg/isp1301_omap.c
+@@ -1567,7 +1567,6 @@ isp1301_probe(struct i2c_client *i2c, co
+ isp->irq_type = IRQF_TRIGGER_FALLING;
+ }
+
+- isp->irq_type |= IRQF_SAMPLE_RANDOM;
+ status = request_irq(i2c->irq, isp1301_irq,
+ isp->irq_type, DRIVER_NAME, isp);
+ if (status < 0) {
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -44,7 +44,6 @@
+ *
+ * IRQF_DISABLED - keep irqs disabled when calling the action handler.
+ * DEPRECATED. This flag is a NOOP and scheduled to be removed
+- * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
+ * IRQF_SHARED - allow sharing the irq among several devices
+ * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
+ * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
+@@ -63,7 +62,6 @@
+ * resume time.
+ */
+ #define IRQF_DISABLED 0x00000020
+-#define IRQF_SAMPLE_RANDOM 0x00000040
+ #define IRQF_SHARED 0x00000080
+ #define IRQF_PROBE_SHARED 0x00000100
+ #define __IRQF_TIMER 0x00000200
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -39,7 +39,6 @@ struct module;
+ */
+ struct irq_desc {
+ struct irq_data irq_data;
+- struct timer_rand_state *timer_rand_state;
+ unsigned int __percpu *kstat_irqs;
+ irq_flow_handler_t handle_irq;
+ #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -48,8 +48,6 @@ struct rnd_state {
+
+ #ifdef __KERNEL__
+
+-extern void rand_initialize_irq(int irq);
+-
+ extern void add_device_randomness(const void *, unsigned int);
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value);
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -891,22 +891,6 @@ __setup_irq(unsigned int irq, struct irq
+ return -ENOSYS;
+ if (!try_module_get(desc->owner))
+ return -ENODEV;
+- /*
+- * Some drivers like serial.c use request_irq() heavily,
+- * so we have to be careful not to interfere with a
+- * running system.
+- */
+- if (new->flags & IRQF_SAMPLE_RANDOM) {
+- /*
+- * This function might sleep, we want to call it first,
+- * outside of the atomic block.
+- * Yes, this might clear the entropy pool if the wrong
+- * driver is attempted to be loaded, without actually
+- * installing a new handler, but is this really a problem,
+- * only the sysadmin is able to do this.
+- */
+- rand_initialize_irq(irq);
+- }
+
+ /*
+ * Check whether the interrupt nests into another interrupt
+@@ -1342,7 +1326,6 @@ EXPORT_SYMBOL(free_irq);
+ * Flags:
+ *
+ * IRQF_SHARED Interrupt is shared
+- * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
+ * IRQF_TRIGGER_* Specify active edge(s) or level
+ *
+ */
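After this cleanup a converted driver simply drops the flag from its request_irq() call; interrupt timing is now sampled unconditionally by handle_irq_event_percpu() via add_interrupt_randomness(irq, flags) from the previous patch. A trivial sketch with a hypothetical foo_ helper:

#include <linux/interrupt.h>

/* Sketch only: request an interrupt without IRQF_SAMPLE_RANDOM; the core
 * now feeds the per-CPU fast pool for every handled interrupt, so no
 * per-driver opt-in remains. */
static int foo_setup_irq(unsigned int irq, irq_handler_t handler, void *data)
{
	return request_irq(irq, handler, IRQF_SHARED, "foo", data);
}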
diff --git a/target/linux/generic/patches-3.3/100-overlayfs_v12.patch b/target/linux/generic/patches-3.3/100-overlayfs_v12.patch
new file mode 100644
index 000000000..14e655897
--- /dev/null
+++ b/target/linux/generic/patches-3.3/100-overlayfs_v12.patch
@@ -0,0 +1,3232 @@
+--- a/Documentation/filesystems/Locking
++++ b/Documentation/filesystems/Locking
+@@ -62,6 +62,7 @@ ata *);
+ int (*removexattr) (struct dentry *, const char *);
+ void (*truncate_range)(struct inode *, loff_t, loff_t);
+ int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
++ struct file *(*open)(struct dentry *,struct file *,const struct cred *);
+
+ locking rules:
+ all may block
+@@ -89,6 +90,7 @@ listxattr: no
+ removexattr: yes
+ truncate_range: yes
+ fiemap: no
++open: no
+ Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
+ victim.
+ cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
+--- /dev/null
++++ b/Documentation/filesystems/overlayfs.txt
+@@ -0,0 +1,199 @@
++Written by: Neil Brown <neilb@suse.de>
++
++Overlay Filesystem
++==================
++
++This document describes a prototype for a new approach to providing
++overlay-filesystem functionality in Linux (sometimes referred to as
++union-filesystems). An overlay-filesystem tries to present a
++filesystem which is the result of overlaying one filesystem on top
++of the other.
++
++The result will inevitably fail to look exactly like a normal
++filesystem for various technical reasons. The expectation is that
++many use cases will be able to ignore these differences.
++
++This approach is 'hybrid' because the objects that appear in the
++filesystem do not all appear to belong to that filesystem. In many
++cases an object accessed in the union will be indistinguishable
++from accessing the corresponding object from the original filesystem.
++This is most obvious from the 'st_dev' field returned by stat(2).
++
++While directories will report an st_dev from the overlay-filesystem,
++all non-directory objects will report an st_dev from the lower or
++upper filesystem that is providing the object. Similarly st_ino will
++only be unique when combined with st_dev, and both of these can change
++over the lifetime of a non-directory object. Many applications and
++tools ignore these values and will not be affected.
++
++Upper and Lower
++---------------
++
++An overlay filesystem combines two filesystems - an 'upper' filesystem
++and a 'lower' filesystem. When a name exists in both filesystems, the
++object in the 'upper' filesystem is visible while the object in the
++'lower' filesystem is either hidden or, in the case of directories,
++merged with the 'upper' object.
++
++It would be more correct to refer to an upper and lower 'directory
++tree' rather than 'filesystem' as it is quite possible for both
++directory trees to be in the same filesystem and there is no
++requirement that the root of a filesystem be given for either upper or
++lower.
++
++The lower filesystem can be any filesystem supported by Linux and does
++not need to be writable. The lower filesystem can even be another
++overlayfs. The upper filesystem will normally be writable and if it
++is it must support the creation of trusted.* extended attributes, and
++must provide valid d_type in readdir responses, at least for symbolic
++links - so NFS is not suitable.
++
++A read-only overlay of two read-only filesystems may use any
++filesystem type.
++
++Directories
++-----------
++
++Overlaying mainly involves directories. If a given name appears in both
++upper and lower filesystems and refers to a non-directory in either,
++then the lower object is hidden - the name refers only to the upper
++object.
++
++Where both upper and lower objects are directories, a merged directory
++is formed.
++
++At mount time, the two directories given as mount options are combined
++into a merged directory:
++
++ mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper /overlay
++
++Then whenever a lookup is requested in such a merged directory, the
++lookup is performed in each actual directory and the combined result
++is cached in the dentry belonging to the overlay filesystem. If both
++actual lookups find directories, both are stored and a merged
++directory is created, otherwise only one is stored: the upper if it
++exists, else the lower.
++
++Only the lists of names from directories are merged. Other content
++such as metadata and extended attributes are reported for the upper
++directory only. These attributes of the lower directory are hidden.
++
++whiteouts and opaque directories
++--------------------------------
++
++In order to support rm and rmdir without changing the lower
++filesystem, an overlay filesystem needs to record in the upper filesystem
++that files have been removed. This is done using whiteouts and opaque
++directories (non-directories are always opaque).
++
++The overlay filesystem uses extended attributes with a
++"trusted.overlay." prefix to record these details.
++
++A whiteout is created as a symbolic link with target
++"(overlay-whiteout)" and with xattr "trusted.overlay.whiteout" set to "y".
++When a whiteout is found in the upper level of a merged directory, any
++matching name in the lower level is ignored, and the whiteout itself
++is also hidden.
++
++A directory is made opaque by setting the xattr "trusted.overlay.opaque"
++to "y". Where the upper filesystem contains an opaque directory, any
++directory in the lower filesystem with the same name is ignored.
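++
++For illustration only (a sketch assuming the /upper tree from the mount
++example above, a removed entry named 'foo', and sufficient privileges
++to read trusted.* attributes), a whiteout and an opaque directory can
++be inspected in the upper tree with standard tools:
++
++ ls -l /upper/dir/foo                  # symlink to "(overlay-whiteout)"
++ getfattr -h -n trusted.overlay.whiteout /upper/dir/foo    # value "y"
++ getfattr -n trusted.overlay.opaque /upper/dir             # value "y"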
++
++readdir
++-------
++
++When a 'readdir' request is made on a merged directory, the upper and
++lower directories are each read and the name lists merged in the
++obvious way (upper is read first, then lower - entries that already
++exist are not re-added). This merged name list is cached in the
++'struct file' and so remains as long as the file is kept open. If the
++directory is opened and read by two processes at the same time, they
++will each have separate caches. A seekdir to the start of the
++directory (offset 0) followed by a readdir will cause the cache to be
++discarded and rebuilt.
++
++This means that changes to the merged directory do not appear while a
++directory is being read. This is unlikely to be noticed by many
++programs.
++
++Seek offsets are assigned sequentially when the directories are read.
++Thus if
++ - read part of a directory
++ - remember an offset, and close the directory
++ - re-open the directory some time later
++ - seek to the remembered offset
++
++there may be little correlation between the old and new locations in
++the list of filenames, particularly if anything has changed in the
++directory.
++
++Readdir on directories that are not merged is simply handled by the
++underlying directory (upper or lower).
++
++
++Non-directories
++---------------
++
++Objects that are not directories (files, symlinks, device-special
++files etc.) are presented either from the upper or lower filesystem as
++appropriate. When a file in the lower filesystem is accessed in a way
++that requires write access, such as opening for write access, changing
++some metadata etc., the file is first copied from the lower filesystem
++to the upper filesystem (copy_up). Note that creating a hard-link
++also requires copy_up, though of course creation of a symlink does
++not.
++
++The copy_up may turn out to be unnecessary, for example if the file is
++opened for read-write but the data is not modified.
++
++The copy_up process first makes sure that the containing directory
++exists in the upper filesystem - creating it and any parents as
++necessary. It then creates the object with the same metadata (owner,
++mode, mtime, symlink-target etc.) and then if the object is a file, the
++data is copied from the lower to the upper filesystem. Finally any
++extended attributes are copied up.
++
++Once the copy_up is complete, the overlay filesystem simply
++provides direct access to the newly created file in the upper
++filesystem - future operations on the file are barely noticed by the
++overlay filesystem (though an operation on the name of the file such as
++rename or unlink will of course be noticed and handled).
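++
++As a rough example (the file name is hypothetical, reusing the /lower,
++/upper and /overlay layout from the mount example above), a metadata
++change made through the overlay triggers copy_up of a file that so far
++existed only in the lower tree:
++
++ ls /upper                   # 'file' is not present yet
++ chmod g+w /overlay/file     # write-type access forces copy_up
++ ls /upper                   # 'file' has now been copied up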
++
++
++Non-standard behavior
++---------------------
++
++The copy_up operation essentially creates a new, identical file and
++moves it over to the old name. The new file may be on a different
++filesystem, so both st_dev and st_ino of the file may change.
++
++Any open files referring to this inode will access the old data and
++metadata. Similarly any file locks obtained before copy_up will not
++apply to the copied up file.
++
++If a file is opened with O_RDONLY, then fchmod(2), fchown(2), futimesat(2)
++and fsetxattr(2) will fail with EROFS.
++
++If a file with multiple hard links is copied up, then this will
++"break" the link. Changes will not be propagated to other names
++referring to the same inode.
++
++Symlinks in /proc/PID/ and /proc/PID/fd which point to a non-directory
++object in overlayfs will not contain valid absolute paths, only
++relative paths leading up to the filesystem's root. This will be
++fixed in the future.
++
++Some operations are not atomic, for example a crash during copy_up or
++rename will leave the filesystem in an inconsistent state. This will
++be addressed in the future.
++
++Changes to underlying filesystems
++---------------------------------
++
++Offline changes, when the overlay is not mounted, are allowed to either
++the upper or the lower trees.
++
++Changes to the underlying filesystems while part of a mounted overlay
++filesystem are not allowed. If the underlying filesystem is changed,
++the behavior of the overlay is undefined, though it will not result in
++a crash or deadlock.
+--- a/Documentation/filesystems/vfs.txt
++++ b/Documentation/filesystems/vfs.txt
+@@ -364,6 +364,8 @@ struct inode_operations {
+ ssize_t (*listxattr) (struct dentry *, char *, size_t);
+ int (*removexattr) (struct dentry *, const char *);
+ void (*truncate_range)(struct inode *, loff_t, loff_t);
++ struct file *(*open) (struct dentry *, struct file *,
++ const struct cred *);
+ };
+
+ Again, all methods are called without any locks being held, unless
+@@ -475,6 +477,12 @@ otherwise noted.
+ truncate_range: a method provided by the underlying filesystem to truncate a
+ range of blocks , i.e. punch a hole somewhere in a file.
+
++ open: this is an alternative to f_op->open(), the difference is that this
++ method may return any open file, not necessarily originating from the
++ same filesystem as the one i_op->open() was called on. It may be useful
++ for stacking filesystems which want to allow native I/O directly on
++ underlying files.
++
+
+ The Address Space Object
+ ========================
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -4955,6 +4955,13 @@ F: drivers/scsi/osd/
+ F: include/scsi/osd_*
+ F: fs/exofs/
+
++OVERLAYFS FILESYSTEM
++M: Miklos Szeredi <miklos@szeredi.hu>
++L: linux-fsdevel@vger.kernel.org
++S: Supported
++F: fs/overlayfs/*
++F: Documentation/filesystems/overlayfs.txt
++
+ P54 WIRELESS DRIVER
+ M: Christian Lamparter <chunkeey@googlemail.com>
+ L: linux-wireless@vger.kernel.org
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -63,6 +63,7 @@ source "fs/quota/Kconfig"
+
+ source "fs/autofs4/Kconfig"
+ source "fs/fuse/Kconfig"
++source "fs/overlayfs/Kconfig"
+
+ config CUSE
+ tristate "Character device in Userspace support"
+--- a/fs/Makefile
++++ b/fs/Makefile
+@@ -105,6 +105,7 @@ obj-$(CONFIG_QNX4FS_FS) += qnx4/
+ obj-$(CONFIG_AUTOFS4_FS) += autofs4/
+ obj-$(CONFIG_ADFS_FS) += adfs/
+ obj-$(CONFIG_FUSE_FS) += fuse/
++obj-$(CONFIG_OVERLAYFS_FS) += overlayfs/
+ obj-$(CONFIG_UDF_FS) += udf/
+ obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/
+ obj-$(CONFIG_OMFS_FS) += omfs/
+--- a/fs/ecryptfs/main.c
++++ b/fs/ecryptfs/main.c
+@@ -544,6 +544,13 @@ static struct dentry *ecryptfs_mount(str
+ s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
+ s->s_blocksize = path.dentry->d_sb->s_blocksize;
+ s->s_magic = ECRYPTFS_SUPER_MAGIC;
++ s->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1;
++
++ rc = -EINVAL;
++ if (s->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
++ printk(KERN_ERR "eCryptfs: maximum fs stacking depth exceeded\n");
++ goto out_free;
++ }
+
+ inode = ecryptfs_get_inode(path.dentry->d_inode, s);
+ rc = PTR_ERR(inode);
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1325,6 +1325,24 @@ void drop_collected_mounts(struct vfsmou
+ release_mounts(&umount_list);
+ }
+
++struct vfsmount *clone_private_mount(struct path *path)
++{
++ struct mount *old_mnt = real_mount(path->mnt);
++ struct mount *new_mnt;
++
++ if (IS_MNT_UNBINDABLE(old_mnt))
++ return ERR_PTR(-EINVAL);
++
++ down_read(&namespace_sem);
++ new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
++ up_read(&namespace_sem);
++ if (!new_mnt)
++ return ERR_PTR(-ENOMEM);
++
++ return &new_mnt->mnt;
++}
++EXPORT_SYMBOL_GPL(clone_private_mount);
++
+ int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
+ struct vfsmount *root)
+ {
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -644,24 +644,24 @@ static inline int __get_file_write_acces
+ return error;
+ }
+
+-static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
+- struct file *f,
+- int (*open)(struct inode *, struct file *),
+- const struct cred *cred)
++static struct file *__dentry_open(struct path *path, struct file *f,
++ int (*open)(struct inode *, struct file *),
++ const struct cred *cred)
+ {
+ static const struct file_operations empty_fops = {};
+ struct inode *inode;
+ int error;
+
++ path_get(path);
+ f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
+ FMODE_PREAD | FMODE_PWRITE;
+
+ if (unlikely(f->f_flags & O_PATH))
+ f->f_mode = FMODE_PATH;
+
+- inode = dentry->d_inode;
++ inode = path->dentry->d_inode;
+ if (f->f_mode & FMODE_WRITE) {
+- error = __get_file_write_access(inode, mnt);
++ error = __get_file_write_access(inode, path->mnt);
+ if (error)
+ goto cleanup_file;
+ if (!special_file(inode->i_mode))
+@@ -669,8 +669,7 @@ static struct file *__dentry_open(struct
+ }
+
+ f->f_mapping = inode->i_mapping;
+- f->f_path.dentry = dentry;
+- f->f_path.mnt = mnt;
++ f->f_path = *path;
+ f->f_pos = 0;
+ file_sb_list_add(f, inode->i_sb);
+
+@@ -727,7 +726,7 @@ cleanup_all:
+ * here, so just reset the state.
+ */
+ file_reset_write(f);
+- mnt_drop_write(mnt);
++ mnt_drop_write(path->mnt);
+ }
+ }
+ file_sb_list_del(f);
+@@ -735,8 +734,7 @@ cleanup_all:
+ f->f_path.mnt = NULL;
+ cleanup_file:
+ put_filp(f);
+- dput(dentry);
+- mntput(mnt);
++ path_put(path);
+ return ERR_PTR(error);
+ }
+
+@@ -762,14 +760,14 @@ cleanup_file:
+ struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
+ int (*open)(struct inode *, struct file *))
+ {
++ struct path path = { .dentry = dentry, .mnt = nd->path.mnt };
+ const struct cred *cred = current_cred();
+
+ if (IS_ERR(nd->intent.open.file))
+ goto out;
+ if (IS_ERR(dentry))
+ goto out_err;
+- nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt),
+- nd->intent.open.file,
++ nd->intent.open.file = __dentry_open(&path, nd->intent.open.file,
+ open, cred);
+ out:
+ return nd->intent.open.file;
+@@ -797,11 +795,9 @@ struct file *nameidata_to_filp(struct na
+ nd->intent.open.file = NULL;
+
+ /* Has the filesystem initialised the file for us? */
+- if (filp->f_path.dentry == NULL) {
+- path_get(&nd->path);
+- filp = __dentry_open(nd->path.dentry, nd->path.mnt, filp,
+- NULL, cred);
+- }
++ if (filp->f_path.dentry == NULL)
++ filp = vfs_open(&nd->path, filp, cred);
++
+ return filp;
+ }
+
+@@ -812,27 +808,48 @@ struct file *nameidata_to_filp(struct na
+ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags,
+ const struct cred *cred)
+ {
+- int error;
+ struct file *f;
++ struct file *ret;
++ struct path path = { .dentry = dentry, .mnt = mnt };
+
+ validate_creds(cred);
+
+ /* We must always pass in a valid mount pointer. */
+ BUG_ON(!mnt);
+
+- error = -ENFILE;
++ ret = ERR_PTR(-ENFILE);
+ f = get_empty_filp();
+- if (f == NULL) {
+- dput(dentry);
+- mntput(mnt);
+- return ERR_PTR(error);
++ if (f != NULL) {
++ f->f_flags = flags;
++ ret = vfs_open(&path, f, cred);
+ }
++ path_put(&path);
+
+- f->f_flags = flags;
+- return __dentry_open(dentry, mnt, f, NULL, cred);
++ return ret;
+ }
+ EXPORT_SYMBOL(dentry_open);
+
++/**
++ * vfs_open - open the file at the given path
++ * @path: path to open
++ * @filp: newly allocated file with f_flag initialized
++ * @cred: credentials to use
++ *
++ * Open the file. If successful, the returned file will have acquired
++ * an additional reference for path.
++ */
++struct file *vfs_open(struct path *path, struct file *filp,
++ const struct cred *cred)
++{
++ struct inode *inode = path->dentry->d_inode;
++
++ if (inode->i_op->open)
++ return inode->i_op->open(path->dentry, filp, cred);
++ else
++ return __dentry_open(path, filp, NULL, cred);
++}
++EXPORT_SYMBOL(vfs_open);
++
+ static void __put_unused_fd(struct files_struct *files, unsigned int fd)
+ {
+ struct fdtable *fdt = files_fdtable(files);
+--- /dev/null
++++ b/fs/overlayfs/Kconfig
+@@ -0,0 +1,4 @@
++config OVERLAYFS_FS
++ tristate "Overlay filesystem support"
++ help
++ Add support for overlay filesystem.
+--- /dev/null
++++ b/fs/overlayfs/Makefile
+@@ -0,0 +1,7 @@
++#
++# Makefile for the overlay filesystem.
++#
++
++obj-$(CONFIG_OVERLAYFS_FS) += overlayfs.o
++
++overlayfs-objs := super.o inode.o dir.o readdir.o copy_up.o
+--- /dev/null
++++ b/fs/overlayfs/copy_up.c
+@@ -0,0 +1,384 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/file.h>
++#include <linux/splice.h>
++#include <linux/xattr.h>
++#include <linux/security.h>
++#include <linux/uaccess.h>
++#include "overlayfs.h"
++
++#define OVL_COPY_UP_CHUNK_SIZE (1 << 20)
++
++static int ovl_copy_up_xattr(struct dentry *old, struct dentry *new)
++{
++ ssize_t list_size, size;
++ char *buf, *name, *value;
++ int error;
++
++ if (!old->d_inode->i_op->getxattr ||
++ !new->d_inode->i_op->getxattr)
++ return 0;
++
++ list_size = vfs_listxattr(old, NULL, 0);
++ if (list_size <= 0) {
++ if (list_size == -EOPNOTSUPP)
++ return 0;
++ return list_size;
++ }
++
++ buf = kzalloc(list_size, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ error = -ENOMEM;
++ value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL);
++ if (!value)
++ goto out;
++
++ list_size = vfs_listxattr(old, buf, list_size);
++ if (list_size <= 0) {
++ error = list_size;
++ goto out_free_value;
++ }
++
++ for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
++ size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX);
++ if (size <= 0) {
++ error = size;
++ goto out_free_value;
++ }
++ error = vfs_setxattr(new, name, value, size, 0);
++ if (error)
++ goto out_free_value;
++ }
++
++out_free_value:
++ kfree(value);
++out:
++ kfree(buf);
++ return error;
++}
++
++static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
++{
++ struct file *old_file;
++ struct file *new_file;
++ int error = 0;
++
++ if (len == 0)
++ return 0;
++
++ old_file = ovl_path_open(old, O_RDONLY);
++ if (IS_ERR(old_file))
++ return PTR_ERR(old_file);
++
++ new_file = ovl_path_open(new, O_WRONLY);
++ if (IS_ERR(new_file)) {
++ error = PTR_ERR(new_file);
++ goto out_fput;
++ }
++
++ /* FIXME: copy up sparse files efficiently */
++ while (len) {
++ loff_t offset = new_file->f_pos;
++ size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
++ long bytes;
++
++ if (len < this_len)
++ this_len = len;
++
++ if (signal_pending_state(TASK_KILLABLE, current)) {
++ error = -EINTR;
++ break;
++ }
++
++ bytes = do_splice_direct(old_file, &offset, new_file, this_len,
++ SPLICE_F_MOVE);
++ if (bytes <= 0) {
++ error = bytes;
++ break;
++ }
++
++ len -= bytes;
++ }
++
++ fput(new_file);
++out_fput:
++ fput(old_file);
++ return error;
++}
++
++static char *ovl_read_symlink(struct dentry *realdentry)
++{
++ int res;
++ char *buf;
++ struct inode *inode = realdentry->d_inode;
++ mm_segment_t old_fs;
++
++ res = -EINVAL;
++ if (!inode->i_op->readlink)
++ goto err;
++
++ res = -ENOMEM;
++ buf = (char *) __get_free_page(GFP_KERNEL);
++ if (!buf)
++ goto err;
++
++ old_fs = get_fs();
++ set_fs(get_ds());
++ /* The cast to a user pointer is valid due to the set_fs() */
++ res = inode->i_op->readlink(realdentry,
++ (char __user *)buf, PAGE_SIZE - 1);
++ set_fs(old_fs);
++ if (res < 0) {
++ free_page((unsigned long) buf);
++ goto err;
++ }
++ buf[res] = '\0';
++
++ return buf;
++
++err:
++ return ERR_PTR(res);
++}
++
++static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
++{
++ struct iattr attr = {
++ .ia_valid =
++ ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET,
++ .ia_atime = stat->atime,
++ .ia_mtime = stat->mtime,
++ };
++
++ return notify_change(upperdentry, &attr);
++}
++
++static int ovl_set_mode(struct dentry *upperdentry, umode_t mode)
++{
++ struct iattr attr = {
++ .ia_valid = ATTR_MODE,
++ .ia_mode = mode,
++ };
++
++ return notify_change(upperdentry, &attr);
++}
++
++static int ovl_copy_up_locked(struct dentry *upperdir, struct dentry *dentry,
++ struct path *lowerpath, struct kstat *stat,
++ const char *link)
++{
++ int err;
++ struct path newpath;
++ umode_t mode = stat->mode;
++
++ /* Can't properly set mode on creation because of the umask */
++ stat->mode &= S_IFMT;
++
++ ovl_path_upper(dentry, &newpath);
++ WARN_ON(newpath.dentry);
++ newpath.dentry = ovl_upper_create(upperdir, dentry, stat, link);
++ if (IS_ERR(newpath.dentry))
++ return PTR_ERR(newpath.dentry);
++
++ if (S_ISREG(stat->mode)) {
++ err = ovl_copy_up_data(lowerpath, &newpath, stat->size);
++ if (err)
++ goto err_remove;
++ }
++
++ err = ovl_copy_up_xattr(lowerpath->dentry, newpath.dentry);
++ if (err)
++ goto err_remove;
++
++ mutex_lock(&newpath.dentry->d_inode->i_mutex);
++ if (!S_ISLNK(stat->mode))
++ err = ovl_set_mode(newpath.dentry, mode);
++ if (!err)
++ err = ovl_set_timestamps(newpath.dentry, stat);
++ mutex_unlock(&newpath.dentry->d_inode->i_mutex);
++ if (err)
++ goto err_remove;
++
++ ovl_dentry_update(dentry, newpath.dentry);
++
++ /*
++ * Easiest way to get rid of the lower dentry reference is to
++ * drop this dentry. This is neither needed nor possible for
++ * directories.
++ */
++ if (!S_ISDIR(stat->mode))
++ d_drop(dentry);
++
++ return 0;
++
++err_remove:
++ if (S_ISDIR(stat->mode))
++ vfs_rmdir(upperdir->d_inode, newpath.dentry);
++ else
++ vfs_unlink(upperdir->d_inode, newpath.dentry);
++
++ dput(newpath.dentry);
++
++ return err;
++}
++
++/*
++ * Copy up a single dentry
++ *
++ * Directory renames are only allowed on "pure upper" (already created on
++ * upper filesystem, never copied up). Directories which are on lower or
++ * are merged may not be renamed. For these -EXDEV is returned and
++ * userspace has to deal with it. This means, when copying up a
++ * directory we can rely on it and ancestors being stable.
++ *
++ * Non-directory renames start with copy up of source if necessary. The
++ * actual rename will only proceed once the copy up was successful. Copy
++ * up uses upper parent i_mutex for exclusion. Since rename can change
++ * d_parent it is possible that the copy up will lock the old parent. At
++ * that point the file will have already been copied up anyway.
++ */
++static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
++ struct path *lowerpath, struct kstat *stat)
++{
++ int err;
++ struct kstat pstat;
++ struct path parentpath;
++ struct dentry *upperdir;
++ const struct cred *old_cred;
++ struct cred *override_cred;
++ char *link = NULL;
++
++ ovl_path_upper(parent, &parentpath);
++ upperdir = parentpath.dentry;
++
++ err = vfs_getattr(parentpath.mnt, parentpath.dentry, &pstat);
++ if (err)
++ return err;
++
++ if (S_ISLNK(stat->mode)) {
++ link = ovl_read_symlink(lowerpath->dentry);
++ if (IS_ERR(link))
++ return PTR_ERR(link);
++ }
++
++ err = -ENOMEM;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ goto out_free_link;
++
++ override_cred->fsuid = stat->uid;
++ override_cred->fsgid = stat->gid;
++ /*
++ * CAP_SYS_ADMIN for copying up extended attributes
++ * CAP_DAC_OVERRIDE for create
++ * CAP_FOWNER for chmod, timestamp update
++ * CAP_FSETID for chmod
++ * CAP_MKNOD for mknod
++ */
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
++ cap_raise(override_cred->cap_effective, CAP_FOWNER);
++ cap_raise(override_cred->cap_effective, CAP_FSETID);
++ cap_raise(override_cred->cap_effective, CAP_MKNOD);
++ old_cred = override_creds(override_cred);
++
++ mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
++ if (ovl_path_type(dentry) != OVL_PATH_LOWER) {
++ err = 0;
++ } else {
++ err = ovl_copy_up_locked(upperdir, dentry, lowerpath,
++ stat, link);
++ if (!err) {
++ /* Restore timestamps on parent (best effort) */
++ ovl_set_timestamps(upperdir, &pstat);
++ }
++ }
++
++ mutex_unlock(&upperdir->d_inode->i_mutex);
++
++ revert_creds(old_cred);
++ put_cred(override_cred);
++
++out_free_link:
++ if (link)
++ free_page((unsigned long) link);
++
++ return err;
++}
++
++int ovl_copy_up(struct dentry *dentry)
++{
++ int err;
++
++ err = 0;
++ while (!err) {
++ struct dentry *next;
++ struct dentry *parent;
++ struct path lowerpath;
++ struct kstat stat;
++ enum ovl_path_type type = ovl_path_type(dentry);
++
++ if (type != OVL_PATH_LOWER)
++ break;
++
++ next = dget(dentry);
++ /* find the topmost dentry not yet copied up */
++ for (;;) {
++ parent = dget_parent(next);
++
++ type = ovl_path_type(parent);
++ if (type != OVL_PATH_LOWER)
++ break;
++
++ dput(next);
++ next = parent;
++ }
++
++ ovl_path_lower(next, &lowerpath);
++ err = vfs_getattr(lowerpath.mnt, lowerpath.dentry, &stat);
++ if (!err)
++ err = ovl_copy_up_one(parent, next, &lowerpath, &stat);
++
++ dput(parent);
++ dput(next);
++ }
++
++ return err;
++}
++
++/* Optimize by not copying up the file first and truncating later */
++int ovl_copy_up_truncate(struct dentry *dentry, loff_t size)
++{
++ int err;
++ struct kstat stat;
++ struct path lowerpath;
++ struct dentry *parent = dget_parent(dentry);
++
++ err = ovl_copy_up(parent);
++ if (err)
++ goto out_dput_parent;
++
++ ovl_path_lower(dentry, &lowerpath);
++ err = vfs_getattr(lowerpath.mnt, lowerpath.dentry, &stat);
++ if (err)
++ goto out_dput_parent;
++
++ if (size < stat.size)
++ stat.size = size;
++
++ err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat);
++
++out_dput_parent:
++ dput(parent);
++ return err;
++}
+--- /dev/null
++++ b/fs/overlayfs/dir.c
+@@ -0,0 +1,596 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/xattr.h>
++#include <linux/security.h>
++#include "overlayfs.h"
++
++static const char *ovl_whiteout_symlink = "(overlay-whiteout)";
++
++static int ovl_whiteout(struct dentry *upperdir, struct dentry *dentry)
++{
++ int err;
++ struct dentry *newdentry;
++ const struct cred *old_cred;
++ struct cred *override_cred;
++
++ /* FIXME: recheck lower dentry to see if whiteout is really needed */
++
++ err = -ENOMEM;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ goto out;
++
++ /*
++ * CAP_SYS_ADMIN for setxattr
++ * CAP_DAC_OVERRIDE for symlink creation
++ * CAP_FOWNER for unlink in sticky directory
++ */
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
++ cap_raise(override_cred->cap_effective, CAP_FOWNER);
++ override_cred->fsuid = 0;
++ override_cred->fsgid = 0;
++ old_cred = override_creds(override_cred);
++
++ newdentry = lookup_one_len(dentry->d_name.name, upperdir,
++ dentry->d_name.len);
++ err = PTR_ERR(newdentry);
++ if (IS_ERR(newdentry))
++ goto out_put_cred;
++
++ /* Just been removed within the same locked region */
++ WARN_ON(newdentry->d_inode);
++
++ err = vfs_symlink(upperdir->d_inode, newdentry, ovl_whiteout_symlink);
++ if (err)
++ goto out_dput;
++
++ ovl_dentry_version_inc(dentry->d_parent);
++
++ err = vfs_setxattr(newdentry, ovl_whiteout_xattr, "y", 1, 0);
++ if (err)
++ vfs_unlink(upperdir->d_inode, newdentry);
++
++out_dput:
++ dput(newdentry);
++out_put_cred:
++ revert_creds(old_cred);
++ put_cred(override_cred);
++out:
++ if (err) {
++ /*
++ * There's no way to recover from failure to whiteout.
++ * What should we do? Log a big fat error and... ?
++ */
++ printk(KERN_ERR "overlayfs: ERROR - failed to whiteout '%s'\n",
++ dentry->d_name.name);
++ }
++
++ return err;
++}
++
++static struct dentry *ovl_lookup_create(struct dentry *upperdir,
++ struct dentry *template)
++{
++ int err;
++ struct dentry *newdentry;
++ struct qstr *name = &template->d_name;
++
++ newdentry = lookup_one_len(name->name, upperdir, name->len);
++ if (IS_ERR(newdentry))
++ return newdentry;
++
++ if (newdentry->d_inode) {
++ const struct cred *old_cred;
++ struct cred *override_cred;
++
++ /* No need to check whiteout if lower parent is non-existent */
++ err = -EEXIST;
++ if (!ovl_dentry_lower(template->d_parent))
++ goto out_dput;
++
++ if (!S_ISLNK(newdentry->d_inode->i_mode))
++ goto out_dput;
++
++ err = -ENOMEM;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ goto out_dput;
++
++ /*
++ * CAP_SYS_ADMIN for getxattr
++ * CAP_FOWNER for unlink in sticky directory
++ */
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ cap_raise(override_cred->cap_effective, CAP_FOWNER);
++ old_cred = override_creds(override_cred);
++
++ err = -EEXIST;
++ if (ovl_is_whiteout(newdentry))
++ err = vfs_unlink(upperdir->d_inode, newdentry);
++
++ revert_creds(old_cred);
++ put_cred(override_cred);
++ if (err)
++ goto out_dput;
++
++ dput(newdentry);
++ newdentry = lookup_one_len(name->name, upperdir, name->len);
++ if (IS_ERR(newdentry)) {
++ ovl_whiteout(upperdir, template);
++ return newdentry;
++ }
++
++ /*
++ * Whiteout just been successfully removed, parent
++ * i_mutex is still held, there's no way the lookup
++ * could return positive.
++ */
++ WARN_ON(newdentry->d_inode);
++ }
++
++ return newdentry;
++
++out_dput:
++ dput(newdentry);
++ return ERR_PTR(err);
++}
++
++struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry,
++ struct kstat *stat, const char *link)
++{
++ int err;
++ struct dentry *newdentry;
++ struct inode *dir = upperdir->d_inode;
++
++ newdentry = ovl_lookup_create(upperdir, dentry);
++ if (IS_ERR(newdentry))
++ goto out;
++
++ switch (stat->mode & S_IFMT) {
++ case S_IFREG:
++ err = vfs_create(dir, newdentry, stat->mode, NULL);
++ break;
++
++ case S_IFDIR:
++ err = vfs_mkdir(dir, newdentry, stat->mode);
++ break;
++
++ case S_IFCHR:
++ case S_IFBLK:
++ case S_IFIFO:
++ case S_IFSOCK:
++ err = vfs_mknod(dir, newdentry, stat->mode, stat->rdev);
++ break;
++
++ case S_IFLNK:
++ err = vfs_symlink(dir, newdentry, link);
++ break;
++
++ default:
++ err = -EPERM;
++ }
++ if (err) {
++ if (ovl_dentry_is_opaque(dentry))
++ ovl_whiteout(upperdir, dentry);
++ dput(newdentry);
++ newdentry = ERR_PTR(err);
++ } else if (WARN_ON(!newdentry->d_inode)) {
++ /*
++ * Not quite sure if non-instantiated dentry is legal or not.
++ * VFS doesn't seem to care so check and warn here.
++ */
++ dput(newdentry);
++ newdentry = ERR_PTR(-ENOENT);
++ }
++
++out:
++ return newdentry;
++
++}
++
++static int ovl_set_opaque(struct dentry *upperdentry)
++{
++ int err;
++ const struct cred *old_cred;
++ struct cred *override_cred;
++
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++
++ /* CAP_SYS_ADMIN for setxattr of "trusted" namespace */
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ old_cred = override_creds(override_cred);
++ err = vfs_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0);
++ revert_creds(old_cred);
++ put_cred(override_cred);
++
++ return err;
++}
++
++static int ovl_remove_opaque(struct dentry *upperdentry)
++{
++ int err;
++ const struct cred *old_cred;
++ struct cred *override_cred;
++
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++
++ /* CAP_SYS_ADMIN for removexattr of "trusted" namespace */
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ old_cred = override_creds(override_cred);
++ err = vfs_removexattr(upperdentry, ovl_opaque_xattr);
++ revert_creds(old_cred);
++ put_cred(override_cred);
++
++ return err;
++}
++
++static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
++ struct kstat *stat)
++{
++ int err;
++ enum ovl_path_type type;
++ struct path realpath;
++
++ type = ovl_path_real(dentry, &realpath);
++ err = vfs_getattr(realpath.mnt, realpath.dentry, stat);
++ if (err)
++ return err;
++
++ stat->dev = dentry->d_sb->s_dev;
++ stat->ino = dentry->d_inode->i_ino;
++
++ /*
++ * It's probably not worth it to count subdirs to get the
++ * correct link count. nlink=1 seems to pacify 'find' and
++ * other utilities.
++ */
++ if (type == OVL_PATH_MERGE)
++ stat->nlink = 1;
++
++ return 0;
++}
++
++static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev,
++ const char *link)
++{
++ int err;
++ struct dentry *newdentry;
++ struct dentry *upperdir;
++ struct inode *inode;
++ struct kstat stat = {
++ .mode = mode,
++ .rdev = rdev,
++ };
++
++ err = -ENOMEM;
++ inode = ovl_new_inode(dentry->d_sb, mode, dentry->d_fsdata);
++ if (!inode)
++ goto out;
++
++ err = ovl_copy_up(dentry->d_parent);
++ if (err)
++ goto out_iput;
++
++ upperdir = ovl_dentry_upper(dentry->d_parent);
++ mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
++
++ newdentry = ovl_upper_create(upperdir, dentry, &stat, link);
++ err = PTR_ERR(newdentry);
++ if (IS_ERR(newdentry))
++ goto out_unlock;
++
++ ovl_dentry_version_inc(dentry->d_parent);
++ if (ovl_dentry_is_opaque(dentry) && S_ISDIR(mode)) {
++ err = ovl_set_opaque(newdentry);
++ if (err) {
++ vfs_rmdir(upperdir->d_inode, newdentry);
++ ovl_whiteout(upperdir, dentry);
++ goto out_dput;
++ }
++ }
++ ovl_dentry_update(dentry, newdentry);
++ d_instantiate(dentry, inode);
++ inode = NULL;
++ newdentry = NULL;
++ err = 0;
++
++out_dput:
++ dput(newdentry);
++out_unlock:
++ mutex_unlock(&upperdir->d_inode->i_mutex);
++out_iput:
++ iput(inode);
++out:
++ return err;
++}
++
++static int ovl_create(struct inode *dir, struct dentry *dentry, umode_t mode,
++ struct nameidata *nd)
++{
++ return ovl_create_object(dentry, (mode & 07777) | S_IFREG, 0, NULL);
++}
++
++static int ovl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
++{
++ return ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL);
++}
++
++static int ovl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
++ dev_t rdev)
++{
++ return ovl_create_object(dentry, mode, rdev, NULL);
++}
++
++static int ovl_symlink(struct inode *dir, struct dentry *dentry,
++ const char *link)
++{
++ return ovl_create_object(dentry, S_IFLNK, 0, link);
++}
++
++static int ovl_do_remove(struct dentry *dentry, bool is_dir)
++{
++ int err;
++ enum ovl_path_type type;
++ struct path realpath;
++ struct dentry *upperdir;
++
++ err = ovl_copy_up(dentry->d_parent);
++ if (err)
++ return err;
++
++ upperdir = ovl_dentry_upper(dentry->d_parent);
++ mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
++ type = ovl_path_real(dentry, &realpath);
++ if (type != OVL_PATH_LOWER) {
++ err = -ESTALE;
++ if (realpath.dentry->d_parent != upperdir)
++ goto out_d_drop;
++
++ /* FIXME: create whiteout up front and rename to target */
++
++ if (is_dir)
++ err = vfs_rmdir(upperdir->d_inode, realpath.dentry);
++ else
++ err = vfs_unlink(upperdir->d_inode, realpath.dentry);
++ if (err)
++ goto out_d_drop;
++
++ ovl_dentry_version_inc(dentry->d_parent);
++ }
++
++ if (type != OVL_PATH_UPPER || ovl_dentry_is_opaque(dentry))
++ err = ovl_whiteout(upperdir, dentry);
++
++ /*
++ * Keeping this dentry hashed would mean having to release
++ * upperpath/lowerpath, which could only be done if we are the
++ * sole user of this dentry. Too tricky... Just unhash for
++ * now.
++ */
++out_d_drop:
++ d_drop(dentry);
++ mutex_unlock(&upperdir->d_inode->i_mutex);
++
++ return err;
++}
++
++static int ovl_unlink(struct inode *dir, struct dentry *dentry)
++{
++ return ovl_do_remove(dentry, false);
++}
++
++
++static int ovl_rmdir(struct inode *dir, struct dentry *dentry)
++{
++ int err;
++ enum ovl_path_type type;
++
++ type = ovl_path_type(dentry);
++ if (type != OVL_PATH_UPPER) {
++ err = ovl_check_empty_and_clear(dentry, type);
++ if (err)
++ return err;
++ }
++
++ return ovl_do_remove(dentry, true);
++}
++
++static int ovl_link(struct dentry *old, struct inode *newdir,
++ struct dentry *new)
++{
++ int err;
++ struct dentry *olddentry;
++ struct dentry *newdentry;
++ struct dentry *upperdir;
++
++ err = ovl_copy_up(old);
++ if (err)
++ goto out;
++
++ err = ovl_copy_up(new->d_parent);
++ if (err)
++ goto out;
++
++ upperdir = ovl_dentry_upper(new->d_parent);
++ mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
++ newdentry = ovl_lookup_create(upperdir, new);
++ err = PTR_ERR(newdentry);
++ if (IS_ERR(newdentry))
++ goto out_unlock;
++
++ olddentry = ovl_dentry_upper(old);
++ err = vfs_link(olddentry, upperdir->d_inode, newdentry);
++ if (!err) {
++ if (WARN_ON(!newdentry->d_inode)) {
++ dput(newdentry);
++ err = -ENOENT;
++ goto out_unlock;
++ }
++
++ ovl_dentry_version_inc(new->d_parent);
++ ovl_dentry_update(new, newdentry);
++
++ ihold(old->d_inode);
++ d_instantiate(new, old->d_inode);
++ } else {
++ if (ovl_dentry_is_opaque(new))
++ ovl_whiteout(upperdir, new);
++ dput(newdentry);
++ }
++out_unlock:
++ mutex_unlock(&upperdir->d_inode->i_mutex);
++out:
++ return err;
++
++}
++
++static int ovl_rename(struct inode *olddir, struct dentry *old,
++ struct inode *newdir, struct dentry *new)
++{
++ int err;
++ enum ovl_path_type old_type;
++ enum ovl_path_type new_type;
++ struct dentry *old_upperdir;
++ struct dentry *new_upperdir;
++ struct dentry *olddentry;
++ struct dentry *newdentry;
++ struct dentry *trap;
++ bool old_opaque;
++ bool new_opaque;
++ bool new_create = false;
++ bool is_dir = S_ISDIR(old->d_inode->i_mode);
++
++ /* Don't copy up directory trees */
++ old_type = ovl_path_type(old);
++ if (old_type != OVL_PATH_UPPER && is_dir)
++ return -EXDEV;
++
++ if (new->d_inode) {
++ new_type = ovl_path_type(new);
++
++ if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) {
++ if (ovl_dentry_lower(old)->d_inode ==
++ ovl_dentry_lower(new)->d_inode)
++ return 0;
++ }
++ if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) {
++ if (ovl_dentry_upper(old)->d_inode ==
++ ovl_dentry_upper(new)->d_inode)
++ return 0;
++ }
++
++ if (new_type != OVL_PATH_UPPER &&
++ S_ISDIR(new->d_inode->i_mode)) {
++ err = ovl_check_empty_and_clear(new, new_type);
++ if (err)
++ return err;
++ }
++ } else {
++ new_type = OVL_PATH_UPPER;
++ }
++
++ err = ovl_copy_up(old);
++ if (err)
++ return err;
++
++ err = ovl_copy_up(new->d_parent);
++ if (err)
++ return err;
++
++ old_upperdir = ovl_dentry_upper(old->d_parent);
++ new_upperdir = ovl_dentry_upper(new->d_parent);
++
++ trap = lock_rename(new_upperdir, old_upperdir);
++
++ olddentry = ovl_dentry_upper(old);
++ newdentry = ovl_dentry_upper(new);
++ if (newdentry) {
++ dget(newdentry);
++ } else {
++ new_create = true;
++ newdentry = ovl_lookup_create(new_upperdir, new);
++ err = PTR_ERR(newdentry);
++ if (IS_ERR(newdentry))
++ goto out_unlock;
++ }
++
++ err = -ESTALE;
++ if (olddentry->d_parent != old_upperdir)
++ goto out_dput;
++ if (newdentry->d_parent != new_upperdir)
++ goto out_dput;
++ if (olddentry == trap)
++ goto out_dput;
++ if (newdentry == trap)
++ goto out_dput;
++
++ old_opaque = ovl_dentry_is_opaque(old);
++ new_opaque = ovl_dentry_is_opaque(new) || new_type != OVL_PATH_UPPER;
++
++ if (is_dir && !old_opaque && new_opaque) {
++ err = ovl_set_opaque(olddentry);
++ if (err)
++ goto out_dput;
++ }
++
++ err = vfs_rename(old_upperdir->d_inode, olddentry,
++ new_upperdir->d_inode, newdentry);
++
++ if (err) {
++ if (new_create && ovl_dentry_is_opaque(new))
++ ovl_whiteout(new_upperdir, new);
++ if (is_dir && !old_opaque && new_opaque)
++ ovl_remove_opaque(olddentry);
++ goto out_dput;
++ }
++
++ if (old_type != OVL_PATH_UPPER || old_opaque)
++ err = ovl_whiteout(old_upperdir, old);
++ if (is_dir && old_opaque && !new_opaque)
++ ovl_remove_opaque(olddentry);
++
++ if (old_opaque != new_opaque)
++ ovl_dentry_set_opaque(old, new_opaque);
++
++ ovl_dentry_version_inc(old->d_parent);
++ ovl_dentry_version_inc(new->d_parent);
++
++out_dput:
++ dput(newdentry);
++out_unlock:
++ unlock_rename(new_upperdir, old_upperdir);
++ return err;
++}
++
++const struct inode_operations ovl_dir_inode_operations = {
++ .lookup = ovl_lookup,
++ .mkdir = ovl_mkdir,
++ .symlink = ovl_symlink,
++ .unlink = ovl_unlink,
++ .rmdir = ovl_rmdir,
++ .rename = ovl_rename,
++ .link = ovl_link,
++ .setattr = ovl_setattr,
++ .create = ovl_create,
++ .mknod = ovl_mknod,
++ .permission = ovl_permission,
++ .getattr = ovl_dir_getattr,
++ .setxattr = ovl_setxattr,
++ .getxattr = ovl_getxattr,
++ .listxattr = ovl_listxattr,
++ .removexattr = ovl_removexattr,
++};
+--- /dev/null
++++ b/fs/overlayfs/inode.c
+@@ -0,0 +1,384 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/xattr.h>
++#include "overlayfs.h"
++
++int ovl_setattr(struct dentry *dentry, struct iattr *attr)
++{
++ struct dentry *upperdentry;
++ int err;
++
++ if ((attr->ia_valid & ATTR_SIZE) && !ovl_dentry_upper(dentry))
++ err = ovl_copy_up_truncate(dentry, attr->ia_size);
++ else
++ err = ovl_copy_up(dentry);
++ if (err)
++ return err;
++
++ upperdentry = ovl_dentry_upper(dentry);
++
++ if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
++ attr->ia_valid &= ~ATTR_MODE;
++
++ mutex_lock(&upperdentry->d_inode->i_mutex);
++ err = notify_change(upperdentry, attr);
++ mutex_unlock(&upperdentry->d_inode->i_mutex);
++
++ return err;
++}
++
++static int ovl_getattr(struct vfsmount *mnt, struct dentry *dentry,
++ struct kstat *stat)
++{
++ struct path realpath;
++
++ ovl_path_real(dentry, &realpath);
++ return vfs_getattr(realpath.mnt, realpath.dentry, stat);
++}
++
++int ovl_permission(struct inode *inode, int mask)
++{
++ struct ovl_entry *oe;
++ struct dentry *alias = NULL;
++ struct inode *realinode;
++ struct dentry *realdentry;
++ bool is_upper;
++ int err;
++
++ if (S_ISDIR(inode->i_mode)) {
++ oe = inode->i_private;
++ } else if (mask & MAY_NOT_BLOCK) {
++ return -ECHILD;
++ } else {
++ /*
++ * For non-directories find an alias and get the info
++ * from there.
++ */
++ spin_lock(&inode->i_lock);
++ if (WARN_ON(list_empty(&inode->i_dentry))) {
++ spin_unlock(&inode->i_lock);
++ return -ENOENT;
++ }
++ alias = list_entry(inode->i_dentry.next,
++ struct dentry, d_alias);
++ dget(alias);
++ spin_unlock(&inode->i_lock);
++ oe = alias->d_fsdata;
++ }
++
++ realdentry = ovl_entry_real(oe, &is_upper);
++
++ /* Careful in RCU walk mode */
++ realinode = ACCESS_ONCE(realdentry->d_inode);
++ if (!realinode) {
++ WARN_ON(!(mask & MAY_NOT_BLOCK));
++ err = -ENOENT;
++ goto out_dput;
++ }
++
++ if (mask & MAY_WRITE) {
++ umode_t mode = realinode->i_mode;
++
++ /*
++ * Writes will always be redirected to upper layer, so
++ * ignore lower layer being read-only.
++ *
++ * If the overlay itself is read-only then proceed
++ * with the permission check, don't return EROFS.
++ * This will only happen if this is the lower layer of
++ * another overlayfs.
++ *
++ * If upper fs becomes read-only after the overlay was
++ * constructed return EROFS to prevent modification of
++ * upper layer.
++ */
++ err = -EROFS;
++ if (is_upper && !IS_RDONLY(inode) && IS_RDONLY(realinode) &&
++ (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
++ goto out_dput;
++
++ /*
++ * Nobody gets write access to an immutable file.
++ */
++ err = -EACCES;
++ if (IS_IMMUTABLE(realinode))
++ goto out_dput;
++ }
++
++ if (realinode->i_op->permission)
++ err = realinode->i_op->permission(realinode, mask);
++ else
++ err = generic_permission(realinode, mask);
++out_dput:
++ dput(alias);
++ return err;
++}
++
++
++struct ovl_link_data {
++ struct dentry *realdentry;
++ void *cookie;
++};
++
++static void *ovl_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++ void *ret;
++ struct dentry *realdentry;
++ struct inode *realinode;
++
++ realdentry = ovl_dentry_real(dentry);
++ realinode = realdentry->d_inode;
++
++ if (WARN_ON(!realinode->i_op->follow_link))
++ return ERR_PTR(-EPERM);
++
++ ret = realinode->i_op->follow_link(realdentry, nd);
++ if (IS_ERR(ret))
++ return ret;
++
++ if (realinode->i_op->put_link) {
++ struct ovl_link_data *data;
++
++ data = kmalloc(sizeof(struct ovl_link_data), GFP_KERNEL);
++ if (!data) {
++ realinode->i_op->put_link(realdentry, nd, ret);
++ return ERR_PTR(-ENOMEM);
++ }
++ data->realdentry = realdentry;
++ data->cookie = ret;
++
++ return data;
++ } else {
++ return NULL;
++ }
++}
++
++static void ovl_put_link(struct dentry *dentry, struct nameidata *nd, void *c)
++{
++ struct inode *realinode;
++ struct ovl_link_data *data = c;
++
++ if (!data)
++ return;
++
++ realinode = data->realdentry->d_inode;
++ realinode->i_op->put_link(data->realdentry, nd, data->cookie);
++ kfree(data);
++}
++
++static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
++{
++ struct path realpath;
++ struct inode *realinode;
++
++ ovl_path_real(dentry, &realpath);
++ realinode = realpath.dentry->d_inode;
++
++ if (!realinode->i_op->readlink)
++ return -EINVAL;
++
++ touch_atime(realpath.mnt, realpath.dentry);
++
++ return realinode->i_op->readlink(realpath.dentry, buf, bufsiz);
++}
++
++
++static bool ovl_is_private_xattr(const char *name)
++{
++ return strncmp(name, "trusted.overlay.", 14) == 0;
++}
++
++int ovl_setxattr(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags)
++{
++ int err;
++ struct dentry *upperdentry;
++
++ if (ovl_is_private_xattr(name))
++ return -EPERM;
++
++ err = ovl_copy_up(dentry);
++ if (err)
++ return err;
++
++ upperdentry = ovl_dentry_upper(dentry);
++ return vfs_setxattr(upperdentry, name, value, size, flags);
++}
++
++ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
++ void *value, size_t size)
++{
++ if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE &&
++ ovl_is_private_xattr(name))
++ return -ENODATA;
++
++ return vfs_getxattr(ovl_dentry_real(dentry), name, value, size);
++}
++
++ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
++{
++ ssize_t res;
++ int off;
++
++ res = vfs_listxattr(ovl_dentry_real(dentry), list, size);
++ if (res <= 0 || size == 0)
++ return res;
++
++ if (ovl_path_type(dentry->d_parent) != OVL_PATH_MERGE)
++ return res;
++
++ /* filter out private xattrs */
++ for (off = 0; off < res;) {
++ char *s = list + off;
++ size_t slen = strlen(s) + 1;
++
++ BUG_ON(off + slen > res);
++
++ if (ovl_is_private_xattr(s)) {
++ res -= slen;
++ memmove(s, s + slen, res - off);
++ } else {
++ off += slen;
++ }
++ }
++
++ return res;
++}
++
++int ovl_removexattr(struct dentry *dentry, const char *name)
++{
++ int err;
++ struct path realpath;
++ enum ovl_path_type type;
++
++ if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE &&
++ ovl_is_private_xattr(name))
++ return -ENODATA;
++
++ type = ovl_path_real(dentry, &realpath);
++ if (type == OVL_PATH_LOWER) {
++ err = vfs_getxattr(realpath.dentry, name, NULL, 0);
++ if (err < 0)
++ return err;
++
++ err = ovl_copy_up(dentry);
++ if (err)
++ return err;
++
++ ovl_path_upper(dentry, &realpath);
++ }
++
++ return vfs_removexattr(realpath.dentry, name);
++}
++
++static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
++ struct dentry *realdentry)
++{
++ if (type != OVL_PATH_LOWER)
++ return false;
++
++ if (special_file(realdentry->d_inode->i_mode))
++ return false;
++
++ if (!(OPEN_FMODE(flags) & FMODE_WRITE) && !(flags & O_TRUNC))
++ return false;
++
++ return true;
++}
++
++static struct file *ovl_open(struct dentry *dentry, struct file *file,
++ const struct cred *cred)
++{
++ int err;
++ struct path realpath;
++ enum ovl_path_type type;
++
++ type = ovl_path_real(dentry, &realpath);
++ if (ovl_open_need_copy_up(file->f_flags, type, realpath.dentry)) {
++ if (file->f_flags & O_TRUNC)
++ err = ovl_copy_up_truncate(dentry, 0);
++ else
++ err = ovl_copy_up(dentry);
++ if (err)
++ return ERR_PTR(err);
++
++ ovl_path_upper(dentry, &realpath);
++ }
++
++ return vfs_open(&realpath, file, cred);
++}
++
++static const struct inode_operations ovl_file_inode_operations = {
++ .setattr = ovl_setattr,
++ .permission = ovl_permission,
++ .getattr = ovl_getattr,
++ .setxattr = ovl_setxattr,
++ .getxattr = ovl_getxattr,
++ .listxattr = ovl_listxattr,
++ .removexattr = ovl_removexattr,
++ .open = ovl_open,
++};
++
++static const struct inode_operations ovl_symlink_inode_operations = {
++ .setattr = ovl_setattr,
++ .follow_link = ovl_follow_link,
++ .put_link = ovl_put_link,
++ .readlink = ovl_readlink,
++ .getattr = ovl_getattr,
++ .setxattr = ovl_setxattr,
++ .getxattr = ovl_getxattr,
++ .listxattr = ovl_listxattr,
++ .removexattr = ovl_removexattr,
++};
++
++struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
++ struct ovl_entry *oe)
++{
++ struct inode *inode;
++
++ inode = new_inode(sb);
++ if (!inode)
++ return NULL;
++
++ mode &= S_IFMT;
++
++ inode->i_ino = get_next_ino();
++ inode->i_mode = mode;
++ inode->i_flags |= S_NOATIME | S_NOCMTIME;
++
++ switch (mode) {
++ case S_IFDIR:
++ inode->i_private = oe;
++ inode->i_op = &ovl_dir_inode_operations;
++ inode->i_fop = &ovl_dir_operations;
++ break;
++
++ case S_IFLNK:
++ inode->i_op = &ovl_symlink_inode_operations;
++ break;
++
++ case S_IFREG:
++ case S_IFSOCK:
++ case S_IFBLK:
++ case S_IFCHR:
++ case S_IFIFO:
++ inode->i_op = &ovl_file_inode_operations;
++ break;
++
++ default:
++ WARN(1, "illegal file type: %i\n", mode);
++ inode = NULL;
++ }
++
++ return inode;
++
++}
+--- /dev/null
++++ b/fs/overlayfs/overlayfs.h
+@@ -0,0 +1,64 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++struct ovl_entry;
++
++enum ovl_path_type {
++ OVL_PATH_UPPER,
++ OVL_PATH_MERGE,
++ OVL_PATH_LOWER,
++};
++
++extern const char *ovl_opaque_xattr;
++extern const char *ovl_whiteout_xattr;
++extern const struct dentry_operations ovl_dentry_operations;
++
++enum ovl_path_type ovl_path_type(struct dentry *dentry);
++u64 ovl_dentry_version_get(struct dentry *dentry);
++void ovl_dentry_version_inc(struct dentry *dentry);
++void ovl_path_upper(struct dentry *dentry, struct path *path);
++void ovl_path_lower(struct dentry *dentry, struct path *path);
++enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
++struct dentry *ovl_dentry_upper(struct dentry *dentry);
++struct dentry *ovl_dentry_lower(struct dentry *dentry);
++struct dentry *ovl_dentry_real(struct dentry *dentry);
++struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper);
++bool ovl_dentry_is_opaque(struct dentry *dentry);
++void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque);
++bool ovl_is_whiteout(struct dentry *dentry);
++void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
++struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
++ struct nameidata *nd);
++struct file *ovl_path_open(struct path *path, int flags);
++
++struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry,
++ struct kstat *stat, const char *link);
++
++/* readdir.c */
++extern const struct file_operations ovl_dir_operations;
++int ovl_check_empty_and_clear(struct dentry *dentry, enum ovl_path_type type);
++
++/* inode.c */
++int ovl_setattr(struct dentry *dentry, struct iattr *attr);
++int ovl_permission(struct inode *inode, int mask);
++int ovl_setxattr(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags);
++ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
++ void *value, size_t size);
++ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
++int ovl_removexattr(struct dentry *dentry, const char *name);
++
++struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
++ struct ovl_entry *oe);
++/* dir.c */
++extern const struct inode_operations ovl_dir_inode_operations;
++
++/* copy_up.c */
++int ovl_copy_up(struct dentry *dentry);
++int ovl_copy_up_truncate(struct dentry *dentry, loff_t size);
+--- /dev/null
++++ b/fs/overlayfs/readdir.c
+@@ -0,0 +1,565 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/namei.h>
++#include <linux/file.h>
++#include <linux/xattr.h>
++#include <linux/rbtree.h>
++#include <linux/security.h>
++#include "overlayfs.h"
++
++struct ovl_cache_entry {
++ const char *name;
++ unsigned int len;
++ unsigned int type;
++ u64 ino;
++ bool is_whiteout;
++ struct list_head l_node;
++ struct rb_node node;
++};
++
++struct ovl_readdir_data {
++ struct rb_root *root;
++ struct list_head *list;
++ struct list_head *middle;
++ struct dentry *dir;
++ int count;
++ int err;
++};
++
++struct ovl_dir_file {
++ bool is_real;
++ bool is_cached;
++ struct list_head cursor;
++ u64 cache_version;
++ struct list_head cache;
++ struct file *realfile;
++};
++
++static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
++{
++ return container_of(n, struct ovl_cache_entry, node);
++}
++
++static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
++ const char *name, int len)
++{
++ struct rb_node *node = root->rb_node;
++ int cmp;
++
++ while (node) {
++ struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);
++
++ cmp = strncmp(name, p->name, len);
++ if (cmp > 0)
++ node = p->node.rb_right;
++ else if (cmp < 0 || len < p->len)
++ node = p->node.rb_left;
++ else
++ return p;
++ }
++
++ return NULL;
++}
++
++static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
++ u64 ino, unsigned int d_type)
++{
++ struct ovl_cache_entry *p;
++
++ p = kmalloc(sizeof(*p) + len + 1, GFP_KERNEL);
++ if (p) {
++ char *name_copy = (char *) (p + 1);
++ memcpy(name_copy, name, len);
++ name_copy[len] = '\0';
++ p->name = name_copy;
++ p->len = len;
++ p->type = d_type;
++ p->ino = ino;
++ p->is_whiteout = false;
++ }
++
++ return p;
++}
++
++static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
++ const char *name, int len, u64 ino,
++ unsigned int d_type)
++{
++ struct rb_node **newp = &rdd->root->rb_node;
++ struct rb_node *parent = NULL;
++ struct ovl_cache_entry *p;
++
++ while (*newp) {
++ int cmp;
++ struct ovl_cache_entry *tmp;
++
++ parent = *newp;
++ tmp = ovl_cache_entry_from_node(*newp);
++ cmp = strncmp(name, tmp->name, len);
++ if (cmp > 0)
++ newp = &tmp->node.rb_right;
++ else if (cmp < 0 || len < tmp->len)
++ newp = &tmp->node.rb_left;
++ else
++ return 0;
++ }
++
++ p = ovl_cache_entry_new(name, len, ino, d_type);
++ if (p == NULL)
++ return -ENOMEM;
++
++ list_add_tail(&p->l_node, rdd->list);
++ rb_link_node(&p->node, parent, newp);
++ rb_insert_color(&p->node, rdd->root);
++
++ return 0;
++}
++
++static int ovl_fill_lower(void *buf, const char *name, int namelen,
++ loff_t offset, u64 ino, unsigned int d_type)
++{
++ struct ovl_readdir_data *rdd = buf;
++ struct ovl_cache_entry *p;
++
++ rdd->count++;
++ p = ovl_cache_entry_find(rdd->root, name, namelen);
++ if (p) {
++ list_move_tail(&p->l_node, rdd->middle);
++ } else {
++ p = ovl_cache_entry_new(name, namelen, ino, d_type);
++ if (p == NULL)
++ rdd->err = -ENOMEM;
++ else
++ list_add_tail(&p->l_node, rdd->middle);
++ }
++
++ return rdd->err;
++}
++
++static void ovl_cache_free(struct list_head *list)
++{
++ struct ovl_cache_entry *p;
++ struct ovl_cache_entry *n;
++
++ list_for_each_entry_safe(p, n, list, l_node)
++ kfree(p);
++
++ INIT_LIST_HEAD(list);
++}
++
++static int ovl_fill_upper(void *buf, const char *name, int namelen,
++ loff_t offset, u64 ino, unsigned int d_type)
++{
++ struct ovl_readdir_data *rdd = buf;
++
++ rdd->count++;
++ return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
++}
++
++static inline int ovl_dir_read(struct path *realpath,
++ struct ovl_readdir_data *rdd, filldir_t filler)
++{
++ struct file *realfile;
++ int err;
++
++ realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
++ if (IS_ERR(realfile))
++ return PTR_ERR(realfile);
++
++ do {
++ rdd->count = 0;
++ rdd->err = 0;
++ err = vfs_readdir(realfile, filler, rdd);
++ if (err >= 0)
++ err = rdd->err;
++ } while (!err && rdd->count);
++ fput(realfile);
++
++ return 0;
++}
++
++static void ovl_dir_reset(struct file *file)
++{
++ struct ovl_dir_file *od = file->private_data;
++ enum ovl_path_type type = ovl_path_type(file->f_path.dentry);
++
++ if (ovl_dentry_version_get(file->f_path.dentry) != od->cache_version) {
++ list_del_init(&od->cursor);
++ ovl_cache_free(&od->cache);
++ od->is_cached = false;
++ }
++ WARN_ON(!od->is_real && type != OVL_PATH_MERGE);
++ if (od->is_real && type == OVL_PATH_MERGE) {
++ fput(od->realfile);
++ od->realfile = NULL;
++ od->is_real = false;
++ }
++}
++
++static int ovl_dir_mark_whiteouts(struct ovl_readdir_data *rdd)
++{
++ struct ovl_cache_entry *p;
++ struct dentry *dentry;
++ const struct cred *old_cred;
++ struct cred *override_cred;
++
++ override_cred = prepare_creds();
++ if (!override_cred) {
++ ovl_cache_free(rdd->list);
++ return -ENOMEM;
++ }
++
++ /*
++ * CAP_SYS_ADMIN for getxattr
++ * CAP_DAC_OVERRIDE for lookup
++ */
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
++ old_cred = override_creds(override_cred);
++
++ mutex_lock(&rdd->dir->d_inode->i_mutex);
++ list_for_each_entry(p, rdd->list, l_node) {
++ if (p->type != DT_LNK)
++ continue;
++
++ dentry = lookup_one_len(p->name, rdd->dir, p->len);
++ if (IS_ERR(dentry))
++ continue;
++
++ p->is_whiteout = ovl_is_whiteout(dentry);
++ dput(dentry);
++ }
++ mutex_unlock(&rdd->dir->d_inode->i_mutex);
++
++ revert_creds(old_cred);
++ put_cred(override_cred);
++
++ return 0;
++}
++
++static inline int ovl_dir_read_merged(struct path *upperpath,
++ struct path *lowerpath,
++ struct ovl_readdir_data *rdd)
++{
++ int err;
++ struct rb_root root = RB_ROOT;
++ struct list_head middle;
++
++ rdd->root = &root;
++ if (upperpath->dentry) {
++ rdd->dir = upperpath->dentry;
++ err = ovl_dir_read(upperpath, rdd, ovl_fill_upper);
++ if (err)
++ goto out;
++
++ err = ovl_dir_mark_whiteouts(rdd);
++ if (err)
++ goto out;
++ }
++ /*
++ * Insert lowerpath entries before upperpath ones, this allows
++ * offsets to be reasonably constant
++ */
++ list_add(&middle, rdd->list);
++ rdd->middle = &middle;
++ err = ovl_dir_read(lowerpath, rdd, ovl_fill_lower);
++ list_del(&middle);
++out:
++ rdd->root = NULL;
++
++ return err;
++}
++
++static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
++{
++ struct list_head *l;
++ loff_t off;
++
++ l = od->cache.next;
++ for (off = 0; off < pos; off++) {
++ if (l == &od->cache)
++ break;
++ l = l->next;
++ }
++ list_move_tail(&od->cursor, l);
++}
++
++static int ovl_readdir(struct file *file, void *buf, filldir_t filler)
++{
++ struct ovl_dir_file *od = file->private_data;
++ int res;
++
++ if (!file->f_pos)
++ ovl_dir_reset(file);
++
++ if (od->is_real) {
++ res = vfs_readdir(od->realfile, filler, buf);
++ file->f_pos = od->realfile->f_pos;
++
++ return res;
++ }
++
++ if (!od->is_cached) {
++ struct path lowerpath;
++ struct path upperpath;
++ struct ovl_readdir_data rdd = { .list = &od->cache };
++
++ ovl_path_lower(file->f_path.dentry, &lowerpath);
++ ovl_path_upper(file->f_path.dentry, &upperpath);
++
++ res = ovl_dir_read_merged(&upperpath, &lowerpath, &rdd);
++ if (res) {
++ ovl_cache_free(rdd.list);
++ return res;
++ }
++
++ od->cache_version = ovl_dentry_version_get(file->f_path.dentry);
++ od->is_cached = true;
++
++ ovl_seek_cursor(od, file->f_pos);
++ }
++
++ while (od->cursor.next != &od->cache) {
++ int over;
++ loff_t off;
++ struct ovl_cache_entry *p;
++
++ p = list_entry(od->cursor.next, struct ovl_cache_entry, l_node);
++ off = file->f_pos;
++ if (!p->is_whiteout) {
++ over = filler(buf, p->name, p->len, off, p->ino,
++ p->type);
++ if (over)
++ break;
++ }
++ file->f_pos++;
++ list_move(&od->cursor, &p->l_node);
++ }
++
++ return 0;
++}
++
++static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
++{
++ loff_t res;
++ struct ovl_dir_file *od = file->private_data;
++
++ mutex_lock(&file->f_dentry->d_inode->i_mutex);
++ if (!file->f_pos)
++ ovl_dir_reset(file);
++
++ if (od->is_real) {
++ res = vfs_llseek(od->realfile, offset, origin);
++ file->f_pos = od->realfile->f_pos;
++ } else {
++ res = -EINVAL;
++
++ switch (origin) {
++ case SEEK_CUR:
++ offset += file->f_pos;
++ break;
++ case SEEK_SET:
++ break;
++ default:
++ goto out_unlock;
++ }
++ if (offset < 0)
++ goto out_unlock;
++
++ if (offset != file->f_pos) {
++ file->f_pos = offset;
++ if (od->is_cached)
++ ovl_seek_cursor(od, offset);
++ }
++ res = offset;
++ }
++out_unlock:
++ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
++
++ return res;
++}
++
++static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
++ int datasync)
++{
++ struct ovl_dir_file *od = file->private_data;
++
++ /* May need to reopen directory if it got copied up */
++ if (!od->realfile) {
++ struct path upperpath;
++
++ ovl_path_upper(file->f_path.dentry, &upperpath);
++ od->realfile = ovl_path_open(&upperpath, O_RDONLY);
++ if (IS_ERR(od->realfile))
++ return PTR_ERR(od->realfile);
++ }
++
++ return vfs_fsync_range(od->realfile, start, end, datasync);
++}
++
++static int ovl_dir_release(struct inode *inode, struct file *file)
++{
++ struct ovl_dir_file *od = file->private_data;
++
++ list_del(&od->cursor);
++ ovl_cache_free(&od->cache);
++ if (od->realfile)
++ fput(od->realfile);
++ kfree(od);
++
++ return 0;
++}
++
++static int ovl_dir_open(struct inode *inode, struct file *file)
++{
++ struct path realpath;
++ struct file *realfile;
++ struct ovl_dir_file *od;
++ enum ovl_path_type type;
++
++ od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
++ if (!od)
++ return -ENOMEM;
++
++ type = ovl_path_real(file->f_path.dentry, &realpath);
++ realfile = ovl_path_open(&realpath, file->f_flags);
++ if (IS_ERR(realfile)) {
++ kfree(od);
++ return PTR_ERR(realfile);
++ }
++ INIT_LIST_HEAD(&od->cache);
++ INIT_LIST_HEAD(&od->cursor);
++ od->is_cached = false;
++ od->realfile = realfile;
++ od->is_real = (type != OVL_PATH_MERGE);
++ file->private_data = od;
++
++ return 0;
++}
++
++const struct file_operations ovl_dir_operations = {
++ .read = generic_read_dir,
++ .open = ovl_dir_open,
++ .readdir = ovl_readdir,
++ .llseek = ovl_dir_llseek,
++ .fsync = ovl_dir_fsync,
++ .release = ovl_dir_release,
++};
++
++static int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
++{
++ int err;
++ struct path lowerpath;
++ struct path upperpath;
++ struct ovl_cache_entry *p;
++ struct ovl_readdir_data rdd = { .list = list };
++
++ ovl_path_upper(dentry, &upperpath);
++ ovl_path_lower(dentry, &lowerpath);
++
++ err = ovl_dir_read_merged(&upperpath, &lowerpath, &rdd);
++ if (err)
++ return err;
++
++ err = 0;
++
++ list_for_each_entry(p, list, l_node) {
++ if (p->is_whiteout)
++ continue;
++
++ if (p->name[0] == '.') {
++ if (p->len == 1)
++ continue;
++ if (p->len == 2 && p->name[1] == '.')
++ continue;
++ }
++ err = -ENOTEMPTY;
++ break;
++ }
++
++ return err;
++}
++
++static int ovl_remove_whiteouts(struct dentry *dir, struct list_head *list)
++{
++ struct path upperpath;
++ struct dentry *upperdir;
++ struct ovl_cache_entry *p;
++ const struct cred *old_cred;
++ struct cred *override_cred;
++ int err;
++
++ ovl_path_upper(dir, &upperpath);
++ upperdir = upperpath.dentry;
++
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++
++ /*
++ * CAP_DAC_OVERRIDE for lookup and unlink
++ * CAP_SYS_ADMIN for setxattr of "trusted" namespace
++ * CAP_FOWNER for unlink in sticky directory
++ */
++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ cap_raise(override_cred->cap_effective, CAP_FOWNER);
++ old_cred = override_creds(override_cred);
++
++ err = vfs_setxattr(upperdir, ovl_opaque_xattr, "y", 1, 0);
++ if (err)
++ goto out_revert_creds;
++
++ mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT);
++ list_for_each_entry(p, list, l_node) {
++ struct dentry *dentry;
++ int ret;
++
++ if (!p->is_whiteout)
++ continue;
++
++ dentry = lookup_one_len(p->name, upperdir, p->len);
++ if (IS_ERR(dentry)) {
++ printk(KERN_WARNING
++ "overlayfs: failed to lookup whiteout %.*s: %li\n",
++ p->len, p->name, PTR_ERR(dentry));
++ continue;
++ }
++ ret = vfs_unlink(upperdir->d_inode, dentry);
++ dput(dentry);
++ if (ret)
++ printk(KERN_WARNING
++ "overlayfs: failed to unlink whiteout %.*s: %i\n",
++ p->len, p->name, ret);
++ }
++ mutex_unlock(&upperdir->d_inode->i_mutex);
++
++out_revert_creds:
++ revert_creds(old_cred);
++ put_cred(override_cred);
++
++ return err;
++}
++
++int ovl_check_empty_and_clear(struct dentry *dentry, enum ovl_path_type type)
++{
++ int err;
++ LIST_HEAD(list);
++
++ err = ovl_check_empty_dir(dentry, &list);
++ if (!err && type == OVL_PATH_MERGE)
++ err = ovl_remove_whiteouts(dentry, &list);
++
++ ovl_cache_free(&list);
++
++ return err;
++}
+--- /dev/null
++++ b/fs/overlayfs/super.c
+@@ -0,0 +1,664 @@
++/*
++ *
++ * Copyright (C) 2011 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/xattr.h>
++#include <linux/security.h>
++#include <linux/mount.h>
++#include <linux/slab.h>
++#include <linux/parser.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include "overlayfs.h"
++
++MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
++MODULE_DESCRIPTION("Overlay filesystem");
++MODULE_LICENSE("GPL");
++
++struct ovl_config {
++ char *lowerdir;
++ char *upperdir;
++};
++
++/* private information held for overlayfs's superblock */
++struct ovl_fs {
++ struct vfsmount *upper_mnt;
++ struct vfsmount *lower_mnt;
++ /* pathnames of lower and upper dirs, for show_options */
++ struct ovl_config config;
++};
++
++/* private information held for every overlayfs dentry */
++struct ovl_entry {
++ /*
++ * Keep "double reference" on upper dentries, so that
++ * d_delete() doesn't think it's OK to reset d_inode to NULL.
++ */
++ struct dentry *__upperdentry;
++ struct dentry *lowerdentry;
++ union {
++ struct {
++ u64 version;
++ bool opaque;
++ };
++ struct rcu_head rcu;
++ };
++};
++
++const char *ovl_whiteout_xattr = "trusted.overlay.whiteout";
++const char *ovl_opaque_xattr = "trusted.overlay.opaque";
++
++
++enum ovl_path_type ovl_path_type(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ if (oe->__upperdentry) {
++ if (oe->lowerdentry && S_ISDIR(dentry->d_inode->i_mode))
++ return OVL_PATH_MERGE;
++ else
++ return OVL_PATH_UPPER;
++ } else {
++ return OVL_PATH_LOWER;
++ }
++}
++
++static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
++{
++ struct dentry *upperdentry = ACCESS_ONCE(oe->__upperdentry);
++ smp_read_barrier_depends();
++ return upperdentry;
++}
++
++void ovl_path_upper(struct dentry *dentry, struct path *path)
++{
++ struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ path->mnt = ofs->upper_mnt;
++ path->dentry = ovl_upperdentry_dereference(oe);
++}
++
++void ovl_path_lower(struct dentry *dentry, struct path *path)
++{
++ struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ path->mnt = ofs->lower_mnt;
++ path->dentry = oe->lowerdentry;
++}
++
++enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
++{
++
++ enum ovl_path_type type = ovl_path_type(dentry);
++
++ if (type == OVL_PATH_LOWER)
++ ovl_path_lower(dentry, path);
++ else
++ ovl_path_upper(dentry, path);
++
++ return type;
++}
++
++struct dentry *ovl_dentry_upper(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ return ovl_upperdentry_dereference(oe);
++}
++
++struct dentry *ovl_dentry_lower(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ return oe->lowerdentry;
++}
++
++struct dentry *ovl_dentry_real(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++ struct dentry *realdentry;
++
++ realdentry = ovl_upperdentry_dereference(oe);
++ if (!realdentry)
++ realdentry = oe->lowerdentry;
++
++ return realdentry;
++}
++
++struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper)
++{
++ struct dentry *realdentry;
++
++ realdentry = ovl_upperdentry_dereference(oe);
++ if (realdentry) {
++ *is_upper = true;
++ } else {
++ realdentry = oe->lowerdentry;
++ *is_upper = false;
++ }
++ return realdentry;
++}
++
++bool ovl_dentry_is_opaque(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++ return oe->opaque;
++}
++
++void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++ oe->opaque = opaque;
++}
++
++void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ WARN_ON(!mutex_is_locked(&upperdentry->d_parent->d_inode->i_mutex));
++ WARN_ON(oe->__upperdentry);
++ BUG_ON(!upperdentry->d_inode);
++ smp_wmb();
++ oe->__upperdentry = dget(upperdentry);
++}
++
++void ovl_dentry_version_inc(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
++ oe->version++;
++}
++
++u64 ovl_dentry_version_get(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
++ return oe->version;
++}
++
++bool ovl_is_whiteout(struct dentry *dentry)
++{
++ int res;
++ char val;
++
++ if (!dentry)
++ return false;
++ if (!dentry->d_inode)
++ return false;
++ if (!S_ISLNK(dentry->d_inode->i_mode))
++ return false;
++
++ res = vfs_getxattr(dentry, ovl_whiteout_xattr, &val, 1);
++ if (res == 1 && val == 'y')
++ return true;
++
++ return false;
++}
++
++static bool ovl_is_opaquedir(struct dentry *dentry)
++{
++ int res;
++ char val;
++
++ if (!S_ISDIR(dentry->d_inode->i_mode))
++ return false;
++
++ res = vfs_getxattr(dentry, ovl_opaque_xattr, &val, 1);
++ if (res == 1 && val == 'y')
++ return true;
++
++ return false;
++}
++
++static void ovl_entry_free(struct rcu_head *head)
++{
++ struct ovl_entry *oe = container_of(head, struct ovl_entry, rcu);
++ kfree(oe);
++}
++
++static void ovl_dentry_release(struct dentry *dentry)
++{
++ struct ovl_entry *oe = dentry->d_fsdata;
++
++ if (oe) {
++ dput(oe->__upperdentry);
++ dput(oe->__upperdentry);
++ dput(oe->lowerdentry);
++ call_rcu(&oe->rcu, ovl_entry_free);
++ }
++}
++
++const struct dentry_operations ovl_dentry_operations = {
++ .d_release = ovl_dentry_release,
++};
++
++static struct ovl_entry *ovl_alloc_entry(void)
++{
++ return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL);
++}
++
++static inline struct dentry *ovl_lookup_real(struct dentry *dir,
++ struct qstr *name)
++{
++ struct dentry *dentry;
++
++ mutex_lock(&dir->d_inode->i_mutex);
++ dentry = lookup_one_len(name->name, dir, name->len);
++ mutex_unlock(&dir->d_inode->i_mutex);
++
++ if (IS_ERR(dentry)) {
++ if (PTR_ERR(dentry) == -ENOENT)
++ dentry = NULL;
++ } else if (!dentry->d_inode) {
++ dput(dentry);
++ dentry = NULL;
++ }
++ return dentry;
++}
++
++static int ovl_do_lookup(struct dentry *dentry)
++{
++ struct ovl_entry *oe;
++ struct dentry *upperdir;
++ struct dentry *lowerdir;
++ struct dentry *upperdentry = NULL;
++ struct dentry *lowerdentry = NULL;
++ struct inode *inode = NULL;
++ int err;
++
++ err = -ENOMEM;
++ oe = ovl_alloc_entry();
++ if (!oe)
++ goto out;
++
++ upperdir = ovl_dentry_upper(dentry->d_parent);
++ lowerdir = ovl_dentry_lower(dentry->d_parent);
++
++ if (upperdir) {
++ upperdentry = ovl_lookup_real(upperdir, &dentry->d_name);
++ err = PTR_ERR(upperdentry);
++ if (IS_ERR(upperdentry))
++ goto out_put_dir;
++
++ if (lowerdir && upperdentry &&
++ (S_ISLNK(upperdentry->d_inode->i_mode) ||
++ S_ISDIR(upperdentry->d_inode->i_mode))) {
++ const struct cred *old_cred;
++ struct cred *override_cred;
++
++ err = -ENOMEM;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ goto out_dput_upper;
++
++ /* CAP_SYS_ADMIN needed for getxattr */
++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
++ old_cred = override_creds(override_cred);
++
++ if (ovl_is_opaquedir(upperdentry)) {
++ oe->opaque = true;
++ } else if (ovl_is_whiteout(upperdentry)) {
++ dput(upperdentry);
++ upperdentry = NULL;
++ oe->opaque = true;
++ }
++ revert_creds(old_cred);
++ put_cred(override_cred);
++ }
++ }
++ if (lowerdir && !oe->opaque) {
++ lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name);
++ err = PTR_ERR(lowerdentry);
++ if (IS_ERR(lowerdentry))
++ goto out_dput_upper;
++ }
++
++ if (lowerdentry && upperdentry &&
++ (!S_ISDIR(upperdentry->d_inode->i_mode) ||
++ !S_ISDIR(lowerdentry->d_inode->i_mode))) {
++ dput(lowerdentry);
++ lowerdentry = NULL;
++ oe->opaque = true;
++ }
++
++ if (lowerdentry || upperdentry) {
++ struct dentry *realdentry;
++
++ realdentry = upperdentry ? upperdentry : lowerdentry;
++ err = -ENOMEM;
++ inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode,
++ oe);
++ if (!inode)
++ goto out_dput;
++ }
++
++ if (upperdentry)
++ oe->__upperdentry = dget(upperdentry);
++
++ if (lowerdentry)
++ oe->lowerdentry = lowerdentry;
++
++ dentry->d_fsdata = oe;
++ dentry->d_op = &ovl_dentry_operations;
++ d_add(dentry, inode);
++
++ return 0;
++
++out_dput:
++ dput(lowerdentry);
++out_dput_upper:
++ dput(upperdentry);
++out_put_dir:
++ kfree(oe);
++out:
++ return err;
++}
++
++struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
++ struct nameidata *nd)
++{
++ int err = ovl_do_lookup(dentry);
++
++ if (err)
++ return ERR_PTR(err);
++
++ return NULL;
++}
++
++struct file *ovl_path_open(struct path *path, int flags)
++{
++ path_get(path);
++ return dentry_open(path->dentry, path->mnt, flags, current_cred());
++}
++
++static void ovl_put_super(struct super_block *sb)
++{
++ struct ovl_fs *ufs = sb->s_fs_info;
++
++ if (!(sb->s_flags & MS_RDONLY))
++ mnt_drop_write(ufs->upper_mnt);
++
++ mntput(ufs->upper_mnt);
++ mntput(ufs->lower_mnt);
++
++ kfree(ufs->config.lowerdir);
++ kfree(ufs->config.upperdir);
++ kfree(ufs);
++}
++
++static int ovl_remount_fs(struct super_block *sb, int *flagsp, char *data)
++{
++ int flags = *flagsp;
++ struct ovl_fs *ufs = sb->s_fs_info;
++
++ /* When remounting rw or ro, we need to adjust the write access to the
++ * upper fs.
++ */
++ if (((flags ^ sb->s_flags) & MS_RDONLY) == 0)
++ /* No change to readonly status */
++ return 0;
++
++ if (flags & MS_RDONLY) {
++ mnt_drop_write(ufs->upper_mnt);
++ return 0;
++ } else
++ return mnt_want_write(ufs->upper_mnt);
++}
++
++/**
++ * ovl_statfs
++ * @sb: The overlayfs super block
++ * @buf: The struct kstatfs to fill in with stats
++ *
++ * Get the filesystem statistics. As writes always target the upper layer
++ * filesystem pass the statfs to the same filesystem.
++ */
++static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++ struct dentry *root_dentry = dentry->d_sb->s_root;
++ struct path path;
++ ovl_path_upper(root_dentry, &path);
++
++ if (!path.dentry->d_sb->s_op->statfs)
++ return -ENOSYS;
++ return path.dentry->d_sb->s_op->statfs(path.dentry, buf);
++}
++
++/**
++ * ovl_show_options
++ *
++ * Prints the mount options for a given superblock.
++ * Returns zero; does not fail.
++ */
++static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
++{
++ struct super_block *sb = dentry->d_sb;
++ struct ovl_fs *ufs = sb->s_fs_info;
++
++ seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir);
++ seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
++ return 0;
++}
++
++static const struct super_operations ovl_super_operations = {
++ .put_super = ovl_put_super,
++ .remount_fs = ovl_remount_fs,
++ .statfs = ovl_statfs,
++ .show_options = ovl_show_options,
++};
++
++enum {
++ Opt_lowerdir,
++ Opt_upperdir,
++ Opt_err,
++};
++
++static const match_table_t ovl_tokens = {
++ {Opt_lowerdir, "lowerdir=%s"},
++ {Opt_upperdir, "upperdir=%s"},
++ {Opt_err, NULL}
++};
++
++static int ovl_parse_opt(char *opt, struct ovl_config *config)
++{
++ char *p;
++
++ config->upperdir = NULL;
++ config->lowerdir = NULL;
++
++ while ((p = strsep(&opt, ",")) != NULL) {
++ int token;
++ substring_t args[MAX_OPT_ARGS];
++
++ if (!*p)
++ continue;
++
++ token = match_token(p, ovl_tokens, args);
++ switch (token) {
++ case Opt_upperdir:
++ kfree(config->upperdir);
++ config->upperdir = match_strdup(&args[0]);
++ if (!config->upperdir)
++ return -ENOMEM;
++ break;
++
++ case Opt_lowerdir:
++ kfree(config->lowerdir);
++ config->lowerdir = match_strdup(&args[0]);
++ if (!config->lowerdir)
++ return -ENOMEM;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++static int ovl_fill_super(struct super_block *sb, void *data, int silent)
++{
++ struct path lowerpath;
++ struct path upperpath;
++ struct inode *root_inode;
++ struct dentry *root_dentry;
++ struct ovl_entry *oe;
++ struct ovl_fs *ufs;
++ int err;
++
++ err = -ENOMEM;
++ ufs = kmalloc(sizeof(struct ovl_fs), GFP_KERNEL);
++ if (!ufs)
++ goto out;
++
++ err = ovl_parse_opt((char *) data, &ufs->config);
++ if (err)
++ goto out_free_ufs;
++
++ err = -EINVAL;
++ if (!ufs->config.upperdir || !ufs->config.lowerdir) {
++ printk(KERN_ERR "overlayfs: missing upperdir or lowerdir\n");
++ goto out_free_config;
++ }
++
++ oe = ovl_alloc_entry();
++ if (oe == NULL)
++ goto out_free_config;
++
++ root_inode = ovl_new_inode(sb, S_IFDIR, oe);
++ if (!root_inode)
++ goto out_free_oe;
++
++ err = kern_path(ufs->config.upperdir, LOOKUP_FOLLOW, &upperpath);
++ if (err)
++ goto out_put_root;
++
++ err = kern_path(ufs->config.lowerdir, LOOKUP_FOLLOW, &lowerpath);
++ if (err)
++ goto out_put_upperpath;
++
++ err = -ENOTDIR;
++ if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) ||
++ !S_ISDIR(lowerpath.dentry->d_inode->i_mode))
++ goto out_put_lowerpath;
++
++ sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth,
++ lowerpath.mnt->mnt_sb->s_stack_depth) + 1;
++
++ err = -EINVAL;
++ if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
++ printk(KERN_ERR "overlayfs: maximum fs stacking depth exceeded\n");
++ goto out_put_lowerpath;
++ }
++
++
++ ufs->upper_mnt = clone_private_mount(&upperpath);
++ err = PTR_ERR(ufs->upper_mnt);
++ if (IS_ERR(ufs->upper_mnt)) {
++ printk(KERN_ERR "overlayfs: failed to clone upperpath\n");
++ goto out_put_lowerpath;
++ }
++
++ ufs->lower_mnt = clone_private_mount(&lowerpath);
++ err = PTR_ERR(ufs->lower_mnt);
++ if (IS_ERR(ufs->lower_mnt)) {
++ printk(KERN_ERR "overlayfs: failed to clone lowerpath\n");
++ goto out_put_upper_mnt;
++ }
++
++ /*
++ * Make lower_mnt R/O. That way fchmod/fchown on lower file
++ * will fail instead of modifying lower fs.
++ */
++ ufs->lower_mnt->mnt_flags |= MNT_READONLY;
++
++ /* If the upper fs is r/o, we mark overlayfs r/o too */
++ if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)
++ sb->s_flags |= MS_RDONLY;
++
++ if (!(sb->s_flags & MS_RDONLY)) {
++ err = mnt_want_write(ufs->upper_mnt);
++ if (err)
++ goto out_put_lower_mnt;
++ }
++
++ err = -ENOMEM;
++ root_dentry = d_alloc_root(root_inode);
++ if (!root_dentry)
++ goto out_drop_write;
++
++ mntput(upperpath.mnt);
++ mntput(lowerpath.mnt);
++
++ oe->__upperdentry = dget(upperpath.dentry);
++ oe->lowerdentry = lowerpath.dentry;
++
++ root_dentry->d_fsdata = oe;
++ root_dentry->d_op = &ovl_dentry_operations;
++
++ sb->s_op = &ovl_super_operations;
++ sb->s_root = root_dentry;
++ sb->s_fs_info = ufs;
++
++ return 0;
++
++out_drop_write:
++ if (!(sb->s_flags & MS_RDONLY))
++ mnt_drop_write(ufs->upper_mnt);
++out_put_lower_mnt:
++ mntput(ufs->lower_mnt);
++out_put_upper_mnt:
++ mntput(ufs->upper_mnt);
++out_put_lowerpath:
++ path_put(&lowerpath);
++out_put_upperpath:
++ path_put(&upperpath);
++out_put_root:
++ iput(root_inode);
++out_free_oe:
++ kfree(oe);
++out_free_config:
++ kfree(ufs->config.lowerdir);
++ kfree(ufs->config.upperdir);
++out_free_ufs:
++ kfree(ufs);
++out:
++ return err;
++}
++
++static struct dentry *ovl_mount(struct file_system_type *fs_type, int flags,
++ const char *dev_name, void *raw_data)
++{
++ return mount_nodev(fs_type, flags, raw_data, ovl_fill_super);
++}
++
++static struct file_system_type ovl_fs_type = {
++ .owner = THIS_MODULE,
++ .name = "overlayfs",
++ .mount = ovl_mount,
++ .kill_sb = kill_anon_super,
++};
++
++static int __init ovl_init(void)
++{
++ return register_filesystem(&ovl_fs_type);
++}
++
++static void __exit ovl_exit(void)
++{
++ unregister_filesystem(&ovl_fs_type);
++}
++
++module_init(ovl_init);
++module_exit(ovl_exit);
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -1302,6 +1302,7 @@ long do_splice_direct(struct file *in, l
+
+ return ret;
+ }
++EXPORT_SYMBOL(do_splice_direct);
+
+ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
+ struct pipe_inode_info *opipe,
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -484,6 +484,12 @@ struct iattr {
+ */
+ #include <linux/quota.h>
+
++/*
++ * Maximum number of layers of fs stack. Needs to be limited to
++ * prevent kernel stack overflow
++ */
++#define FILESYSTEM_MAX_STACK_DEPTH 2
++
+ /**
+ * enum positive_aop_returns - aop return codes with specific semantics
+ *
+@@ -1501,6 +1507,11 @@ struct super_block {
+
+ /* Being remounted read-only */
+ int s_readonly_remount;
++
++ /*
++ * Indicates how deep in a filesystem stack this SB is
++ */
++ int s_stack_depth;
+ };
+
+ /* superblock cache pruning functions */
+@@ -1658,6 +1669,8 @@ struct inode_operations {
+ void (*truncate_range)(struct inode *, loff_t, loff_t);
+ int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
+ u64 len);
++ struct file *(*open) (struct dentry *, struct file *,
++ const struct cred *);
+ } ____cacheline_aligned;
+
+ struct seq_file;
+@@ -2028,6 +2041,7 @@ extern long do_sys_open(int dfd, const c
+ extern struct file *filp_open(const char *, int, umode_t);
+ extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+ const char *, int);
++extern struct file *vfs_open(struct path *, struct file *, const struct cred *);
+ extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
+ const struct cred *);
+ extern int filp_close(struct file *, fl_owner_t id);
+--- a/include/linux/mount.h
++++ b/include/linux/mount.h
+@@ -66,6 +66,9 @@ extern void mnt_pin(struct vfsmount *mnt
+ extern void mnt_unpin(struct vfsmount *mnt);
+ extern int __mnt_is_readonly(struct vfsmount *mnt);
+
++struct path;
++extern struct vfsmount *clone_private_mount(struct path *path);
++
+ struct file_system_type;
+ extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
+ int flags, const char *name,
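
As a rough illustration, not part of the patch itself: the super.c hunk above registers a filesystem type named "overlayfs" that understands only the lowerdir= and upperdir= options (see ovl_fs_type and ovl_parse_opt); unlike later mainline overlayfs there is no workdir= option yet. A minimal userspace sketch of requesting such a mount, with hypothetical /mnt/lower, /mnt/upper and /mnt/merged paths:

/* Illustrative sketch only: the type name and the option keywords come from
 * the patch above; the directory paths are made up. Needs root to succeed. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("overlayfs", "/mnt/merged", "overlayfs", 0,
	          "lowerdir=/mnt/lower,upperdir=/mnt/upper") != 0) {
		perror("mount overlayfs");
		return 1;
	}
	return 0;
}
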
diff --git a/target/linux/generic/patches-3.3/102-ehci_hcd_ignore_oc.patch b/target/linux/generic/patches-3.3/102-ehci_hcd_ignore_oc.patch
new file mode 100644
index 000000000..400d2ed8e
--- /dev/null
+++ b/target/linux/generic/patches-3.3/102-ehci_hcd_ignore_oc.patch
@@ -0,0 +1,41 @@
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -795,7 +795,7 @@ static int ehci_run (struct usb_hcd *hcd
+ "USB %x.%x started, EHCI %x.%02x%s\n",
+ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
+ temp >> 8, temp & 0xff,
+- ignore_oc ? ", overcurrent ignored" : "");
++ (ignore_oc || ehci->ignore_oc) ? ", overcurrent ignored" : "");
+
+ ehci_writel(ehci, INTR_MASK,
+ &ehci->regs->intr_enable); /* Turn On Interrupts */
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -578,7 +578,7 @@ ehci_hub_status_data (struct usb_hcd *hc
+ * always set, seem to clear PORT_OCC and PORT_CSC when writing to
+ * PORT_POWER; that's surprising, but maybe within-spec.
+ */
+- if (!ignore_oc)
++ if (!ignore_oc && !ehci->ignore_oc)
+ mask = PORT_CSC | PORT_PEC | PORT_OCC;
+ else
+ mask = PORT_CSC | PORT_PEC;
+@@ -803,7 +803,7 @@ static int ehci_hub_control (
+ if (temp & PORT_PEC)
+ status |= USB_PORT_STAT_C_ENABLE << 16;
+
+- if ((temp & PORT_OCC) && !ignore_oc){
++ if ((temp & PORT_OCC) && (!ignore_oc && !ehci->ignore_oc)){
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+
+ /*
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -147,6 +147,7 @@ struct ehci_hcd { /* one per controlle
+ unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/
+ unsigned has_synopsys_hc_bug:1; /* Synopsys HC */
+ unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
++ unsigned ignore_oc:1;
+
+ /* required for usb32 quirk */
+ #define OHCI_CTRL_HCFS (3 << 6)
diff --git a/target/linux/generic/patches-3.3/110-fix_mtd_include.patch b/target/linux/generic/patches-3.3/110-fix_mtd_include.patch
new file mode 100644
index 000000000..c63dbc059
--- /dev/null
+++ b/target/linux/generic/patches-3.3/110-fix_mtd_include.patch
@@ -0,0 +1,10 @@
+--- a/include/linux/mtd/physmap.h
++++ b/include/linux/mtd/physmap.h
+@@ -17,6 +17,7 @@
+
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
++#include <linux/platform_device.h>
+
+ struct map_info;
+ struct platform_device;
diff --git a/target/linux/generic/patches-3.3/130-pppoatm-queue-depth.patch b/target/linux/generic/patches-3.3/130-pppoatm-queue-depth.patch
new file mode 100644
index 000000000..247a31229
--- /dev/null
+++ b/target/linux/generic/patches-3.3/130-pppoatm-queue-depth.patch
@@ -0,0 +1,188 @@
+From 9d02daf754238adac48fa075ee79e7edd3d79ed3 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw2@infradead.org>
+Date: Sun, 8 Apr 2012 09:55:43 +0000
+Subject: [PATCH] pppoatm: Fix excessive queue bloat
+
+We discovered that PPPoATM has an excessively deep transmit queue. A
+queue the size of the default socket send buffer (wmem_default) is
+maintained between the PPP generic core and the ATM device.
+
+Fix it to queue a maximum of *two* packets. The one the ATM device is
+currently working on, and one more for the ATM driver to process
+immediately in its TX done interrupt handler. The PPP core is designed
+to feed packets to the channel with minimal latency, so that really
+ought to be enough to keep the ATM device busy.
+
+While we're at it, fix the fact that we were triggering the wakeup
+tasklet on *every* pppoatm_pop() call. The comment saying "this is
+inefficient, but doing it right is too hard" turns out to be overly
+pessimistic... I think :)
+
+On machines like the Traverse Geos, with a slow Geode CPU and two
+high-speed ADSL2+ interfaces, there were reports of extremely high CPU
+usage which could partly be attributed to the extra wakeups.
+
+(The wakeup handling could actually be made a whole lot easier if we
+ stop checking sk->sk_sndbuf altogether. Given that we now only queue
+ *two* packets ever, one wonders what the point is. As it is, you could
+ already deadlock the thing by setting the sk_sndbuf to a value lower
+ than the MTU of the device, and it'd just block for ever.)
+
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ net/atm/pppoatm.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++-----
+ 1 files changed, 85 insertions(+), 10 deletions(-)
+
+--- a/net/atm/pppoatm.c
++++ b/net/atm/pppoatm.c
+@@ -62,12 +62,25 @@ struct pppoatm_vcc {
+ void (*old_pop)(struct atm_vcc *, struct sk_buff *);
+ /* keep old push/pop for detaching */
+ enum pppoatm_encaps encaps;
++ atomic_t inflight;
++ unsigned long blocked;
+ int flags; /* SC_COMP_PROT - compress protocol */
+ struct ppp_channel chan; /* interface to generic ppp layer */
+ struct tasklet_struct wakeup_tasklet;
+ };
+
+ /*
++ * We want to allow two packets in the queue. The one that's currently in
++ * flight, and *one* queued up ready for the ATM device to send immediately
++ * from its TX done IRQ. We want to be able to use atomic_inc_not_zero(), so
++ * inflight == -2 represents an empty queue, -1 one packet, and zero means
++ * there are two packets in the queue.
++ */
++#define NONE_INFLIGHT -2
++
++#define BLOCKED 0
++
++/*
+ * Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol
+ * ID (0xC021) used in autodetection
+ */
+@@ -102,16 +115,30 @@ static void pppoatm_wakeup_sender(unsign
+ static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb)
+ {
+ struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
++
+ pvcc->old_pop(atmvcc, skb);
++ atomic_dec(&pvcc->inflight);
++
+ /*
+- * We don't really always want to do this since it's
+- * really inefficient - it would be much better if we could
+- * test if we had actually throttled the generic layer.
+- * Unfortunately then there would be a nasty SMP race where
+- * we could clear that flag just as we refuse another packet.
+- * For now we do the safe thing.
++ * We always used to run the wakeup tasklet unconditionally here, for
++ * fear of race conditions where we clear the BLOCKED flag just as we
++ * refuse another packet in pppoatm_send(). This was quite inefficient.
++ *
++ * In fact it's OK. The PPP core will only ever call pppoatm_send()
++ * while holding the channel->downl lock. And ppp_output_wakeup() as
++ * called by the tasklet will *also* grab that lock. So even if another
++ * CPU is in pppoatm_send() right now, the tasklet isn't going to race
++ * with it. The wakeup *will* happen after the other CPU is safely out
++ * of pppoatm_send() again.
++ *
++ * So if the CPU in pppoatm_send() has already set the BLOCKED bit and
++ * it's about to return, that's fine. We trigger a wakeup which will
++ * happen later. And if the CPU in pppoatm_send() *hasn't* set the
++ * BLOCKED bit yet, that's fine too because of the double check in
++ * pppoatm_may_send() which is commented there.
+ */
+- tasklet_schedule(&pvcc->wakeup_tasklet);
++ if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
++ tasklet_schedule(&pvcc->wakeup_tasklet);
+ }
+
+ /*
+@@ -184,6 +211,51 @@ error:
+ ppp_input_error(&pvcc->chan, 0);
+ }
+
++static inline int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
++{
++ /*
++ * It's not clear that we need to bother with using atm_may_send()
++ * to check we don't exceed sk->sk_sndbuf. If userspace sets a
++ * value of sk_sndbuf which is lower than the MTU, we're going to
++ * block for ever. But the code always did that before we introduced
++ * the packet count limit, so...
++ */
++ if (atm_may_send(pvcc->atmvcc, size) &&
++ atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT))
++ return 1;
++
++ /*
++ * We use test_and_set_bit() rather than set_bit() here because
++ * we need to ensure there's a memory barrier after it. The bit
++ * *must* be set before we do the atomic_inc() on pvcc->inflight.
++ * There's no smp_mb__after_set_bit(), so it's this or abuse
++ * smp_mb__after_clear_bit().
++ */
++ test_and_set_bit(BLOCKED, &pvcc->blocked);
++
++ /*
++ * We may have raced with pppoatm_pop(). If it ran for the
++ * last packet in the queue, *just* before we set the BLOCKED
++ * bit, then it might never run again and the channel could
++ * remain permanently blocked. Cope with that race by checking
++ * *again*. If it did run in that window, we'll have space on
++ * the queue now and can return success. It's harmless to leave
++ * the BLOCKED flag set, since it's only used as a trigger to
++ * run the wakeup tasklet. Another wakeup will never hurt.
++ * If pppoatm_pop() is running but hasn't got as far as making
++ * space on the queue yet, then it hasn't checked the BLOCKED
++ * flag yet either, so we're safe in that case too. It'll issue
++ * an "immediate" wakeup... where "immediate" actually involves
++ * taking the PPP channel's ->downl lock, which is held by the
++ * code path that calls pppoatm_send(), and is thus going to
++ * wait for us to finish.
++ */
++ if (atm_may_send(pvcc->atmvcc, size) &&
++ atomic_inc_not_zero(&pvcc->inflight))
++ return 1;
++
++ return 0;
++}
+ /*
+ * Called by the ppp_generic.c to send a packet - returns true if packet
+ * was accepted. If we return false, then it's our job to call
+@@ -207,7 +279,7 @@ static int pppoatm_send(struct ppp_chann
+ struct sk_buff *n;
+ n = skb_realloc_headroom(skb, LLC_LEN);
+ if (n != NULL &&
+- !atm_may_send(pvcc->atmvcc, n->truesize)) {
++ !pppoatm_may_send(pvcc, n->truesize)) {
+ kfree_skb(n);
+ goto nospace;
+ }
+@@ -215,12 +287,12 @@ static int pppoatm_send(struct ppp_chann
+ skb = n;
+ if (skb == NULL)
+ return DROP_PACKET;
+- } else if (!atm_may_send(pvcc->atmvcc, skb->truesize))
++ } else if (!pppoatm_may_send(pvcc, skb->truesize))
+ goto nospace;
+ memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
+ break;
+ case e_vc:
+- if (!atm_may_send(pvcc->atmvcc, skb->truesize))
++ if (!pppoatm_may_send(pvcc, skb->truesize))
+ goto nospace;
+ break;
+ case e_autodetect:
+@@ -285,6 +357,9 @@ static int pppoatm_assign_vcc(struct atm
+ if (pvcc == NULL)
+ return -ENOMEM;
+ pvcc->atmvcc = atmvcc;
++
++ /* Maximum is zero, so that we can use atomic_inc_not_zero() */
++ atomic_set(&pvcc->inflight, NONE_INFLIGHT);
+ pvcc->old_push = atmvcc->push;
+ pvcc->old_pop = atmvcc->pop;
+ pvcc->encaps = (enum pppoatm_encaps) be.encaps;
diff --git a/target/linux/generic/patches-3.3/140-ixp4xx_hss_module_h_include.patch b/target/linux/generic/patches-3.3/140-ixp4xx_hss_module_h_include.patch
new file mode 100644
index 000000000..94acf3780
--- /dev/null
+++ b/target/linux/generic/patches-3.3/140-ixp4xx_hss_module_h_include.patch
@@ -0,0 +1,39 @@
+From 32e857cd1fbb006f56a99a2eab998173b1576533 Mon Sep 17 00:00:00 2001
+From: Florian Fainelli <florian@openwrt.org>
+Date: Mon, 10 Sep 2012 10:18:57 +0200
+Subject: [PATCH net] ixp4xx_hss: fix build failure after logging conversion
+
+Commit c75bb2c6f0cf455c23e60f14d780e841dd47f801 (ixp4xx_hss: Update to
+current logging forms) converted the ixp4xx_hss module to use the current
+logging macros, but forgot to include linux/module.h, leading to the
+following build failures:
+
+ CC [M] drivers/net/wan/ixp4xx_hss.o
+ drivers/net/wan/ixp4xx_hss.c:1412:20: error: expected ';', ',' or ')'
+ before string constant
+ drivers/net/wan/ixp4xx_hss.c:1413:25: error: expected ';', ',' or ')'
+ before string constant
+ drivers/net/wan/ixp4xx_hss.c:1414:21: error: expected ';', ',' or ')'
+ before string constant
+ drivers/net/wan/ixp4xx_hss.c:1415:19: error: expected ';', ',' or ')'
+ before string constant
+ make[8]: *** [drivers/net/wan/ixp4xx_hss.o] Error 1
+
+CC: stable@vger.kernel.org
+Signed-off-by: Florian Fainelli <florian@openwrt.org>
+---
+[stable: 3.1+]
+
+ drivers/net/wan/ixp4xx_hss.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/wan/ixp4xx_hss.c
++++ b/drivers/net/wan/ixp4xx_hss.c
+@@ -10,6 +10,7 @@
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
++#include <linux/module.h>
+ #include <linux/bitops.h>
+ #include <linux/cdev.h>
+ #include <linux/dma-mapping.h>
diff --git a/target/linux/generic/patches-3.3/200-fix_localversion.patch b/target/linux/generic/patches-3.3/200-fix_localversion.patch
new file mode 100644
index 000000000..0d1bae8d4
--- /dev/null
+++ b/target/linux/generic/patches-3.3/200-fix_localversion.patch
@@ -0,0 +1,11 @@
+--- a/scripts/setlocalversion
++++ b/scripts/setlocalversion
+@@ -168,7 +168,7 @@ else
+ # annotated or signed tagged state (as git describe only
+ # looks at signed or annotated tags - git tag -a/-s) and
+ # LOCALVERSION= is not specified
+- if test "${LOCALVERSION+set}" != "set"; then
++ if test "${CONFIG_LOCALVERSION+set}" != "set"; then
+ scm=$(scm_version --short)
+ res="$res${scm:++}"
+ fi
diff --git a/target/linux/generic/patches-3.3/201-extra_optimization.patch b/target/linux/generic/patches-3.3/201-extra_optimization.patch
new file mode 100644
index 000000000..6fe8b0938
--- /dev/null
+++ b/target/linux/generic/patches-3.3/201-extra_optimization.patch
@@ -0,0 +1,24 @@
+--- a/Makefile
++++ b/Makefile
+@@ -559,9 +559,9 @@ endif # $(dot-config)
+ all: vmlinux
+
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+-KBUILD_CFLAGS += -Os
++KBUILD_CFLAGS += -Os -fno-caller-saves
+ else
+-KBUILD_CFLAGS += -O2
++KBUILD_CFLAGS += -O2 -fno-reorder-blocks -fno-tree-ch -fno-caller-saves
+ endif
+
+ include $(srctree)/arch/$(SRCARCH)/Makefile
+@@ -620,6 +620,9 @@ endif
+ NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
+ CHECKFLAGS += $(NOSTDINC_FLAGS)
+
++# improve gcc optimization
++CFLAGS += $(call cc-option,-funit-at-a-time,)
++
+ # warn about C99 declaration after statement
+ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
+
diff --git a/target/linux/generic/patches-3.3/202-reduce_module_size.patch b/target/linux/generic/patches-3.3/202-reduce_module_size.patch
new file mode 100644
index 000000000..bca1e356a
--- /dev/null
+++ b/target/linux/generic/patches-3.3/202-reduce_module_size.patch
@@ -0,0 +1,11 @@
+--- a/Makefile
++++ b/Makefile
+@@ -374,7 +374,7 @@ KBUILD_CFLAGS_KERNEL :=
+ KBUILD_AFLAGS := -D__ASSEMBLY__
+ KBUILD_AFLAGS_MODULE := -DMODULE
+ KBUILD_CFLAGS_MODULE := -DMODULE
+-KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
++KBUILD_LDFLAGS_MODULE = -T $(srctree)/scripts/module-common.lds $(if $(CONFIG_PROFILING),,-s)
+
+ # Read KERNELRELEASE from include/config/kernel.release (if it exists)
+ KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
diff --git a/target/linux/generic/patches-3.3/210-darwin_scripts_include.patch b/target/linux/generic/patches-3.3/210-darwin_scripts_include.patch
new file mode 100644
index 000000000..72d344ecd
--- /dev/null
+++ b/target/linux/generic/patches-3.3/210-darwin_scripts_include.patch
@@ -0,0 +1,78 @@
+--- a/scripts/kallsyms.c
++++ b/scripts/kallsyms.c
+@@ -22,6 +22,35 @@
+ #include <stdlib.h>
+ #include <string.h>
+ #include <ctype.h>
++#ifdef __APPLE__
++/* Darwin has no memmem implementation; this one is ripped from the uClibc-0.9.28 source */
++void *memmem (const void *haystack, size_t haystack_len,
++ const void *needle, size_t needle_len)
++{
++ const char *begin;
++ const char *const last_possible
++ = (const char *) haystack + haystack_len - needle_len;
++
++ if (needle_len == 0)
++ /* The first occurrence of the empty string is deemed to occur at
++ the beginning of the string. */
++ return (void *) haystack;
++
++ /* Sanity check, otherwise the loop might search through the whole
++ memory. */
++ if (__builtin_expect (haystack_len < needle_len, 0))
++ return NULL;
++
++ for (begin = (const char *) haystack; begin <= last_possible; ++begin)
++ if (begin[0] == ((const char *) needle)[0] &&
++ !memcmp ((const void *) &begin[1],
++ (const void *) ((const char *) needle + 1),
++ needle_len - 1))
++ return (void *) begin;
++
++ return NULL;
++}
++#endif
+
+ #ifndef ARRAY_SIZE
+ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
+--- a/scripts/kconfig/Makefile
++++ b/scripts/kconfig/Makefile
+@@ -123,6 +123,9 @@ check-lxdialog := $(srctree)/$(src)/lxd
+ # we really need to do so. (Do not call gcc as part of make mrproper)
+ HOST_EXTRACFLAGS += $(shell $(CONFIG_SHELL) $(check-lxdialog) -ccflags) \
+ -DLOCALE
++ifeq ($(shell uname -s),Darwin)
++HOST_LOADLIBES += -lncurses
++endif
+
+ # ===========================================================================
+ # Shared Makefile for the various kconfig executables:
+--- a/scripts/mod/mk_elfconfig.c
++++ b/scripts/mod/mk_elfconfig.c
+@@ -1,7 +1,11 @@
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
++#ifndef __APPLE__
+ #include <elf.h>
++#else
++#include "../../../../../tools/sstrip/include/elf.h"
++#endif
+
+ int
+ main(int argc, char **argv)
+--- a/scripts/mod/modpost.h
++++ b/scripts/mod/modpost.h
+@@ -7,7 +7,11 @@
+ #include <sys/mman.h>
+ #include <fcntl.h>
+ #include <unistd.h>
++#if !(defined(__APPLE__) || defined(__CYGWIN__))
+ #include <elf.h>
++#else
++#include "../../../../../tools/sstrip/include/elf.h"
++#endif
+
+ #include "elfconfig.h"
+
diff --git a/target/linux/generic/patches-3.3/211-stddef_include.patch b/target/linux/generic/patches-3.3/211-stddef_include.patch
new file mode 100644
index 000000000..7fe248d8d
--- /dev/null
+++ b/target/linux/generic/patches-3.3/211-stddef_include.patch
@@ -0,0 +1,17 @@
+--- a/include/linux/stddef.h
++++ b/include/linux/stddef.h
+@@ -16,6 +16,7 @@ enum {
+ false = 0,
+ true = 1
+ };
++#endif /* __KERNEL__ */
+
+ #undef offsetof
+ #ifdef __compiler_offsetof
+@@ -23,6 +24,5 @@ enum {
+ #else
+ #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+ #endif
+-#endif /* __KERNEL__ */
+
+ #endif
diff --git a/target/linux/generic/patches-3.3/212-x86_reloc_portability.patch b/target/linux/generic/patches-3.3/212-x86_reloc_portability.patch
new file mode 100644
index 000000000..d0c8106a6
--- /dev/null
+++ b/target/linux/generic/patches-3.3/212-x86_reloc_portability.patch
@@ -0,0 +1,22 @@
+--- a/arch/x86/tools/relocs.c
++++ b/arch/x86/tools/relocs.c
+@@ -10,7 +10,18 @@
+ #define USE_BSD
+ #include <endian.h>
+ #include <regex.h>
+-#include <tools/le_byteshift.h>
++
++static inline void __put_unaligned_le16(uint16_t val, uint8_t *p)
++{
++ *p++ = val;
++ *p++ = val >> 8;
++}
++
++static inline void put_unaligned_le32(uint32_t val, uint8_t *p)
++{
++ __put_unaligned_le16(val >> 16, p + 2);
++ __put_unaligned_le16(val, p);
++}
+
+ static void die(char *fmt, ...);
+
diff --git a/target/linux/generic/patches-3.3/220-module_exports.patch b/target/linux/generic/patches-3.3/220-module_exports.patch
new file mode 100644
index 000000000..be6b6ff9d
--- /dev/null
+++ b/target/linux/generic/patches-3.3/220-module_exports.patch
@@ -0,0 +1,89 @@
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -52,6 +52,27 @@
+ #define LOAD_OFFSET 0
+ #endif
+
++#ifndef SYMTAB_KEEP_STR
++#define SYMTAB_KEEP_STR *(__ksymtab_strings+*)
++#define SYMTAB_DISCARD_STR
++#else
++#define SYMTAB_DISCARD_STR *(__ksymtab_strings+*)
++#endif
++
++#ifndef SYMTAB_KEEP
++#define SYMTAB_KEEP *(SORT(___ksymtab+*))
++#define SYMTAB_DISCARD
++#else
++#define SYMTAB_DISCARD *(SORT(___ksymtab+*))
++#endif
++
++#ifndef SYMTAB_KEEP_GPL
++#define SYMTAB_KEEP_GPL *(SORT(___ksymtab_gpl+*))
++#define SYMTAB_DISCARD_GPL
++#else
++#define SYMTAB_DISCARD_GPL *(SORT(___ksymtab_gpl+*))
++#endif
++
+ #ifndef SYMBOL_PREFIX
+ #define VMLINUX_SYMBOL(sym) sym
+ #else
+@@ -275,14 +296,14 @@
+ /* Kernel symbol table: Normal symbols */ \
+ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab) = .; \
+- *(SORT(___ksymtab+*)) \
++ SYMTAB_KEEP \
+ VMLINUX_SYMBOL(__stop___ksymtab) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only symbols */ \
+ __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
+- *(SORT(___ksymtab_gpl+*)) \
++ SYMTAB_KEEP_GPL \
+ VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
+ } \
+ \
+@@ -344,7 +365,7 @@
+ \
+ /* Kernel symbol table: strings */ \
+ __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
+- *(__ksymtab_strings) \
++ SYMTAB_KEEP_STR \
+ } \
+ \
+ /* __*init sections */ \
+@@ -676,6 +697,9 @@
+ EXIT_TEXT \
+ EXIT_DATA \
+ EXIT_CALL \
++ SYMTAB_DISCARD \
++ SYMTAB_DISCARD_GPL \
++ SYMTAB_DISCARD_STR \
+ *(.discard) \
+ *(.discard.*) \
+ }
+--- a/include/linux/export.h
++++ b/include/linux/export.h
+@@ -45,12 +45,19 @@ extern struct module __this_module;
+ #define __CRC_SYMBOL(sym, sec)
+ #endif
+
++#ifdef MODULE
++#define __EXPORT_SUFFIX(sym)
++#else
++#define __EXPORT_SUFFIX(sym) "+" #sym
++#endif
++
+ /* For every exported symbol, place a struct in the __ksymtab section */
+ #define __EXPORT_SYMBOL(sym, sec) \
+ extern typeof(sym) sym; \
+ __CRC_SYMBOL(sym, sec) \
+ static const char __kstrtab_##sym[] \
+- __attribute__((section("__ksymtab_strings"), aligned(1))) \
++ __attribute__((section("__ksymtab_strings" \
++ __EXPORT_SUFFIX(sym)), aligned(1))) \
+ = MODULE_SYMBOL_PREFIX #sym; \
+ static const struct kernel_symbol __ksymtab_##sym \
+ __used \
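
With the export.h change above, a built-in EXPORT_SYMBOL() places each exported symbol's name string in its own section, "__ksymtab_strings+<symbol>", which the new SYMTAB_DISCARD_STR rule in vmlinux.lds.h can throw away when nothing keeps it. A toy translation unit, purely illustrative (the symbol name my_export is made up), showing where such a string ends up:

/* Illustrative only: with MODULE undefined, __EXPORT_SUFFIX(my_export) is
 * "+my_export", so the string below lands in "__ksymtab_strings+my_export". */
static const char __kstrtab_my_export[]
	__attribute__((section("__ksymtab_strings" "+" "my_export"), aligned(1)))
	= "my_export";

int main(void)
{
	return __kstrtab_my_export[0] == 'm' ? 0 : 1;
}
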
diff --git a/target/linux/generic/patches-3.3/230-openwrt_lzma_options.patch b/target/linux/generic/patches-3.3/230-openwrt_lzma_options.patch
new file mode 100644
index 000000000..f17f40ac9
--- /dev/null
+++ b/target/linux/generic/patches-3.3/230-openwrt_lzma_options.patch
@@ -0,0 +1,54 @@
+--- a/scripts/Makefile.lib
++++ b/scripts/Makefile.lib
+@@ -296,7 +296,7 @@ cmd_bzip2 = (cat $(filter-out FORCE,$^)
+
+ quiet_cmd_lzma = LZMA $@
+ cmd_lzma = (cat $(filter-out FORCE,$^) | \
+- lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
++ lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
+ (rm -f $@ ; false)
+
+ quiet_cmd_lzo = LZO $@
+--- a/scripts/gen_initramfs_list.sh
++++ b/scripts/gen_initramfs_list.sh
+@@ -226,7 +226,7 @@ cpio_list=
+ output="/dev/stdout"
+ output_file=""
+ is_cpio_compressed=
+-compr="gzip -n -9 -f"
++compr="gzip -n -9 -f -"
+
+ arg="$1"
+ case "$arg" in
+@@ -240,9 +240,9 @@ case "$arg" in
+ output_file="$1"
+ cpio_list="$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)"
+ output=${cpio_list}
+- echo "$output_file" | grep -q "\.gz$" && compr="gzip -n -9 -f"
+- echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f"
+- echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f"
++ echo "$output_file" | grep -q "\.gz$" && compr="gzip -n -9 -f -"
++ echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f -"
++ echo "$output_file" | grep -q "\.lzma$" && compr="lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so"
+ echo "$output_file" | grep -q "\.xz$" && \
+ compr="xz --check=crc32 --lzma2=dict=1MiB"
+ echo "$output_file" | grep -q "\.lzo$" && compr="lzop -9 -f"
+@@ -303,7 +303,7 @@ if [ ! -z ${output_file} ]; then
+ if [ "${is_cpio_compressed}" = "compressed" ]; then
+ cat ${cpio_tfile} > ${output_file}
+ else
+- (cat ${cpio_tfile} | ${compr} - > ${output_file}) \
++ (cat ${cpio_tfile} | ${compr} > ${output_file}) \
+ || (rm -f ${output_file} ; false)
+ fi
+ [ -z ${cpio_file} ] && rm ${cpio_tfile}
+--- a/lib/decompress.c
++++ b/lib/decompress.c
+@@ -40,6 +40,7 @@ static const struct compress_format {
+ { {037, 0236}, "gzip", gunzip },
+ { {0x42, 0x5a}, "bzip2", bunzip2 },
+ { {0x5d, 0x00}, "lzma", unlzma },
++ { {0x6d, 0x00}, "lzma-openwrt", unlzma },
+ { {0xfd, 0x37}, "xz", unxz },
+ { {0x89, 0x4c}, "lzo", unlzo },
+ { {0, 0}, NULL, NULL }
diff --git a/target/linux/generic/patches-3.3/250-netfilter_depends.patch b/target/linux/generic/patches-3.3/250-netfilter_depends.patch
new file mode 100644
index 000000000..cbe389231
--- /dev/null
+++ b/target/linux/generic/patches-3.3/250-netfilter_depends.patch
@@ -0,0 +1,18 @@
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -181,7 +181,6 @@ config NF_CONNTRACK_FTP
+
+ config NF_CONNTRACK_H323
+ tristate "H.323 protocol support"
+- depends on (IPV6 || IPV6=n)
+ depends on NETFILTER_ADVANCED
+ help
+ H.323 is a VoIP signalling protocol from ITU-T. As one of the most
+@@ -627,7 +626,6 @@ config NETFILTER_XT_TARGET_SECMARK
+
+ config NETFILTER_XT_TARGET_TCPMSS
+ tristate '"TCPMSS" target support'
+- depends on (IPV6 || IPV6=n)
+ default m if NETFILTER_ADVANCED=n
+ ---help---
+ This option adds a `TCPMSS' target, which allows you to alter the
diff --git a/target/linux/generic/patches-3.3/251-sound_kconfig.patch b/target/linux/generic/patches-3.3/251-sound_kconfig.patch
new file mode 100644
index 000000000..f374009a6
--- /dev/null
+++ b/target/linux/generic/patches-3.3/251-sound_kconfig.patch
@@ -0,0 +1,11 @@
+--- a/sound/core/Kconfig
++++ b/sound/core/Kconfig
+@@ -7,7 +7,7 @@ config SND_PCM
+ select SND_TIMER
+
+ config SND_HWDEP
+- tristate
++ tristate "Sound hardware support"
+
+ config SND_RAWMIDI
+ tristate
diff --git a/target/linux/generic/patches-3.3/252-mv_cesa_depends.patch b/target/linux/generic/patches-3.3/252-mv_cesa_depends.patch
new file mode 100644
index 000000000..53701458a
--- /dev/null
+++ b/target/linux/generic/patches-3.3/252-mv_cesa_depends.patch
@@ -0,0 +1,10 @@
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -172,6 +172,7 @@ config CRYPTO_DEV_MV_CESA
+ depends on PLAT_ORION
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES
++ select CRYPTO_HASH2
+ select CRYPTO_BLKCIPHER2
+ select CRYPTO_HASH
+ help
diff --git a/target/linux/generic/patches-3.3/253-ssb_b43_default_on.patch b/target/linux/generic/patches-3.3/253-ssb_b43_default_on.patch
new file mode 100644
index 000000000..29d2a41a3
--- /dev/null
+++ b/target/linux/generic/patches-3.3/253-ssb_b43_default_on.patch
@@ -0,0 +1,29 @@
+--- a/drivers/ssb/Kconfig
++++ b/drivers/ssb/Kconfig
+@@ -29,6 +29,7 @@ config SSB_SPROM
+ config SSB_BLOCKIO
+ bool
+ depends on SSB
++ default y
+
+ config SSB_PCIHOST_POSSIBLE
+ bool
+@@ -49,7 +50,7 @@ config SSB_PCIHOST
+ config SSB_B43_PCI_BRIDGE
+ bool
+ depends on SSB_PCIHOST
+- default n
++ default y
+
+ config SSB_PCMCIAHOST_POSSIBLE
+ bool
+--- a/drivers/bcma/Kconfig
++++ b/drivers/bcma/Kconfig
+@@ -17,6 +17,7 @@ config BCMA
+ config BCMA_BLOCKIO
+ bool
+ depends on BCMA
++ default y
+
+ config BCMA_HOST_PCI_POSSIBLE
+ bool
diff --git a/target/linux/generic/patches-3.3/254-textsearch_kconfig_hacks.patch b/target/linux/generic/patches-3.3/254-textsearch_kconfig_hacks.patch
new file mode 100644
index 000000000..1659fda93
--- /dev/null
+++ b/target/linux/generic/patches-3.3/254-textsearch_kconfig_hacks.patch
@@ -0,0 +1,23 @@
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -207,16 +207,16 @@ config BCH_CONST_T
+ # Textsearch support is select'ed if needed
+ #
+ config TEXTSEARCH
+- boolean
++ boolean "Textsearch support"
+
+ config TEXTSEARCH_KMP
+- tristate
++ tristate "Textsearch KMP"
+
+ config TEXTSEARCH_BM
+- tristate
++ tristate "Textsearch BM"
+
+ config TEXTSEARCH_FSM
+- tristate
++ tristate "Textsearch FSM"
+
+ config BTREE
+ boolean
diff --git a/target/linux/generic/patches-3.3/255-lib80211_kconfig_hacks.patch b/target/linux/generic/patches-3.3/255-lib80211_kconfig_hacks.patch
new file mode 100644
index 000000000..8dde331a9
--- /dev/null
+++ b/target/linux/generic/patches-3.3/255-lib80211_kconfig_hacks.patch
@@ -0,0 +1,19 @@
+--- a/net/wireless/Kconfig
++++ b/net/wireless/Kconfig
+@@ -143,13 +143,13 @@ config LIB80211
+ you want this built into your kernel.
+
+ config LIB80211_CRYPT_WEP
+- tristate
++ tristate "LIB80211_CRYPT_WEP"
+
+ config LIB80211_CRYPT_CCMP
+- tristate
++ tristate "LIB80211_CRYPT_CCMP"
+
+ config LIB80211_CRYPT_TKIP
+- tristate
++ tristate "LIB80211_CRYPT_TKIP"
+
+ config LIB80211_DEBUG
+ bool "lib80211 debugging messages"
diff --git a/target/linux/generic/patches-3.3/256-crypto_add_kconfig_prompts.patch b/target/linux/generic/patches-3.3/256-crypto_add_kconfig_prompts.patch
new file mode 100644
index 000000000..8462c711f
--- /dev/null
+++ b/target/linux/generic/patches-3.3/256-crypto_add_kconfig_prompts.patch
@@ -0,0 +1,47 @@
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -31,7 +31,7 @@ config CRYPTO_FIPS
+ this is.
+
+ config CRYPTO_ALGAPI
+- tristate
++ tristate "ALGAPI"
+ select CRYPTO_ALGAPI2
+ help
+ This option provides the API for cryptographic algorithms.
+@@ -40,7 +40,7 @@ config CRYPTO_ALGAPI2
+ tristate
+
+ config CRYPTO_AEAD
+- tristate
++ tristate "AEAD"
+ select CRYPTO_AEAD2
+ select CRYPTO_ALGAPI
+
+@@ -49,7 +49,7 @@ config CRYPTO_AEAD2
+ select CRYPTO_ALGAPI2
+
+ config CRYPTO_BLKCIPHER
+- tristate
++ tristate "BLKCIPHER"
+ select CRYPTO_BLKCIPHER2
+ select CRYPTO_ALGAPI
+
+@@ -60,7 +60,7 @@ config CRYPTO_BLKCIPHER2
+ select CRYPTO_WORKQUEUE
+
+ config CRYPTO_HASH
+- tristate
++ tristate "HASH"
+ select CRYPTO_HASH2
+ select CRYPTO_ALGAPI
+
+@@ -69,7 +69,7 @@ config CRYPTO_HASH2
+ select CRYPTO_ALGAPI2
+
+ config CRYPTO_RNG
+- tristate
++ tristate "RNG"
+ select CRYPTO_RNG2
+ select CRYPTO_ALGAPI
+
diff --git a/target/linux/generic/patches-3.3/257-wireless_ext_kconfig_hack.patch b/target/linux/generic/patches-3.3/257-wireless_ext_kconfig_hack.patch
new file mode 100644
index 000000000..daac5898a
--- /dev/null
+++ b/target/linux/generic/patches-3.3/257-wireless_ext_kconfig_hack.patch
@@ -0,0 +1,22 @@
+--- a/net/wireless/Kconfig
++++ b/net/wireless/Kconfig
+@@ -1,5 +1,5 @@
+ config WIRELESS_EXT
+- bool
++ bool "Wireless extensions"
+
+ config WEXT_CORE
+ def_bool y
+@@ -11,10 +11,10 @@ config WEXT_PROC
+ depends on WEXT_CORE
+
+ config WEXT_SPY
+- bool
++ bool "WEXT_SPY"
+
+ config WEXT_PRIV
+- bool
++ bool "WEXT_PRIV"
+
+ config CFG80211
+ tristate "cfg80211 - wireless configuration API"
diff --git a/target/linux/generic/patches-3.3/258-netfilter_netlink_kconfig_hack.patch b/target/linux/generic/patches-3.3/258-netfilter_netlink_kconfig_hack.patch
new file mode 100644
index 000000000..9d827c253
--- /dev/null
+++ b/target/linux/generic/patches-3.3/258-netfilter_netlink_kconfig_hack.patch
@@ -0,0 +1,11 @@
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -2,7 +2,7 @@ menu "Core Netfilter Configuration"
+ depends on NET && INET && NETFILTER
+
+ config NETFILTER_NETLINK
+- tristate
++ tristate "Netfilter NFNETLINK interface"
+
+ config NETFILTER_NETLINK_ACCT
+ tristate "Netfilter NFACCT over NFNETLINK interface"
diff --git a/target/linux/generic/patches-3.3/300-mips_expose_boot_raw.patch b/target/linux/generic/patches-3.3/300-mips_expose_boot_raw.patch
new file mode 100644
index 000000000..b114ef404
--- /dev/null
+++ b/target/linux/generic/patches-3.3/300-mips_expose_boot_raw.patch
@@ -0,0 +1,39 @@
+From: Mark Miller <mark@mirell.org>
+
+This exposes the CONFIG_BOOT_RAW symbol in Kconfig. This is needed on
+certain Broadcom chipsets running CFE in order to load the kernel.
+
+Signed-off-by: Mark Miller <mark@mirell.org>
+Acked-by: Rob Landley <rob@landley.net>
+---
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -877,9 +877,6 @@ config ARC
+ config ARCH_MAY_HAVE_PC_FDC
+ bool
+
+-config BOOT_RAW
+- bool
+-
+ config CEVT_BCM1480
+ bool
+
+@@ -2330,6 +2327,18 @@ config USE_OF
+ help
+ Include support for flattened device tree machine descriptions.
+
++config BOOT_RAW
++ bool "Enable the kernel to be executed from the load address"
++ default n
++ help
++ Allow the kernel to be executed from the load address for
++ bootloaders which cannot read the ELF format. This places
++ a jump to start_kernel at the load address.
++
++ If unsure, say N.
++
++
++
+ endmenu
+
+ config LOCKDEP_SUPPORT
diff --git a/target/linux/generic/patches-3.3/301-mips_image_cmdline_hack.patch b/target/linux/generic/patches-3.3/301-mips_image_cmdline_hack.patch
new file mode 100644
index 000000000..9193c68f0
--- /dev/null
+++ b/target/linux/generic/patches-3.3/301-mips_image_cmdline_hack.patch
@@ -0,0 +1,28 @@
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -976,6 +976,10 @@ config SYNC_R4K
+ config MIPS_MACHINE
+ def_bool n
+
++config IMAGE_CMDLINE_HACK
++ bool "OpenWrt specific image command line hack"
++ default n
++
+ config NO_IOPORT
+ def_bool n
+
+--- a/arch/mips/kernel/head.S
++++ b/arch/mips/kernel/head.S
+@@ -141,6 +141,12 @@ FEXPORT(__kernel_entry)
+ j kernel_entry
+ #endif
+
++#ifdef CONFIG_IMAGE_CMDLINE_HACK
++ .ascii "CMDLINE:"
++EXPORT(__image_cmdline)
++ .fill 0x400
++#endif /* CONFIG_IMAGE_CMDLINE_HACK */
++
+ __REF
+
+ NESTED(kernel_entry, 16, sp) # kernel entry point
diff --git a/target/linux/generic/patches-3.3/302-mips_use_generic_thread_info_allocator.patch b/target/linux/generic/patches-3.3/302-mips_use_generic_thread_info_allocator.patch
new file mode 100644
index 000000000..1bcc74b41
--- /dev/null
+++ b/target/linux/generic/patches-3.3/302-mips_use_generic_thread_info_allocator.patch
@@ -0,0 +1,18 @@
+--- a/arch/mips/include/asm/thread_info.h
++++ b/arch/mips/include/asm/thread_info.h
+@@ -85,6 +85,7 @@ register struct thread_info *__current_t
+
+ #define STACK_WARN (THREAD_SIZE / 8)
+
++#if 0
+ #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+
+ #ifdef CONFIG_DEBUG_STACK_USAGE
+@@ -96,6 +97,7 @@ register struct thread_info *__current_t
+ #endif
+
+ #define free_thread_info(info) kfree(info)
++#endif
+
+ #endif /* !__ASSEMBLY__ */
+
diff --git a/target/linux/generic/patches-3.3/303-mips_fix_kexec.patch b/target/linux/generic/patches-3.3/303-mips_fix_kexec.patch
new file mode 100644
index 000000000..e6928dde9
--- /dev/null
+++ b/target/linux/generic/patches-3.3/303-mips_fix_kexec.patch
@@ -0,0 +1,11 @@
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -52,7 +52,7 @@ machine_kexec(struct kimage *image)
+ reboot_code_buffer =
+ (unsigned long)page_address(image->control_code_page);
+
+- kexec_start_address = image->start;
++ kexec_start_address = (unsigned long) phys_to_virt(image->start);
+ kexec_indirection_page =
+ (unsigned long) phys_to_virt(image->head & PAGE_MASK);
+
diff --git a/target/linux/generic/patches-3.3/304-mips_disable_fpu.patch b/target/linux/generic/patches-3.3/304-mips_disable_fpu.patch
new file mode 100644
index 000000000..e747feac6
--- /dev/null
+++ b/target/linux/generic/patches-3.3/304-mips_disable_fpu.patch
@@ -0,0 +1,160 @@
+MIPS: allow disabling the kernel FPU emulator
+
+This patch allows turning off the in-kernel Algorithmics
+FPU emulator support, which allows one to save a couple of
+precious blocks on an embedded system.
+
+Signed-off-by: Florian Fainelli <florian@openwrt.org>
+--
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -961,6 +961,17 @@ config I8259
+ config MIPS_BONITO64
+ bool
+
++config MIPS_FPU_EMU
++ bool "Enable FPU emulation"
++ default y
++ help
++ This option allows building a kernel with or without the Algorithmics
++ FPU emulator enabled. Turning off this option results in a kernel which
++ does not catch floating operations exceptions. Make sure that your toolchain
++ is configured to enable software floating point emulation in that case.
++
++ If unsure say Y here.
++
+ config MIPS_MSC
+ bool
+
+--- a/arch/mips/math-emu/Makefile
++++ b/arch/mips/math-emu/Makefile
+@@ -2,11 +2,13 @@
+ # Makefile for the Linux/MIPS kernel FPU emulation.
+ #
+
+-obj-y := cp1emu.o ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \
++obj-y := kernel_linkage.o dsemul.o cp1emu.o
++
++obj-$(CONFIG_MIPS_FPU_EMU) += ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \
+ ieee754xcpt.o dp_frexp.o dp_modf.o dp_div.o dp_mul.o dp_sub.o \
+ dp_add.o dp_fsp.o dp_cmp.o dp_logb.o dp_scalb.o dp_simple.o \
+ dp_tint.o dp_fint.o dp_tlong.o dp_flong.o sp_frexp.o sp_modf.o \
+ sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_logb.o \
+ sp_scalb.o sp_simple.o sp_tint.o sp_fint.o sp_tlong.o sp_flong.o \
+- dp_sqrt.o sp_sqrt.o kernel_linkage.o dsemul.o
++ dp_sqrt.o sp_sqrt.o
+
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -58,7 +58,11 @@
+ #define __mips 4
+
+ /* Function which emulates a floating point instruction. */
++#ifdef CONFIG_DEBUG_FS
++DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
++#endif
+
++#ifdef CONFIG_MIPS_FPU_EMU
+ static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *,
+ mips_instruction);
+
+@@ -69,10 +73,6 @@ static int fpux_emu(struct pt_regs *,
+
+ /* Further private data for which no space exists in mips_fpu_struct */
+
+-#ifdef CONFIG_DEBUG_FS
+-DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
+-#endif
+-
+ /* Control registers */
+
+ #define FPCREG_RID 0 /* $0 = revision id */
+@@ -1360,7 +1360,6 @@ int fpu_emulator_cop1Handler(struct pt_r
+
+ return sig;
+ }
+-
+ #ifdef CONFIG_DEBUG_FS
+
+ static int fpuemu_stat_get(void *data, u64 *val)
+@@ -1409,4 +1408,11 @@ static int __init debugfs_fpuemu(void)
+ return 0;
+ }
+ __initcall(debugfs_fpuemu);
+-#endif
++#endif /* CONFIG_DEBUG_FS */
++#else
++int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
++ int has_fpu)
++{
++ return 0;
++}
++#endif /* CONFIG_MIPS_FPU_EMU */
+--- a/arch/mips/math-emu/dsemul.c
++++ b/arch/mips/math-emu/dsemul.c
+@@ -109,6 +109,7 @@ int mips_dsemul(struct pt_regs *regs, mi
+ return SIGILL; /* force out of emulation loop */
+ }
+
++#ifdef CONFIG_MIPS_FPU_EMU
+ int do_dsemulret(struct pt_regs *xcp)
+ {
+ struct emuframe __user *fr;
+@@ -165,3 +166,9 @@ int do_dsemulret(struct pt_regs *xcp)
+
+ return 1;
+ }
++#else
++int do_dsemulret(struct pt_regs *xcp)
++{
++ return 0;
++}
++#endif /* CONFIG_MIPS_FPU_EMU */
+--- a/arch/mips/math-emu/kernel_linkage.c
++++ b/arch/mips/math-emu/kernel_linkage.c
+@@ -29,6 +29,7 @@
+
+ #define SIGNALLING_NAN 0x7ff800007ff80000LL
+
++#ifdef CONFIG_MIPS_FPU_EMU
+ void fpu_emulator_init_fpu(void)
+ {
+ static int first = 1;
+@@ -112,4 +113,36 @@ int fpu_emulator_restore_context32(struc
+
+ return err;
+ }
+-#endif
++#endif /* CONFIG_64BIT */
++#else
++
++void fpu_emulator_init_fpu(void)
++{
++ printk(KERN_INFO "FPU emulator disabled, make sure your toolchain"
++ "was compiled with software floating point support (soft-float)\n");
++ return;
++}
++
++int fpu_emulator_save_context(struct sigcontext __user *sc)
++{
++ return 0;
++}
++
++int fpu_emulator_restore_context(struct sigcontext __user *sc)
++{
++ return 0;
++}
++
++int fpu_emulator_save_context32(struct sigcontext32 __user *sc)
++{
++ return 0;
++}
++
++int fpu_emulator_restore_context32(struct sigcontext32 __user *sc)
++{
++ return 0;
++}
++
++#ifdef CONFIG_64BIT
++#endif /* CONFIG_64BIT */
++#endif /* CONFIG_MIPS_FPU_EMU */
diff --git a/target/linux/generic/patches-3.3/305-mips_module_reloc.patch b/target/linux/generic/patches-3.3/305-mips_module_reloc.patch
new file mode 100644
index 000000000..b4b142cb7
--- /dev/null
+++ b/target/linux/generic/patches-3.3/305-mips_module_reloc.patch
@@ -0,0 +1,371 @@
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -90,8 +90,8 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
+ cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
+ cflags-y += -msoft-float
+ LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
+-KBUILD_AFLAGS_MODULE += -mlong-calls
+-KBUILD_CFLAGS_MODULE += -mlong-calls
++KBUILD_AFLAGS_MODULE += -mno-long-calls
++KBUILD_CFLAGS_MODULE += -mno-long-calls
+
+ cflags-y += -ffreestanding
+
+--- a/arch/mips/include/asm/module.h
++++ b/arch/mips/include/asm/module.h
+@@ -9,6 +9,11 @@ struct mod_arch_specific {
+ struct list_head dbe_list;
+ const struct exception_table_entry *dbe_start;
+ const struct exception_table_entry *dbe_end;
++
++ void *phys_plt_tbl;
++ void *virt_plt_tbl;
++ unsigned int phys_plt_offset;
++ unsigned int virt_plt_offset;
+ };
+
+ typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
+--- a/arch/mips/kernel/module.c
++++ b/arch/mips/kernel/module.c
+@@ -44,14 +44,219 @@ static struct mips_hi16 *mips_hi16_list;
+ static LIST_HEAD(dbe_list);
+ static DEFINE_SPINLOCK(dbe_lock);
+
+-#ifdef MODULE_START
++/*
++ * Get the potential max trampolines size required of the init and
++ * non-init sections. Only used if we cannot find enough contiguous
++ * physically mapped memory to put the module into.
++ */
++static unsigned int
++get_plt_size(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
++ const char *secstrings, unsigned int symindex, bool is_init)
++{
++ unsigned long ret = 0;
++ unsigned int i, j;
++ Elf_Sym *syms;
++
++ /* Everything marked ALLOC (this includes the exported symbols) */
++ for (i = 1; i < hdr->e_shnum; ++i) {
++ unsigned int info = sechdrs[i].sh_info;
++
++ if (sechdrs[i].sh_type != SHT_REL
++ && sechdrs[i].sh_type != SHT_RELA)
++ continue;
++
++ /* Not a valid relocation section? */
++ if (info >= hdr->e_shnum)
++ continue;
++
++ /* Don't bother with non-allocated sections */
++ if (!(sechdrs[info].sh_flags & SHF_ALLOC))
++ continue;
++
++ /* If it's called *.init*, and we're not init, we're
++ not interested */
++ if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
++ != is_init)
++ continue;
++
++ syms = (Elf_Sym *) sechdrs[symindex].sh_addr;
++ if (sechdrs[i].sh_type == SHT_REL) {
++ Elf_Mips_Rel *rel = (void *) sechdrs[i].sh_addr;
++ unsigned int size = sechdrs[i].sh_size / sizeof(*rel);
++
++ for (j = 0; j < size; ++j) {
++ Elf_Sym *sym;
++
++ if (ELF_MIPS_R_TYPE(rel[j]) != R_MIPS_26)
++ continue;
++
++ sym = syms + ELF_MIPS_R_SYM(rel[j]);
++ if (!is_init && sym->st_shndx != SHN_UNDEF)
++ continue;
++
++ ret += 4 * sizeof(int);
++ }
++ } else {
++ Elf_Mips_Rela *rela = (void *) sechdrs[i].sh_addr;
++ unsigned int size = sechdrs[i].sh_size / sizeof(*rela);
++
++ for (j = 0; j < size; ++j) {
++ Elf_Sym *sym;
++
++ if (ELF_MIPS_R_TYPE(rela[j]) != R_MIPS_26)
++ continue;
++
++ sym = syms + ELF_MIPS_R_SYM(rela[j]);
++ if (!is_init && sym->st_shndx != SHN_UNDEF)
++ continue;
++
++ ret += 4 * sizeof(int);
++ }
++ }
++ }
++
++ return ret;
++}
++
++#ifndef MODULE_START
++static void *alloc_phys(unsigned long size)
++{
++ unsigned order;
++ struct page *page;
++ struct page *p;
++
++ size = PAGE_ALIGN(size);
++ order = get_order(size);
++
++ page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN |
++ __GFP_THISNODE, order);
++ if (!page)
++ return NULL;
++
++ split_page(page, order);
++
++ for (p = page + (size >> PAGE_SHIFT); p < page + (1 << order); ++p)
++ __free_page(p);
++
++ return page_address(page);
++}
++#endif
++
++static void free_phys(void *ptr, unsigned long size)
++{
++ struct page *page;
++ struct page *end;
++
++ page = virt_to_page(ptr);
++ end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);
++
++ for (; page < end; ++page)
++ __free_page(page);
++}
++
++
+ void *module_alloc(unsigned long size)
+ {
++#ifdef MODULE_START
+ return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
+ GFP_KERNEL, PAGE_KERNEL, -1,
+ __builtin_return_address(0));
++#else
++ void *ptr;
++
++ if (size == 0)
++ return NULL;
++
++ ptr = alloc_phys(size);
++
++ /* If we failed to allocate physically contiguous memory,
++ * fall back to regular vmalloc. The module loader code will
++ * create jump tables to handle long jumps */
++ if (!ptr)
++ return vmalloc(size);
++
++ return ptr;
++#endif
+ }
++
++static inline bool is_phys_addr(void *ptr)
++{
++#ifdef CONFIG_64BIT
++ return (KSEGX((unsigned long)ptr) == CKSEG0);
++#else
++ return (KSEGX(ptr) == KSEG0);
+ #endif
++}
++
++/* Free memory returned from module_alloc */
++void module_free(struct module *mod, void *module_region)
++{
++ if (is_phys_addr(module_region)) {
++ if (mod->module_init == module_region)
++ free_phys(module_region, mod->init_size);
++ else if (mod->module_core == module_region)
++ free_phys(module_region, mod->core_size);
++ else
++ BUG();
++ } else {
++ vfree(module_region);
++ }
++}
++
++static void *__module_alloc(int size, bool phys)
++{
++ void *ptr;
++
++ if (phys)
++ ptr = kmalloc(size, GFP_KERNEL);
++ else
++ ptr = vmalloc(size);
++ return ptr;
++}
++
++static void __module_free(void *ptr)
++{
++ if (is_phys_addr(ptr))
++ kfree(ptr);
++ else
++ vfree(ptr);
++}
++
++int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
++ char *secstrings, struct module *mod)
++{
++ unsigned int symindex = 0;
++ unsigned int core_size, init_size;
++ int i;
++
++ for (i = 1; i < hdr->e_shnum; i++)
++ if (sechdrs[i].sh_type == SHT_SYMTAB)
++ symindex = i;
++
++ core_size = get_plt_size(hdr, sechdrs, secstrings, symindex, false);
++ init_size = get_plt_size(hdr, sechdrs, secstrings, symindex, true);
++
++ mod->arch.phys_plt_offset = 0;
++ mod->arch.virt_plt_offset = 0;
++ mod->arch.phys_plt_tbl = NULL;
++ mod->arch.virt_plt_tbl = NULL;
++
++ if ((core_size + init_size) == 0)
++ return 0;
++
++ mod->arch.phys_plt_tbl = __module_alloc(core_size + init_size, 1);
++ if (!mod->arch.phys_plt_tbl)
++ return -ENOMEM;
++
++ mod->arch.virt_plt_tbl = __module_alloc(core_size + init_size, 0);
++ if (!mod->arch.virt_plt_tbl) {
++ __module_free(mod->arch.phys_plt_tbl);
++ mod->arch.phys_plt_tbl = NULL;
++ return -ENOMEM;
++ }
++
++ return 0;
++}
+
+ static int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
+ {
+@@ -72,28 +277,36 @@ static int apply_r_mips_32_rela(struct m
+ return 0;
+ }
+
+-static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
++static Elf_Addr add_plt_entry_to(unsigned *plt_offset,
++ void *start, Elf_Addr v)
+ {
+- if (v % 4) {
+- pr_err("module %s: dangerous R_MIPS_26 REL relocation\n",
+- me->name);
+- return -ENOEXEC;
+- }
++ unsigned *tramp = start + *plt_offset;
++ *plt_offset += 4 * sizeof(int);
+
+- if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+- printk(KERN_ERR
+- "module %s: relocation overflow\n",
+- me->name);
+- return -ENOEXEC;
+- }
++ /* adjust carry for addiu */
++ if (v & 0x00008000)
++ v += 0x10000;
+
+- *location = (*location & ~0x03ffffff) |
+- ((*location + (v >> 2)) & 0x03ffffff);
++ tramp[0] = 0x3c190000 | (v >> 16); /* lui t9, hi16 */
++ tramp[1] = 0x27390000 | (v & 0xffff); /* addiu t9, t9, lo16 */
++ tramp[2] = 0x03200008; /* jr t9 */
++ tramp[3] = 0x00000000; /* nop */
+
+- return 0;
++ return (Elf_Addr) tramp;
+ }
+
+-static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
++static Elf_Addr add_plt_entry(struct module *me, void *location, Elf_Addr v)
++{
++ if (is_phys_addr(location))
++ return add_plt_entry_to(&me->arch.phys_plt_offset,
++ me->arch.phys_plt_tbl, v);
++ else
++ return add_plt_entry_to(&me->arch.virt_plt_offset,
++ me->arch.virt_plt_tbl, v);
++
++}
++
++static int set_r_mips_26(struct module *me, u32 *location, u32 ofs, Elf_Addr v)
+ {
+ if (v % 4) {
+ pr_err("module %s: dangerous R_MIPS_26 RELArelocation\n",
+@@ -102,17 +315,31 @@ static int apply_r_mips_26_rela(struct m
+ }
+
+ if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+- printk(KERN_ERR
++ v = add_plt_entry(me, location, v + (ofs << 2));
++ if (!v) {
++ printk(KERN_ERR
+ "module %s: relocation overflow\n",
+ me->name);
+- return -ENOEXEC;
++ return -ENOEXEC;
++ }
++ ofs = 0;
+ }
+
+- *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
++ *location = (*location & ~0x03ffffff) | ((ofs + (v >> 2)) & 0x03ffffff);
+
+ return 0;
+ }
+
++static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
++{
++ return set_r_mips_26(me, location, *location & 0x03ffffff, v);
++}
++
++static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
++{
++ return set_r_mips_26(me, location, 0, v);
++}
++
+ static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
+ {
+ struct mips_hi16 *n;
+@@ -380,11 +607,32 @@ int module_finalize(const Elf_Ehdr *hdr,
+ list_add(&me->arch.dbe_list, &dbe_list);
+ spin_unlock_irq(&dbe_lock);
+ }
++
++ /* Get rid of the fixup trampoline if we're running the module
++ * from physically mapped address space */
++ if (me->arch.phys_plt_offset == 0) {
++ __module_free(me->arch.phys_plt_tbl);
++ me->arch.phys_plt_tbl = NULL;
++ }
++ if (me->arch.virt_plt_offset == 0) {
++ __module_free(me->arch.virt_plt_tbl);
++ me->arch.virt_plt_tbl = NULL;
++ }
++
+ return 0;
+ }
+
+ void module_arch_cleanup(struct module *mod)
+ {
++ if (mod->arch.phys_plt_tbl) {
++ __module_free(mod->arch.phys_plt_tbl);
++ mod->arch.phys_plt_tbl = NULL;
++ }
++ if (mod->arch.virt_plt_tbl) {
++ __module_free(mod->arch.virt_plt_tbl);
++ mod->arch.virt_plt_tbl = NULL;
++ }
++
+ spin_lock_irq(&dbe_lock);
+ list_del(&mod->arch.dbe_list);
+ spin_unlock_irq(&dbe_lock);
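
The module relocation patch above builds kernel modules with -mno-long-calls
again and, whenever an R_MIPS_26 jump cannot reach its target inside the same
256 MB segment, routes it through a four-instruction trampoline
(lui/addiu/jr/nop) in a per-module PLT. The stand-alone sketch below mirrors
the encoding in add_plt_entry_to() so the addiu carry handling is easy to
verify; it is illustrative only and not part of the patch:

#include <stdio.h>

/* Encode the 4-word MIPS trampoline used above for an absolute target v. */
static void encode_trampoline(unsigned long v, unsigned int tramp[4])
{
        /* addiu sign-extends its 16-bit immediate, so when the low half has
         * bit 15 set the high half must be bumped by one to compensate. */
        if (v & 0x00008000)
                v += 0x10000;

        tramp[0] = 0x3c190000 | ((v >> 16) & 0xffff);   /* lui   t9, hi16     */
        tramp[1] = 0x27390000 | (v & 0xffff);           /* addiu t9, t9, lo16 */
        tramp[2] = 0x03200008;                          /* jr    t9           */
        tramp[3] = 0x00000000;                          /* nop (delay slot)   */
}

int main(void)
{
        /* 0x803f8010 has bit 15 set in its low half, so the carry applies:
         * lui loads 0x8040, addiu adds -0x7ff0, giving back 0x803f8010. */
        unsigned int tramp[4];
        int i;

        encode_trampoline(0x803f8010UL, tramp);
        for (i = 0; i < 4; i++)
                printf("tramp[%d] = 0x%08x\n", i, tramp[i]);
        return 0;
}
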
diff --git a/target/linux/generic/patches-3.3/306-mips_mem_functions_performance.patch b/target/linux/generic/patches-3.3/306-mips_mem_functions_performance.patch
new file mode 100644
index 000000000..e4324717b
--- /dev/null
+++ b/target/linux/generic/patches-3.3/306-mips_mem_functions_performance.patch
@@ -0,0 +1,83 @@
+--- a/arch/mips/include/asm/string.h
++++ b/arch/mips/include/asm/string.h
+@@ -133,11 +133,44 @@ strncmp(__const__ char *__cs, __const__
+
+ #define __HAVE_ARCH_MEMSET
+ extern void *memset(void *__s, int __c, size_t __count);
++#define memset(__s, __c, len) \
++({ \
++ size_t __len = (len); \
++ void *__ret; \
++ if (__builtin_constant_p(len) && __len >= 64) \
++ __ret = memset((__s), (__c), __len); \
++ else \
++ __ret = __builtin_memset((__s), (__c), __len); \
++ __ret; \
++})
+
+ #define __HAVE_ARCH_MEMCPY
+ extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
++#define memcpy(dst, src, len) \
++({ \
++ size_t __len = (len); \
++ void *__ret; \
++ if (__builtin_constant_p(len) && __len >= 64) \
++ __ret = memcpy((dst), (src), __len); \
++ else \
++ __ret = __builtin_memcpy((dst), (src), __len); \
++ __ret; \
++})
+
+ #define __HAVE_ARCH_MEMMOVE
+ extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
++#define memmove(dst, src, len) \
++({ \
++ size_t __len = (len); \
++ void *__ret; \
++ if (__builtin_constant_p(len) && __len >= 64) \
++ __ret = memmove((dst), (src), __len); \
++ else \
++ __ret = __builtin_memmove((dst), (src), __len); \
++ __ret; \
++})
++
++#define __HAVE_ARCH_MEMCMP
++#define memcmp(src1, src2, len) __builtin_memcmp((src1), (src2), (len))
+
+ #endif /* _ASM_STRING_H */
+--- a/arch/mips/lib/Makefile
++++ b/arch/mips/lib/Makefile
+@@ -3,7 +3,7 @@
+ #
+
+ lib-y += csum_partial.o delay.o memcpy.o memcpy-inatomic.o memset.o \
+- strlen_user.o strncpy_user.o strnlen_user.o uncached.o
++ strlen_user.o strncpy_user.o strnlen_user.o uncached.o memcmp.o
+
+ obj-y += iomap.o
+ obj-$(CONFIG_PCI) += iomap-pci.o
+--- /dev/null
++++ b/arch/mips/lib/memcmp.c
+@@ -0,0 +1,22 @@
++/*
++ * copied from linux/lib/string.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ */
++
++#include <linux/module.h>
++#include <linux/string.h>
++
++#undef memcmp
++int memcmp(const void *cs, const void *ct, size_t count)
++{
++ const unsigned char *su1, *su2;
++ int res = 0;
++
++ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
++ if ((res = *su1 - *su2) != 0)
++ break;
++ return res;
++}
++EXPORT_SYMBOL(memcmp);
++
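
The asm/string.h wrappers above choose between the tuned out-of-line assembly
routines and the compiler builtins based on __builtin_constant_p() and a
64-byte threshold, so short fixed-size operations get inlined while large
constant-size ones still hit the optimized implementations. A reduced
user-space model of that dispatch (the names fast_memset and slow_path_memset
are invented for illustration):

#include <stdio.h>
#include <string.h>

static void *slow_path_memset(void *s, int c, size_t n)
{
        printf("out-of-line memset(%zu)\n", n);  /* stands in for the asm memset */
        return memset(s, c, n);
}

#define fast_memset(s, c, len) \
({ \
        size_t __len = (len); \
        void *__ret; \
        if (__builtin_constant_p(len) && __len >= 64) \
                __ret = slow_path_memset((s), (c), __len); \
        else \
                __ret = __builtin_memset((s), (c), __len); \
        __ret; \
})

int main(void)
{
        char buf[128];
        size_t n = 16;

        fast_memset(buf, 0, 16);                /* constant, small -> inlined builtin    */
        fast_memset(buf, 0, sizeof(buf));       /* constant, large -> out-of-line helper */
        fast_memset(buf, 0, n);                 /* runtime length  -> builtin, possibly a
                                                   library call if it cannot be inlined  */
        return 0;
}
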
diff --git a/target/linux/generic/patches-3.3/307-mips_oprofile_fix.patch b/target/linux/generic/patches-3.3/307-mips_oprofile_fix.patch
new file mode 100644
index 000000000..f83c96bb8
--- /dev/null
+++ b/target/linux/generic/patches-3.3/307-mips_oprofile_fix.patch
@@ -0,0 +1,35 @@
+--- a/arch/mips/oprofile/op_model_mipsxx.c
++++ b/arch/mips/oprofile/op_model_mipsxx.c
+@@ -298,6 +298,11 @@ static void reset_counters(void *arg)
+ }
+ }
+
++static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
++{
++ return mipsxx_perfcount_handler();
++}
++
+ static int __init mipsxx_init(void)
+ {
+ int counters;
+@@ -374,6 +379,10 @@ static int __init mipsxx_init(void)
+ save_perf_irq = perf_irq;
+ perf_irq = mipsxx_perfcount_handler;
+
++ if (cp0_perfcount_irq >= 0)
++ return request_irq(cp0_perfcount_irq, mipsxx_perfcount_int,
++ IRQF_SHARED, "Perfcounter", save_perf_irq);
++
+ return 0;
+ }
+
+@@ -381,6 +390,9 @@ static void mipsxx_exit(void)
+ {
+ int counters = op_model_mipsxx_ops.num_counters;
+
++ if (cp0_perfcount_irq >= 0)
++ free_irq(cp0_perfcount_irq, save_perf_irq);
++
+ counters = counters_per_cpu_to_total(counters);
+ on_each_cpu(reset_counters, (void *)(long)counters, 1);
+
diff --git a/target/linux/generic/patches-3.3/308-mips-show-correct-cpu-name-for-24KEc.patch b/target/linux/generic/patches-3.3/308-mips-show-correct-cpu-name-for-24KEc.patch
new file mode 100644
index 000000000..ac6304a2b
--- /dev/null
+++ b/target/linux/generic/patches-3.3/308-mips-show-correct-cpu-name-for-24KEc.patch
@@ -0,0 +1,17 @@
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -816,10 +816,13 @@ static inline void cpu_probe_mips(struct
+ __cpu_name[cpu] = "MIPS 20Kc";
+ break;
+ case PRID_IMP_24K:
+- case PRID_IMP_24KE:
+ c->cputype = CPU_24K;
+ __cpu_name[cpu] = "MIPS 24Kc";
+ break;
++ case PRID_IMP_24KE:
++ c->cputype = CPU_24K;
++ __cpu_name[cpu] = "MIPS 24KEc";
++ break;
+ case PRID_IMP_25KF:
+ c->cputype = CPU_25KF;
+ __cpu_name[cpu] = "MIPS 25Kc";
diff --git a/target/linux/generic/patches-3.3/309-mips_fuse_workaround.patch b/target/linux/generic/patches-3.3/309-mips_fuse_workaround.patch
new file mode 100644
index 000000000..78ab64f06
--- /dev/null
+++ b/target/linux/generic/patches-3.3/309-mips_fuse_workaround.patch
@@ -0,0 +1,32 @@
+--- a/arch/mips/mm/cache.c
++++ b/arch/mips/mm/cache.c
+@@ -39,6 +39,7 @@ void (*__flush_kernel_vmap_range)(unsign
+ void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);
+
+ EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
++EXPORT_SYMBOL(__flush_cache_all);
+
+ /* MIPS specific cache operations */
+ void (*flush_cache_sigtramp)(unsigned long addr);
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -19,6 +19,9 @@
+ #include <linux/pipe_fs_i.h>
+ #include <linux/swap.h>
+ #include <linux/splice.h>
++#ifdef CONFIG_MIPS
++#include <asm/cacheflush.h>
++#endif
+
+ MODULE_ALIAS_MISCDEV(FUSE_MINOR);
+ MODULE_ALIAS("devname:fuse");
+@@ -655,6 +658,9 @@ static int fuse_copy_fill(struct fuse_co
+ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
+ {
+ unsigned ncpy = min(*size, cs->len);
++#ifdef CONFIG_MIPS
++ __flush_cache_all();
++#endif
+ if (val) {
+ if (cs->write)
+ memcpy(cs->buf, *val, ncpy);
diff --git a/target/linux/generic/patches-3.3/310-arm_module_unresolved_weak_sym.patch b/target/linux/generic/patches-3.3/310-arm_module_unresolved_weak_sym.patch
new file mode 100644
index 000000000..d1eba5503
--- /dev/null
+++ b/target/linux/generic/patches-3.3/310-arm_module_unresolved_weak_sym.patch
@@ -0,0 +1,13 @@
+--- a/arch/arm/kernel/module.c
++++ b/arch/arm/kernel/module.c
+@@ -81,6 +81,10 @@ apply_relocate(Elf32_Shdr *sechdrs, cons
+ return -ENOEXEC;
+ }
+
++ if ((IS_ERR_VALUE(sym->st_value) || !sym->st_value) &&
++ ELF_ST_BIND(sym->st_info) == STB_WEAK)
++ continue;
++
+ loc = dstsec->sh_addr + rel->r_offset;
+
+ switch (ELF32_R_TYPE(rel->r_info)) {
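
The ARM hunk above lets apply_relocate() skip relocations whose symbol is
weak and undefined (or carries an error value) instead of rejecting the whole
module. The usual pattern that benefits is an optional hook referenced
through a weak declaration, roughly as in this sketch (the hook name is
invented; when the symbol stays undefined it resolves to NULL and the call is
simply skipped at run time):

#include <stdio.h>

extern void optional_trace_hook(unsigned long addr) __attribute__((weak));

int main(void)
{
        if (optional_trace_hook)
                optional_trace_hook(42);
        else
                printf("hook not present, continuing\n");
        return 0;
}
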
diff --git a/target/linux/generic/patches-3.3/320-ppc4xx_optimization.patch b/target/linux/generic/patches-3.3/320-ppc4xx_optimization.patch
new file mode 100644
index 000000000..3f67f1b6c
--- /dev/null
+++ b/target/linux/generic/patches-3.3/320-ppc4xx_optimization.patch
@@ -0,0 +1,31 @@
+Upstream doesn't optimize the kernel and bootwrappers for ppc44x because
+they still want to support gcc 3.3 -- well, we don't.
+
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -130,7 +130,8 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
+ KBUILD_CFLAGS += -mno-sched-epilog
+ endif
+
+-cpu-as-$(CONFIG_4xx) += -Wa,-m405
++cpu-as-$(CONFIG_40x) += -Wa,-m405
++cpu-as-$(CONFIG_44x) += -Wa,-m440
+ cpu-as-$(CONFIG_ALTIVEC) += -Wa,-maltivec
+ cpu-as-$(CONFIG_E500) += -Wa,-me500
+ cpu-as-$(CONFIG_E200) += -Wa,-me200
+--- a/arch/powerpc/boot/Makefile
++++ b/arch/powerpc/boot/Makefile
+@@ -38,10 +38,10 @@ BOOTCFLAGS += -I$(obj) -I$(srctree)/$(ob
+ DTC_FLAGS ?= -p 1024
+
+ $(obj)/4xx.o: BOOTCFLAGS += -mcpu=405
+-$(obj)/ebony.o: BOOTCFLAGS += -mcpu=405
++$(obj)/ebony.o: BOOTCFLAGS += -mcpu=440
+ $(obj)/cuboot-hotfoot.o: BOOTCFLAGS += -mcpu=405
+-$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
+-$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
++$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=440
++$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=440
+ $(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
+ $(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
+ $(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405
diff --git a/target/linux/generic/patches-3.3/321-powerpc_crtsavres_prereq.patch b/target/linux/generic/patches-3.3/321-powerpc_crtsavres_prereq.patch
new file mode 100644
index 000000000..941aa909d
--- /dev/null
+++ b/target/linux/generic/patches-3.3/321-powerpc_crtsavres_prereq.patch
@@ -0,0 +1,10 @@
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -94,7 +94,6 @@ else
+ endif
+ endif
+
+-KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
+
+ ifeq ($(CONFIG_TUNE_CELL),y)
+ KBUILD_CFLAGS += $(call cc-option,-mtune=cell)
diff --git a/target/linux/generic/patches-3.3/322-ppc4xx-crypto-compile-fix.patch b/target/linux/generic/patches-3.3/322-ppc4xx-crypto-compile-fix.patch
new file mode 100644
index 000000000..226e41154
--- /dev/null
+++ b/target/linux/generic/patches-3.3/322-ppc4xx-crypto-compile-fix.patch
@@ -0,0 +1,10 @@
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -19,6 +19,7 @@
+ */
+
+ #include <linux/kernel.h>
++#include <linux/module.h>
+ #include <linux/interrupt.h>
+ #include <linux/spinlock_types.h>
+ #include <linux/random.h>
diff --git a/target/linux/generic/patches-3.3/330-mips-add-crash-and-kdump-support.patch b/target/linux/generic/patches-3.3/330-mips-add-crash-and-kdump-support.patch
new file mode 100644
index 000000000..7a79c4144
--- /dev/null
+++ b/target/linux/generic/patches-3.3/330-mips-add-crash-and-kdump-support.patch
@@ -0,0 +1,616 @@
+From eee16330c9de9adf7880cce9f1d32e13f89706bb Mon Sep 17 00:00:00 2001
+From: Wu Zhangjin <wuzhangjin@gmail.com>
+Date: Tue, 11 Jan 2011 13:16:47 +0000
+Subject: MIPS: Add crash and kdump support
+
+From: http://patchwork.linux-mips.org/patch/1025/
+
+Hello folks,
+
+Please find here MIPS crash and kdump patches.
+This is a patch set of 3 patches:
+1. generic MIPS changes (kernel);
+2. MIPS Cavium Octeon board kexec/kdump code (kernel);
+3. Kexec user space MIPS changes.
+
+Patches were tested on the latest linux-mips@ git kernel and the latest
+kexec-tools git on Cavium Octeon 50xx board.
+
+I also got the same code working on RMI XLR/XLS boards for both
+mips32 and mips64 kernels.
+
+Best regards,
+Maxim Uvarov.
+
+------
+[ Zhangjin: Several trivial building failures have been fixed.
+
+Note: the 2nd patch cannot be cleanly applied, but may be a good
+reference for other board development:
+
+ + MIPS Cavium Octeon board kexec,kdump support
+ http://patchwork.linux-mips.org/patch/1026/
+
+And the 3rd patch has already been merged into the mainline kexec-tools:
+
+ + some kexec MIPS improvements
+ http://patchwork.linux-mips.org/patch/1027/
+
+kexec-tools is available here:
+
+ + http://horms.net/projects/kexec/
+ git://git.kernel.org/pub/scm/utils/kernel/kexec/kexec-tools.git
+]
+Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
+---
+(limited to 'arch/mips/kernel')
+
+--- a/arch/mips/kernel/Makefile
++++ b/arch/mips/kernel/Makefile
+@@ -97,7 +97,8 @@ obj-$(CONFIG_I8253) += i8253.o
+
+ obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
+
+-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
++obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
++obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+ obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
+ obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
+--- /dev/null
++++ b/arch/mips/kernel/crash.c
+@@ -0,0 +1,75 @@
++#include <linux/kernel.h>
++#include <linux/smp.h>
++#include <linux/reboot.h>
++#include <linux/kexec.h>
++#include <linux/bootmem.h>
++#include <linux/crash_dump.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/irq.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++
++#ifdef CONFIG_CRASH_DUMP
++unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
++#endif
++
++/* This keeps track of which cpu is crashing. */
++int crashing_cpu = -1;
++static cpumask_t cpus_in_crash = CPU_MASK_NONE;
++
++#ifdef CONFIG_SMP
++void crash_shutdown_secondary(void *ignore)
++{
++ struct pt_regs *regs;
++ int cpu = smp_processor_id();
++
++ regs = task_pt_regs(current);
++
++ if (!cpu_online(cpu))
++ return;
++
++ local_irq_disable();
++ if (!cpu_isset(cpu, cpus_in_crash))
++ crash_save_cpu(regs, cpu);
++ cpu_set(cpu, cpus_in_crash);
++
++ while (!atomic_read(&kexec_ready_to_reboot))
++ cpu_relax();
++ relocated_kexec_smp_wait(NULL);
++ /* NOTREACHED */
++}
++
++static void crash_kexec_prepare_cpus(void)
++{
++ unsigned int msecs;
++
++ unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
++
++ dump_send_ipi(crash_shutdown_secondary);
++ smp_wmb();
++
++ /*
++ * The crash CPU sends an IPI and waits up to 10 seconds
++ * for the other CPUs to respond.
++ */
++ printk(KERN_EMERG "Sending IPI to other cpus...\n");
++ msecs = 10000;
++ while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
++ cpu_relax();
++ mdelay(1);
++ }
++}
++
++#else
++static void crash_kexec_prepare_cpus(void) {}
++#endif
++
++void default_machine_crash_shutdown(struct pt_regs *regs)
++{
++ local_irq_disable();
++ crashing_cpu = smp_processor_id();
++ crash_save_cpu(regs, crashing_cpu);
++ crash_kexec_prepare_cpus();
++ cpu_set(crashing_cpu, cpus_in_crash);
++}
+--- /dev/null
++++ b/arch/mips/kernel/crash_dump.c
+@@ -0,0 +1,86 @@
++#include <linux/highmem.h>
++#include <linux/bootmem.h>
++#include <linux/crash_dump.h>
++#include <asm/uaccess.h>
++
++#ifdef CONFIG_PROC_VMCORE
++static int __init parse_elfcorehdr(char *p)
++{
++ if (p)
++ elfcorehdr_addr = memparse(p, &p);
++ return 1;
++}
++__setup("elfcorehdr=", parse_elfcorehdr);
++#endif
++
++static int __init parse_savemaxmem(char *p)
++{
++ if (p)
++ saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;
++
++ return 1;
++}
++__setup("savemaxmem=", parse_savemaxmem);
++
++
++static void *kdump_buf_page;
++
++/**
++ * copy_oldmem_page - copy one page from "oldmem"
++ * @pfn: page frame number to be copied
++ * @buf: target memory address for the copy; this can be in kernel address
++ * space or user address space (see @userbuf)
++ * @csize: number of bytes to copy
++ * @offset: offset in bytes into the page (based on pfn) to begin the copy
++ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
++ * otherwise @buf is in kernel address space, use memcpy().
++ *
++ * Copy a page from "oldmem". For this page, there is no pte mapped
++ * in the current kernel.
++ *
++ * Calling copy_to_user() in atomic context is not desirable. Hence first
++ * copying the data to a pre-allocated kernel page and then copying to user
++ * space in non-atomic context.
++ */
++ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
++ size_t csize, unsigned long offset, int userbuf)
++{
++ void *vaddr;
++
++ if (!csize)
++ return 0;
++
++ vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
++
++ if (!userbuf) {
++ memcpy(buf, (vaddr + offset), csize);
++ kunmap_atomic(vaddr, KM_PTE0);
++ } else {
++ if (!kdump_buf_page) {
++ printk(KERN_WARNING "Kdump: Kdump buffer page not"
++ " allocated\n");
++ return -EFAULT;
++ }
++ copy_page(kdump_buf_page, vaddr);
++ kunmap_atomic(vaddr, KM_PTE0);
++ if (copy_to_user(buf, (kdump_buf_page + offset), csize))
++ return -EFAULT;
++ }
++
++ return csize;
++}
++
++static int __init kdump_buf_page_init(void)
++{
++ int ret = 0;
++
++ kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
++ if (!kdump_buf_page) {
++ printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
++ " page\n");
++ ret = -ENOMEM;
++ }
++
++ return ret;
++}
++arch_initcall(kdump_buf_page_init);
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -19,9 +19,19 @@ extern const size_t relocate_new_kernel_
+ extern unsigned long kexec_start_address;
+ extern unsigned long kexec_indirection_page;
+
++int (*_machine_kexec_prepare)(struct kimage *) = NULL;
++void (*_machine_kexec_shutdown)(void) = NULL;
++void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
++#ifdef CONFIG_SMP
++void (*relocated_kexec_smp_wait) (void *);
++atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
++#endif
++
+ int
+ machine_kexec_prepare(struct kimage *kimage)
+ {
++ if (_machine_kexec_prepare)
++ return _machine_kexec_prepare(kimage);
+ return 0;
+ }
+
+@@ -33,11 +43,17 @@ machine_kexec_cleanup(struct kimage *kim
+ void
+ machine_shutdown(void)
+ {
++ if (_machine_kexec_shutdown)
++ _machine_kexec_shutdown();
+ }
+
+ void
+ machine_crash_shutdown(struct pt_regs *regs)
+ {
++ if (_machine_crash_shutdown)
++ _machine_crash_shutdown(regs);
++ else
++ default_machine_crash_shutdown(regs);
+ }
+
+ typedef void (*noretfun_t)(void) __attribute__((noreturn));
+@@ -52,7 +68,9 @@ machine_kexec(struct kimage *image)
+ reboot_code_buffer =
+ (unsigned long)page_address(image->control_code_page);
+
+- kexec_start_address = (unsigned long) phys_to_virt(image->start);
++ kexec_start_address =
++ (unsigned long) phys_to_virt(image->start);
++
+ kexec_indirection_page =
+ (unsigned long) phys_to_virt(image->head & PAGE_MASK);
+
+@@ -63,7 +81,7 @@ machine_kexec(struct kimage *image)
+ * The generic kexec code builds a page list with physical
+ * addresses. they are directly accessible through KSEG0 (or
+ * CKSEG0 or XPHYS if on 64bit system), hence the
+- * pys_to_virt() call.
++ * phys_to_virt() call.
+ */
+ for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE);
+ ptr = (entry & IND_INDIRECTION) ?
+@@ -81,5 +99,13 @@ machine_kexec(struct kimage *image)
+ printk("Will call new kernel at %08lx\n", image->start);
+ printk("Bye ...\n");
+ __flush_cache_all();
++#ifdef CONFIG_SMP
++ /* All secondary cpus now may jump to kexec_wait cycle */
++ relocated_kexec_smp_wait = reboot_code_buffer +
++ (void *)(kexec_smp_wait - relocate_new_kernel);
++ smp_wmb();
++ atomic_set(&kexec_ready_to_reboot, 1);
++#endif
+ ((noretfun_t) reboot_code_buffer)();
+ }
++
+--- a/arch/mips/kernel/relocate_kernel.S
++++ b/arch/mips/kernel/relocate_kernel.S
+@@ -15,6 +15,11 @@
+ #include <asm/addrspace.h>
+
+ LEAF(relocate_new_kernel)
++ PTR_L a0, arg0
++ PTR_L a1, arg1
++ PTR_L a2, arg2
++ PTR_L a3, arg3
++
+ PTR_L s0, kexec_indirection_page
+ PTR_L s1, kexec_start_address
+
+@@ -26,7 +31,6 @@ process_entry:
+ and s3, s2, 0x1
+ beq s3, zero, 1f
+ and s4, s2, ~0x1 /* store destination addr in s4 */
+- move a0, s4
+ b process_entry
+
+ 1:
+@@ -60,23 +64,100 @@ copy_word:
+ b process_entry
+
+ done:
++#ifdef CONFIG_SMP
++ /* Resetting kexec_flag signals to the other CPUs that the kernel
++ has been moved to its location. Note - we need the relocated address
++ of kexec_flag. */
++
++ bal 1f
++ 1: move t1,ra;
++ PTR_LA t2,1b
++ PTR_LA t0,kexec_flag
++ PTR_SUB t0,t0,t2;
++ PTR_ADD t0,t1,t0;
++ LONG_S zero,(t0)
++#endif
++
++ sync
+ /* jump to kexec_start_address */
+ j s1
+ END(relocate_new_kernel)
+
+-kexec_start_address:
+- EXPORT(kexec_start_address)
++#ifdef CONFIG_SMP
++/*
++ * Other CPUs should wait until code is relocated and
++ * then start at entry (?) point.
++ */
++LEAF(kexec_smp_wait)
++ PTR_L a0, s_arg0
++ PTR_L a1, s_arg1
++ PTR_L a2, s_arg2
++ PTR_L a3, s_arg3
++ PTR_L s1, kexec_start_address
++
++ /* Non-relocated address works for args and kexec_start_address ( old
++ * kernel is not overwritten). But we need relocated address of
++ * kexec_flag.
++ */
++
++ bal 1f
++1: move t1,ra;
++ PTR_LA t2,1b
++ PTR_LA t0,kexec_flag
++ PTR_SUB t0,t0,t2;
++ PTR_ADD t0,t1,t0;
++
++1: LONG_L s0, (t0)
++ bne s0, zero,1b
++
++ sync
++ j s1
++ END(kexec_smp_wait)
++#endif
++
++#ifdef __mips64
++ /* all PTR's must be aligned to 8 byte in 64-bit mode */
++ .align 3
++#endif
++
++/* All parameters to the new kernel are passed in registers a0-a3.
++ * kexec_args[0..3] are used to prepare the register values.
++ */
++
++EXPORT(kexec_args)
++arg0: PTR 0x0
++arg1: PTR 0x0
++arg2: PTR 0x0
++arg3: PTR 0x0
++ .size kexec_args,PTRSIZE*4
++
++#ifdef CONFIG_SMP
++/*
++ * Secondary CPUs may have different kernel parameters in
++ * their registers a0-a3. secondary_kexec_args[0..3] are used
++ * to prepare register values.
++ */
++EXPORT(secondary_kexec_args)
++s_arg0: PTR 0x0
++s_arg1: PTR 0x0
++s_arg2: PTR 0x0
++s_arg3: PTR 0x0
++ .size secondary_kexec_args,PTRSIZE*4
++kexec_flag:
++ LONG 0x1
++
++#endif
++
++EXPORT(kexec_start_address)
+ PTR 0x0
+ .size kexec_start_address, PTRSIZE
+
+-kexec_indirection_page:
+- EXPORT(kexec_indirection_page)
++EXPORT(kexec_indirection_page)
+ PTR 0
+ .size kexec_indirection_page, PTRSIZE
+
+ relocate_new_kernel_end:
+
+-relocate_new_kernel_size:
+- EXPORT(relocate_new_kernel_size)
++EXPORT(relocate_new_kernel_size)
+ PTR relocate_new_kernel_end - relocate_new_kernel
+ .size relocate_new_kernel_size, PTRSIZE
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -22,6 +22,7 @@
+ #include <linux/console.h>
+ #include <linux/pfn.h>
+ #include <linux/debugfs.h>
++#include <linux/kexec.h>
+
+ #include <asm/addrspace.h>
+ #include <asm/bootinfo.h>
+@@ -523,12 +524,62 @@ static void __init arch_mem_init(char **
+ }
+
+ bootmem_init();
++#ifdef CONFIG_KEXEC
++ if (crashk_res.start != crashk_res.end)
++ reserve_bootmem(crashk_res.start,
++ crashk_res.end - crashk_res.start + 1,
++ BOOTMEM_DEFAULT);
++#endif
+ device_tree_init();
+ sparse_init();
+ plat_swiotlb_setup();
+ paging_init();
+ }
+
++#ifdef CONFIG_KEXEC
++static inline unsigned long long get_total_mem(void)
++{
++ unsigned long long total;
++ total = max_pfn - min_low_pfn;
++ return total << PAGE_SHIFT;
++}
++
++static void __init mips_parse_crashkernel(void)
++{
++ unsigned long long total_mem;
++ unsigned long long crash_size, crash_base;
++ int ret;
++
++ total_mem = get_total_mem();
++ ret = parse_crashkernel(boot_command_line, total_mem,
++ &crash_size, &crash_base);
++ if (ret != 0 || crash_size <= 0)
++ return;
++
++ crashk_res.start = crash_base;
++ crashk_res.end = crash_base + crash_size - 1;
++}
++static void __init request_crashkernel(struct resource *res)
++{
++ int ret;
++
++ ret = request_resource(res, &crashk_res);
++ if (!ret)
++ printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
++ "for crashkernel\n",
++ (unsigned long)((crashk_res.end -
++ crashk_res.start + 1) >> 20),
++ (unsigned long)(crashk_res.start >> 20));
++}
++#else
++static void __init mips_parse_crashkernel(void)
++{
++}
++static void __init request_crashkernel(struct resource *res)
++{
++}
++#endif
++
+ static void __init resource_init(void)
+ {
+ int i;
+@@ -544,6 +595,8 @@ static void __init resource_init(void)
+ /*
+ * Request address space for all standard RAM.
+ */
++ mips_parse_crashkernel();
++
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ struct resource *res;
+ unsigned long start, end;
+@@ -580,6 +633,7 @@ static void __init resource_init(void)
+ */
+ request_resource(res, &code_resource);
+ request_resource(res, &data_resource);
++ request_crashkernel(res);
+ }
+ }
+
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -433,3 +433,21 @@ void flush_tlb_one(unsigned long vaddr)
+
+ EXPORT_SYMBOL(flush_tlb_page);
+ EXPORT_SYMBOL(flush_tlb_one);
++
++#if defined(CONFIG_KEXEC)
++void (*dump_ipi_function_ptr)(void *) = NULL;
++void dump_send_ipi(void (*dump_ipi_callback)(void *))
++{
++ int i;
++ int cpu = smp_processor_id();
++
++ dump_ipi_function_ptr = dump_ipi_callback;
++ smp_mb();
++ for_each_online_cpu(i)
++ if (i != cpu)
++ core_send_ipi(i, SMP_DUMP);
++
++}
++EXPORT_SYMBOL(dump_send_ipi);
++#endif
++
+--- a/arch/mips/include/asm/kexec.h
++++ b/arch/mips/include/asm/kexec.h
+@@ -9,22 +9,45 @@
+ #ifndef _MIPS_KEXEC
+ # define _MIPS_KEXEC
+
++#include <asm/stacktrace.h>
++
++extern unsigned long long elfcorehdr_addr;
++
+ /* Maximum physical address we can use pages from */
+ #define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000)
+ /* Maximum address we can reach in physical address mode */
+ #define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000)
+ /* Maximum address we can use for the control code buffer */
+ #define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000)
+-
+-#define KEXEC_CONTROL_PAGE_SIZE 4096
++/* Reserve 3*4096 bytes for board-specific info */
++#define KEXEC_CONTROL_PAGE_SIZE (4096 + 3*4096)
+
+ /* The native architecture */
+ #define KEXEC_ARCH KEXEC_ARCH_MIPS
++#define MAX_NOTE_BYTES 1024
+
+ static inline void crash_setup_regs(struct pt_regs *newregs,
+- struct pt_regs *oldregs)
++ struct pt_regs *oldregs)
+ {
+- /* Dummy implementation for now */
++ if (oldregs)
++ memcpy(newregs, oldregs, sizeof(*newregs));
++ else
++ prepare_frametrace(newregs);
+ }
+
++#ifdef CONFIG_KEXEC
++struct kimage;
++extern unsigned long kexec_args[4];
++extern int (*_machine_kexec_prepare)(struct kimage *);
++extern void (*_machine_kexec_shutdown)(void);
++extern void (*_machine_crash_shutdown)(struct pt_regs *regs);
++extern void default_machine_crash_shutdown(struct pt_regs *regs);
++#ifdef CONFIG_SMP
++extern const unsigned char kexec_smp_wait[];
++extern unsigned long secondary_kexec_args[4];
++extern void (*relocated_kexec_smp_wait) (void *);
++extern atomic_t kexec_ready_to_reboot;
++#endif
++#endif
++
+ #endif /* !_MIPS_KEXEC */
+--- a/arch/mips/include/asm/smp.h
++++ b/arch/mips/include/asm/smp.h
+@@ -40,6 +40,8 @@ extern int __cpu_logical_map[NR_CPUS];
+ #define SMP_CALL_FUNCTION 0x2
+ /* Octeon - Tell another core to flush its icache */
+ #define SMP_ICACHE_FLUSH 0x4
++/* Used by kexec crashdump to save all cpu's state */
++#define SMP_DUMP 0x8
+
+ extern volatile cpumask_t cpu_callin_map;
+
+@@ -91,4 +93,9 @@ static inline void arch_send_call_functi
+ mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
+ }
+
++extern void core_send_ipi(int cpu, unsigned int action);
++#if defined(CONFIG_KEXEC)
++extern void (*dump_ipi_function_ptr)(void *);
++void dump_send_ipi(void (*dump_ipi_callback)(void *));
++#endif
+ #endif /* __ASM_SMP_H */
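
Besides the crash/vmcore plumbing, the patch above turns
machine_kexec_prepare(), machine_shutdown() and machine_crash_shutdown() into
thin wrappers around board-settable function pointers. A hypothetical board
hook could then look like the sketch below; only the _machine_* pointers and
default_machine_crash_shutdown() come from the patch, the my_board_* names
are invented:

#include <linux/init.h>
#include <linux/kexec.h>
#include <asm/ptrace.h>

static int my_board_kexec_prepare(struct kimage *image)
{
        /* e.g. sanity-check segments or stash firmware arguments */
        return 0;
}

static void my_board_crash_shutdown(struct pt_regs *regs)
{
        /* quiesce board-specific DMA, then reuse the generic path */
        default_machine_crash_shutdown(regs);
}

static int __init my_board_kexec_setup(void)
{
        _machine_kexec_prepare = my_board_kexec_prepare;
        _machine_crash_shutdown = my_board_crash_shutdown;
        return 0;
}
arch_initcall(my_board_kexec_setup);

On the user-space side the crash region is reserved with the standard
crashkernel= boot parameter that mips_parse_crashkernel() reads, and the
capture kernel is loaded with kexec -p as usual.
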
diff --git a/target/linux/generic/patches-3.3/331-mips-kexec-enhanche-the-support.patch b/target/linux/generic/patches-3.3/331-mips-kexec-enhanche-the-support.patch
new file mode 100644
index 000000000..5ffc2e29b
--- /dev/null
+++ b/target/linux/generic/patches-3.3/331-mips-kexec-enhanche-the-support.patch
@@ -0,0 +1,159 @@
+From 03cd81fbca6b91317ec1a7b3b3c09fb8d08f83a6 Mon Sep 17 00:00:00 2001
+From: Wu Zhangjin <wuzhangjin@gmail.com>
+Date: Tue, 11 Jan 2011 18:42:08 +0000
+Subject: MIPS: Kexec: Enhance the support
+
+Changes:
+ o Print more information in machine_kexec() for debugging
+ E.g. with this information, the kexec_start_address has been found
+ it was wrong with 64bit kernel / o32 kexec-tools. Which must be
+ fixed later.
+ o Link relocate_kernel.S to a section for future extension
+ This allows more functions can be added for the kexec relocation
+ part even written in C. to add code into that section, you just need
+ to mark your function or data with __kexec or
+ __attribute__((__section__(".__kexec.relocate")))
+
+TODO:
+
+1. Make 64bit kernel / o32|n32|64 kexec-tools work
+
+Fix the user-space kexec-tools; it seems the tool only works for 32bit
+machines, so we need to add 64bit support for it. The address of the
+entry point (kexec_start_address) is wrong and makes "kexec -e" fail.
+The real entry point must be read from the new kernel image by the
+user-space kexec-tools, otherwise it will not work. The 64bit support
+tested above is a 64bit kernel with o32 user-space kexec-tools. The root
+cause may be the different definitions of virt_to_phys() and
+phys_to_virt() in the kexec-tools and kernel space for a 64bit system /
+o32 kernel.
+
+Ref: http://www.linux-mips.org/archives/linux-mips/2009-08/msg00149.html
+
+2. Pass the arguments from kexec-tools to the new kernel image
+
+Please refer to: "MIPS: Loongson: Kexec: Pass parameters to new kernel"
+
+Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
+---
+--- a/arch/mips/include/asm/kexec.h
++++ b/arch/mips/include/asm/kexec.h
+@@ -36,6 +36,16 @@ static inline void crash_setup_regs(stru
+ }
+
+ #ifdef CONFIG_KEXEC
++
++#define __kexec __attribute__((__section__(".__kexec.relocate")))
++
++/* The linker tells us where the relocate_new_kernel part is. */
++extern const unsigned char __start___kexec_relocate;
++extern const unsigned char __end___kexec_relocate;
++
++extern unsigned long kexec_start_address;
++extern unsigned long kexec_indirection_page;
++
+ struct kimage;
+ extern unsigned long kexec_args[4];
+ extern int (*_machine_kexec_prepare)(struct kimage *);
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -13,12 +13,6 @@
+ #include <asm/cacheflush.h>
+ #include <asm/page.h>
+
+-extern const unsigned char relocate_new_kernel[];
+-extern const size_t relocate_new_kernel_size;
+-
+-extern unsigned long kexec_start_address;
+-extern unsigned long kexec_indirection_page;
+-
+ int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+ void (*_machine_kexec_shutdown)(void) = NULL;
+ void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+@@ -61,21 +55,34 @@ typedef void (*noretfun_t)(void) __attri
+ void
+ machine_kexec(struct kimage *image)
+ {
++ unsigned long kexec_relocate_size;
+ unsigned long reboot_code_buffer;
+ unsigned long entry;
+ unsigned long *ptr;
+
++ kexec_relocate_size = (unsigned long)(&__end___kexec_relocate) -
++ (unsigned long)(&__start___kexec_relocate);
++ pr_info("kexec_relocate_size = %lu\n", kexec_relocate_size);
++
+ reboot_code_buffer =
+ (unsigned long)page_address(image->control_code_page);
++ pr_info("reboot_code_buffer = %p\n", (void *)reboot_code_buffer);
+
+ kexec_start_address =
+ (unsigned long) phys_to_virt(image->start);
++ pr_info("kexec_start_address(entry point of new kernel) = %p\n",
++ (void *)kexec_start_address);
+
+ kexec_indirection_page =
+ (unsigned long) phys_to_virt(image->head & PAGE_MASK);
++ pr_info("kexec_indirection_page = %p\n",
++ (void *)kexec_indirection_page);
+
+- memcpy((void*)reboot_code_buffer, relocate_new_kernel,
+- relocate_new_kernel_size);
++ memcpy((void *)reboot_code_buffer, &__start___kexec_relocate,
++ kexec_relocate_size);
++
++ pr_info("Copy kexec_relocate section from %p to reboot_code_buffer: %p\n",
++ &__start___kexec_relocate, (void *)reboot_code_buffer);
+
+ /*
+ * The generic kexec code builds a page list with physical
+@@ -96,8 +103,8 @@ machine_kexec(struct kimage *image)
+ */
+ local_irq_disable();
+
+- printk("Will call new kernel at %08lx\n", image->start);
+- printk("Bye ...\n");
++ pr_info("Will call new kernel at %p\n", (void *)kexec_start_address);
++ pr_info("Bye ...\n");
+ __flush_cache_all();
+ #ifdef CONFIG_SMP
+ /* All secondary cpus now may jump to kexec_wait cycle */
+@@ -108,4 +115,3 @@ machine_kexec(struct kimage *image)
+ #endif
+ ((noretfun_t) reboot_code_buffer)();
+ }
+-
+--- a/arch/mips/kernel/relocate_kernel.S
++++ b/arch/mips/kernel/relocate_kernel.S
+@@ -14,6 +14,8 @@
+ #include <asm/stackframe.h>
+ #include <asm/addrspace.h>
+
++ .section .kexec.relocate, "ax"
++
+ LEAF(relocate_new_kernel)
+ PTR_L a0, arg0
+ PTR_L a1, arg1
+@@ -155,9 +157,3 @@ EXPORT(kexec_start_address)
+ EXPORT(kexec_indirection_page)
+ PTR 0
+ .size kexec_indirection_page, PTRSIZE
+-
+-relocate_new_kernel_end:
+-
+-EXPORT(relocate_new_kernel_size)
+- PTR relocate_new_kernel_end - relocate_new_kernel
+- .size relocate_new_kernel_size, PTRSIZE
+--- a/arch/mips/kernel/vmlinux.lds.S
++++ b/arch/mips/kernel/vmlinux.lds.S
+@@ -50,6 +50,10 @@ SECTIONS
+ *(.text.*)
+ *(.fixup)
+ *(.gnu.warning)
++ __start___kexec_relocate = .;
++ KEEP(*(.kexec.relocate))
++ KEEP(*(.__kexec.relocate))
++ __end___kexec_relocate = .;
+ } :text = 0
+ _etext = .; /* End of text section */
+
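
As the description above says, anything tagged __kexec (that is, placed in
the .__kexec.relocate input section) now travels together with
relocate_new_kernel into the reboot code buffer; patch 335 further below uses
exactly this for kexec_argv_buf and kexec_argv. A minimal kernel-context
sketch of the annotation, assuming CONFIG_KEXEC is enabled (the buffer and
helper are invented):

#include <asm/kexec.h>

/* Data that must survive the copy into the reboot code buffer: mark it
 * __kexec so the linker keeps it inside .__kexec.relocate. */
static char kexec_note_buf[64] __kexec;

static void __kexec kexec_note_store(const char *s)
{
        int i;

        /* keep this self-contained: no calls outside the section */
        for (i = 0; s[i] && i < (int)sizeof(kexec_note_buf) - 1; i++)
                kexec_note_buf[i] = s[i];
        kexec_note_buf[i] = '\0';
}
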
diff --git a/target/linux/generic/patches-3.3/332-mips-kexec-init-the-arguments-for-the-new-kernel-image.patch b/target/linux/generic/patches-3.3/332-mips-kexec-init-the-arguments-for-the-new-kernel-image.patch
new file mode 100644
index 000000000..5507dde64
--- /dev/null
+++ b/target/linux/generic/patches-3.3/332-mips-kexec-init-the-arguments-for-the-new-kernel-image.patch
@@ -0,0 +1,52 @@
+From 49d07a29653b1f2c6ae273b3d8fe93d981f43004 Mon Sep 17 00:00:00 2001
+From: Wu Zhangjin <wuzhangjin@gmail.com>
+Date: Wed, 12 Jan 2011 20:59:32 +0000
+Subject: MIPS: Kexec: Init the arguments for the new kernel image
+
+Whenever the kexec-tools pass the command lines to the new kernel image,
+init the arguments as the ones for the 1st kernel image. This fixed the
+booting failure of Kexec on YeeLoong.
+
+Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
+---
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -10,6 +10,7 @@
+ #include <linux/mm.h>
+ #include <linux/delay.h>
+
++#include <asm/bootinfo.h>
+ #include <asm/cacheflush.h>
+ #include <asm/page.h>
+
+@@ -21,9 +22,30 @@ void (*relocated_kexec_smp_wait) (void *
+ atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+ #endif
+
++static void machine_kexec_init_args(void)
++{
++ kexec_args[0] = fw_arg0;
++ kexec_args[1] = fw_arg1;
++ kexec_args[2] = fw_arg2;
++ kexec_args[3] = fw_arg3;
++
++ pr_info("kexec_args[0] (argc): %lu\n", kexec_args[0]);
++ pr_info("kexec_args[1] (argv): %p\n", (void *)kexec_args[1]);
++ pr_info("kexec_args[2] (env ): %p\n", (void *)kexec_args[2]);
++ pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]);
++}
++
+ int
+ machine_kexec_prepare(struct kimage *kimage)
+ {
++ /*
++ * Whenever arguments are passed from kexec-tools, init the arguments as
++ * the original ones to avoid a booting failure.
++ *
++ * This can be overridden by _machine_kexec_prepare().
++ */
++ machine_kexec_init_args();
++
+ if (_machine_kexec_prepare)
+ return _machine_kexec_prepare(kimage);
+ return 0;
diff --git a/target/linux/generic/patches-3.3/333-mips-kexec-get-kernel-parameters-from-kexec-tools.patch b/target/linux/generic/patches-3.3/333-mips-kexec-get-kernel-parameters-from-kexec-tools.patch
new file mode 100644
index 000000000..9da936314
--- /dev/null
+++ b/target/linux/generic/patches-3.3/333-mips-kexec-get-kernel-parameters-from-kexec-tools.patch
@@ -0,0 +1,88 @@
+From 240c76841b26f1b09aaced33414ee1d08b6454cf Mon Sep 17 00:00:00 2001
+From: Wu Zhangjin <wuzhangjin@gmail.com>
+Date: Sat, 15 Jan 2011 12:46:03 +0000
+Subject: MIPS: Get kernel parameters from kexec-tools
+
+Before, we simply used the command lines from the original bootloader,
+but that is not convenient. Now, we accept the kernel parameters from the
+--command-line or --append option of the kexec-tools. But if no
+--command-line or --append option is given, we will fall back to using the
+ones from the original bootloader.
+
+Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
+---
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -13,6 +13,7 @@
+ #include <asm/bootinfo.h>
+ #include <asm/cacheflush.h>
+ #include <asm/page.h>
++#include <asm/uaccess.h>
+
+ int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+ void (*_machine_kexec_shutdown)(void) = NULL;
+@@ -35,6 +36,56 @@ static void machine_kexec_init_args(void
+ pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]);
+ }
+
++#define ARGV_MAX_ARGS (COMMAND_LINE_SIZE / 15)
++
++int machine_kexec_pass_args(struct kimage *image)
++{
++ int i, argc = 0;
++ char *bootloader = "kexec";
++ int *kexec_argv = (int *)kexec_args[1];
++
++ for (i = 0; i < image->nr_segments; i++) {
++ if (!strncmp(bootloader, (char *)image->segment[i].buf,
++ strlen(bootloader))) {
++ /*
++ * convert command line string to array
++ * of parameters (as bootloader does).
++ */
++ /*
++ * Note: we do treat the 1st string "kexec" as an
++ * argument ;-) so, argc here is 1.
++ */
++ char *str = (char *)image->segment[i].buf;
++ char *ptr = strchr(str, ' ');
++ char *kbuf = (char *)kexec_argv[0];
++ /* Whenever --command-line or --append used, "kexec" is copied */
++ argc = 1;
++ /* Parse the offset */
++ while (ptr && (ARGV_MAX_ARGS > argc)) {
++ *ptr = '\0';
++ if (ptr[1] != ' ' && ptr[1] != '\0') {
++ int offt = (int)(ptr - str + 1);
++ kexec_argv[argc] = (int)kbuf + offt;
++ argc++;
++ }
++ ptr = strchr(ptr + 1, ' ');
++ }
++ if (argc > 1) {
++ /* Copy to kernel space */
++ copy_from_user(kbuf, (char *)image->segment[i].buf, image->segment[i].bufsz);
++ fw_arg0 = kexec_args[0] = argc;
++ }
++ break;
++ }
++ }
++
++ pr_info("argc = %lu\n", kexec_args[0]);
++ for (i = 0; i < kexec_args[0]; i++)
++ pr_info("argv[%d] = %p, %s\n", i, (char *)kexec_argv[i], (char *)kexec_argv[i]);
++
++ return 0;
++}
++
+ int
+ machine_kexec_prepare(struct kimage *kimage)
+ {
+@@ -45,6 +96,7 @@ machine_kexec_prepare(struct kimage *kim
+ * This can be overrided by _machine_kexec_prepare().
+ */
+ machine_kexec_init_args();
++ machine_kexec_pass_args(kimage);
+
+ if (_machine_kexec_prepare)
+ return _machine_kexec_prepare(kimage);
diff --git a/target/linux/generic/patches-3.3/334-mips-fix-compiling-failure-of-relocate_kernel.patch b/target/linux/generic/patches-3.3/334-mips-fix-compiling-failure-of-relocate_kernel.patch
new file mode 100644
index 000000000..46a7395d5
--- /dev/null
+++ b/target/linux/generic/patches-3.3/334-mips-fix-compiling-failure-of-relocate_kernel.patch
@@ -0,0 +1,83 @@
+From 4aded085fa0057a9a1e1dcec631f950307360c1f Mon Sep 17 00:00:00 2001
+From: Wu Zhangjin <wuzhangjin@gmail.com>
+Date: Tue, 11 Jan 2011 13:46:19 +0000
+Subject: MIPS: Fix compiling failure of relocate_kernel.S
+
+The following error is fixed with the help of <asm/asm_nosec.h>. Since
+this file needs to put different symbols in the same section, the
+original LEAF, NESTED and EXPORT (without explicit section indication)
+must be used; <asm/asm_nosec.h> provides them.
+
+arch/mips/kernel/relocate_kernel.S: Assembler messages:
+arch/mips/kernel/relocate_kernel.S:162: Error: operation combines symbols in different segments
+
+Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
+---
+(limited to 'arch/mips/kernel')
+
+--- a/arch/mips/kernel/relocate_kernel.S
++++ b/arch/mips/kernel/relocate_kernel.S
+@@ -7,6 +7,7 @@
+ */
+
+ #include <asm/asm.h>
++#include <asm/asm_nosec.h>
+ #include <asm/asmmacro.h>
+ #include <asm/regdef.h>
+ #include <asm/page.h>
+--- /dev/null
++++ b/arch/mips/include/asm/asm_nosec.h
+@@ -0,0 +1,53 @@
++/*
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1995, 1996, 1997, 1999, 2001 by Ralf Baechle
++ * Copyright (C) 1999 by Silicon Graphics, Inc.
++ * Copyright (C) 2001 MIPS Technologies, Inc.
++ * Copyright (C) 2002 Maciej W. Rozycki
++ * Copyright (C) 2010 Wu Zhangjin <wuzhangjin@gmail.com>
++ *
++ * Derive from <asm/asm.h>
++ *
++ * Override the macros without -ffunction-sections and -fdata-sections support.
++ * If several functions or data must be put in the same section, please include
++ * this header file after the <asm/asm.h> to override the generic definition.
++ */
++
++#ifndef __ASM_ASM_NOSEC_H
++#define __ASM_ASM_NOSEC_H
++
++#undef LEAF
++#undef NESTED
++#undef EXPORT
++
++/*
++ * LEAF - declare leaf routine
++ */
++#define LEAF(symbol) \
++ .globl symbol; \
++ .align 2; \
++ .type symbol, @function; \
++ .ent symbol, 0; \
++symbol: .frame sp, 0, ra
++
++/*
++ * NESTED - declare nested routine entry point
++ */
++#define NESTED(symbol, framesize, rpc) \
++ .globl symbol; \
++ .align 2; \
++ .type symbol, @function; \
++ .ent symbol, 0; \
++symbol: .frame sp, framesize, rpc
++
++/*
++ * EXPORT - export definition of symbol
++ */
++#define EXPORT(symbol) \
++ .globl symbol; \
++symbol:
++
++#endif /* __ASM_ASM_NOSEC_H */
diff --git a/target/linux/generic/patches-3.3/335-mips-kexec-cleanup-kexec-tools-parameter-handling.patch b/target/linux/generic/patches-3.3/335-mips-kexec-cleanup-kexec-tools-parameter-handling.patch
new file mode 100644
index 000000000..f7a8eed00
--- /dev/null
+++ b/target/linux/generic/patches-3.3/335-mips-kexec-cleanup-kexec-tools-parameter-handling.patch
@@ -0,0 +1,186 @@
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -23,67 +23,104 @@ void (*relocated_kexec_smp_wait) (void *
+ atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+ #endif
+
+-static void machine_kexec_init_args(void)
++#define KEXEC_MIPS_ARGV_BUF_SIZE COMMAND_LINE_SIZE
++#define KEXEC_MIPS_ARGV_MAX_ARGS (COMMAND_LINE_SIZE / 15)
++
++char kexec_argv_buf[KEXEC_MIPS_ARGV_BUF_SIZE] __kexec;
++char *kexec_argv[KEXEC_MIPS_ARGV_MAX_ARGS] __kexec;
++
++static void
++machine_kexec_print_args(void)
+ {
+- kexec_args[0] = fw_arg0;
+- kexec_args[1] = fw_arg1;
+- kexec_args[2] = fw_arg2;
+- kexec_args[3] = fw_arg3;
++ int i;
+
+ pr_info("kexec_args[0] (argc): %lu\n", kexec_args[0]);
+ pr_info("kexec_args[1] (argv): %p\n", (void *)kexec_args[1]);
+ pr_info("kexec_args[2] (env ): %p\n", (void *)kexec_args[2]);
+ pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]);
+-}
+
+-#define ARGV_MAX_ARGS (COMMAND_LINE_SIZE / 15)
++ for (i = 0; i < kexec_args[0]; i++)
++ pr_info("kexec_argv[%d] = %p, %s\n", i,
++ (char *)kexec_argv[i], (char *)kexec_argv[i]);
++}
+
+-int machine_kexec_pass_args(struct kimage *image)
++static void
++machine_kexec_init_argv(struct kimage *image)
+ {
+- int i, argc = 0;
+- char *bootloader = "kexec";
+- int *kexec_argv = (int *)kexec_args[1];
++ void __user *buf = NULL;
++ size_t bufsz;
++ size_t size;
++ int i;
+
++ bufsz = 0;
+ for (i = 0; i < image->nr_segments; i++) {
+- if (!strncmp(bootloader, (char *)image->segment[i].buf,
+- strlen(bootloader))) {
+- /*
+- * convert command line string to array
+- * of parameters (as bootloader does).
+- */
+- /*
+- * Note: we do treat the 1st string "kexec" as an
+- * argument ;-) so, argc here is 1.
+- */
+- char *str = (char *)image->segment[i].buf;
+- char *ptr = strchr(str, ' ');
+- char *kbuf = (char *)kexec_argv[0];
+- /* Whenever --command-line or --append used, "kexec" is copied */
+- argc = 1;
+- /* Parse the offset */
+- while (ptr && (ARGV_MAX_ARGS > argc)) {
+- *ptr = '\0';
+- if (ptr[1] != ' ' && ptr[1] != '\0') {
+- int offt = (int)(ptr - str + 1);
+- kexec_argv[argc] = (int)kbuf + offt;
+- argc++;
+- }
+- ptr = strchr(ptr + 1, ' ');
+- }
+- if (argc > 1) {
+- /* Copy to kernel space */
+- copy_from_user(kbuf, (char *)image->segment[i].buf, image->segment[i].bufsz);
+- fw_arg0 = kexec_args[0] = argc;
+- }
+- break;
++ struct kexec_segment *seg;
++
++ seg = &image->segment[i];
++ if (seg->bufsz < 6)
++ continue;
++
++ if (strncmp((char *) seg->buf, "kexec", 5))
++ continue;
++
++ /* don't copy "kexec" */
++ buf = seg->buf + 5;
++ bufsz = seg->bufsz - 5;
++ break;
++ }
++
++ if (!buf)
++ return;
++
++ size = KEXEC_MIPS_ARGV_BUF_SIZE - 1;
++ size = min(size, bufsz);
++ if (size < bufsz)
++ pr_warn("kexec command line truncated to %zd bytes\n", size);
++
++ /* Copy to kernel space */
++ copy_from_user(kexec_argv_buf, buf, size);
++}
++
++static void
++machine_kexec_parse_argv(struct kimage *image)
++{
++ char *reboot_code_buffer;
++ int reloc_delta;
++ char *ptr;
++ int argc;
++ int i;
++
++ ptr = kexec_argv_buf;
++ argc = 0;
++
++ /*
++ * convert command line string to array of parameters
++ * (as bootloader does).
++ */
++ while (ptr && *ptr && (KEXEC_MIPS_ARGV_MAX_ARGS > argc)) {
++ if (*ptr == ' ') {
++ *ptr++ = '\0';
++ continue;
+ }
++
++ kexec_argv[argc++] = ptr;
++ ptr = strchr(ptr, ' ');
+ }
+
+- pr_info("argc = %lu\n", kexec_args[0]);
+- for (i = 0; i < kexec_args[0]; i++)
+- pr_info("argv[%d] = %p, %s\n", i, (char *)kexec_argv[i], (char *)kexec_argv[i]);
++ if (!argc)
++ return;
+
+- return 0;
++ kexec_args[0] = argc;
++ kexec_args[1] = (unsigned long)kexec_argv;
++ kexec_args[2] = 0;
++ kexec_args[3] = 0;
++
++ reboot_code_buffer = page_address(image->control_code_page);
++ reloc_delta = reboot_code_buffer - (char *) &__start___kexec_relocate;
++
++ kexec_args[1] += reloc_delta;
++ for (i = 0; i < argc; i++)
++ kexec_argv[i] += reloc_delta;
+ }
+
+ int
+@@ -95,8 +132,14 @@ machine_kexec_prepare(struct kimage *kim
+ *
+ * This can be overrided by _machine_kexec_prepare().
+ */
+- machine_kexec_init_args();
+- machine_kexec_pass_args(kimage);
++
++ kexec_args[0] = fw_arg0;
++ kexec_args[1] = fw_arg1;
++ kexec_args[2] = fw_arg2;
++ kexec_args[3] = fw_arg3;
++
++ machine_kexec_init_argv(kimage);
++ machine_kexec_parse_argv(kimage);
+
+ if (_machine_kexec_prepare)
+ return _machine_kexec_prepare(kimage);
+@@ -152,11 +195,13 @@ machine_kexec(struct kimage *image)
+ pr_info("kexec_indirection_page = %p\n",
+ (void *)kexec_indirection_page);
+
++ pr_info("Copy kexec_relocate section from %p to reboot_code_buffer: %p\n",
++ &__start___kexec_relocate, (void *)reboot_code_buffer);
++
+ memcpy((void *)reboot_code_buffer, &__start___kexec_relocate,
+ kexec_relocate_size);
+
+- pr_info("Copy kexec_relocate section from %p to reboot_code_buffer: %p\n",
+- &__start___kexec_relocate, (void *)reboot_code_buffer);
++ machine_kexec_print_args();
+
+ /*
+ * The generic kexec code builds a page list with physical
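For readers following the new argv handling, the loop in machine_kexec_parse_argv() is plain space-splitting over the buffer that machine_kexec_init_argv() copied in. Below is a minimal, stand-alone user-space sketch of the same tokenizing logic; MAX_ARGS and the sample command line are invented for illustration (the kernel code uses KEXEC_MIPS_ARGV_MAX_ARGS and the buffer copied from the "kexec" segment), and the final relocation of the pointers by reloc_delta is omitted.

#include <stdio.h>
#include <string.h>

#define MAX_ARGS 8	/* stand-in for KEXEC_MIPS_ARGV_MAX_ARGS */

int main(void)
{
	/* what machine_kexec_init_argv() would have copied in, minus the
	 * leading "kexec" that the patch strips off */
	char buf[] = " console=ttyS0,115200  root=/dev/mtdblock2 rootfstype=squashfs";
	char *argv[MAX_ARGS];
	char *ptr = buf;
	int argc = 0, i;

	while (ptr && *ptr && argc < MAX_ARGS) {
		if (*ptr == ' ') {		/* collapse runs of separators */
			*ptr++ = '\0';
			continue;
		}
		argv[argc++] = ptr;		/* start of the next argument */
		ptr = strchr(ptr, ' ');		/* jump to the following space */
	}

	for (i = 0; i < argc; i++)
		printf("argv[%d] = %s\n", i, argv[i]);
	return 0;
}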
diff --git a/target/linux/generic/patches-3.3/340-module_alloc_size_check.patch b/target/linux/generic/patches-3.3/340-module_alloc_size_check.patch
new file mode 100644
index 000000000..549df5a66
--- /dev/null
+++ b/target/linux/generic/patches-3.3/340-module_alloc_size_check.patch
@@ -0,0 +1,20 @@
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2322,12 +2322,15 @@ static void dynamic_debug_remove(struct
+
+ void * __weak module_alloc(unsigned long size)
+ {
+- return size == 0 ? NULL : vmalloc_exec(size);
++ return vmalloc_exec(size);
+ }
+
+ static void *module_alloc_update_bounds(unsigned long size)
+ {
+- void *ret = module_alloc(size);
++ void *ret = NULL;
++
++ if (size)
++ ret = module_alloc(size);
+
+ if (ret) {
+ mutex_lock(&module_mutex);
diff --git a/target/linux/generic/patches-3.3/400-rootfs_split.patch b/target/linux/generic/patches-3.3/400-rootfs_split.patch
new file mode 100644
index 000000000..349185a30
--- /dev/null
+++ b/target/linux/generic/patches-3.3/400-rootfs_split.patch
@@ -0,0 +1,327 @@
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -23,6 +23,14 @@ config MTD_TESTS
+ WARNING: some of the tests will ERASE entire MTD device which they
+ test. Do not use these tests unless you really know what you do.
+
++config MTD_ROOTFS_ROOT_DEV
++ bool "Automatically set 'rootfs' partition to be root filesystem"
++ default y
++
++config MTD_ROOTFS_SPLIT
++ bool "Automatically split 'rootfs' partition for squashfs"
++ default y
++
+ config MTD_REDBOOT_PARTS
+ tristate "RedBoot partition table parsing"
+ ---help---
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -29,6 +29,8 @@
+ #include <linux/kmod.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
++#include <linux/root_dev.h>
++#include <linux/magic.h>
+ #include <linux/err.h>
+
+ #include "mtdcore.h"
+@@ -50,7 +52,7 @@ struct mtd_part {
+ * the pointer to that structure with this macro.
+ */
+ #define PART(x) ((struct mtd_part *)(x))
+-
++#define IS_PART(mtd) (mtd->read == part_read)
+
+ /*
+ * MTD methods which simply translate the effective address and pass through
+@@ -643,6 +645,155 @@ int mtd_del_partition(struct mtd_info *m
+ }
+ EXPORT_SYMBOL_GPL(mtd_del_partition);
+
++#ifdef CONFIG_MTD_ROOTFS_SPLIT
++#define ROOTFS_SPLIT_NAME "rootfs_data"
++#define ROOTFS_REMOVED_NAME "<removed>"
++
++struct squashfs_super_block {
++ __le32 s_magic;
++ __le32 pad0[9];
++ __le64 bytes_used;
++};
++
++
++static int split_squashfs(struct mtd_info *master, int offset, int *split_offset)
++{
++ struct squashfs_super_block sb;
++ int len, ret;
++
++ ret = master->read(master, offset, sizeof(sb), &len, (void *) &sb);
++ if (ret || (len != sizeof(sb))) {
++		printk(KERN_ALERT "split_squashfs: error occurred while reading "
++ "from \"%s\"\n", master->name);
++ return -EINVAL;
++ }
++
++ if (SQUASHFS_MAGIC != le32_to_cpu(sb.s_magic) ) {
++ printk(KERN_ALERT "split_squashfs: no squashfs found in \"%s\"\n",
++ master->name);
++ *split_offset = 0;
++ return 0;
++ }
++
++ if (le64_to_cpu((sb.bytes_used)) <= 0) {
++ printk(KERN_ALERT "split_squashfs: squashfs is empty in \"%s\"\n",
++ master->name);
++ *split_offset = 0;
++ return 0;
++ }
++
++ len = (u32) le64_to_cpu(sb.bytes_used);
++ len += (offset & 0x000fffff);
++ len += (master->erasesize - 1);
++ len &= ~(master->erasesize - 1);
++ len -= (offset & 0x000fffff);
++ *split_offset = offset + len;
++
++ return 0;
++}
++
++static int split_rootfs_data(struct mtd_info *master, struct mtd_info *rpart, const struct mtd_partition *part)
++{
++ struct mtd_partition *dpart;
++ struct mtd_part *slave = NULL;
++ struct mtd_part *spart;
++ int ret, split_offset = 0;
++
++ spart = PART(rpart);
++ ret = split_squashfs(master, spart->offset, &split_offset);
++ if (ret)
++ return ret;
++
++ if (split_offset <= 0)
++ return 0;
++
++ dpart = kmalloc(sizeof(*part)+sizeof(ROOTFS_SPLIT_NAME)+1, GFP_KERNEL);
++ if (dpart == NULL) {
++ printk(KERN_INFO "split_squashfs: no memory for partition \"%s\"\n",
++ ROOTFS_SPLIT_NAME);
++ return -ENOMEM;
++ }
++
++ memcpy(dpart, part, sizeof(*part));
++ dpart->name = (unsigned char *)&dpart[1];
++ strcpy(dpart->name, ROOTFS_SPLIT_NAME);
++
++ dpart->size = rpart->size - (split_offset - spart->offset);
++ dpart->offset = split_offset;
++
++ if (dpart == NULL)
++ return 1;
++
++ printk(KERN_INFO "mtd: partition \"%s\" created automatically, ofs=%llX, len=%llX \n",
++ ROOTFS_SPLIT_NAME, dpart->offset, dpart->size);
++
++ slave = allocate_partition(master, dpart, 0, split_offset);
++ if (IS_ERR(slave))
++ return PTR_ERR(slave);
++ mutex_lock(&mtd_partitions_mutex);
++ list_add(&slave->list, &mtd_partitions);
++ mutex_unlock(&mtd_partitions_mutex);
++
++ add_mtd_device(&slave->mtd);
++
++ rpart->split = &slave->mtd;
++
++ return 0;
++}
++
++static int refresh_rootfs_split(struct mtd_info *mtd)
++{
++ struct mtd_partition tpart;
++ struct mtd_part *part;
++ char *name;
++ //int index = 0;
++ int offset, size;
++ int ret;
++
++ part = PART(mtd);
++
++ /* check for the new squashfs offset first */
++ ret = split_squashfs(part->master, part->offset, &offset);
++ if (ret)
++ return ret;
++
++ if ((offset > 0) && !mtd->split) {
++ printk(KERN_INFO "%s: creating new split partition for \"%s\"\n", __func__, mtd->name);
++ /* if we don't have a rootfs split partition, create a new one */
++ tpart.name = (char *) mtd->name;
++ tpart.size = mtd->size;
++ tpart.offset = part->offset;
++
++ return split_rootfs_data(part->master, &part->mtd, &tpart);
++ } else if ((offset > 0) && mtd->split) {
++ /* update the offsets of the existing partition */
++ size = mtd->size + part->offset - offset;
++
++ part = PART(mtd->split);
++ part->offset = offset;
++ part->mtd.size = size;
++ printk(KERN_INFO "%s: %s partition \"" ROOTFS_SPLIT_NAME "\", offset: 0x%06x (0x%06x)\n",
++ __func__, (!strcmp(part->mtd.name, ROOTFS_SPLIT_NAME) ? "updating" : "creating"),
++ (u32) part->offset, (u32) part->mtd.size);
++ name = kmalloc(sizeof(ROOTFS_SPLIT_NAME) + 1, GFP_KERNEL);
++ strcpy(name, ROOTFS_SPLIT_NAME);
++ part->mtd.name = name;
++ } else if ((offset <= 0) && mtd->split) {
++ printk(KERN_INFO "%s: removing partition \"%s\"\n", __func__, mtd->split->name);
++
++ /* mark existing partition as removed */
++ part = PART(mtd->split);
++ name = kmalloc(sizeof(ROOTFS_SPLIT_NAME) + 1, GFP_KERNEL);
++ strcpy(name, ROOTFS_REMOVED_NAME);
++ part->mtd.name = name;
++ part->offset = 0;
++ part->mtd.size = 0;
++ }
++
++ return 0;
++}
++#endif /* CONFIG_MTD_ROOTFS_SPLIT */
++
+ /*
+ * This function, given a master MTD object and a partition table, creates
+ * and registers slave MTD objects which are bound to the master according to
+@@ -659,6 +810,9 @@ int add_mtd_partitions(struct mtd_info *
+ struct mtd_part *slave;
+ uint64_t cur_offset = 0;
+ int i;
++#ifdef CONFIG_MTD_ROOTFS_SPLIT
++ int ret;
++#endif
+
+ printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+
+@@ -673,12 +827,53 @@ int add_mtd_partitions(struct mtd_info *
+
+ add_mtd_device(&slave->mtd);
+
++ if (!strcmp(parts[i].name, "rootfs")) {
++#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
++ if (ROOT_DEV == 0) {
++ printk(KERN_NOTICE "mtd: partition \"rootfs\" "
++ "set to be root filesystem\n");
++ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, slave->mtd.index);
++ }
++#endif
++#ifdef CONFIG_MTD_ROOTFS_SPLIT
++ ret = split_rootfs_data(master, &slave->mtd, &parts[i]);
++ /* if (ret == 0)
++ * j++; */
++#endif
++ }
++
+ cur_offset = slave->offset + slave->mtd.size;
+ }
+
+ return 0;
+ }
+
++int mtd_device_refresh(struct mtd_info *mtd)
++{
++ int ret = 0;
++
++ if (IS_PART(mtd)) {
++ struct mtd_part *part;
++ struct mtd_info *master;
++
++ part = PART(mtd);
++ master = part->master;
++ if (master->refresh_device)
++ ret = master->refresh_device(master);
++ }
++
++ if (!ret && mtd->refresh_device)
++ ret = mtd->refresh_device(mtd);
++
++#ifdef CONFIG_MTD_ROOTFS_SPLIT
++ if (!ret && IS_PART(mtd) && !strcmp(mtd->name, "rootfs"))
++ refresh_rootfs_split(mtd);
++#endif
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(mtd_device_refresh);
++
+ static DEFINE_SPINLOCK(part_parser_lock);
+ static LIST_HEAD(part_parsers);
+
+--- a/drivers/mtd/mtdchar.c
++++ b/drivers/mtd/mtdchar.c
+@@ -1005,6 +1005,12 @@ static int mtdchar_ioctl(struct file *fi
+ break;
+ }
+
++ case MTDREFRESH:
++ {
++ ret = mtd_device_refresh(mtd);
++ break;
++ }
++
+ default:
+ ret = -ENOTTY;
+ }
+--- a/include/linux/mtd/mtd.h
++++ b/include/linux/mtd/mtd.h
+@@ -114,6 +114,7 @@ struct nand_ecclayout {
+
+ struct module; /* only needed for owner field in mtd_info */
+
++struct mtd_info;
+ struct mtd_info {
+ u_char type;
+ uint32_t flags;
+@@ -214,6 +215,9 @@ struct mtd_info {
+ int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
+ int (*suspend) (struct mtd_info *mtd);
+ void (*resume) (struct mtd_info *mtd);
++ int (*refresh_device)(struct mtd_info *mtd);
++ struct mtd_info *split;
++
+ /*
+ * If the driver is something smart, like UBI, it may need to maintain
+ * its own reference counting. The below functions are only for driver.
+@@ -502,6 +506,7 @@ extern int mtd_device_parse_register(str
+ int defnr_parts);
+ #define mtd_device_register(master, parts, nr_parts) \
+ mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
++extern int mtd_device_refresh(struct mtd_info *master);
+ extern int mtd_device_unregister(struct mtd_info *master);
+ extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
+ extern int __get_mtd_device(struct mtd_info *mtd);
+--- a/include/linux/mtd/partitions.h
++++ b/include/linux/mtd/partitions.h
+@@ -36,12 +36,14 @@
+ * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
+ */
+
++struct mtd_partition;
+ struct mtd_partition {
+ char *name; /* identifier string */
+ uint64_t size; /* partition size */
+ uint64_t offset; /* offset within the master MTD space */
+ uint32_t mask_flags; /* master MTD flags to mask out for this partition */
+ struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */
++ int (*refresh_partition)(struct mtd_info *);
+ };
+
+ #define MTDPART_OFS_RETAIN (-3)
+--- a/include/mtd/mtd-abi.h
++++ b/include/mtd/mtd-abi.h
+@@ -202,6 +202,7 @@ struct otp_info {
+ * without OOB, e.g., NOR flash.
+ */
+ #define MEMWRITE _IOWR('M', 24, struct mtd_write_req)
++#define MTDREFRESH _IO('M', 50)
+
+ /*
+ * Obsolete legacy interface. Keep it in order not to break userspace
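As a worked example of the rounding done by split_squashfs() above, the stand-alone sketch below uses invented numbers (the partition offset, squashfs bytes_used and erase size are illustrative only): the end of the squashfs image is rounded up to the next erase-block boundary, and the resulting split_offset is where the automatically created rootfs_data partition starts.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t offset     = 0x120000;	/* hypothetical "rootfs" partition offset */
	uint32_t bytes_used = 0x2a7c31;	/* hypothetical squashfs bytes_used       */
	uint32_t erasesize  = 0x10000;	/* 64 KiB erase block                     */
	uint32_t len, split_offset;

	/* same arithmetic as split_squashfs() */
	len  = bytes_used;
	len += (offset & 0x000fffff);
	len += (erasesize - 1);
	len &= ~(erasesize - 1);
	len -= (offset & 0x000fffff);
	split_offset = offset + len;

	/* squashfs data ends at 0x3c7c31; the next erase boundary is 0x3d0000,
	 * so "rootfs_data" starts there and runs to the end of "rootfs". */
	printf("split_offset = 0x%x\n", split_offset);
	return 0;
}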
diff --git a/target/linux/generic/patches-3.3/401-partial_eraseblock_write.patch b/target/linux/generic/patches-3.3/401-partial_eraseblock_write.patch
new file mode 100644
index 000000000..3b22cfcb2
--- /dev/null
+++ b/target/linux/generic/patches-3.3/401-partial_eraseblock_write.patch
@@ -0,0 +1,145 @@
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -35,6 +35,8 @@
+
+ #include "mtdcore.h"
+
++#define MTD_ERASE_PARTIAL 0x8000 /* partition only covers parts of an erase block */
++
+ /* Our partition linked list */
+ static LIST_HEAD(mtd_partitions);
+ static DEFINE_MUTEX(mtd_partitions_mutex);
+@@ -252,13 +254,60 @@ static int part_erase(struct mtd_info *m
+ return -EROFS;
+ if (instr->addr >= mtd->size)
+ return -EINVAL;
++
++ instr->partial_start = false;
++ if (mtd->flags & MTD_ERASE_PARTIAL) {
++ size_t readlen = 0;
++ u64 mtd_ofs;
++
++ instr->erase_buf = kmalloc(part->master->erasesize, GFP_ATOMIC);
++ if (!instr->erase_buf)
++ return -ENOMEM;
++
++ mtd_ofs = part->offset + instr->addr;
++ instr->erase_buf_ofs = do_div(mtd_ofs, part->master->erasesize);
++
++ if (instr->erase_buf_ofs > 0) {
++ instr->addr -= instr->erase_buf_ofs;
++ ret = mtd_read(part->master,
++ instr->addr + part->offset,
++ part->master->erasesize,
++ &readlen, instr->erase_buf);
++
++ instr->partial_start = true;
++ } else {
++ mtd_ofs = part->offset + part->mtd.size;
++ instr->erase_buf_ofs = part->master->erasesize -
++ do_div(mtd_ofs, part->master->erasesize);
++
++ if (instr->erase_buf_ofs > 0) {
++ instr->len += instr->erase_buf_ofs;
++ ret = mtd_read(part->master,
++ part->offset + instr->addr +
++ instr->len - part->master->erasesize,
++ part->master->erasesize, &readlen,
++ instr->erase_buf);
++ } else {
++ ret = 0;
++ }
++ }
++ if (ret < 0) {
++ kfree(instr->erase_buf);
++ return ret;
++ }
++
++ }
++
+ instr->addr += part->offset;
+ ret = mtd_erase(part->master, instr);
+ if (ret) {
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr -= part->offset;
+ instr->addr -= part->offset;
++ if (mtd->flags & MTD_ERASE_PARTIAL)
++ kfree(instr->erase_buf);
+ }
++
+ return ret;
+ }
+
+@@ -266,7 +315,25 @@ void mtd_erase_callback(struct erase_inf
+ {
+ if (instr->mtd->erase == part_erase) {
+ struct mtd_part *part = PART(instr->mtd);
++ size_t wrlen = 0;
+
++ if (instr->mtd->flags & MTD_ERASE_PARTIAL) {
++ if (instr->partial_start) {
++ part->master->write(part->master,
++ instr->addr, instr->erase_buf_ofs,
++ &wrlen, instr->erase_buf);
++ instr->addr += instr->erase_buf_ofs;
++ } else {
++ instr->len -= instr->erase_buf_ofs;
++ part->master->write(part->master,
++ instr->addr + instr->len,
++ instr->erase_buf_ofs, &wrlen,
++ instr->erase_buf +
++ part->master->erasesize -
++ instr->erase_buf_ofs);
++ }
++ kfree(instr->erase_buf);
++ }
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr -= part->offset;
+ instr->addr -= part->offset;
+@@ -537,18 +604,24 @@ static struct mtd_part *allocate_partiti
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ mtd_mod_by_eb(slave->offset, &slave->mtd)) {
+ /* Doesn't start on a boundary of major erase size */
+- /* FIXME: Let it be writable if it is on a boundary of
+- * _minor_ erase size though */
+- slave->mtd.flags &= ~MTD_WRITEABLE;
+- printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+- part->name);
++ slave->mtd.flags |= MTD_ERASE_PARTIAL;
++ if (((u32) slave->mtd.size) > master->erasesize)
++ slave->mtd.flags &= ~MTD_WRITEABLE;
++ else
++ slave->mtd.erasesize = slave->mtd.size;
+ }
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+- mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
+- slave->mtd.flags &= ~MTD_WRITEABLE;
+- printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+- part->name);
++ mtd_mod_by_eb(slave->offset + slave->mtd.size, &slave->mtd)) {
++ slave->mtd.flags |= MTD_ERASE_PARTIAL;
++
++ if ((u32) slave->mtd.size > master->erasesize)
++ slave->mtd.flags &= ~MTD_WRITEABLE;
++ else
++ slave->mtd.erasesize = slave->mtd.size;
+ }
++ if ((slave->mtd.flags & (MTD_ERASE_PARTIAL|MTD_WRITEABLE)) == MTD_ERASE_PARTIAL)
++ printk(KERN_WARNING"mtd: partition \"%s\" must either start or end on erase block boundary or be smaller than an erase block -- forcing read-only\n",
++ part->name);
+
+ slave->mtd.ecclayout = master->ecclayout;
+ if (master->block_isbad) {
+--- a/include/linux/mtd/mtd.h
++++ b/include/linux/mtd/mtd.h
+@@ -58,6 +58,10 @@ struct erase_info {
+ u_long priv;
+ u_char state;
+ struct erase_info *next;
++
++ u8 *erase_buf;
++ u32 erase_buf_ofs;
++ bool partial_start;
+ };
+
+ struct mtd_erase_region_info {
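To make the head-alignment case above concrete, here is a small stand-alone sketch with invented numbers showing the offsets part_erase() computes when a writable partition starts in the middle of a 64 KiB erase block: the erase is widened down to the block start, the whole block is read into erase_buf first, and mtd_erase_callback() later writes the preserved head back.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t erasesize   = 0x10000;	/* master erase block: 64 KiB        */
	uint64_t part_offset = 0x38000;	/* partition starts mid-block        */
	uint64_t erase_addr  = 0x0;	/* caller erases from partition start */

	/* offset of the erase start inside its master erase block */
	uint64_t mtd_ofs       = part_offset + erase_addr;	/* 0x38000 */
	uint64_t erase_buf_ofs = mtd_ofs % erasesize;		/* 0x8000  */
	uint64_t block_start   = mtd_ofs - erase_buf_ofs;	/* 0x30000 */

	printf("preserve the first 0x%llx bytes of the block at 0x%llx,\n"
	       "erase the whole block, then write them back\n",
	       (unsigned long long)erase_buf_ofs,
	       (unsigned long long)block_start);
	return 0;
}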
diff --git a/target/linux/generic/patches-3.3/410-mtd_info_move_forward_decl.patch b/target/linux/generic/patches-3.3/410-mtd_info_move_forward_decl.patch
new file mode 100644
index 000000000..251f522e4
--- /dev/null
+++ b/target/linux/generic/patches-3.3/410-mtd_info_move_forward_decl.patch
@@ -0,0 +1,18 @@
+--- a/include/linux/mtd/partitions.h
++++ b/include/linux/mtd/partitions.h
+@@ -35,6 +35,7 @@
+ * Note: writeable partitions require their size and offset be
+ * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
+ */
++struct mtd_info;
+
+ struct mtd_partition;
+ struct mtd_partition {
+@@ -52,7 +53,6 @@ struct mtd_partition {
+ #define MTDPART_SIZ_FULL (0)
+
+
+-struct mtd_info;
+ struct device_node;
+
+ /**
diff --git a/target/linux/generic/patches-3.3/420-redboot_space.patch b/target/linux/generic/patches-3.3/420-redboot_space.patch
new file mode 100644
index 000000000..fb6700b07
--- /dev/null
+++ b/target/linux/generic/patches-3.3/420-redboot_space.patch
@@ -0,0 +1,30 @@
+--- a/drivers/mtd/redboot.c
++++ b/drivers/mtd/redboot.c
+@@ -267,14 +267,21 @@ static int parse_redboot_partitions(stru
+ #endif
+ names += strlen(names)+1;
+
+-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
+- i++;
+- parts[i].offset = parts[i-1].size + parts[i-1].offset;
+- parts[i].size = fl->next->img->flash_base - parts[i].offset;
+- parts[i].name = nullname;
+- }
++ if (!strcmp(parts[i].name, "rootfs")) {
++ parts[i].size = fl->next->img->flash_base;
++ parts[i].size &= ~(master->erasesize - 1);
++ parts[i].size -= parts[i].offset;
++#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
++ nrparts--;
++ } else {
++ i++;
++ parts[i].offset = parts[i-1].size + parts[i-1].offset;
++ parts[i].size = fl->next->img->flash_base - parts[i].offset;
++ parts[i].name = nullname;
+ #endif
++ }
++ }
+ tmp_fl = fl;
+ fl = fl->next;
+ kfree(tmp_fl);
diff --git a/target/linux/generic/patches-3.3/421-redboot_boardconfig.patch b/target/linux/generic/patches-3.3/421-redboot_boardconfig.patch
new file mode 100644
index 000000000..db8377b30
--- /dev/null
+++ b/target/linux/generic/patches-3.3/421-redboot_boardconfig.patch
@@ -0,0 +1,60 @@
+--- a/drivers/mtd/redboot.c
++++ b/drivers/mtd/redboot.c
+@@ -30,6 +30,8 @@
+ #include <linux/mtd/partitions.h>
+ #include <linux/module.h>
+
++#define BOARD_CONFIG_PART "boardconfig"
++
+ struct fis_image_desc {
+ unsigned char name[16]; // Null terminated name
+ uint32_t flash_base; // Address within FLASH of image
+@@ -60,6 +62,7 @@ static int parse_redboot_partitions(stru
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+ {
++ unsigned long max_offset = 0;
+ int nrparts = 0;
+ struct fis_image_desc *buf;
+ struct mtd_partition *parts;
+@@ -227,14 +230,14 @@ static int parse_redboot_partitions(stru
+ }
+ }
+ #endif
+- parts = kzalloc(sizeof(*parts)*nrparts + nulllen + namelen, GFP_KERNEL);
++ parts = kzalloc(sizeof(*parts) * (nrparts + 1) + nulllen + namelen + sizeof(BOARD_CONFIG_PART), GFP_KERNEL);
+
+ if (!parts) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+- nullname = (char *)&parts[nrparts];
++ nullname = (char *)&parts[nrparts + 1];
+ #ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if (nulllen > 0) {
+ strcpy(nullname, nullstring);
+@@ -253,6 +256,8 @@ static int parse_redboot_partitions(stru
+ }
+ #endif
+ for ( ; i<nrparts; i++) {
++ if(max_offset < buf[i].flash_base + buf[i].size)
++ max_offset = buf[i].flash_base + buf[i].size;
+ parts[i].size = fl->img->size;
+ parts[i].offset = fl->img->flash_base;
+ parts[i].name = names;
+@@ -286,6 +291,14 @@ static int parse_redboot_partitions(stru
+ fl = fl->next;
+ kfree(tmp_fl);
+ }
++ if(master->size - max_offset >= master->erasesize)
++ {
++ parts[nrparts].size = master->size - max_offset;
++ parts[nrparts].offset = max_offset;
++ parts[nrparts].name = names;
++ strcpy(names, BOARD_CONFIG_PART);
++ nrparts++;
++ }
+ ret = nrparts;
+ *pparts = parts;
+ out:
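In short, the hunk above tracks the highest address covered by any FIS image in max_offset and, if at least one whole erase block is left above it, appends an extra "boardconfig" partition covering that tail; the kzalloc is sized for nrparts + 1 entries to leave room for this slot. As a hedged illustration with invented sizes: on an 8 MiB flash with 64 KiB erase blocks whose last image ends at 0x7e0000, the remaining 0x20000 bytes would appear as a boardconfig partition at offset 0x7e0000.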
diff --git a/target/linux/generic/patches-3.3/430-mtd_myloader_partition_parser.patch b/target/linux/generic/patches-3.3/430-mtd_myloader_partition_parser.patch
new file mode 100644
index 000000000..d60126dcd
--- /dev/null
+++ b/target/linux/generic/patches-3.3/430-mtd_myloader_partition_parser.patch
@@ -0,0 +1,35 @@
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -156,6 +156,22 @@ config MTD_BCM63XX_PARTS
+ This provides partions parsing for BCM63xx devices with CFE
+ bootloaders.
+
++config MTD_MYLOADER_PARTS
++ tristate "MyLoader partition parsing"
++ depends on ADM5120 || ATHEROS_AR231X || ATHEROS_AR71XX || ATH79
++ ---help---
++ MyLoader is a bootloader which allows the user to define partitions
++ in flash devices, by putting a table in the second erase block
++ on the device, similar to a partition table. This table gives the
++ offsets and lengths of the user defined partitions.
++
++ If you need code which can detect and parse these tables, and
++ register MTD 'partitions' corresponding to each image detected,
++ enable this option.
++
++ You will still need the parsing functions to be called by the driver
++ for your particular device. It won't happen automatically.
++
+ comment "User Modules And Translation Layers"
+
+ config MTD_CHAR
+--- a/drivers/mtd/Makefile
++++ b/drivers/mtd/Makefile
+@@ -12,6 +12,7 @@ obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdli
+ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
+ obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
+ obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
++obj-$(CONFIG_MTD_MYLOADER_PARTS) += myloader.o
+
+ # 'Users' - code which presents functionality to userspace.
+ obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/target/linux/generic/patches-3.3/440-block2mtd_init.patch b/target/linux/generic/patches-3.3/440-block2mtd_init.patch
new file mode 100644
index 000000000..00208c870
--- /dev/null
+++ b/target/linux/generic/patches-3.3/440-block2mtd_init.patch
@@ -0,0 +1,116 @@
+--- a/drivers/mtd/devices/block2mtd.c
++++ b/drivers/mtd/devices/block2mtd.c
+@@ -14,6 +14,7 @@
+ #include <linux/list.h>
+ #include <linux/init.h>
+ #include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
+ #include <linux/mutex.h>
+ #include <linux/mount.h>
+ #include <linux/slab.h>
+@@ -231,11 +232,12 @@ static void block2mtd_free_device(struct
+
+
+ /* FIXME: ensure that mtd->size % erase_size == 0 */
+-static struct block2mtd_dev *add_device(char *devname, int erase_size)
++static struct block2mtd_dev *add_device(char *devname, int erase_size, const char *mtdname)
+ {
+ const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
+ struct block_device *bdev;
+ struct block2mtd_dev *dev;
++ struct mtd_partition *part;
+ char *name;
+
+ if (!devname)
+@@ -274,13 +276,16 @@ static struct block2mtd_dev *add_device(
+
+ /* Setup the MTD structure */
+ /* make the name contain the block device in */
+- name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
++ if (!mtdname)
++ mtdname = devname;
++ name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL);
+ if (!name)
+ goto devinit_err;
+
++ strcpy(name, mtdname);
+ dev->mtd.name = name;
+
+- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
++ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1);
+ dev->mtd.erasesize = erase_size;
+ dev->mtd.writesize = 1;
+ dev->mtd.writebufsize = PAGE_SIZE;
+@@ -294,14 +299,17 @@ static struct block2mtd_dev *add_device(
+ dev->mtd.priv = dev;
+ dev->mtd.owner = THIS_MODULE;
+
+- if (mtd_device_register(&dev->mtd, NULL, 0)) {
++ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
++ part->name = name;
++ part->offset = 0;
++ part->size = dev->mtd.size;
++ if (mtd_device_register(&dev->mtd, part, 1)) {
+ /* Device didn't get added, so free the entry */
+ goto devinit_err;
+ }
+ list_add(&dev->list, &blkmtd_device_list);
+ INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
+- dev->mtd.name + strlen("block2mtd: "),
+- dev->mtd.erasesize >> 10, dev->mtd.erasesize);
++ mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ return dev;
+
+ devinit_err:
+@@ -374,9 +382,9 @@ static char block2mtd_paramline[80 + 12]
+
+ static int block2mtd_setup2(const char *val)
+ {
+- char buf[80 + 12]; /* 80 for device, 12 for erase size */
++ char buf[80 + 12 + 80]; /* 80 for device, 12 for erase size, 80 for name */
+ char *str = buf;
+- char *token[2];
++ char *token[3];
+ char *name;
+ size_t erase_size = PAGE_SIZE;
+ int i, ret;
+@@ -387,7 +395,7 @@ static int block2mtd_setup2(const char *
+ strcpy(str, val);
+ kill_final_newline(str);
+
+- for (i = 0; i < 2; i++)
++ for (i = 0; i < 3; i++)
+ token[i] = strsep(&str, ",");
+
+ if (str)
+@@ -406,8 +414,10 @@ static int block2mtd_setup2(const char *
+ parse_err("illegal erase size");
+ }
+ }
++ if (token[2] && (strlen(token[2]) + 1 > 80))
++ parse_err("mtd device name too long");
+
+- add_device(name, erase_size);
++ add_device(name, erase_size, token[2]);
+
+ return 0;
+ }
+@@ -441,7 +451,7 @@ static int block2mtd_setup(const char *v
+
+
+ module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
+-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
++MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
+
+ static int __init block2mtd_init(void)
+ {
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -546,6 +546,7 @@ int invalidate_partitions(struct gendisk
+
+ return 0;
+ }
++EXPORT_SYMBOL(rescan_partitions);
+
+ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
+ {
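With the extra token added above, block2mtd can give the emulated MTD a stable name. A hedged usage example with a made-up block device: loading the module with block2mtd=/dev/sda2,65536,rootfs_data creates one MTD device named rootfs_data, backed by /dev/sda2 with a 64 KiB erase size; the name field may be omitted, in which case the device path is used as the name, matching the updated "block2mtd=<dev>[,<erasesize>[,<name>]]" syntax in MODULE_PARM_DESC.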
diff --git a/target/linux/generic/patches-3.3/441-block2mtd_refresh.patch b/target/linux/generic/patches-3.3/441-block2mtd_refresh.patch
new file mode 100644
index 000000000..979b43b6e
--- /dev/null
+++ b/target/linux/generic/patches-3.3/441-block2mtd_refresh.patch
@@ -0,0 +1,291 @@
+--- a/drivers/mtd/devices/block2mtd.c
++++ b/drivers/mtd/devices/block2mtd.c
+@@ -29,6 +29,8 @@ struct block2mtd_dev {
+ struct block_device *blkdev;
+ struct mtd_info mtd;
+ struct mutex write_mutex;
++ rwlock_t bdev_mutex;
++ char devname[0];
+ };
+
+
+@@ -81,6 +83,12 @@ static int block2mtd_erase(struct mtd_in
+ size_t len = instr->len;
+ int err;
+
++ read_lock(&dev->bdev_mutex);
++ if (!dev->blkdev) {
++ err = -EINVAL;
++ goto done;
++ }
++
+ instr->state = MTD_ERASING;
+ mutex_lock(&dev->write_mutex);
+ err = _block2mtd_erase(dev, from, len);
+@@ -92,6 +100,10 @@ static int block2mtd_erase(struct mtd_in
+ instr->state = MTD_ERASE_DONE;
+
+ mtd_erase_callback(instr);
++
++done:
++ read_unlock(&dev->bdev_mutex);
++
+ return err;
+ }
+
+@@ -103,10 +115,14 @@ static int block2mtd_read(struct mtd_inf
+ struct page *page;
+ int index = from >> PAGE_SHIFT;
+ int offset = from & (PAGE_SIZE-1);
+- int cpylen;
++ int cpylen, err = 0;
++
++ read_lock(&dev->bdev_mutex);
++ if (!dev->blkdev || (from > mtd->size)) {
++ err = -EINVAL;
++ goto done;
++ }
+
+- if (from > mtd->size)
+- return -EINVAL;
+ if (from + len > mtd->size)
+ len = mtd->size - from;
+
+@@ -121,10 +137,14 @@ static int block2mtd_read(struct mtd_inf
+ len = len - cpylen;
+
+ page = page_read(dev->blkdev->bd_inode->i_mapping, index);
+- if (!page)
+- return -ENOMEM;
+- if (IS_ERR(page))
+- return PTR_ERR(page);
++ if (!page) {
++ err = -ENOMEM;
++ goto done;
++ }
++ if (IS_ERR(page)) {
++ err = PTR_ERR(page);
++ goto done;
++ }
+
+ memcpy(buf, page_address(page) + offset, cpylen);
+ page_cache_release(page);
+@@ -135,7 +155,10 @@ static int block2mtd_read(struct mtd_inf
+ offset = 0;
+ index++;
+ }
+- return 0;
++
++done:
++ read_unlock(&dev->bdev_mutex);
++ return err;
+ }
+
+
+@@ -187,12 +210,22 @@ static int block2mtd_write(struct mtd_in
+ size_t *retlen, const u_char *buf)
+ {
+ struct block2mtd_dev *dev = mtd->priv;
+- int err;
++ int err = 0;
++
++ read_lock(&dev->bdev_mutex);
++ if (!dev->blkdev) {
++ err = -EINVAL;
++ goto done;
++ }
+
+ if (!len)
+- return 0;
+- if (to >= mtd->size)
+- return -ENOSPC;
++ goto done;
++
++ if (to >= mtd->size) {
++ err = -ENOSPC;
++ goto done;
++ }
++
+ if (to + len > mtd->size)
+ len = mtd->size - to;
+
+@@ -201,6 +234,9 @@ static int block2mtd_write(struct mtd_in
+ mutex_unlock(&dev->write_mutex);
+ if (err > 0)
+ err = 0;
++
++done:
++ read_unlock(&dev->bdev_mutex);
+ return err;
+ }
+
+@@ -209,33 +245,110 @@ static int block2mtd_write(struct mtd_in
+ static void block2mtd_sync(struct mtd_info *mtd)
+ {
+ struct block2mtd_dev *dev = mtd->priv;
++ read_lock(&dev->bdev_mutex);
++ if (dev->blkdev)
+ sync_blockdev(dev->blkdev);
++ read_unlock(&dev->bdev_mutex);
++
+ return;
+ }
+
+
++static int _open_bdev(struct block2mtd_dev *dev)
++{
++ const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
++ struct block_device *bdev;
++
++ /* Get a handle on the device */
++ bdev = blkdev_get_by_path(dev->devname, mode, dev);
++#ifndef MODULE
++ if (IS_ERR(bdev)) {
++ dev_t devt;
++
++ /* We might not have rootfs mounted at this point. Try
++ to resolve the device name by other means. */
++
++ devt = name_to_dev_t(dev->devname);
++ if (devt)
++ bdev = blkdev_get_by_dev(devt, mode, dev);
++ }
++#endif
++
++ if (IS_ERR(bdev)) {
++ ERROR("error: cannot open device %s", dev->devname);
++ return 1;
++ }
++ dev->blkdev = bdev;
++
++ if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
++ ERROR("attempting to use an MTD device as a block device");
++ return 1;
++ }
++
++ return 0;
++}
++
++static void _close_bdev(struct block2mtd_dev *dev)
++{
++ struct block_device *bdev;
++
++ if (!dev->blkdev)
++ return;
++
++ bdev = dev->blkdev;
++ invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping, 0, -1);
++ blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
++ dev->blkdev = NULL;
++}
++
+ static void block2mtd_free_device(struct block2mtd_dev *dev)
+ {
+ if (!dev)
+ return;
+
+ kfree(dev->mtd.name);
+-
+- if (dev->blkdev) {
+- invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
+- 0, -1);
+- blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+- }
+-
++ _close_bdev(dev);
+ kfree(dev);
+ }
+
+
+-/* FIXME: ensure that mtd->size % erase_size == 0 */
+-static struct block2mtd_dev *add_device(char *devname, int erase_size, const char *mtdname)
++static int block2mtd_refresh(struct mtd_info *mtd)
+ {
+- const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
++ struct block2mtd_dev *dev = mtd->priv;
+ struct block_device *bdev;
++ dev_t devt;
++ int err = 0;
++
++ /* no other mtd function can run at this point */
++ write_lock(&dev->bdev_mutex);
++
++ /* get the device number for the whole disk */
++ devt = MKDEV(MAJOR(dev->blkdev->bd_dev), 0);
++
++ /* close the old block device */
++ _close_bdev(dev);
++
++ /* open the whole disk, issue a partition rescan, then */
++ bdev = blkdev_get_by_dev(devt, FMODE_WRITE | FMODE_READ, mtd);
++ if (!bdev || !bdev->bd_disk)
++ err = -EINVAL;
++#ifndef CONFIG_MTD_BLOCK2MTD_MODULE
++ else
++ err = rescan_partitions(bdev->bd_disk, bdev);
++#endif
++ if (bdev)
++ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
++
++ /* try to open the partition block device again */
++ _open_bdev(dev);
++ write_unlock(&dev->bdev_mutex);
++
++ return err;
++}
++
++/* FIXME: ensure that mtd->size % erase_size == 0 */
++static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
++{
+ struct block2mtd_dev *dev;
+ struct mtd_partition *part;
+ char *name;
+@@ -243,36 +356,17 @@ static struct block2mtd_dev *add_device(
+ if (!devname)
+ return NULL;
+
+- dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
++ dev = kzalloc(sizeof(struct block2mtd_dev) + strlen(devname) + 1, GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+- /* Get a handle on the device */
+- bdev = blkdev_get_by_path(devname, mode, dev);
+-#ifndef MODULE
+- if (IS_ERR(bdev)) {
+-
+- /* We might not have rootfs mounted at this point. Try
+- to resolve the device name by other means. */
++ strcpy(dev->devname, devname);
+
+- dev_t devt = name_to_dev_t(devname);
+- if (devt)
+- bdev = blkdev_get_by_dev(devt, mode, dev);
+- }
+-#endif
+-
+- if (IS_ERR(bdev)) {
+- ERROR("error: cannot open device %s", devname);
++ if (_open_bdev(dev))
+ goto devinit_err;
+- }
+- dev->blkdev = bdev;
+-
+- if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
+- ERROR("attempting to use an MTD device as a block device");
+- goto devinit_err;
+- }
+
+ mutex_init(&dev->write_mutex);
++ rwlock_init(&dev->bdev_mutex);
+
+ /* Setup the MTD structure */
+ /* make the name contain the block device in */
+@@ -298,6 +392,7 @@ static struct block2mtd_dev *add_device(
+ dev->mtd.read = block2mtd_read;
+ dev->mtd.priv = dev;
+ dev->mtd.owner = THIS_MODULE;
++ dev->mtd.refresh_device = block2mtd_refresh;
+
+ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
+ part->name = name;
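The refresh path added above is normally triggered from user space through the MTDREFRESH ioctl that the 400-rootfs_split patch earlier in this commit adds to mtdchar. A minimal sketch of such a caller is shown below; the device node is hypothetical and the local MTDREFRESH define simply mirrors the value the patch adds to mtd-abi.h.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

/* mirrors the definition the 400-rootfs_split patch adds to mtd-abi.h */
#ifndef MTDREFRESH
#define MTDREFRESH	_IO('M', 50)
#endif

int main(void)
{
	int fd = open("/dev/mtd0", O_RDWR);	/* hypothetical MTD char device */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, MTDREFRESH) < 0)		/* re-read the backing device */
		perror("MTDREFRESH");
	close(fd);
	return 0;
}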
diff --git a/target/linux/generic/patches-3.3/442-block2mtd_probe.patch b/target/linux/generic/patches-3.3/442-block2mtd_probe.patch
new file mode 100644
index 000000000..c427e9fd0
--- /dev/null
+++ b/target/linux/generic/patches-3.3/442-block2mtd_probe.patch
@@ -0,0 +1,10 @@
+--- a/drivers/mtd/devices/block2mtd.c
++++ b/drivers/mtd/devices/block2mtd.c
+@@ -268,6 +268,7 @@ static int _open_bdev(struct block2mtd_d
+ /* We might not have rootfs mounted at this point. Try
+ to resolve the device name by other means. */
+
++ wait_for_device_probe();
+ devt = name_to_dev_t(dev->devname);
+ if (devt)
+ bdev = blkdev_get_by_dev(devt, mode, dev);
diff --git a/target/linux/generic/patches-3.3/443-block2mtd-avoid-recursive-call-of-mtd_writev.patch b/target/linux/generic/patches-3.3/443-block2mtd-avoid-recursive-call-of-mtd_writev.patch
new file mode 100644
index 000000000..0ce9368f9
--- /dev/null
+++ b/target/linux/generic/patches-3.3/443-block2mtd-avoid-recursive-call-of-mtd_writev.patch
@@ -0,0 +1,10 @@
+--- a/drivers/mtd/devices/block2mtd.c
++++ b/drivers/mtd/devices/block2mtd.c
+@@ -388,7 +388,6 @@ static struct block2mtd_dev *add_device(
+ dev->mtd.flags = MTD_CAP_RAM;
+ dev->mtd.erase = block2mtd_erase;
+ dev->mtd.write = block2mtd_write;
+- dev->mtd.writev = mtd_writev;
+ dev->mtd.sync = block2mtd_sync;
+ dev->mtd.read = block2mtd_read;
+ dev->mtd.priv = dev;
diff --git a/target/linux/generic/patches-3.3/450-mtd_plat_nand_chip_fixup.patch b/target/linux/generic/patches-3.3/450-mtd_plat_nand_chip_fixup.patch
new file mode 100644
index 000000000..fd2b9b57b
--- /dev/null
+++ b/target/linux/generic/patches-3.3/450-mtd_plat_nand_chip_fixup.patch
@@ -0,0 +1,37 @@
+---
+ drivers/mtd/nand/plat_nand.c | 13 ++++++++++++-
+ include/linux/mtd/nand.h | 1 +
+ 2 files changed, 13 insertions(+), 1 deletion(-)
+
+--- a/include/linux/mtd/nand.h
++++ b/include/linux/mtd/nand.h
+@@ -622,6 +622,7 @@ struct platform_nand_chip {
+ unsigned int options;
+ unsigned int bbt_options;
+ const char **part_probe_types;
++ int (*chip_fixup)(struct mtd_info *mtd);
+ };
+
+ /* Keep gcc happy */
+--- a/drivers/mtd/nand/plat_nand.c
++++ b/drivers/mtd/nand/plat_nand.c
+@@ -93,7 +93,18 @@ static int __devinit plat_nand_probe(str
+ }
+
+ /* Scan to find existence of the device */
+- if (nand_scan(&data->mtd, pdata->chip.nr_chips)) {
++ if (nand_scan_ident(&data->mtd, pdata->chip.nr_chips, NULL)) {
++ err = -ENXIO;
++ goto out;
++ }
++
++ if (pdata->chip.chip_fixup) {
++ err = pdata->chip.chip_fixup(&data->mtd);
++ if (err)
++ goto out;
++ }
++
++ if (nand_scan_tail(&data->mtd)) {
+ err = -ENXIO;
+ goto out;
+ }
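The hunk above splits nand_scan() into nand_scan_ident() and nand_scan_tail() so that a board can adjust the detected chip in between. The sketch below shows what a user of the new chip_fixup hook might look like; it is hypothetical board support code (the fixup and the platform data values are invented for illustration, and the controller callbacks are omitted), not something taken from this commit.

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

/* hypothetical board file, sketch only */
static int mydev_nand_fixup(struct mtd_info *mtd)
{
	/* runs after nand_scan_ident(): the chip has been identified,
	 * but ECC/bad-block setup has not happened yet, so geometry
	 * quirks can still be applied here. */
	if (mtd->size > (8 << 20))
		mtd->size = 8 << 20;	/* e.g. only 8 MiB is wired up */
	return 0;
}

static struct platform_nand_data mydev_nand_data = {
	.chip = {
		.nr_chips	= 1,
		.chip_delay	= 25,
		.chip_fixup	= mydev_nand_fixup,	/* new hook */
	},
	/* .ctrl members (cmd_ctrl, dev_ready, ...) omitted */
};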
diff --git a/target/linux/generic/patches-3.3/451-mtd_fix_nand_correct_data_return_code.patch b/target/linux/generic/patches-3.3/451-mtd_fix_nand_correct_data_return_code.patch
new file mode 100644
index 000000000..2f72d8594
--- /dev/null
+++ b/target/linux/generic/patches-3.3/451-mtd_fix_nand_correct_data_return_code.patch
@@ -0,0 +1,12 @@
+--- a/drivers/mtd/nand/nand_ecc.c
++++ b/drivers/mtd/nand/nand_ecc.c
+@@ -507,8 +507,7 @@ int __nand_correct_data(unsigned char *b
+ if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
+ return 1; /* error in ECC data; no action needed */
+
+- printk(KERN_ERR "uncorrectable error : ");
+- return -1;
++ return -EBADMSG;
+ }
+ EXPORT_SYMBOL(__nand_correct_data);
+
diff --git a/target/linux/generic/patches-3.3/460-cfi_cmdset_0002_no_erase_suspend.patch b/target/linux/generic/patches-3.3/460-cfi_cmdset_0002_no_erase_suspend.patch
new file mode 100644
index 000000000..0c4b9bea0
--- /dev/null
+++ b/target/linux/generic/patches-3.3/460-cfi_cmdset_0002_no_erase_suspend.patch
@@ -0,0 +1,11 @@
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -682,7 +682,7 @@ static int get_chip(struct map_info *map
+ return 0;
+
+ case FL_ERASING:
+- if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
++ if (1 /* no suspend */ || !cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
+ !(mode == FL_READY || mode == FL_POINT ||
+ (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
+ goto sleep;
diff --git a/target/linux/generic/patches-3.3/470-mtd_m25p80_add_pm25lv_flash_support.patch b/target/linux/generic/patches-3.3/470-mtd_m25p80_add_pm25lv_flash_support.patch
new file mode 100644
index 000000000..ff27d04f8
--- /dev/null
+++ b/target/linux/generic/patches-3.3/470-mtd_m25p80_add_pm25lv_flash_support.patch
@@ -0,0 +1,39 @@
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -45,6 +45,7 @@
+ #define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
+ #define OPCODE_BE_32K 0x52 /* Erase 32KiB block */
+ #define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */
++#define OPCODE_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips*/
+ #define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
+ #define OPCODE_RDID 0x9f /* Read JEDEC ID */
+
+@@ -625,6 +626,7 @@ struct flash_info {
+ u16 flags;
+ #define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
+ #define M25P_NO_ERASE 0x02 /* No erase command needed */
++#define SECT_4K_PMC 0x04 /* OPCODE_BE_4K_PMC works uniformly */
+ };
+
+ #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+@@ -686,6 +688,10 @@ static const struct spi_device_id m25p_i
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+
++ /* PMC -- pm25x "blocks" are 32K, sectors are 4K */
++ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
++ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
++
+ /* Spansion -- single (large) sector size only, at least
+ * for the chips listed here (without boot sectors).
+ */
+@@ -921,6 +927,9 @@ static int __devinit m25p_probe(struct s
+ if (info->flags & SECT_4K) {
+ flash->erase_opcode = OPCODE_BE_4K;
+ flash->mtd.erasesize = 4096;
++ } else if (info->flags & SECT_4K_PMC) {
++ flash->erase_opcode = OPCODE_BE_4K_PMC;
++ flash->mtd.erasesize = 4096;
+ } else {
+ flash->erase_opcode = OPCODE_SE;
+ flash->mtd.erasesize = info->sector_size;
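For reference, the INFO() fields used above are (jedec_id, ext_id, sector_size, n_sectors, flags), so the pm25lv512 entry describes 2 x 32 KiB = 64 KiB of flash and pm25lv010 describes 4 x 32 KiB = 128 KiB; because both carry SECT_4K_PMC, the probe hunk above switches them to the new 0xd7 opcode with a 4 KiB erase size instead of the larger sector erase.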
diff --git a/target/linux/generic/patches-3.3/473-mtd_m25p80_add_w25q128.patch b/target/linux/generic/patches-3.3/473-mtd_m25p80_add_w25q128.patch
new file mode 100644
index 000000000..b00c9fae3
--- /dev/null
+++ b/target/linux/generic/patches-3.3/473-mtd_m25p80_add_w25q128.patch
@@ -0,0 +1,10 @@
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -765,6 +765,7 @@ static const struct spi_device_id m25p_i
+ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
++ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+
+ /* Catalyst / On Semiconductor -- non-JEDEC */
+ { "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
diff --git a/target/linux/generic/patches-3.3/475-mtd_cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch b/target/linux/generic/patches-3.3/475-mtd_cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch
new file mode 100644
index 000000000..3b43535da
--- /dev/null
+++ b/target/linux/generic/patches-3.3/475-mtd_cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch
@@ -0,0 +1,18 @@
+From: George Kashperko <george@znau.edu.ua>
+
+Issue map read after Write Buffer Load command to ensure chip is ready
+to receive data.
+Signed-off-by: George Kashperko <george@znau.edu.ua>
+---
+ drivers/mtd/chips/cfi_cmdset_0002.c | 1 +
+ 1 file changed, 1 insertion(+)
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -1409,6 +1409,7 @@ static int __xipram do_write_buffer(stru
+
+ /* Write Buffer Load */
+ map_write(map, CMD(0x25), cmd_adr);
++ (void) map_read(map, cmd_adr);
+
+ chip->state = FL_WRITING_TO_BUFFER;
+
diff --git a/target/linux/generic/patches-3.3/476-mtd-m25p80-allow-to-disable-small-sector-erase.patch b/target/linux/generic/patches-3.3/476-mtd-m25p80-allow-to-disable-small-sector-erase.patch
new file mode 100644
index 000000000..231d4548c
--- /dev/null
+++ b/target/linux/generic/patches-3.3/476-mtd-m25p80-allow-to-disable-small-sector-erase.patch
@@ -0,0 +1,41 @@
+--- a/drivers/mtd/devices/Kconfig
++++ b/drivers/mtd/devices/Kconfig
+@@ -102,6 +102,14 @@ config M25PXX_USE_FAST_READ
+ help
+ This option enables FAST_READ access supported by ST M25Pxx.
+
++config M25PXX_PREFER_SMALL_SECTOR_ERASE
++ bool "Prefer small sector erase"
++ depends on MTD_M25P80
++ default y
++ help
++ This option enables use of the small erase sectors if that is
++ supported by the flash chip.
++
+ config MTD_SST25L
+ tristate "Support SST25L (non JEDEC) SPI Flash chips"
+ depends on SPI_MASTER
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -84,6 +84,12 @@
+
+ #define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
+
++#ifdef CONFIG_M25PXX_PREFER_SMALL_SECTOR_ERASE
++#define PREFER_SMALL_SECTOR_ERASE 1
++#else
++#define PREFER_SMALL_SECTOR_ERASE 0
++#endif
++
+ /****************************************************************************/
+
+ struct m25p {
+@@ -925,7 +931,7 @@ static int __devinit m25p_probe(struct s
+ flash->mtd.write = m25p80_write;
+
+ /* prefer "small sector" erase if possible */
+- if (info->flags & SECT_4K) {
++ if (PREFER_SMALL_SECTOR_ERASE && (info->flags & SECT_4K)) {
+ flash->erase_opcode = OPCODE_BE_4K;
+ flash->mtd.erasesize = 4096;
+ } else if (info->flags & SECT_4K_PMC) {
diff --git a/target/linux/generic/patches-3.3/477-mtd-m25p80-add-support-for-the-EON-EN25Q64-chip.patch b/target/linux/generic/patches-3.3/477-mtd-m25p80-add-support-for-the-EON-EN25Q64-chip.patch
new file mode 100644
index 000000000..439cbde16
--- /dev/null
+++ b/target/linux/generic/patches-3.3/477-mtd-m25p80-add-support-for-the-EON-EN25Q64-chip.patch
@@ -0,0 +1,10 @@
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -677,6 +677,7 @@ static const struct spi_device_id m25p_i
+ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
++ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+
+ /* Intel/Numonyx -- xxxs33b */
+ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
diff --git a/target/linux/generic/patches-3.3/500-yaffs_support.patch b/target/linux/generic/patches-3.3/500-yaffs_support.patch
new file mode 100644
index 000000000..7776dcfb9
--- /dev/null
+++ b/target/linux/generic/patches-3.3/500-yaffs_support.patch
@@ -0,0 +1,18 @@
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -35,6 +35,7 @@ source "fs/gfs2/Kconfig"
+ source "fs/ocfs2/Kconfig"
+ source "fs/btrfs/Kconfig"
+ source "fs/nilfs2/Kconfig"
++source "fs/yaffs2/Kconfig"
+
+ endif # BLOCK
+
+--- a/fs/Makefile
++++ b/fs/Makefile
+@@ -125,3 +125,5 @@ obj-$(CONFIG_GFS2_FS) += gfs2/
+ obj-y += exofs/ # Multiple modules
+ obj-$(CONFIG_CEPH_FS) += ceph/
+ obj-$(CONFIG_PSTORE) += pstore/
++obj-$(CONFIG_YAFFS_FS) += yaffs2/
++
diff --git a/target/linux/generic/patches-3.3/501-yaffs_cvs_2009_04_24.patch b/target/linux/generic/patches-3.3/501-yaffs_cvs_2009_04_24.patch
new file mode 100644
index 000000000..c334b17d0
--- /dev/null
+++ b/target/linux/generic/patches-3.3/501-yaffs_cvs_2009_04_24.patch
@@ -0,0 +1,12344 @@
+--- a/fs/yaffs2/devextras.h
++++ b/fs/yaffs2/devextras.h
+@@ -14,194 +14,135 @@
+ */
+
+ /*
+- * This file is just holds extra declarations used during development.
+- * Most of these are from kernel includes placed here so we can use them in
+- * applications.
++ * This file just holds extra declarations of macros that would normally
++ * be provided in the Linux kernel. These macros have been written from
++ * scratch but are functionally equivalent to the Linux ones.
+ *
+ */
+
+ #ifndef __EXTRAS_H__
+ #define __EXTRAS_H__
+
+-#if defined WIN32
+-#define __inline__ __inline
+-#define new newHack
+-#endif
+-
+-#if !(defined __KERNEL__) || (defined WIN32)
+
+-/* User space defines */
++#if !(defined __KERNEL__)
+
++/* Definition of types */
+ typedef unsigned char __u8;
+ typedef unsigned short __u16;
+ typedef unsigned __u32;
+
++#endif
++
+ /*
+- * Simple doubly linked list implementation.
+- *
+- * Some of the internal functions ("__xxx") are useful when
+- * manipulating whole lists rather than single entries, as
+- * sometimes we already know the next/prev entries and we can
+- * generate better code by using them directly rather than
+- * using the generic single-entry routines.
++ * This is a simple doubly linked list implementation that matches the
++ * way the Linux kernel doubly linked list implementation works.
+ */
+
+-#define prefetch(x) 1
+-
+-struct list_head {
+- struct list_head *next, *prev;
++struct ylist_head {
++ struct ylist_head *next; /* next in chain */
++ struct ylist_head *prev; /* previous in chain */
+ };
+
+-#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+-#define LIST_HEAD(name) \
+- struct list_head name = LIST_HEAD_INIT(name)
++/* Initialise a static list */
++#define YLIST_HEAD(name) \
++struct ylist_head name = { &(name), &(name)}
++
+
+-#define INIT_LIST_HEAD(ptr) do { \
+- (ptr)->next = (ptr); (ptr)->prev = (ptr); \
++
++/* Initialise a list head to an empty list */
++#define YINIT_LIST_HEAD(p) \
++do { \
++ (p)->next = (p);\
++ (p)->prev = (p); \
+ } while (0)
+
+-/*
+- * Insert a new entry between two known consecutive entries.
+- *
+- * This is only for internal list manipulation where we know
+- * the prev/next entries already!
+- */
+-static __inline__ void __list_add(struct list_head *new,
+- struct list_head *prev,
+- struct list_head *next)
+-{
+- next->prev = new;
+- new->next = next;
+- new->prev = prev;
+- prev->next = new;
+-}
+
+-/**
+- * list_add - add a new entry
+- * @new: new entry to be added
+- * @head: list head to add it after
+- *
+- * Insert a new entry after the specified head.
+- * This is good for implementing stacks.
+- */
+-static __inline__ void list_add(struct list_head *new, struct list_head *head)
++/* Add an element to a list */
++static __inline__ void ylist_add(struct ylist_head *newEntry,
++ struct ylist_head *list)
+ {
+- __list_add(new, head, head->next);
+-}
++ struct ylist_head *listNext = list->next;
++
++ list->next = newEntry;
++ newEntry->prev = list;
++ newEntry->next = listNext;
++ listNext->prev = newEntry;
+
+-/**
+- * list_add_tail - add a new entry
+- * @new: new entry to be added
+- * @head: list head to add it before
+- *
+- * Insert a new entry before the specified head.
+- * This is useful for implementing queues.
+- */
+-static __inline__ void list_add_tail(struct list_head *new,
+- struct list_head *head)
+-{
+- __list_add(new, head->prev, head);
+ }
+
+-/*
+- * Delete a list entry by making the prev/next entries
+- * point to each other.
+- *
+- * This is only for internal list manipulation where we know
+- * the prev/next entries already!
+- */
+-static __inline__ void __list_del(struct list_head *prev,
+- struct list_head *next)
++static __inline__ void ylist_add_tail(struct ylist_head *newEntry,
++ struct ylist_head *list)
+ {
+- next->prev = prev;
+- prev->next = next;
++ struct ylist_head *listPrev = list->prev;
++
++ list->prev = newEntry;
++ newEntry->next = list;
++ newEntry->prev = listPrev;
++ listPrev->next = newEntry;
++
+ }
+
+-/**
+- * list_del - deletes entry from list.
+- * @entry: the element to delete from the list.
+- * Note: list_empty on entry does not return true after this, the entry is
+- * in an undefined state.
+- */
+-static __inline__ void list_del(struct list_head *entry)
++
++/* Take an element out of its current list, with or without
++ * reinitialising the links of the entry. */
++static __inline__ void ylist_del(struct ylist_head *entry)
+ {
+- __list_del(entry->prev, entry->next);
++ struct ylist_head *listNext = entry->next;
++ struct ylist_head *listPrev = entry->prev;
++
++ listNext->prev = listPrev;
++ listPrev->next = listNext;
++
+ }
+
+-/**
+- * list_del_init - deletes entry from list and reinitialize it.
+- * @entry: the element to delete from the list.
+- */
+-static __inline__ void list_del_init(struct list_head *entry)
++static __inline__ void ylist_del_init(struct ylist_head *entry)
+ {
+- __list_del(entry->prev, entry->next);
+- INIT_LIST_HEAD(entry);
++ ylist_del(entry);
++ entry->next = entry->prev = entry;
+ }
+
+-/**
+- * list_empty - tests whether a list is empty
+- * @head: the list to test.
+- */
+-static __inline__ int list_empty(struct list_head *head)
++
++/* Test if the list is empty */
++static __inline__ int ylist_empty(struct ylist_head *entry)
+ {
+- return head->next == head;
++ return (entry->next == entry);
+ }
+
+-/**
+- * list_splice - join two lists
+- * @list: the new list to add.
+- * @head: the place to add it in the first list.
++
++/* ylist_entry takes a pointer to a list entry and offsets it so that
++ * we can find a pointer to the object it is embedded in.
+ */
+-static __inline__ void list_splice(struct list_head *list,
+- struct list_head *head)
+-{
+- struct list_head *first = list->next;
+
+- if (first != list) {
+- struct list_head *last = list->prev;
+- struct list_head *at = head->next;
+-
+- first->prev = head;
+- head->next = first;
+-
+- last->next = at;
+- at->prev = last;
+- }
+-}
+
+-/**
+- * list_entry - get the struct for this entry
+- * @ptr: the &struct list_head pointer.
+- * @type: the type of the struct this is embedded in.
+- * @member: the name of the list_struct within the struct.
+- */
+-#define list_entry(ptr, type, member) \
+- ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
+-
+-/**
+- * list_for_each - iterate over a list
+- * @pos: the &struct list_head to use as a loop counter.
+- * @head: the head for your list.
+- */
+-#define list_for_each(pos, head) \
+- for (pos = (head)->next, prefetch(pos->next); pos != (head); \
+- pos = pos->next, prefetch(pos->next))
+-
+-/**
+- * list_for_each_safe - iterate over a list safe against removal
+- * of list entry
+- * @pos: the &struct list_head to use as a loop counter.
+- * @n: another &struct list_head to use as temporary storage
+- * @head: the head for your list.
+- */
+-#define list_for_each_safe(pos, n, head) \
+- for (pos = (head)->next, n = pos->next; pos != (head); \
+- pos = n, n = pos->next)
++#define ylist_entry(entry, type, member) \
++ ((type *)((char *)(entry)-(unsigned long)(&((type *)NULL)->member)))
+
+-/*
+- * File types
++
++/* ylist_for_each and ylist_for_each_safe iterate over lists.
++ * ylist_for_each_safe uses temporary storage to make the list delete safe
+ */
++
++#define ylist_for_each(itervar, list) \
++ for (itervar = (list)->next; itervar != (list); itervar = itervar->next)
++
++#define ylist_for_each_safe(itervar, saveVar, list) \
++ for (itervar = (list)->next, saveVar = (list)->next->next; \
++ itervar != (list); itervar = saveVar, saveVar = saveVar->next)
++
++
++#if !(defined __KERNEL__)
++
++
++#ifndef WIN32
++#include <sys/stat.h>
++#endif
++
++
++#ifdef CONFIG_YAFFS_PROVIDE_DEFS
++/* File types */
++
++
+ #define DT_UNKNOWN 0
+ #define DT_FIFO 1
+ #define DT_CHR 2
+@@ -212,6 +153,7 @@ static __inline__ void list_splice(struc
+ #define DT_SOCK 12
+ #define DT_WHT 14
+
++
+ #ifndef WIN32
+ #include <sys/stat.h>
+ #endif
+@@ -227,10 +169,6 @@ static __inline__ void list_splice(struc
+ #define ATTR_ATIME 16
+ #define ATTR_MTIME 32
+ #define ATTR_CTIME 64
+-#define ATTR_ATIME_SET 128
+-#define ATTR_MTIME_SET 256
+-#define ATTR_FORCE 512 /* Not a change, but a change it */
+-#define ATTR_ATTR_FLAG 1024
+
+ struct iattr {
+ unsigned int ia_valid;
+@@ -244,21 +182,15 @@ struct iattr {
+ unsigned int ia_attr_flags;
+ };
+
+-#define KERN_DEBUG
++#endif
+
+ #else
+
+-#ifndef WIN32
+ #include <linux/types.h>
+-#include <linux/list.h>
+ #include <linux/fs.h>
+ #include <linux/stat.h>
+-#endif
+
+ #endif
+
+-#if defined WIN32
+-#undef new
+-#endif
+
+ #endif
+--- a/fs/yaffs2/Kconfig
++++ b/fs/yaffs2/Kconfig
+@@ -5,7 +5,7 @@
+ config YAFFS_FS
+ tristate "YAFFS2 file system support"
+ default n
+- depends on MTD
++ depends on MTD_BLOCK
+ select YAFFS_YAFFS1
+ select YAFFS_YAFFS2
+ help
+@@ -43,7 +43,8 @@ config YAFFS_9BYTE_TAGS
+ format that you need to continue to support. New data written
+ also uses the older-style format. Note: Use of this option
+ generally requires that MTD's oob layout be adjusted to use the
+- older-style format. See notes on tags formats and MTD versions.
++ older-style format. See notes on tags formats and MTD versions
++ in yaffs_mtdif1.c.
+
+ If unsure, say N.
+
+@@ -109,26 +110,6 @@ config YAFFS_DISABLE_LAZY_LOAD
+
+ If unsure, say N.
+
+-config YAFFS_CHECKPOINT_RESERVED_BLOCKS
+- int "Reserved blocks for checkpointing"
+- depends on YAFFS_YAFFS2
+- default 10
+- help
+- Give the number of Blocks to reserve for checkpointing.
+- Checkpointing saves the state at unmount so that mounting is
+- much faster as a scan of all the flash to regenerate this state
+- is not needed. These Blocks are reserved per partition, so if
+- you have very small partitions the default (10) may be a mess
+- for you. You can set this value to 0, but that does not mean
+- checkpointing is disabled at all. There only won't be any
+- specially reserved blocks for checkpointing, so if there is
+- enough free space on the filesystem, it will be used for
+- checkpointing.
+-
+- If unsure, leave at default (10), but don't wonder if there are
+- always 2MB used on your large page device partition (10 x 2k
+- pagesize). When using small partitions or when being very small
+- on space, you probably want to set this to zero.
+
+ config YAFFS_DISABLE_WIDE_TNODES
+ bool "Turn off wide tnodes"
+--- a/fs/yaffs2/Makefile
++++ b/fs/yaffs2/Makefile
+@@ -5,7 +5,6 @@
+ obj-$(CONFIG_YAFFS_FS) += yaffs.o
+
+ yaffs-y := yaffs_ecc.o yaffs_fs.o yaffs_guts.o yaffs_checkptrw.o
+-yaffs-y += yaffs_packedtags2.o yaffs_nand.o yaffs_qsort.o
++yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o yaffs_qsort.o
+ yaffs-y += yaffs_tagscompat.o yaffs_tagsvalidity.o
+-yaffs-y += yaffs_mtdif1.o yaffs_packedtags1.o
+-yaffs-y += yaffs_mtdif.o yaffs_mtdif2.o
++yaffs-y += yaffs_mtdif.o yaffs_mtdif1.o yaffs_mtdif2.o
+--- a/fs/yaffs2/moduleconfig.h
++++ b/fs/yaffs2/moduleconfig.h
+@@ -27,12 +27,12 @@
+
+ /* Default: Not selected */
+ /* Meaning: Yaffs does its own ECC, rather than using MTD ECC */
+-//#define CONFIG_YAFFS_DOES_ECC
++/* #define CONFIG_YAFFS_DOES_ECC */
+
+ /* Default: Not selected */
+ /* Meaning: ECC byte order is 'wrong'. Only meaningful if */
+ /* CONFIG_YAFFS_DOES_ECC is set */
+-//#define CONFIG_YAFFS_ECC_WRONG_ORDER
++/* #define CONFIG_YAFFS_ECC_WRONG_ORDER */
+
+ /* Default: Selected */
+ /* Meaning: Disables testing whether chunks are erased before writing to them*/
+@@ -54,11 +54,11 @@ that you need to continue to support. N
+ older-style format.
+ Note: Use of this option generally requires that MTD's oob layout be
+ adjusted to use the older-style format. See notes on tags formats and
+-MTD versions.
++MTD versions in yaffs_mtdif1.c.
+ */
+ /* Default: Not selected */
+ /* Meaning: Use older-style on-NAND data format with pageStatus byte */
+-#define CONFIG_YAFFS_9BYTE_TAGS
++/* #define CONFIG_YAFFS_9BYTE_TAGS */
+
+ #endif /* YAFFS_OUT_OF_TREE */
+
+--- a/fs/yaffs2/yaffs_checkptrw.c
++++ b/fs/yaffs2/yaffs_checkptrw.c
+@@ -12,48 +12,43 @@
+ */
+
+ const char *yaffs_checkptrw_c_version =
+- "$Id: yaffs_checkptrw.c,v 1.14 2007-05-15 20:07:40 charles Exp $";
++ "$Id: yaffs_checkptrw.c,v 1.18 2009-03-06 17:20:49 wookey Exp $";
+
+
+ #include "yaffs_checkptrw.h"
+-
++#include "yaffs_getblockinfo.h"
+
+ static int yaffs_CheckpointSpaceOk(yaffs_Device *dev)
+ {
+-
+ int blocksAvailable = dev->nErasedBlocks - dev->nReservedBlocks;
+
+ T(YAFFS_TRACE_CHECKPOINT,
+ (TSTR("checkpt blocks available = %d" TENDSTR),
+ blocksAvailable));
+
+-
+ return (blocksAvailable <= 0) ? 0 : 1;
+ }
+
+
+ static int yaffs_CheckpointErase(yaffs_Device *dev)
+ {
+-
+ int i;
+
+-
+- if(!dev->eraseBlockInNAND)
++ if (!dev->eraseBlockInNAND)
+ return 0;
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("checking blocks %d to %d"TENDSTR),
+- dev->internalStartBlock,dev->internalEndBlock));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checking blocks %d to %d"TENDSTR),
++ dev->internalStartBlock, dev->internalEndBlock));
+
+- for(i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,i);
+- if(bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("erasing checkpt block %d"TENDSTR),i));
+- if(dev->eraseBlockInNAND(dev,i- dev->blockOffset /* realign */)){
++ for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
++ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i);
++ if (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("erasing checkpt block %d"TENDSTR), i));
++ if (dev->eraseBlockInNAND(dev, i - dev->blockOffset /* realign */)) {
+ bi->blockState = YAFFS_BLOCK_STATE_EMPTY;
+ dev->nErasedBlocks++;
+ dev->nFreeChunks += dev->nChunksPerBlock;
+- }
+- else {
+- dev->markNANDBlockBad(dev,i);
++ } else {
++ dev->markNANDBlockBad(dev, i);
+ bi->blockState = YAFFS_BLOCK_STATE_DEAD;
+ }
+ }
+@@ -71,23 +66,23 @@ static void yaffs_CheckpointFindNextEras
+ int blocksAvailable = dev->nErasedBlocks - dev->nReservedBlocks;
+ T(YAFFS_TRACE_CHECKPOINT,
+ (TSTR("allocating checkpt block: erased %d reserved %d avail %d next %d "TENDSTR),
+- dev->nErasedBlocks,dev->nReservedBlocks,blocksAvailable,dev->checkpointNextBlock));
++ dev->nErasedBlocks, dev->nReservedBlocks, blocksAvailable, dev->checkpointNextBlock));
+
+- if(dev->checkpointNextBlock >= 0 &&
+- dev->checkpointNextBlock <= dev->internalEndBlock &&
+- blocksAvailable > 0){
+-
+- for(i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++){
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,i);
+- if(bi->blockState == YAFFS_BLOCK_STATE_EMPTY){
++ if (dev->checkpointNextBlock >= 0 &&
++ dev->checkpointNextBlock <= dev->internalEndBlock &&
++ blocksAvailable > 0) {
++
++ for (i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++) {
++ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i);
++ if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY) {
+ dev->checkpointNextBlock = i + 1;
+ dev->checkpointCurrentBlock = i;
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("allocating checkpt block %d"TENDSTR),i));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("allocating checkpt block %d"TENDSTR), i));
+ return;
+ }
+ }
+ }
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("out of checkpt blocks"TENDSTR)));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("out of checkpt blocks"TENDSTR)));
+
+ dev->checkpointNextBlock = -1;
+ dev->checkpointCurrentBlock = -1;
+@@ -98,30 +93,31 @@ static void yaffs_CheckpointFindNextChec
+ int i;
+ yaffs_ExtendedTags tags;
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("find next checkpt block: start: blocks %d next %d" TENDSTR),
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("find next checkpt block: start: blocks %d next %d" TENDSTR),
+ dev->blocksInCheckpoint, dev->checkpointNextBlock));
+
+- if(dev->blocksInCheckpoint < dev->checkpointMaxBlocks)
+- for(i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++){
++ if (dev->blocksInCheckpoint < dev->checkpointMaxBlocks)
++ for (i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++) {
+ int chunk = i * dev->nChunksPerBlock;
+ int realignedChunk = chunk - dev->chunkOffset;
+
+- dev->readChunkWithTagsFromNAND(dev,realignedChunk,NULL,&tags);
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("find next checkpt block: search: block %d oid %d seq %d eccr %d" TENDSTR),
+- i, tags.objectId,tags.sequenceNumber,tags.eccResult));
++ dev->readChunkWithTagsFromNAND(dev, realignedChunk,
++ NULL, &tags);
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("find next checkpt block: search: block %d oid %d seq %d eccr %d" TENDSTR),
++ i, tags.objectId, tags.sequenceNumber, tags.eccResult));
+
+- if(tags.sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA){
++ if (tags.sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA) {
+ /* Right kind of block */
+ dev->checkpointNextBlock = tags.objectId;
+ dev->checkpointCurrentBlock = i;
+ dev->checkpointBlockList[dev->blocksInCheckpoint] = i;
+ dev->blocksInCheckpoint++;
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("found checkpt block %d"TENDSTR),i));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("found checkpt block %d"TENDSTR), i));
+ return;
+ }
+ }
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("found no more checkpt blocks"TENDSTR)));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("found no more checkpt blocks"TENDSTR)));
+
+ dev->checkpointNextBlock = -1;
+ dev->checkpointCurrentBlock = -1;
+@@ -133,17 +129,17 @@ int yaffs_CheckpointOpen(yaffs_Device *d
+
+ /* Got the functions we need? */
+ if (!dev->writeChunkWithTagsToNAND ||
+- !dev->readChunkWithTagsFromNAND ||
+- !dev->eraseBlockInNAND ||
+- !dev->markNANDBlockBad)
++ !dev->readChunkWithTagsFromNAND ||
++ !dev->eraseBlockInNAND ||
++ !dev->markNANDBlockBad)
+ return 0;
+
+- if(forWriting && !yaffs_CheckpointSpaceOk(dev))
++ if (forWriting && !yaffs_CheckpointSpaceOk(dev))
+ return 0;
+
+- if(!dev->checkpointBuffer)
+- dev->checkpointBuffer = YMALLOC_DMA(dev->nDataBytesPerChunk);
+- if(!dev->checkpointBuffer)
++ if (!dev->checkpointBuffer)
++ dev->checkpointBuffer = YMALLOC_DMA(dev->totalBytesPerChunk);
++ if (!dev->checkpointBuffer)
+ return 0;
+
+
+@@ -159,12 +155,10 @@ int yaffs_CheckpointOpen(yaffs_Device *d
+ dev->checkpointNextBlock = dev->internalStartBlock;
+
+ /* Erase all the blocks in the checkpoint area */
+- if(forWriting){
+- memset(dev->checkpointBuffer,0,dev->nDataBytesPerChunk);
++ if (forWriting) {
++ memset(dev->checkpointBuffer, 0, dev->nDataBytesPerChunk);
+ dev->checkpointByteOffset = 0;
+ return yaffs_CheckpointErase(dev);
+-
+-
+ } else {
+ int i;
+ /* Set to a value that will kick off a read */
+@@ -174,7 +168,7 @@ int yaffs_CheckpointOpen(yaffs_Device *d
+ dev->blocksInCheckpoint = 0;
+ dev->checkpointMaxBlocks = (dev->internalEndBlock - dev->internalStartBlock)/16 + 2;
+ dev->checkpointBlockList = YMALLOC(sizeof(int) * dev->checkpointMaxBlocks);
+- for(i = 0; i < dev->checkpointMaxBlocks; i++)
++ for (i = 0; i < dev->checkpointMaxBlocks; i++)
+ dev->checkpointBlockList[i] = -1;
+ }
+
+@@ -191,18 +185,17 @@ int yaffs_GetCheckpointSum(yaffs_Device
+
+ static int yaffs_CheckpointFlushBuffer(yaffs_Device *dev)
+ {
+-
+ int chunk;
+ int realignedChunk;
+
+ yaffs_ExtendedTags tags;
+
+- if(dev->checkpointCurrentBlock < 0){
++ if (dev->checkpointCurrentBlock < 0) {
+ yaffs_CheckpointFindNextErasedBlock(dev);
+ dev->checkpointCurrentChunk = 0;
+ }
+
+- if(dev->checkpointCurrentBlock < 0)
++ if (dev->checkpointCurrentBlock < 0)
+ return 0;
+
+ tags.chunkDeleted = 0;
+@@ -210,10 +203,10 @@ static int yaffs_CheckpointFlushBuffer(y
+ tags.chunkId = dev->checkpointPageSequence + 1;
+ tags.sequenceNumber = YAFFS_SEQUENCE_CHECKPOINT_DATA;
+ tags.byteCount = dev->nDataBytesPerChunk;
+- if(dev->checkpointCurrentChunk == 0){
++ if (dev->checkpointCurrentChunk == 0) {
+ /* First chunk we write for the block? Set block state to
+ checkpoint */
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,dev->checkpointCurrentBlock);
++ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, dev->checkpointCurrentBlock);
+ bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT;
+ dev->blocksInCheckpoint++;
+ }
+@@ -221,28 +214,29 @@ static int yaffs_CheckpointFlushBuffer(y
+ chunk = dev->checkpointCurrentBlock * dev->nChunksPerBlock + dev->checkpointCurrentChunk;
+
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("checkpoint wite buffer nand %d(%d:%d) objid %d chId %d" TENDSTR),
+- chunk, dev->checkpointCurrentBlock, dev->checkpointCurrentChunk,tags.objectId,tags.chunkId));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint write buffer nand %d(%d:%d) objid %d chId %d" TENDSTR),
++ chunk, dev->checkpointCurrentBlock, dev->checkpointCurrentChunk, tags.objectId, tags.chunkId));
+
+ realignedChunk = chunk - dev->chunkOffset;
+
+- dev->writeChunkWithTagsToNAND(dev,realignedChunk,dev->checkpointBuffer,&tags);
++ dev->writeChunkWithTagsToNAND(dev, realignedChunk,
++ dev->checkpointBuffer, &tags);
+ dev->checkpointByteOffset = 0;
+ dev->checkpointPageSequence++;
+ dev->checkpointCurrentChunk++;
+- if(dev->checkpointCurrentChunk >= dev->nChunksPerBlock){
++ if (dev->checkpointCurrentChunk >= dev->nChunksPerBlock) {
+ dev->checkpointCurrentChunk = 0;
+ dev->checkpointCurrentBlock = -1;
+ }
+- memset(dev->checkpointBuffer,0,dev->nDataBytesPerChunk);
++ memset(dev->checkpointBuffer, 0, dev->nDataBytesPerChunk);
+
+ return 1;
+ }
+
+
+-int yaffs_CheckpointWrite(yaffs_Device *dev,const void *data, int nBytes)
++int yaffs_CheckpointWrite(yaffs_Device *dev, const void *data, int nBytes)
+ {
+- int i=0;
++ int i = 0;
+ int ok = 1;
+
+
+@@ -250,17 +244,14 @@ int yaffs_CheckpointWrite(yaffs_Device *
+
+
+
+- if(!dev->checkpointBuffer)
++ if (!dev->checkpointBuffer)
+ return 0;
+
+- if(!dev->checkpointOpenForWrite)
++ if (!dev->checkpointOpenForWrite)
+ return -1;
+
+- while(i < nBytes && ok) {
+-
+-
+-
+- dev->checkpointBuffer[dev->checkpointByteOffset] = *dataBytes ;
++ while (i < nBytes && ok) {
++ dev->checkpointBuffer[dev->checkpointByteOffset] = *dataBytes;
+ dev->checkpointSum += *dataBytes;
+ dev->checkpointXor ^= *dataBytes;
+
+@@ -270,18 +261,17 @@ int yaffs_CheckpointWrite(yaffs_Device *
+ dev->checkpointByteCount++;
+
+
+- if(dev->checkpointByteOffset < 0 ||
++ if (dev->checkpointByteOffset < 0 ||
+ dev->checkpointByteOffset >= dev->nDataBytesPerChunk)
+ ok = yaffs_CheckpointFlushBuffer(dev);
+-
+ }
+
+- return i;
++ return i;
+ }
+
+ int yaffs_CheckpointRead(yaffs_Device *dev, void *data, int nBytes)
+ {
+- int i=0;
++ int i = 0;
+ int ok = 1;
+ yaffs_ExtendedTags tags;
+
+@@ -291,52 +281,54 @@ int yaffs_CheckpointRead(yaffs_Device *d
+
+ __u8 *dataBytes = (__u8 *)data;
+
+- if(!dev->checkpointBuffer)
++ if (!dev->checkpointBuffer)
+ return 0;
+
+- if(dev->checkpointOpenForWrite)
++ if (dev->checkpointOpenForWrite)
+ return -1;
+
+- while(i < nBytes && ok) {
++ while (i < nBytes && ok) {
+
+
+- if(dev->checkpointByteOffset < 0 ||
+- dev->checkpointByteOffset >= dev->nDataBytesPerChunk) {
++ if (dev->checkpointByteOffset < 0 ||
++ dev->checkpointByteOffset >= dev->nDataBytesPerChunk) {
+
+- if(dev->checkpointCurrentBlock < 0){
++ if (dev->checkpointCurrentBlock < 0) {
+ yaffs_CheckpointFindNextCheckpointBlock(dev);
+ dev->checkpointCurrentChunk = 0;
+ }
+
+- if(dev->checkpointCurrentBlock < 0)
++ if (dev->checkpointCurrentBlock < 0)
+ ok = 0;
+ else {
+-
+- chunk = dev->checkpointCurrentBlock * dev->nChunksPerBlock +
+- dev->checkpointCurrentChunk;
++ chunk = dev->checkpointCurrentBlock *
++ dev->nChunksPerBlock +
++ dev->checkpointCurrentChunk;
+
+ realignedChunk = chunk - dev->chunkOffset;
+
+- /* read in the next chunk */
+- /* printf("read checkpoint page %d\n",dev->checkpointPage); */
+- dev->readChunkWithTagsFromNAND(dev, realignedChunk,
+- dev->checkpointBuffer,
+- &tags);
+-
+- if(tags.chunkId != (dev->checkpointPageSequence + 1) ||
+- tags.sequenceNumber != YAFFS_SEQUENCE_CHECKPOINT_DATA)
+- ok = 0;
++ /* read in the next chunk */
++ /* printf("read checkpoint page %d\n",dev->checkpointPage); */
++ dev->readChunkWithTagsFromNAND(dev,
++ realignedChunk,
++ dev->checkpointBuffer,
++ &tags);
++
++ if (tags.chunkId != (dev->checkpointPageSequence + 1) ||
++ tags.eccResult > YAFFS_ECC_RESULT_FIXED ||
++ tags.sequenceNumber != YAFFS_SEQUENCE_CHECKPOINT_DATA)
++ ok = 0;
+
+ dev->checkpointByteOffset = 0;
+ dev->checkpointPageSequence++;
+ dev->checkpointCurrentChunk++;
+
+- if(dev->checkpointCurrentChunk >= dev->nChunksPerBlock)
++ if (dev->checkpointCurrentChunk >= dev->nChunksPerBlock)
+ dev->checkpointCurrentBlock = -1;
+ }
+ }
+
+- if(ok){
++ if (ok) {
+ *dataBytes = dev->checkpointBuffer[dev->checkpointByteOffset];
+ dev->checkpointSum += *dataBytes;
+ dev->checkpointXor ^= *dataBytes;
+@@ -353,17 +345,17 @@ int yaffs_CheckpointRead(yaffs_Device *d
+ int yaffs_CheckpointClose(yaffs_Device *dev)
+ {
+
+- if(dev->checkpointOpenForWrite){
+- if(dev->checkpointByteOffset != 0)
++ if (dev->checkpointOpenForWrite) {
++ if (dev->checkpointByteOffset != 0)
+ yaffs_CheckpointFlushBuffer(dev);
+ } else {
+ int i;
+- for(i = 0; i < dev->blocksInCheckpoint && dev->checkpointBlockList[i] >= 0; i++){
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,dev->checkpointBlockList[i]);
+- if(bi->blockState == YAFFS_BLOCK_STATE_EMPTY)
++ for (i = 0; i < dev->blocksInCheckpoint && dev->checkpointBlockList[i] >= 0; i++) {
++ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, dev->checkpointBlockList[i]);
++ if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY)
+ bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT;
+ else {
+- // Todo this looks odd...
++ /* Todo this looks odd... */
+ }
+ }
+ YFREE(dev->checkpointBlockList);
+@@ -374,27 +366,25 @@ int yaffs_CheckpointClose(yaffs_Device *
+ dev->nErasedBlocks -= dev->blocksInCheckpoint;
+
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("checkpoint byte count %d" TENDSTR),
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint byte count %d" TENDSTR),
+ dev->checkpointByteCount));
+
+- if(dev->checkpointBuffer){
++ if (dev->checkpointBuffer) {
+ /* free the buffer */
+ YFREE(dev->checkpointBuffer);
+ dev->checkpointBuffer = NULL;
+ return 1;
+- }
+- else
++ } else
+ return 0;
+-
+ }
+
+ int yaffs_CheckpointInvalidateStream(yaffs_Device *dev)
+ {
+ /* Erase the first checksum block */
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("checkpoint invalidate"TENDSTR)));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint invalidate"TENDSTR)));
+
+- if(!yaffs_CheckpointSpaceOk(dev))
++ if (!yaffs_CheckpointSpaceOk(dev))
+ return 0;
+
+ return yaffs_CheckpointErase(dev);
+--- a/fs/yaffs2/yaffs_checkptrw.h
++++ b/fs/yaffs2/yaffs_checkptrw.h
+@@ -20,9 +20,9 @@
+
+ int yaffs_CheckpointOpen(yaffs_Device *dev, int forWriting);
+
+-int yaffs_CheckpointWrite(yaffs_Device *dev,const void *data, int nBytes);
++int yaffs_CheckpointWrite(yaffs_Device *dev, const void *data, int nBytes);
+
+-int yaffs_CheckpointRead(yaffs_Device *dev,void *data, int nBytes);
++int yaffs_CheckpointRead(yaffs_Device *dev, void *data, int nBytes);
+
+ int yaffs_GetCheckpointSum(yaffs_Device *dev, __u32 *sum);
+
+--- a/fs/yaffs2/yaffs_ecc.c
++++ b/fs/yaffs2/yaffs_ecc.c
+@@ -29,7 +29,7 @@
+ */
+
+ const char *yaffs_ecc_c_version =
+- "$Id: yaffs_ecc.c,v 1.9 2007-02-14 01:09:06 wookey Exp $";
++ "$Id: yaffs_ecc.c,v 1.11 2009-03-06 17:20:50 wookey Exp $";
+
+ #include "yportenv.h"
+
+@@ -109,12 +109,10 @@ void yaffs_ECCCalculate(const unsigned c
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+- if (b & 0x01) // odd number of bits in the byte
+- {
++ if (b & 0x01) { /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+-
+ }
+
+ ecc[2] = (~col_parity) | 0x03;
+@@ -158,7 +156,7 @@ void yaffs_ECCCalculate(const unsigned c
+ ecc[0] = ~t;
+
+ #ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
+- // Swap the bytes into the wrong order
++ /* Swap the bytes into the wrong order */
+ t = ecc[0];
+ ecc[0] = ecc[1];
+ ecc[1] = t;
+@@ -189,7 +187,7 @@ int yaffs_ECCCorrect(unsigned char *data
+ unsigned bit;
+
+ #ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
+- // swap the bytes to correct for the wrong order
++ /* swap the bytes to correct for the wrong order */
+ unsigned char t;
+
+ t = d0;
+@@ -251,7 +249,7 @@ int yaffs_ECCCorrect(unsigned char *data
+ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
+ */
+ void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes,
+- yaffs_ECCOther * eccOther)
++ yaffs_ECCOther *eccOther)
+ {
+ unsigned int i;
+
+@@ -278,8 +276,8 @@ void yaffs_ECCCalculateOther(const unsig
+ }
+
+ int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes,
+- yaffs_ECCOther * read_ecc,
+- const yaffs_ECCOther * test_ecc)
++ yaffs_ECCOther *read_ecc,
++ const yaffs_ECCOther *test_ecc)
+ {
+ unsigned char cDelta; /* column parity delta */
+ unsigned lDelta; /* line parity delta */
+@@ -294,8 +292,7 @@ int yaffs_ECCCorrectOther(unsigned char
+ return 0; /* no error */
+
+ if (lDelta == ~lDeltaPrime &&
+- (((cDelta ^ (cDelta >> 1)) & 0x15) == 0x15))
+- {
++ (((cDelta ^ (cDelta >> 1)) & 0x15) == 0x15)) {
+ /* Single bit (recoverable) error in data */
+
+ bit = 0;
+@@ -307,7 +304,7 @@ int yaffs_ECCCorrectOther(unsigned char
+ if (cDelta & 0x02)
+ bit |= 0x01;
+
+- if(lDelta >= nBytes)
++ if (lDelta >= nBytes)
+ return -1;
+
+ data[lDelta] ^= (1 << bit);
+@@ -316,7 +313,7 @@ int yaffs_ECCCorrectOther(unsigned char
+ }
+
+ if ((yaffs_CountBits32(lDelta) + yaffs_CountBits32(lDeltaPrime) +
+- yaffs_CountBits(cDelta)) == 1) {
++ yaffs_CountBits(cDelta)) == 1) {
+ /* Reccoverable error in ecc */
+
+ *read_ecc = *test_ecc;
+@@ -326,6 +323,4 @@ int yaffs_ECCCorrectOther(unsigned char
+ /* Unrecoverable error */
+
+ return -1;
+-
+ }
+-
+--- a/fs/yaffs2/yaffs_ecc.h
++++ b/fs/yaffs2/yaffs_ecc.h
+@@ -13,15 +13,15 @@
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+- /*
+- * This code implements the ECC algorithm used in SmartMedia.
+- *
+- * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+- * The two unused bit are set to 1.
+- * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
+- * blocks are used on a 512-byte NAND page.
+- *
+- */
++/*
++ * This code implements the ECC algorithm used in SmartMedia.
++ *
++ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
++ * The two unused bits are set to 1.
++ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
++ * blocks are used on a 512-byte NAND page.
++ *
++ */
+
+ #ifndef __YAFFS_ECC_H__
+ #define __YAFFS_ECC_H__
+@@ -34,11 +34,11 @@ typedef struct {
+
+ void yaffs_ECCCalculate(const unsigned char *data, unsigned char *ecc);
+ int yaffs_ECCCorrect(unsigned char *data, unsigned char *read_ecc,
+- const unsigned char *test_ecc);
++ const unsigned char *test_ecc);
+
+ void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes,
+- yaffs_ECCOther * ecc);
++ yaffs_ECCOther *ecc);
+ int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes,
+- yaffs_ECCOther * read_ecc,
+- const yaffs_ECCOther * test_ecc);
++ yaffs_ECCOther *read_ecc,
++ const yaffs_ECCOther *test_ecc);
+ #endif
+--- a/fs/yaffs2/yaffs_fs.c
++++ b/fs/yaffs2/yaffs_fs.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2009 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -32,18 +32,17 @@
+ */
+
+ const char *yaffs_fs_c_version =
+- "$Id: yaffs_fs.c,v 1.63 2007-09-19 20:35:40 imcd Exp $";
++ "$Id: yaffs_fs.c,v 1.79 2009-03-17 01:12:00 wookey Exp $";
+ extern const char *yaffs_guts_c_version;
+
+ #include <linux/version.h>
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+ #include <linux/config.h>
+ #endif
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/init.h>
+-#include <linux/list.h>
+ #include <linux/fs.h>
+ #include <linux/proc_fs.h>
+ #include <linux/smp_lock.h>
+@@ -53,10 +52,12 @@ extern const char *yaffs_guts_c_version;
+ #include <linux/string.h>
+ #include <linux/ctype.h>
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#include "asm/div64.h"
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+ #include <linux/statfs.h> /* Added NCB 15-8-2003 */
+-#include <asm/statfs.h>
++#include <linux/statfs.h>
+ #define UnlockPage(p) unlock_page(p)
+ #define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+
+@@ -69,22 +70,45 @@ extern const char *yaffs_guts_c_version;
+ #define BDEVNAME_SIZE 0
+ #define yaffs_devname(sb, buf) kdevname(sb->s_dev)
+
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
+ /* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
+ #define __user
+ #endif
+
+ #endif
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
++#define YPROC_ROOT (&proc_root)
++#else
++#define YPROC_ROOT NULL
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ #define WRITE_SIZE_STR "writesize"
+-#define WRITE_SIZE(mtd) (mtd)->writesize
++#define WRITE_SIZE(mtd) ((mtd)->writesize)
+ #else
+ #define WRITE_SIZE_STR "oobblock"
+-#define WRITE_SIZE(mtd) (mtd)->oobblock
++#define WRITE_SIZE(mtd) ((mtd)->oobblock)
+ #endif
+
+-#include <asm/uaccess.h>
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27))
++#define YAFFS_USE_WRITE_BEGIN_END 1
++#else
++#define YAFFS_USE_WRITE_BEGIN_END 0
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28))
++static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
++{
++ uint64_t result = partition_size;
++ do_div(result, block_size);
++ return (uint32_t)result;
++}
++#else
++#define YCALCBLOCKS(s, b) ((s)/(b))
++#endif
++
++#include <linux/uaccess.h>
+
+ #include "yportenv.h"
+ #include "yaffs_guts.h"
+@@ -96,28 +120,44 @@ extern const char *yaffs_guts_c_version;
+
+ unsigned int yaffs_traceMask = YAFFS_TRACE_BAD_BLOCKS;
+ unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
++unsigned int yaffs_auto_checkpoint = 1;
+
+ /* Module Parameters */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+-module_param(yaffs_traceMask,uint,0644);
+-module_param(yaffs_wr_attempts,uint,0644);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++module_param(yaffs_traceMask, uint, 0644);
++module_param(yaffs_wr_attempts, uint, 0644);
++module_param(yaffs_auto_checkpoint, uint, 0644);
++#else
++MODULE_PARM(yaffs_traceMask, "i");
++MODULE_PARM(yaffs_wr_attempts, "i");
++MODULE_PARM(yaffs_auto_checkpoint, "i");
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
++/* use iget and read_inode */
++#define Y_IGET(sb, inum) iget((sb), (inum))
++static void yaffs_read_inode(struct inode *inode);
++
+ #else
+-MODULE_PARM(yaffs_traceMask,"i");
+-MODULE_PARM(yaffs_wr_attempts,"i");
++/* Call local equivalent */
++#define YAFFS_USE_OWN_IGET
++#define Y_IGET(sb, inum) yaffs_iget((sb), (inum))
++
++static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino);
+ #endif
+
+ /*#define T(x) printk x */
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18))
+-#define yaffs_InodeToObjectLV(iptr) (iptr)->i_private
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
++#define yaffs_InodeToObjectLV(iptr) ((iptr)->i_private)
+ #else
+-#define yaffs_InodeToObjectLV(iptr) (iptr)->u.generic_ip
++#define yaffs_InodeToObjectLV(iptr) ((iptr)->u.generic_ip)
+ #endif
+
+ #define yaffs_InodeToObject(iptr) ((yaffs_Object *)(yaffs_InodeToObjectLV(iptr)))
+ #define yaffs_DentryToObject(dptr) yaffs_InodeToObject((dptr)->d_inode)
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ #define yaffs_SuperToDevice(sb) ((yaffs_Device *)sb->s_fs_info)
+ #else
+ #define yaffs_SuperToDevice(sb) ((yaffs_Device *)sb->u.generic_sbp)
+@@ -126,47 +166,49 @@ MODULE_PARM(yaffs_wr_attempts,"i");
+ static void yaffs_put_super(struct super_block *sb);
+
+ static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+- loff_t * pos);
++ loff_t *pos);
++static ssize_t yaffs_hold_space(struct file *f);
++static void yaffs_release_space(struct file *f);
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs_file_flush(struct file *file, fl_owner_t id);
+ #else
+ static int yaffs_file_flush(struct file *file);
+ #endif
+
+ static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+- int datasync);
++ int datasync);
+
+ static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir);
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *n);
+ static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+- struct nameidata *n);
++ struct nameidata *n);
+ #else
+ static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode);
+ static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry);
+ #endif
+ static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+- struct dentry *dentry);
++ struct dentry *dentry);
+ static int yaffs_unlink(struct inode *dir, struct dentry *dentry);
+ static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+- const char *symname);
++ const char *symname);
+ static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- dev_t dev);
++ dev_t dev);
+ #else
+ static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- int dev);
++ int dev);
+ #endif
+ static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+ static int yaffs_setattr(struct dentry *dentry, struct iattr *attr);
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs_sync_fs(struct super_block *sb, int wait);
+ static void yaffs_write_super(struct super_block *sb);
+ #else
+@@ -174,33 +216,47 @@ static int yaffs_sync_fs(struct super_bl
+ static int yaffs_write_super(struct super_block *sb);
+ #endif
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf);
+-#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf);
+ #else
+ static int yaffs_statfs(struct super_block *sb, struct statfs *buf);
+ #endif
+-static void yaffs_read_inode(struct inode *inode);
+
++#ifdef YAFFS_HAS_PUT_INODE
+ static void yaffs_put_inode(struct inode *inode);
++#endif
++
+ static void yaffs_delete_inode(struct inode *);
+ static void yaffs_clear_inode(struct inode *);
+
+ static int yaffs_readpage(struct file *file, struct page *page);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_writepage(struct page *page, struct writeback_control *wbc);
+ #else
+ static int yaffs_writepage(struct page *page);
+ #endif
++
++
++#if (YAFFS_USE_WRITE_BEGIN_END != 0)
++static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned flags,
++ struct page **pagep, void **fsdata);
++static int yaffs_write_end(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned copied,
++ struct page *pg, void *fsdadata);
++#else
+ static int yaffs_prepare_write(struct file *f, struct page *pg,
+- unsigned offset, unsigned to);
++ unsigned offset, unsigned to);
+ static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
+- unsigned to);
++ unsigned to);
+
+-static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
+- int buflen);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
++#endif
++
++static int yaffs_readlink(struct dentry *dentry, char __user *buffer,
++ int buflen);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+ static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
+ #else
+ static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
+@@ -209,12 +265,17 @@ static int yaffs_follow_link(struct dent
+ static struct address_space_operations yaffs_file_address_operations = {
+ .readpage = yaffs_readpage,
+ .writepage = yaffs_writepage,
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++ .write_begin = yaffs_write_begin,
++ .write_end = yaffs_write_end,
++#else
+ .prepare_write = yaffs_prepare_write,
+ .commit_write = yaffs_commit_write,
++#endif
+ };
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22))
+-static struct file_operations yaffs_file_operations = {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
++static const struct file_operations yaffs_file_operations = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+@@ -224,11 +285,12 @@ static struct file_operations yaffs_file
+ .fsync = yaffs_sync_object,
+ .splice_read = generic_file_splice_read,
+ .splice_write = generic_file_splice_write,
++ .llseek = generic_file_llseek,
+ };
+
+-#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18))
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+
+-static struct file_operations yaffs_file_operations = {
++static const struct file_operations yaffs_file_operations = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+@@ -241,29 +303,29 @@ static struct file_operations yaffs_file
+
+ #else
+
+-static struct file_operations yaffs_file_operations = {
++static const struct file_operations yaffs_file_operations = {
+ .read = generic_file_read,
+ .write = generic_file_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ .sendfile = generic_file_sendfile,
+ #endif
+ };
+ #endif
+
+-static struct inode_operations yaffs_file_inode_operations = {
++static const struct inode_operations yaffs_file_inode_operations = {
+ .setattr = yaffs_setattr,
+ };
+
+-static struct inode_operations yaffs_symlink_inode_operations = {
++static const struct inode_operations yaffs_symlink_inode_operations = {
+ .readlink = yaffs_readlink,
+ .follow_link = yaffs_follow_link,
+ .setattr = yaffs_setattr,
+ };
+
+-static struct inode_operations yaffs_dir_inode_operations = {
++static const struct inode_operations yaffs_dir_inode_operations = {
+ .create = yaffs_create,
+ .lookup = yaffs_lookup,
+ .link = yaffs_link,
+@@ -276,16 +338,21 @@ static struct inode_operations yaffs_dir
+ .setattr = yaffs_setattr,
+ };
+
+-static struct file_operations yaffs_dir_operations = {
++static const struct file_operations yaffs_dir_operations = {
+ .read = generic_read_dir,
+ .readdir = yaffs_readdir,
+ .fsync = yaffs_sync_object,
+ };
+
+-static struct super_operations yaffs_super_ops = {
++static const struct super_operations yaffs_super_ops = {
+ .statfs = yaffs_statfs,
++
++#ifndef YAFFS_USE_OWN_IGET
+ .read_inode = yaffs_read_inode,
++#endif
++#ifdef YAFFS_HAS_PUT_INODE
+ .put_inode = yaffs_put_inode,
++#endif
+ .put_super = yaffs_put_super,
+ .delete_inode = yaffs_delete_inode,
+ .clear_inode = yaffs_clear_inode,
+@@ -293,22 +360,21 @@ static struct super_operations yaffs_sup
+ .write_super = yaffs_write_super,
+ };
+
+-static void yaffs_GrossLock(yaffs_Device * dev)
++static void yaffs_GrossLock(yaffs_Device *dev)
+ {
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs locking\n"));
+-
++ T(YAFFS_TRACE_OS, ("yaffs locking %p\n", current));
+ down(&dev->grossLock);
++ T(YAFFS_TRACE_OS, ("yaffs locked %p\n", current));
+ }
+
+-static void yaffs_GrossUnlock(yaffs_Device * dev)
++static void yaffs_GrossUnlock(yaffs_Device *dev)
+ {
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs unlocking\n"));
++ T(YAFFS_TRACE_OS, ("yaffs unlocking %p\n", current));
+ up(&dev->grossLock);
+-
+ }
+
+-static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
+- int buflen)
++static int yaffs_readlink(struct dentry *dentry, char __user *buffer,
++ int buflen)
+ {
+ unsigned char *alias;
+ int ret;
+@@ -329,7 +395,7 @@ static int yaffs_readlink(struct dentry
+ return ret;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+ static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+ #else
+ static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+@@ -345,32 +411,31 @@ static int yaffs_follow_link(struct dent
+
+ yaffs_GrossUnlock(dev);
+
+- if (!alias)
+- {
++ if (!alias) {
+ ret = -ENOMEM;
+ goto out;
+- }
++ }
+
+ ret = vfs_follow_link(nd, alias);
+ kfree(alias);
+ out:
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
+- return ERR_PTR (ret);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++ return ERR_PTR(ret);
+ #else
+ return ret;
+ #endif
+ }
+
+ struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+- yaffs_Object * obj);
++ yaffs_Object *obj);
+
+ /*
+ * Lookup is used to find objects in the fs
+ */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+ static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+- struct nameidata *n)
++ struct nameidata *n)
+ #else
+ static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
+ #endif
+@@ -383,12 +448,11 @@ static struct dentry *yaffs_lookup(struc
+ yaffs_GrossLock(dev);
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_lookup for %d:%s\n",
+- yaffs_InodeToObject(dir)->objectId, dentry->d_name.name));
++ ("yaffs_lookup for %d:%s\n",
++ yaffs_InodeToObject(dir)->objectId, dentry->d_name.name));
+
+- obj =
+- yaffs_FindObjectByName(yaffs_InodeToObject(dir),
+- dentry->d_name.name);
++ obj = yaffs_FindObjectByName(yaffs_InodeToObject(dir),
++ dentry->d_name.name);
+
+ obj = yaffs_GetEquivalentObject(obj); /* in case it was a hardlink */
+
+@@ -397,13 +461,13 @@ static struct dentry *yaffs_lookup(struc
+
+ if (obj) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_lookup found %d\n", obj->objectId));
++ ("yaffs_lookup found %d\n", obj->objectId));
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+
+ if (inode) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_loookup dentry \n"));
++ ("yaffs_loookup dentry \n"));
+ /* #if 0 asserted by NCB for 2.5/6 compatability - falls through to
+ * d_add even if NULL inode */
+ #if 0
+@@ -416,7 +480,7 @@ static struct dentry *yaffs_lookup(struc
+ }
+
+ } else {
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_lookup not found\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_lookup not found\n"));
+
+ }
+
+@@ -425,20 +489,22 @@ static struct dentry *yaffs_lookup(struc
+ d_add(dentry, inode);
+
+ return NULL;
+- /* return (ERR_PTR(-EIO)); */
+-
+ }
+
++
++#ifdef YAFFS_HAS_PUT_INODE
++
+ /* For now put inode is just for debugging
+ * Put inode is called when the inode **structure** is put.
+ */
+ static void yaffs_put_inode(struct inode *inode)
+ {
+ T(YAFFS_TRACE_OS,
+- ("yaffs_put_inode: ino %d, count %d\n", (int)inode->i_ino,
+- atomic_read(&inode->i_count)));
++ ("yaffs_put_inode: ino %d, count %d\n", (int)inode->i_ino,
++ atomic_read(&inode->i_count)));
+
+ }
++#endif
+
+ /* clear is called to tell the fs to release any per-inode data it holds */
+ static void yaffs_clear_inode(struct inode *inode)
+@@ -449,9 +515,9 @@ static void yaffs_clear_inode(struct ino
+ obj = yaffs_InodeToObject(inode);
+
+ T(YAFFS_TRACE_OS,
+- ("yaffs_clear_inode: ino %d, count %d %s\n", (int)inode->i_ino,
+- atomic_read(&inode->i_count),
+- obj ? "object exists" : "null object"));
++ ("yaffs_clear_inode: ino %d, count %d %s\n", (int)inode->i_ino,
++ atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object"));
+
+ if (obj) {
+ dev = obj->myDev;
+@@ -486,23 +552,23 @@ static void yaffs_delete_inode(struct in
+ yaffs_Device *dev;
+
+ T(YAFFS_TRACE_OS,
+- ("yaffs_delete_inode: ino %d, count %d %s\n", (int)inode->i_ino,
+- atomic_read(&inode->i_count),
+- obj ? "object exists" : "null object"));
++ ("yaffs_delete_inode: ino %d, count %d %s\n", (int)inode->i_ino,
++ atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object"));
+
+ if (obj) {
+ dev = obj->myDev;
+ yaffs_GrossLock(dev);
+- yaffs_DeleteFile(obj);
++ yaffs_DeleteObject(obj);
+ yaffs_GrossUnlock(dev);
+ }
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
+- truncate_inode_pages (&inode->i_data, 0);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++ truncate_inode_pages(&inode->i_data, 0);
+ #endif
+ clear_inode(inode);
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs_file_flush(struct file *file, fl_owner_t id)
+ #else
+ static int yaffs_file_flush(struct file *file)
+@@ -513,8 +579,8 @@ static int yaffs_file_flush(struct file
+ yaffs_Device *dev = obj->myDev;
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_file_flush object %d (%s)\n", obj->objectId,
+- obj->dirty ? "dirty" : "clean"));
++ ("yaffs_file_flush object %d (%s)\n", obj->objectId,
++ obj->dirty ? "dirty" : "clean"));
+
+ yaffs_GrossLock(dev);
+
+@@ -535,15 +601,15 @@ static int yaffs_readpage_nolock(struct
+
+ yaffs_Device *dev;
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage at %08x, size %08x\n",
+- (unsigned)(pg->index << PAGE_CACHE_SHIFT),
+- (unsigned)PAGE_CACHE_SIZE));
++ T(YAFFS_TRACE_OS, ("yaffs_readpage at %08x, size %08x\n",
++ (unsigned)(pg->index << PAGE_CACHE_SHIFT),
++ (unsigned)PAGE_CACHE_SIZE));
+
+ obj = yaffs_DentryToObject(f->f_dentry);
+
+ dev = obj->myDev;
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ BUG_ON(!PageLocked(pg));
+ #else
+ if (!PageLocked(pg))
+@@ -555,9 +621,9 @@ static int yaffs_readpage_nolock(struct
+
+ yaffs_GrossLock(dev);
+
+- ret =
+- yaffs_ReadDataFromFile(obj, pg_buf, pg->index << PAGE_CACHE_SHIFT,
+- PAGE_CACHE_SIZE);
++ ret = yaffs_ReadDataFromFile(obj, pg_buf,
++ pg->index << PAGE_CACHE_SHIFT,
++ PAGE_CACHE_SIZE);
+
+ yaffs_GrossUnlock(dev);
+
+@@ -575,7 +641,7 @@ static int yaffs_readpage_nolock(struct
+ flush_dcache_page(pg);
+ kunmap(pg);
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage done\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_readpage done\n"));
+ return ret;
+ }
+
+@@ -593,7 +659,7 @@ static int yaffs_readpage(struct file *f
+
+ /* writepage inspired by/stolen from smbfs */
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
+ #else
+ static int yaffs_writepage(struct page *page)
+@@ -616,12 +682,11 @@ static int yaffs_writepage(struct page *
+
+ if (offset > inode->i_size) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG
+- "yaffs_writepage at %08x, inode size = %08x!!!\n",
+- (unsigned)(page->index << PAGE_CACHE_SHIFT),
+- (unsigned)inode->i_size));
++ ("yaffs_writepage at %08x, inode size = %08x!!!\n",
++ (unsigned)(page->index << PAGE_CACHE_SHIFT),
++ (unsigned)inode->i_size));
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG " -> don't care!!\n"));
++ (" -> don't care!!\n"));
+ unlock_page(page);
+ return 0;
+ }
+@@ -629,11 +694,10 @@ static int yaffs_writepage(struct page *
+ end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+
+ /* easy case */
+- if (page->index < end_index) {
++ if (page->index < end_index)
+ nBytes = PAGE_CACHE_SIZE;
+- } else {
++ else
+ nBytes = inode->i_size & (PAGE_CACHE_SIZE - 1);
+- }
+
+ get_page(page);
+
+@@ -643,19 +707,18 @@ static int yaffs_writepage(struct page *
+ yaffs_GrossLock(obj->myDev);
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_writepage at %08x, size %08x\n",
+- (unsigned)(page->index << PAGE_CACHE_SHIFT), nBytes));
++ ("yaffs_writepage at %08x, size %08x\n",
++ (unsigned)(page->index << PAGE_CACHE_SHIFT), nBytes));
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "writepag0: obj = %05x, ino = %05x\n",
+- (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
++ ("writepag0: obj = %05x, ino = %05x\n",
++ (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
+
+- nWritten =
+- yaffs_WriteDataToFile(obj, buffer, page->index << PAGE_CACHE_SHIFT,
+- nBytes, 0);
++ nWritten = yaffs_WriteDataToFile(obj, buffer,
++ page->index << PAGE_CACHE_SHIFT, nBytes, 0);
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "writepag1: obj = %05x, ino = %05x\n",
+- (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
++ ("writepag1: obj = %05x, ino = %05x\n",
++ (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
+
+ yaffs_GrossUnlock(obj->myDev);
+
+@@ -667,100 +730,207 @@ static int yaffs_writepage(struct page *
+ return (nWritten == nBytes) ? 0 : -ENOSPC;
+ }
+
++
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned flags,
++ struct page **pagep, void **fsdata)
++{
++ struct page *pg = NULL;
++ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
++ uint32_t offset = pos & (PAGE_CACHE_SIZE - 1);
++ uint32_t to = offset + len;
++
++ int ret = 0;
++ int space_held = 0;
++
++ T(YAFFS_TRACE_OS, ("start yaffs_write_begin\n"));
++ /* Get a page */
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28)
++ pg = grab_cache_page_write_begin(mapping, index, flags);
++#else
++ pg = __grab_cache_page(mapping, index);
++#endif
++
++ *pagep = pg;
++ if (!pg) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ /* Get fs space */
++ space_held = yaffs_hold_space(filp);
++
++ if (!space_held) {
++ ret = -ENOSPC;
++ goto out;
++ }
++
++ /* Update page if required */
++
++ if (!Page_Uptodate(pg) && (offset || to < PAGE_CACHE_SIZE))
++ ret = yaffs_readpage_nolock(filp, pg);
++
++ if (ret)
++ goto out;
++
++ /* Happy path return */
++ T(YAFFS_TRACE_OS, ("end yaffs_write_begin - ok\n"));
++
++ return 0;
++
++out:
++ T(YAFFS_TRACE_OS, ("end yaffs_write_begin fail returning %d\n", ret));
++ if (space_held)
++ yaffs_release_space(filp);
++ if (pg) {
++ unlock_page(pg);
++ page_cache_release(pg);
++ }
++ return ret;
++}
++
++#else
++
+ static int yaffs_prepare_write(struct file *f, struct page *pg,
+- unsigned offset, unsigned to)
++ unsigned offset, unsigned to)
+ {
++ T(YAFFS_TRACE_OS, ("yaffs_prepair_write\n"));
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_prepair_write\n"));
+ if (!Page_Uptodate(pg) && (offset || to < PAGE_CACHE_SIZE))
+ return yaffs_readpage_nolock(f, pg);
+-
+ return 0;
++}
++#endif
++
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_end(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned copied,
++ struct page *pg, void *fsdadata)
++{
++ int ret = 0;
++ void *addr, *kva;
++ uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
++
++ kva = kmap(pg);
++ addr = kva + offset_into_page;
++
++ T(YAFFS_TRACE_OS,
++ ("yaffs_write_end addr %x pos %x nBytes %d\n",
++ (unsigned) addr,
++ (int)pos, copied));
++
++ ret = yaffs_file_write(filp, addr, copied, &pos);
++
++ if (ret != copied) {
++ T(YAFFS_TRACE_OS,
++ ("yaffs_write_end not same size ret %d copied %d\n",
++ ret, copied));
++ SetPageError(pg);
++ ClearPageUptodate(pg);
++ } else {
++ SetPageUptodate(pg);
++ }
++
++ kunmap(pg);
+
++ yaffs_release_space(filp);
++ unlock_page(pg);
++ page_cache_release(pg);
++ return ret;
+ }
++#else
+
+ static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
+- unsigned to)
++ unsigned to)
+ {
++ void *addr, *kva;
+
+- void *addr = page_address(pg) + offset;
+ loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
+ int nBytes = to - offset;
+ int nWritten;
+
+ unsigned spos = pos;
+- unsigned saddr = (unsigned)addr;
++ unsigned saddr;
++
++ kva = kmap(pg);
++ addr = kva + offset;
++
++ saddr = (unsigned) addr;
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_commit_write addr %x pos %x nBytes %d\n", saddr,
+- spos, nBytes));
++ ("yaffs_commit_write addr %x pos %x nBytes %d\n",
++ saddr, spos, nBytes));
+
+ nWritten = yaffs_file_write(f, addr, nBytes, &pos);
+
+ if (nWritten != nBytes) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG
+- "yaffs_commit_write not same size nWritten %d nBytes %d\n",
+- nWritten, nBytes));
++ ("yaffs_commit_write not same size nWritten %d nBytes %d\n",
++ nWritten, nBytes));
+ SetPageError(pg);
+ ClearPageUptodate(pg);
+ } else {
+ SetPageUptodate(pg);
+ }
+
++ kunmap(pg);
++
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_commit_write returning %d\n",
+- nWritten == nBytes ? 0 : nWritten));
++ ("yaffs_commit_write returning %d\n",
++ nWritten == nBytes ? 0 : nWritten));
+
+ return nWritten == nBytes ? 0 : nWritten;
+-
+ }
++#endif
++
+
+-static void yaffs_FillInodeFromObject(struct inode *inode, yaffs_Object * obj)
++static void yaffs_FillInodeFromObject(struct inode *inode, yaffs_Object *obj)
+ {
+ if (inode && obj) {
+
+
+ /* Check mode against the variant type and attempt to repair if broken. */
+- __u32 mode = obj->yst_mode;
+- switch( obj->variantType ){
+- case YAFFS_OBJECT_TYPE_FILE :
+- if( ! S_ISREG(mode) ){
+- obj->yst_mode &= ~S_IFMT;
+- obj->yst_mode |= S_IFREG;
+- }
+-
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK :
+- if( ! S_ISLNK(mode) ){
+- obj->yst_mode &= ~S_IFMT;
+- obj->yst_mode |= S_IFLNK;
+- }
+-
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY :
+- if( ! S_ISDIR(mode) ){
+- obj->yst_mode &= ~S_IFMT;
+- obj->yst_mode |= S_IFDIR;
+- }
+-
+- break;
+- case YAFFS_OBJECT_TYPE_UNKNOWN :
+- case YAFFS_OBJECT_TYPE_HARDLINK :
+- case YAFFS_OBJECT_TYPE_SPECIAL :
+- default:
+- /* TODO? */
+- break;
+- }
++ __u32 mode = obj->yst_mode;
++ switch (obj->variantType) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ if (!S_ISREG(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFREG;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ if (!S_ISLNK(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFLNK;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ if (!S_ISDIR(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFDIR;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ default:
++ /* TODO? */
++ break;
++ }
++
++ inode->i_flags |= S_NOATIME;
+
+ inode->i_ino = obj->objectId;
+ inode->i_mode = obj->yst_mode;
+ inode->i_uid = obj->yst_uid;
+ inode->i_gid = obj->yst_gid;
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+ inode->i_blksize = inode->i_sb->s_blocksize;
+ #endif
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+ inode->i_rdev = old_decode_dev(obj->yst_rdev);
+ inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
+@@ -781,26 +951,25 @@ static void yaffs_FillInodeFromObject(st
+ inode->i_nlink = yaffs_GetObjectLinkCount(obj);
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG
+- "yaffs_FillInode mode %x uid %d gid %d size %d count %d\n",
+- inode->i_mode, inode->i_uid, inode->i_gid,
+- (int)inode->i_size, atomic_read(&inode->i_count)));
++ ("yaffs_FillInode mode %x uid %d gid %d size %d count %d\n",
++ inode->i_mode, inode->i_uid, inode->i_gid,
++ (int)inode->i_size, atomic_read(&inode->i_count)));
+
+ switch (obj->yst_mode & S_IFMT) {
+ default: /* fifo, device or socket */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ init_special_inode(inode, obj->yst_mode,
+- old_decode_dev(obj->yst_rdev));
++ old_decode_dev(obj->yst_rdev));
+ #else
+ init_special_inode(inode, obj->yst_mode,
+- (dev_t) (obj->yst_rdev));
++ (dev_t) (obj->yst_rdev));
+ #endif
+ break;
+ case S_IFREG: /* file */
+ inode->i_op = &yaffs_file_inode_operations;
+ inode->i_fop = &yaffs_file_operations;
+ inode->i_mapping->a_ops =
+- &yaffs_file_address_operations;
++ &yaffs_file_address_operations;
+ break;
+ case S_IFDIR: /* directory */
+ inode->i_op = &yaffs_dir_inode_operations;
+@@ -817,34 +986,36 @@ static void yaffs_FillInodeFromObject(st
+
+ } else {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_FileInode invalid parameters\n"));
++ ("yaffs_FileInode invalid parameters\n"));
+ }
+
+ }
+
+ struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+- yaffs_Object * obj)
++ yaffs_Object *obj)
+ {
+ struct inode *inode;
+
+ if (!sb) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_get_inode for NULL super_block!!\n"));
++ ("yaffs_get_inode for NULL super_block!!\n"));
+ return NULL;
+
+ }
+
+ if (!obj) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_get_inode for NULL object!!\n"));
++ ("yaffs_get_inode for NULL object!!\n"));
+ return NULL;
+
+ }
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_get_inode for object %d\n", obj->objectId));
++ ("yaffs_get_inode for object %d\n", obj->objectId));
+
+- inode = iget(sb, obj->objectId);
++ inode = Y_IGET(sb, obj->objectId);
++ if (IS_ERR(inode))
++ return NULL;
+
+ /* NB Side effect: iget calls back to yaffs_read_inode(). */
+ /* iget also increments the inode's i_count */
+@@ -854,7 +1025,7 @@ struct inode *yaffs_get_inode(struct sup
+ }
+
+ static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+- loff_t * pos)
++ loff_t *pos)
+ {
+ yaffs_Object *obj;
+ int nWritten, ipos;
+@@ -869,28 +1040,26 @@ static ssize_t yaffs_file_write(struct f
+
+ inode = f->f_dentry->d_inode;
+
+- if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND) {
++ if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
+ ipos = inode->i_size;
+- } else {
++ else
+ ipos = *pos;
+- }
+
+- if (!obj) {
++ if (!obj)
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_file_write: hey obj is null!\n"));
+- } else {
++ ("yaffs_file_write: hey obj is null!\n"));
++ else
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG
+- "yaffs_file_write about to write writing %d bytes"
+- "to object %d at %d\n",
+- n, obj->objectId, ipos));
+- }
++ ("yaffs_file_write about to write writing %zu bytes"
++ "to object %d at %d\n",
++ n, obj->objectId, ipos));
+
+ nWritten = yaffs_WriteDataToFile(obj, buf, ipos, n, 0);
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_file_write writing %d bytes, %d written at %d\n",
+- n, nWritten, ipos));
++ ("yaffs_file_write writing %zu bytes, %d written at %d\n",
++ n, nWritten, ipos));
++
+ if (nWritten > 0) {
+ ipos += nWritten;
+ *pos = ipos;
+@@ -899,10 +1068,9 @@ static ssize_t yaffs_file_write(struct f
+ inode->i_blocks = (ipos + 511) >> 9;
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG
+- "yaffs_file_write size updated to %d bytes, "
+- "%d blocks\n",
+- ipos, (int)(inode->i_blocks)));
++ ("yaffs_file_write size updated to %d bytes, "
++ "%d blocks\n",
++ ipos, (int)(inode->i_blocks)));
+ }
+
+ }
+@@ -910,13 +1078,54 @@ static ssize_t yaffs_file_write(struct f
+ return nWritten == 0 ? -ENOSPC : nWritten;
+ }
+
++/* Space holding and freeing is done to ensure we have space available for write_begin/end */
++/* For now we just assume few parallel writes and check against a small number. */
++/* Todo: need to do this with a counter to handle parallel reads better */
++
++static ssize_t yaffs_hold_space(struct file *f)
++{
++ yaffs_Object *obj;
++ yaffs_Device *dev;
++
++ int nFreeChunks;
++
++
++ obj = yaffs_DentryToObject(f->f_dentry);
++
++ dev = obj->myDev;
++
++ yaffs_GrossLock(dev);
++
++ nFreeChunks = yaffs_GetNumberOfFreeChunks(dev);
++
++ yaffs_GrossUnlock(dev);
++
++ return (nFreeChunks > 20) ? 1 : 0;
++}
++
++static void yaffs_release_space(struct file *f)
++{
++ yaffs_Object *obj;
++ yaffs_Device *dev;
++
++
++ obj = yaffs_DentryToObject(f->f_dentry);
++
++ dev = obj->myDev;
++
++ yaffs_GrossLock(dev);
++
++
++ yaffs_GrossUnlock(dev);
++}
++
+ static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
+ {
+ yaffs_Object *obj;
+ yaffs_Device *dev;
+ struct inode *inode = f->f_dentry->d_inode;
+ unsigned long offset, curoffs;
+- struct list_head *i;
++ struct ylist_head *i;
+ yaffs_Object *l;
+
+ char name[YAFFS_MAX_NAME_LENGTH + 1];
+@@ -932,24 +1141,20 @@ static int yaffs_readdir(struct file *f,
+
+ if (offset == 0) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_readdir: entry . ino %d \n",
+- (int)inode->i_ino));
+- if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR)
+- < 0) {
++ ("yaffs_readdir: entry . ino %d \n",
++ (int)inode->i_ino));
++ if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0)
+ goto out;
+- }
+ offset++;
+ f->f_pos++;
+ }
+ if (offset == 1) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_readdir: entry .. ino %d \n",
+- (int)f->f_dentry->d_parent->d_inode->i_ino));
+- if (filldir
+- (dirent, "..", 2, offset,
+- f->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) {
++ ("yaffs_readdir: entry .. ino %d \n",
++ (int)f->f_dentry->d_parent->d_inode->i_ino));
++ if (filldir(dirent, "..", 2, offset,
++ f->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0)
+ goto out;
+- }
+ offset++;
+ f->f_pos++;
+ }
+@@ -965,35 +1170,32 @@ static int yaffs_readdir(struct file *f,
+ f->f_version = inode->i_version;
+ }
+
+- list_for_each(i, &obj->variant.directoryVariant.children) {
++ ylist_for_each(i, &obj->variant.directoryVariant.children) {
+ curoffs++;
+ if (curoffs >= offset) {
+- l = list_entry(i, yaffs_Object, siblings);
++ l = ylist_entry(i, yaffs_Object, siblings);
+
+ yaffs_GetObjectName(l, name,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_readdir: %s inode %d\n", name,
++ ("yaffs_readdir: %s inode %d\n", name,
+ yaffs_GetObjectInode(l)));
+
+ if (filldir(dirent,
+- name,
+- strlen(name),
+- offset,
+- yaffs_GetObjectInode(l),
+- yaffs_GetObjectType(l))
+- < 0) {
++ name,
++ strlen(name),
++ offset,
++ yaffs_GetObjectInode(l),
++ yaffs_GetObjectType(l)) < 0)
+ goto up_and_out;
+- }
+
+ offset++;
+ f->f_pos++;
+ }
+ }
+
+- up_and_out:
+- out:
+-
++up_and_out:
++out:
+ yaffs_GrossUnlock(dev);
+
+ return 0;
+@@ -1002,12 +1204,19 @@ static int yaffs_readdir(struct file *f,
+ /*
+ * File creation. Allocate an inode, and we're done..
+ */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++#define YCRED(x) x
++#else
++#define YCRED(x) (x->cred)
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- dev_t rdev)
++ dev_t rdev)
+ #else
+ static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- int rdev)
++ int rdev)
+ #endif
+ {
+ struct inode *inode;
+@@ -1018,25 +1227,25 @@ static int yaffs_mknod(struct inode *dir
+ yaffs_Object *parent = yaffs_InodeToObject(dir);
+
+ int error = -ENOSPC;
+- uid_t uid = current->fsuid;
+- gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
++ uid_t uid = YCRED(current)->fsuid;
++ gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
+
+- if((dir->i_mode & S_ISGID) && S_ISDIR(mode))
++ if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
+ mode |= S_ISGID;
+
+ if (parent) {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_mknod: parent object %d type %d\n",
+- parent->objectId, parent->variantType));
++ ("yaffs_mknod: parent object %d type %d\n",
++ parent->objectId, parent->variantType));
+ } else {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_mknod: could not get parent object\n"));
++ ("yaffs_mknod: could not get parent object\n"));
+ return -EPERM;
+ }
+
+ T(YAFFS_TRACE_OS, ("yaffs_mknod: making oject for %s, "
+- "mode %x dev %x\n",
+- dentry->d_name.name, mode, rdev));
++ "mode %x dev %x\n",
++ dentry->d_name.name, mode, rdev));
+
+ dev = parent->myDev;
+
+@@ -1045,33 +1254,28 @@ static int yaffs_mknod(struct inode *dir
+ switch (mode & S_IFMT) {
+ default:
+ /* Special (socket, fifo, device...) */
+- T(YAFFS_TRACE_OS, (KERN_DEBUG
+- "yaffs_mknod: making special\n"));
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
+- obj =
+- yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
+- gid, old_encode_dev(rdev));
+-#else
+- obj =
+- yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
+- gid, rdev);
++ T(YAFFS_TRACE_OS, ("yaffs_mknod: making special\n"));
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ obj = yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
++ gid, old_encode_dev(rdev));
++#else
++ obj = yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
++ gid, rdev);
+ #endif
+ break;
+ case S_IFREG: /* file */
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_mknod: making file\n"));
+- obj =
+- yaffs_MknodFile(parent, dentry->d_name.name, mode, uid,
+- gid);
++ T(YAFFS_TRACE_OS, ("yaffs_mknod: making file\n"));
++ obj = yaffs_MknodFile(parent, dentry->d_name.name, mode, uid,
++ gid);
+ break;
+ case S_IFDIR: /* directory */
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_mknod: making directory\n"));
+- obj =
+- yaffs_MknodDirectory(parent, dentry->d_name.name, mode,
+- uid, gid);
++ ("yaffs_mknod: making directory\n"));
++ obj = yaffs_MknodDirectory(parent, dentry->d_name.name, mode,
++ uid, gid);
+ break;
+ case S_IFLNK: /* symlink */
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_mknod: making file\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_mknod: making symlink\n"));
+ obj = NULL; /* Do we ever get here? */
+ break;
+ }
+@@ -1083,12 +1287,12 @@ static int yaffs_mknod(struct inode *dir
+ inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
+ d_instantiate(dentry, inode);
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_mknod created object %d count = %d\n",
+- obj->objectId, atomic_read(&inode->i_count)));
++ ("yaffs_mknod created object %d count = %d\n",
++ obj->objectId, atomic_read(&inode->i_count)));
+ error = 0;
+ } else {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_mknod failed making object\n"));
++ ("yaffs_mknod failed making object\n"));
+ error = -ENOMEM;
+ }
+
+@@ -1098,25 +1302,19 @@ static int yaffs_mknod(struct inode *dir
+ static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+ {
+ int retVal;
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_mkdir\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_mkdir\n"));
+ retVal = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
+-#if 0
+- /* attempt to fix dir bug - didn't work */
+- if (!retVal) {
+- dget(dentry);
+- }
+-#endif
+ return retVal;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *n)
+ #else
+ static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
+ #endif
+ {
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_create\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_create\n"));
+ return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
+ }
+
+@@ -1127,8 +1325,8 @@ static int yaffs_unlink(struct inode *di
+ yaffs_Device *dev;
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_unlink %d:%s\n", (int)(dir->i_ino),
+- dentry->d_name.name));
++ ("yaffs_unlink %d:%s\n", (int)(dir->i_ino),
++ dentry->d_name.name));
+
+ dev = yaffs_InodeToObject(dir)->myDev;
+
+@@ -1151,82 +1349,74 @@ static int yaffs_unlink(struct inode *di
+ * Create a link...
+ */
+ static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+- struct dentry *dentry)
++ struct dentry *dentry)
+ {
+ struct inode *inode = old_dentry->d_inode;
+ yaffs_Object *obj = NULL;
+ yaffs_Object *link = NULL;
+ yaffs_Device *dev;
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_link\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_link\n"));
+
+ obj = yaffs_InodeToObject(inode);
+ dev = obj->myDev;
+
+ yaffs_GrossLock(dev);
+
+- if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
+- {
+- link =
+- yaffs_Link(yaffs_InodeToObject(dir), dentry->d_name.name,
+- obj);
+- }
++ if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
++ link = yaffs_Link(yaffs_InodeToObject(dir), dentry->d_name.name,
++ obj);
+
+ if (link) {
+ old_dentry->d_inode->i_nlink = yaffs_GetObjectLinkCount(obj);
+ d_instantiate(dentry, old_dentry->d_inode);
+ atomic_inc(&old_dentry->d_inode->i_count);
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_link link count %d i_count %d\n",
+- old_dentry->d_inode->i_nlink,
+- atomic_read(&old_dentry->d_inode->i_count)));
+-
++ ("yaffs_link link count %d i_count %d\n",
++ old_dentry->d_inode->i_nlink,
++ atomic_read(&old_dentry->d_inode->i_count)));
+ }
+
+ yaffs_GrossUnlock(dev);
+
+- if (link) {
+-
++ if (link)
+ return 0;
+- }
+
+ return -EPERM;
+ }
+
+ static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+- const char *symname)
++ const char *symname)
+ {
+ yaffs_Object *obj;
+ yaffs_Device *dev;
+- uid_t uid = current->fsuid;
+- gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
++ uid_t uid = YCRED(current)->fsuid;
++ gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_symlink\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_symlink\n"));
+
+ dev = yaffs_InodeToObject(dir)->myDev;
+ yaffs_GrossLock(dev);
+ obj = yaffs_MknodSymLink(yaffs_InodeToObject(dir), dentry->d_name.name,
+- S_IFLNK | S_IRWXUGO, uid, gid, symname);
++ S_IFLNK | S_IRWXUGO, uid, gid, symname);
+ yaffs_GrossUnlock(dev);
+
+ if (obj) {
+-
+ struct inode *inode;
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+ d_instantiate(dentry, inode);
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "symlink created OK\n"));
++ T(YAFFS_TRACE_OS, ("symlink created OK\n"));
+ return 0;
+ } else {
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "symlink not created\n"));
+-
++ T(YAFFS_TRACE_OS, ("symlink not created\n"));
+ }
+
+ return -ENOMEM;
+ }
+
+ static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+- int datasync)
++ int datasync)
+ {
+
+ yaffs_Object *obj;
+@@ -1236,7 +1426,7 @@ static int yaffs_sync_object(struct file
+
+ dev = obj->myDev;
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_sync_object\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_sync_object\n"));
+ yaffs_GrossLock(dev);
+ yaffs_FlushFile(obj, 1);
+ yaffs_GrossUnlock(dev);
+@@ -1255,41 +1445,36 @@ static int yaffs_rename(struct inode *ol
+ int retVal = YAFFS_FAIL;
+ yaffs_Object *target;
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_rename\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_rename\n"));
+ dev = yaffs_InodeToObject(old_dir)->myDev;
+
+ yaffs_GrossLock(dev);
+
+ /* Check if the target is an existing directory that is not empty. */
+- target =
+- yaffs_FindObjectByName(yaffs_InodeToObject(new_dir),
+- new_dentry->d_name.name);
++ target = yaffs_FindObjectByName(yaffs_InodeToObject(new_dir),
++ new_dentry->d_name.name);
+
+
+
+- if (target &&
+- target->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
+- !list_empty(&target->variant.directoryVariant.children)) {
++ if (target && target->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
++ !ylist_empty(&target->variant.directoryVariant.children)) {
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "target is non-empty dir\n"));
++ T(YAFFS_TRACE_OS, ("target is non-empty dir\n"));
+
+ retVal = YAFFS_FAIL;
+ } else {
+-
+ /* Now does unlinking internally using shadowing mechanism */
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "calling yaffs_RenameObject\n"));
+-
+- retVal =
+- yaffs_RenameObject(yaffs_InodeToObject(old_dir),
+- old_dentry->d_name.name,
+- yaffs_InodeToObject(new_dir),
+- new_dentry->d_name.name);
++ T(YAFFS_TRACE_OS, ("calling yaffs_RenameObject\n"));
+
++ retVal = yaffs_RenameObject(yaffs_InodeToObject(old_dir),
++ old_dentry->d_name.name,
++ yaffs_InodeToObject(new_dir),
++ new_dentry->d_name.name);
+ }
+ yaffs_GrossUnlock(dev);
+
+ if (retVal == YAFFS_OK) {
+- if(target) {
++ if (target) {
+ new_dentry->d_inode->i_nlink--;
+ mark_inode_dirty(new_dentry->d_inode);
+ }
+@@ -1298,7 +1483,6 @@ static int yaffs_rename(struct inode *ol
+ } else {
+ return -ENOTEMPTY;
+ }
+-
+ }
+
+ static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
+@@ -1308,15 +1492,15 @@ static int yaffs_setattr(struct dentry *
+ yaffs_Device *dev;
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_setattr of object %d\n",
+- yaffs_InodeToObject(inode)->objectId));
+-
+- if ((error = inode_change_ok(inode, attr)) == 0) {
++ ("yaffs_setattr of object %d\n",
++ yaffs_InodeToObject(inode)->objectId));
+
++ error = inode_change_ok(inode, attr);
++ if (error == 0) {
+ dev = yaffs_InodeToObject(inode)->myDev;
+ yaffs_GrossLock(dev);
+ if (yaffs_SetAttributes(yaffs_InodeToObject(inode), attr) ==
+- YAFFS_OK) {
++ YAFFS_OK) {
+ error = 0;
+ } else {
+ error = -EPERM;
+@@ -1328,12 +1512,12 @@ static int yaffs_setattr(struct dentry *
+ return error;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ {
+ yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
+ struct super_block *sb = dentry->d_sb;
+-#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
+ {
+ yaffs_Device *dev = yaffs_SuperToDevice(sb);
+@@ -1343,32 +1527,53 @@ static int yaffs_statfs(struct super_blo
+ yaffs_Device *dev = yaffs_SuperToDevice(sb);
+ #endif
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_statfs\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_statfs\n"));
+
+ yaffs_GrossLock(dev);
+
+ buf->f_type = YAFFS_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+ buf->f_namelen = 255;
+- if (sb->s_blocksize > dev->nDataBytesPerChunk) {
++
++ if (dev->nDataBytesPerChunk & (dev->nDataBytesPerChunk - 1)) {
++ /* Do this if chunk size is not a power of 2 */
++
++ uint64_t bytesInDev;
++ uint64_t bytesFree;
++
++ bytesInDev = ((uint64_t)((dev->endBlock - dev->startBlock + 1))) *
++ ((uint64_t)(dev->nChunksPerBlock * dev->nDataBytesPerChunk));
++
++ do_div(bytesInDev, sb->s_blocksize); /* bytesInDev becomes the number of blocks */
++ buf->f_blocks = bytesInDev;
++
++ bytesFree = ((uint64_t)(yaffs_GetNumberOfFreeChunks(dev))) *
++ ((uint64_t)(dev->nDataBytesPerChunk));
++
++ do_div(bytesFree, sb->s_blocksize);
++
++ buf->f_bfree = bytesFree;
++
++ } else if (sb->s_blocksize > dev->nDataBytesPerChunk) {
+
+ buf->f_blocks =
+- (dev->endBlock - dev->startBlock +
+- 1) * dev->nChunksPerBlock / (sb->s_blocksize /
+- dev->nDataBytesPerChunk);
++ (dev->endBlock - dev->startBlock + 1) *
++ dev->nChunksPerBlock /
++ (sb->s_blocksize / dev->nDataBytesPerChunk);
+ buf->f_bfree =
+- yaffs_GetNumberOfFreeChunks(dev) / (sb->s_blocksize /
+- dev->nDataBytesPerChunk);
++ yaffs_GetNumberOfFreeChunks(dev) /
++ (sb->s_blocksize / dev->nDataBytesPerChunk);
+ } else {
+-
+ buf->f_blocks =
+- (dev->endBlock - dev->startBlock +
+- 1) * dev->nChunksPerBlock * (dev->nDataBytesPerChunk /
+- sb->s_blocksize);
++ (dev->endBlock - dev->startBlock + 1) *
++ dev->nChunksPerBlock *
++ (dev->nDataBytesPerChunk / sb->s_blocksize);
++
+ buf->f_bfree =
+- yaffs_GetNumberOfFreeChunks(dev) * (dev->nDataBytesPerChunk /
+- sb->s_blocksize);
++ yaffs_GetNumberOfFreeChunks(dev) *
++ (dev->nDataBytesPerChunk / sb->s_blocksize);
+ }
++
+ buf->f_files = 0;
+ buf->f_ffree = 0;
+ buf->f_bavail = buf->f_bfree;
+@@ -1378,18 +1583,19 @@ static int yaffs_statfs(struct super_blo
+ }
+
+
+-/**
+ static int yaffs_do_sync_fs(struct super_block *sb)
+ {
+
+ yaffs_Device *dev = yaffs_SuperToDevice(sb);
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_do_sync_fs\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_do_sync_fs\n"));
+
+- if(sb->s_dirt) {
++ if (sb->s_dirt) {
+ yaffs_GrossLock(dev);
+
+- if(dev)
++ if (dev) {
++ yaffs_FlushEntireDeviceCache(dev);
+ yaffs_CheckpointSave(dev);
++ }
+
+ yaffs_GrossUnlock(dev);
+
+@@ -1397,35 +1603,73 @@ static int yaffs_do_sync_fs(struct super
+ }
+ return 0;
+ }
+-**/
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static void yaffs_write_super(struct super_block *sb)
+ #else
+ static int yaffs_write_super(struct super_block *sb)
+ #endif
+ {
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_write_super\n"));
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
+- return 0; /* yaffs_do_sync_fs(sb);*/
++ T(YAFFS_TRACE_OS, ("yaffs_write_super\n"));
++ if (yaffs_auto_checkpoint >= 2)
++ yaffs_do_sync_fs(sb);
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
++ return 0;
+ #endif
+ }
+
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs_sync_fs(struct super_block *sb, int wait)
+ #else
+ static int yaffs_sync_fs(struct super_block *sb)
+ #endif
+ {
++ T(YAFFS_TRACE_OS, ("yaffs_sync_fs\n"));
++
++ if (yaffs_auto_checkpoint >= 1)
++ yaffs_do_sync_fs(sb);
++
++ return 0;
++}
++
++#ifdef YAFFS_USE_OWN_IGET
++
++static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
++{
++ struct inode *inode;
++ yaffs_Object *obj;
++ yaffs_Device *dev = yaffs_SuperToDevice(sb);
++
++ T(YAFFS_TRACE_OS,
++ ("yaffs_iget for %lu\n", ino));
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_sync_fs\n"));
++ inode = iget_locked(sb, ino);
++ if (!inode)
++ return ERR_PTR(-ENOMEM);
++ if (!(inode->i_state & I_NEW))
++ return inode;
++
++ /* NB This is called as a side effect of other functions, but
++ * we had to release the lock to prevent deadlocks, so
++ * need to lock again.
++ */
+
+- return 0; /* yaffs_do_sync_fs(sb);*/
++ yaffs_GrossLock(dev);
+
++ obj = yaffs_FindObjectByNumber(dev, inode->i_ino);
++
++ yaffs_FillInodeFromObject(inode, obj);
++
++ yaffs_GrossUnlock(dev);
++
++ unlock_new_inode(inode);
++ return inode;
+ }
+
++#else
+
+ static void yaffs_read_inode(struct inode *inode)
+ {
+@@ -1438,7 +1682,7 @@ static void yaffs_read_inode(struct inod
+ yaffs_Device *dev = yaffs_SuperToDevice(inode->i_sb);
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_read_inode for %d\n", (int)inode->i_ino));
++ ("yaffs_read_inode for %d\n", (int)inode->i_ino));
+
+ yaffs_GrossLock(dev);
+
+@@ -1449,18 +1693,20 @@ static void yaffs_read_inode(struct inod
+ yaffs_GrossUnlock(dev);
+ }
+
+-static LIST_HEAD(yaffs_dev_list);
++#endif
++
++static YLIST_HEAD(yaffs_dev_list);
+
+-#if 0 // not used
++#if 0 /* not used */
+ static int yaffs_remount_fs(struct super_block *sb, int *flags, char *data)
+ {
+ yaffs_Device *dev = yaffs_SuperToDevice(sb);
+
+- if( *flags & MS_RDONLY ) {
++ if (*flags & MS_RDONLY) {
+ struct mtd_info *mtd = yaffs_SuperToDevice(sb)->genericDevice;
+
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_remount_fs: %s: RO\n", dev->name ));
++ ("yaffs_remount_fs: %s: RO\n", dev->name));
+
+ yaffs_GrossLock(dev);
+
+@@ -1472,10 +1718,9 @@ static int yaffs_remount_fs(struct super
+ mtd->sync(mtd);
+
+ yaffs_GrossUnlock(dev);
+- }
+- else {
++ } else {
+ T(YAFFS_TRACE_OS,
+- (KERN_DEBUG "yaffs_remount_fs: %s: RW\n", dev->name ));
++ ("yaffs_remount_fs: %s: RW\n", dev->name));
+ }
+
+ return 0;
+@@ -1486,7 +1731,7 @@ static void yaffs_put_super(struct super
+ {
+ yaffs_Device *dev = yaffs_SuperToDevice(sb);
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_put_super\n"));
++ T(YAFFS_TRACE_OS, ("yaffs_put_super\n"));
+
+ yaffs_GrossLock(dev);
+
+@@ -1494,18 +1739,17 @@ static void yaffs_put_super(struct super
+
+ yaffs_CheckpointSave(dev);
+
+- if (dev->putSuperFunc) {
++ if (dev->putSuperFunc)
+ dev->putSuperFunc(sb);
+- }
+
+ yaffs_Deinitialise(dev);
+
+ yaffs_GrossUnlock(dev);
+
+ /* we assume this is protected by lock_kernel() in mount/umount */
+- list_del(&dev->devList);
++ ylist_del(&dev->devList);
+
+- if(dev->spareBuffer){
++ if (dev->spareBuffer) {
+ YFREE(dev->spareBuffer);
+ dev->spareBuffer = NULL;
+ }
+@@ -1516,12 +1760,10 @@ static void yaffs_put_super(struct super
+
+ static void yaffs_MTDPutSuper(struct super_block *sb)
+ {
+-
+ struct mtd_info *mtd = yaffs_SuperToDevice(sb)->genericDevice;
+
+- if (mtd->sync) {
++ if (mtd->sync)
+ mtd->sync(mtd);
+- }
+
+ put_mtd_device(mtd);
+ }
+@@ -1531,9 +1773,9 @@ static void yaffs_MarkSuperBlockDirty(vo
+ {
+ struct super_block *sb = (struct super_block *)vsb;
+
+- T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_MarkSuperBlockDirty() sb = %p\n",sb));
+-// if(sb)
+-// sb->s_dirt = 1;
++ T(YAFFS_TRACE_OS, ("yaffs_MarkSuperBlockDirty() sb = %p\n", sb));
++ if (sb)
++ sb->s_dirt = 1;
+ }
+
+ typedef struct {
+@@ -1546,48 +1788,48 @@ typedef struct {
+ #define MAX_OPT_LEN 20
+ static int yaffs_parse_options(yaffs_options *options, const char *options_str)
+ {
+- char cur_opt[MAX_OPT_LEN+1];
++ char cur_opt[MAX_OPT_LEN + 1];
+ int p;
+ int error = 0;
+
+ /* Parse through the options which is a comma seperated list */
+
+- while(options_str && *options_str && !error){
+- memset(cur_opt,0,MAX_OPT_LEN+1);
++ while (options_str && *options_str && !error) {
++ memset(cur_opt, 0, MAX_OPT_LEN + 1);
+ p = 0;
+
+- while(*options_str && *options_str != ','){
+- if(p < MAX_OPT_LEN){
++ while (*options_str && *options_str != ',') {
++ if (p < MAX_OPT_LEN) {
+ cur_opt[p] = *options_str;
+ p++;
+ }
+ options_str++;
+ }
+
+- if(!strcmp(cur_opt,"inband-tags"))
++ if (!strcmp(cur_opt, "inband-tags"))
+ options->inband_tags = 1;
+- else if(!strcmp(cur_opt,"no-cache"))
++ else if (!strcmp(cur_opt, "no-cache"))
+ options->no_cache = 1;
+- else if(!strcmp(cur_opt,"no-checkpoint-read"))
++ else if (!strcmp(cur_opt, "no-checkpoint-read"))
+ options->skip_checkpoint_read = 1;
+- else if(!strcmp(cur_opt,"no-checkpoint-write"))
++ else if (!strcmp(cur_opt, "no-checkpoint-write"))
+ options->skip_checkpoint_write = 1;
+- else if(!strcmp(cur_opt,"no-checkpoint")){
++ else if (!strcmp(cur_opt, "no-checkpoint")) {
+ options->skip_checkpoint_read = 1;
+ options->skip_checkpoint_write = 1;
+ } else {
+- printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",cur_opt);
++ printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
++ cur_opt);
+ error = 1;
+ }
+-
+ }
+
+ return error;
+ }
+
+ static struct super_block *yaffs_internal_read_super(int yaffsVersion,
+- struct super_block *sb,
+- void *data, int silent)
++ struct super_block *sb,
++ void *data, int silent)
+ {
+ int nBlocks;
+ struct inode *inode = NULL;
+@@ -1602,6 +1844,7 @@ static struct super_block *yaffs_interna
+
+ sb->s_magic = YAFFS_MAGIC;
+ sb->s_op = &yaffs_super_ops;
++ sb->s_flags |= MS_NOATIME;
+
+ if (!sb)
+ printk(KERN_INFO "yaffs: sb is NULL\n");
+@@ -1614,14 +1857,14 @@ static struct super_block *yaffs_interna
+ sb->s_dev,
+ yaffs_devname(sb, devname_buf));
+
+- if(!data_str)
++ if (!data_str)
+ data_str = "";
+
+- printk(KERN_INFO "yaffs: passed flags \"%s\"\n",data_str);
++ printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
+
+- memset(&options,0,sizeof(options));
++ memset(&options, 0, sizeof(options));
+
+- if(yaffs_parse_options(&options,data_str)){
++ if (yaffs_parse_options(&options, data_str)) {
+ /* Option parsing failed */
+ return NULL;
+ }
+@@ -1645,9 +1888,9 @@ static struct super_block *yaffs_interna
+ yaffs_devname(sb, devname_buf)));
+
+ /* Check it's an mtd device..... */
+- if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR) {
++ if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR)
+ return NULL; /* This isn't an mtd device */
+- }
++
+ /* Get the device */
+ mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
+ if (!mtd) {
+@@ -1673,29 +1916,23 @@ static struct super_block *yaffs_interna
+ T(YAFFS_TRACE_OS, (" %s %d\n", WRITE_SIZE_STR, WRITE_SIZE(mtd)));
+ T(YAFFS_TRACE_OS, (" oobsize %d\n", mtd->oobsize));
+ T(YAFFS_TRACE_OS, (" erasesize %d\n", mtd->erasesize));
+- T(YAFFS_TRACE_OS, (" size %d\n", mtd->size));
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++ T(YAFFS_TRACE_OS, (" size %u\n", mtd->size));
++#else
++ T(YAFFS_TRACE_OS, (" size %lld\n", mtd->size));
++#endif
+
+ #ifdef CONFIG_YAFFS_AUTO_YAFFS2
+
+- if (yaffsVersion == 1 &&
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+- mtd->writesize >= 2048) {
+-#else
+- mtd->oobblock >= 2048) {
+-#endif
+- T(YAFFS_TRACE_ALWAYS,("yaffs: auto selecting yaffs2\n"));
+- yaffsVersion = 2;
++ if (yaffsVersion == 1 && WRITE_SIZE(mtd) >= 2048) {
++ T(YAFFS_TRACE_ALWAYS, ("yaffs: auto selecting yaffs2\n"));
++ yaffsVersion = 2;
+ }
+
+ /* Added NCB 26/5/2006 for completeness */
+- if (yaffsVersion == 2 &&
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+- mtd->writesize == 512) {
+-#else
+- mtd->oobblock == 512) {
+-#endif
+- T(YAFFS_TRACE_ALWAYS,("yaffs: auto selecting yaffs1\n"));
+- yaffsVersion = 1;
++ if (yaffsVersion == 2 && !options.inband_tags && WRITE_SIZE(mtd) == 512) {
++ T(YAFFS_TRACE_ALWAYS, ("yaffs: auto selecting yaffs1\n"));
++ yaffsVersion = 1;
+ }
+
+ #endif
+@@ -1707,7 +1944,7 @@ static struct super_block *yaffs_interna
+ !mtd->block_markbad ||
+ !mtd->read ||
+ !mtd->write ||
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ !mtd->read_oob || !mtd->write_oob) {
+ #else
+ !mtd->write_ecc ||
+@@ -1719,12 +1956,9 @@ static struct super_block *yaffs_interna
+ return NULL;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+- if (mtd->writesize < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+-#else
+- if (mtd->oobblock < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+-#endif
+- mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) {
++ if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
++ mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
++ !options.inband_tags) {
+ T(YAFFS_TRACE_ALWAYS,
+ ("yaffs: MTD device does not have the "
+ "right page sizes\n"));
+@@ -1735,7 +1969,7 @@ static struct super_block *yaffs_interna
+ if (!mtd->erase ||
+ !mtd->read ||
+ !mtd->write ||
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ !mtd->read_oob || !mtd->write_oob) {
+ #else
+ !mtd->write_ecc ||
+@@ -1761,7 +1995,7 @@ static struct super_block *yaffs_interna
+ * Set the yaffs_Device up for mtd
+ */
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ sb->s_fs_info = dev = kmalloc(sizeof(yaffs_Device), GFP_KERNEL);
+ #else
+ sb->u.generic_sbp = dev = kmalloc(sizeof(yaffs_Device), GFP_KERNEL);
+@@ -1780,13 +2014,15 @@ static struct super_block *yaffs_interna
+
+ /* Set up the memory size parameters.... */
+
+- nBlocks = mtd->size / (YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK);
++ nBlocks = YCALCBLOCKS(mtd->size, (YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK));
++
+ dev->startBlock = 0;
+ dev->endBlock = nBlocks - 1;
+ dev->nChunksPerBlock = YAFFS_CHUNKS_PER_BLOCK;
+- dev->nDataBytesPerChunk = YAFFS_BYTES_PER_CHUNK;
++ dev->totalBytesPerChunk = YAFFS_BYTES_PER_CHUNK;
+ dev->nReservedBlocks = 5;
+ dev->nShortOpCaches = (options.no_cache) ? 0 : 10;
++ dev->inbandTags = options.inband_tags;
+
+ /* ... and the functions. */
+ if (yaffsVersion == 2) {
+@@ -1798,20 +2034,19 @@ static struct super_block *yaffs_interna
+ dev->queryNANDBlock = nandmtd2_QueryNANDBlock;
+ dev->spareBuffer = YMALLOC(mtd->oobsize);
+ dev->isYaffs2 = 1;
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+- dev->nDataBytesPerChunk = mtd->writesize;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ dev->totalBytesPerChunk = mtd->writesize;
+ dev->nChunksPerBlock = mtd->erasesize / mtd->writesize;
+ #else
+- dev->nDataBytesPerChunk = mtd->oobblock;
++ dev->totalBytesPerChunk = mtd->oobblock;
+ dev->nChunksPerBlock = mtd->erasesize / mtd->oobblock;
+ #endif
+- nBlocks = mtd->size / mtd->erasesize;
++ nBlocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
+
+- dev->nCheckpointReservedBlocks = CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS;
+ dev->startBlock = 0;
+ dev->endBlock = nBlocks - 1;
+ } else {
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ /* use the MTD interface in yaffs_mtdif1.c */
+ dev->writeChunkWithTagsToNAND =
+ nandmtd1_WriteChunkWithTagsToNAND;
+@@ -1847,7 +2082,7 @@ static struct super_block *yaffs_interna
+ dev->skipCheckpointWrite = options.skip_checkpoint_write;
+
+ /* we assume this is protected by lock_kernel() in mount/umount */
+- list_add_tail(&dev->devList, &yaffs_dev_list);
++ ylist_add_tail(&dev->devList, &yaffs_dev_list);
+
+ init_MUTEX(&dev->grossLock);
+
+@@ -1884,20 +2119,23 @@ static struct super_block *yaffs_interna
+ return NULL;
+ }
+ sb->s_root = root;
++ sb->s_dirt = !dev->isCheckpointed;
++ T(YAFFS_TRACE_ALWAYS,
++ ("yaffs_read_super: isCheckpointed %d\n", dev->isCheckpointed));
+
+ T(YAFFS_TRACE_OS, ("yaffs_read_super: done\n"));
+ return sb;
+ }
+
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+ {
+ return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
+@@ -1938,14 +2176,14 @@ static DECLARE_FSTYPE(yaffs_fs_type, "ya
+
+ #ifdef CONFIG_YAFFS_YAFFS2
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+ {
+ return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ static int yaffs2_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
+@@ -1990,12 +2228,12 @@ static char *yaffs_dump_dev(char *buf, y
+ {
+ buf += sprintf(buf, "startBlock......... %d\n", dev->startBlock);
+ buf += sprintf(buf, "endBlock........... %d\n", dev->endBlock);
++ buf += sprintf(buf, "totalBytesPerChunk. %d\n", dev->totalBytesPerChunk);
+ buf += sprintf(buf, "nDataBytesPerChunk. %d\n", dev->nDataBytesPerChunk);
+ buf += sprintf(buf, "chunkGroupBits..... %d\n", dev->chunkGroupBits);
+ buf += sprintf(buf, "chunkGroupSize..... %d\n", dev->chunkGroupSize);
+ buf += sprintf(buf, "nErasedBlocks...... %d\n", dev->nErasedBlocks);
+ buf += sprintf(buf, "nReservedBlocks.... %d\n", dev->nReservedBlocks);
+- buf += sprintf(buf, "nCheckptResBlocks.. %d\n", dev->nCheckpointReservedBlocks);
+ buf += sprintf(buf, "blocksInCheckpoint. %d\n", dev->blocksInCheckpoint);
+ buf += sprintf(buf, "nTnodesCreated..... %d\n", dev->nTnodesCreated);
+ buf += sprintf(buf, "nFreeTnodes........ %d\n", dev->nFreeTnodes);
+@@ -2006,10 +2244,8 @@ static char *yaffs_dump_dev(char *buf, y
+ buf += sprintf(buf, "nPageReads......... %d\n", dev->nPageReads);
+ buf += sprintf(buf, "nBlockErasures..... %d\n", dev->nBlockErasures);
+ buf += sprintf(buf, "nGCCopies.......... %d\n", dev->nGCCopies);
+- buf +=
+- sprintf(buf, "garbageCollections. %d\n", dev->garbageCollections);
+- buf +=
+- sprintf(buf, "passiveGCs......... %d\n",
++ buf += sprintf(buf, "garbageCollections. %d\n", dev->garbageCollections);
++ buf += sprintf(buf, "passiveGCs......... %d\n",
+ dev->passiveGarbageCollections);
+ buf += sprintf(buf, "nRetriedWrites..... %d\n", dev->nRetriedWrites);
+ buf += sprintf(buf, "nShortOpCaches..... %d\n", dev->nShortOpCaches);
+@@ -2025,6 +2261,7 @@ static char *yaffs_dump_dev(char *buf, y
+ sprintf(buf, "nBackgroudDeletions %d\n", dev->nBackgroundDeletions);
+ buf += sprintf(buf, "useNANDECC......... %d\n", dev->useNANDECC);
+ buf += sprintf(buf, "isYaffs2........... %d\n", dev->isYaffs2);
++ buf += sprintf(buf, "inbandTags......... %d\n", dev->inbandTags);
+
+ return buf;
+ }
+@@ -2033,7 +2270,7 @@ static int yaffs_proc_read(char *page,
+ char **start,
+ off_t offset, int count, int *eof, void *data)
+ {
+- struct list_head *item;
++ struct ylist_head *item;
+ char *buf = page;
+ int step = offset;
+ int n = 0;
+@@ -2057,8 +2294,8 @@ static int yaffs_proc_read(char *page,
+ lock_kernel();
+
+ /* Locate and print the Nth entry. Order N-squared but N is small. */
+- list_for_each(item, &yaffs_dev_list) {
+- yaffs_Device *dev = list_entry(item, yaffs_Device, devList);
++ ylist_for_each(item, &yaffs_dev_list) {
++ yaffs_Device *dev = ylist_entry(item, yaffs_Device, devList);
+ if (n < step) {
+ n++;
+ continue;
+@@ -2119,7 +2356,7 @@ static int yaffs_proc_write(struct file
+ char *end;
+ char *mask_name;
+ const char *x;
+- char substring[MAX_MASK_NAME_LENGTH+1];
++ char substring[MAX_MASK_NAME_LENGTH + 1];
+ int i;
+ int done = 0;
+ int add, len = 0;
+@@ -2129,9 +2366,8 @@ static int yaffs_proc_write(struct file
+
+ while (!done && (pos < count)) {
+ done = 1;
+- while ((pos < count) && isspace(buf[pos])) {
++ while ((pos < count) && isspace(buf[pos]))
+ pos++;
+- }
+
+ switch (buf[pos]) {
+ case '+':
+@@ -2148,20 +2384,21 @@ static int yaffs_proc_write(struct file
+ mask_name = NULL;
+
+ mask_bitfield = simple_strtoul(buf + pos, &end, 0);
++
+ if (end > buf + pos) {
+ mask_name = "numeral";
+ len = end - (buf + pos);
+ pos += len;
+ done = 0;
+ } else {
+- for(x = buf + pos, i = 0;
+- (*x == '_' || (*x >='a' && *x <= 'z')) &&
+- i <MAX_MASK_NAME_LENGTH; x++, i++, pos++)
+- substring[i] = *x;
++ for (x = buf + pos, i = 0;
++ (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
++ i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
++ substring[i] = *x;
+ substring[i] = '\0';
+
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+- if(strcmp(substring,mask_flags[i].mask_name) == 0){
++ if (strcmp(substring, mask_flags[i].mask_name) == 0) {
+ mask_name = mask_flags[i].mask_name;
+ mask_bitfield = mask_flags[i].mask_bitfield;
+ done = 0;
+@@ -2172,7 +2409,7 @@ static int yaffs_proc_write(struct file
+
+ if (mask_name != NULL) {
+ done = 0;
+- switch(add) {
++ switch (add) {
+ case '-':
+ rg &= ~mask_bitfield;
+ break;
+@@ -2191,13 +2428,13 @@ static int yaffs_proc_write(struct file
+
+ yaffs_traceMask = rg | YAFFS_TRACE_ALWAYS;
+
+- printk("new trace = 0x%08X\n",yaffs_traceMask);
++ printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_traceMask);
+
+ if (rg & YAFFS_TRACE_ALWAYS) {
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+ char flag;
+ flag = ((rg & mask_flags[i].mask_bitfield) == mask_flags[i].mask_bitfield) ? '+' : '-';
+- printk("%c%s\n", flag, mask_flags[i].mask_name);
++ printk(KERN_DEBUG "%c%s\n", flag, mask_flags[i].mask_name);
+ }
+ }
+
+@@ -2211,12 +2448,8 @@ struct file_system_to_install {
+ };
+
+ static struct file_system_to_install fs_to_install[] = {
+-//#ifdef CONFIG_YAFFS_YAFFS1
+ {&yaffs_fs_type, 0},
+-//#endif
+-//#ifdef CONFIG_YAFFS_YAFFS2
+ {&yaffs2_fs_type, 0},
+-//#endif
+ {NULL, 0}
+ };
+
+@@ -2231,15 +2464,14 @@ static int __init init_yaffs_fs(void)
+ /* Install the proc_fs entry */
+ my_proc_entry = create_proc_entry("yaffs",
+ S_IRUGO | S_IFREG,
+- &proc_root);
++ YPROC_ROOT);
+
+ if (my_proc_entry) {
+ my_proc_entry->write_proc = yaffs_proc_write;
+ my_proc_entry->read_proc = yaffs_proc_read;
+ my_proc_entry->data = NULL;
+- } else {
++ } else
+ return -ENOMEM;
+- }
+
+ /* Now add the file system entries */
+
+@@ -2247,9 +2479,8 @@ static int __init init_yaffs_fs(void)
+
+ while (fsinst->fst && !error) {
+ error = register_filesystem(fsinst->fst);
+- if (!error) {
++ if (!error)
+ fsinst->installed = 1;
+- }
+ fsinst++;
+ }
+
+@@ -2277,7 +2508,7 @@ static void __exit exit_yaffs_fs(void)
+ T(YAFFS_TRACE_ALWAYS, ("yaffs " __DATE__ " " __TIME__
+ " removing. \n"));
+
+- remove_proc_entry("yaffs", &proc_root);
++ remove_proc_entry("yaffs", YPROC_ROOT);
+
+ fsinst = fs_to_install;
+
+@@ -2288,7 +2519,6 @@ static void __exit exit_yaffs_fs(void)
+ }
+ fsinst++;
+ }
+-
+ }
+
+ module_init(init_yaffs_fs)
+--- /dev/null
++++ b/fs/yaffs2/yaffs_getblockinfo.h
+@@ -0,0 +1,34 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_GETBLOCKINFO_H__
++#define __YAFFS_GETBLOCKINFO_H__
++
++#include "yaffs_guts.h"
++
++/* Function to manipulate block info */
++static Y_INLINE yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device * dev, int blk)
++{
++ if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ ("**>> yaffs: getBlockInfo block %d is not valid" TENDSTR),
++ blk));
++ YBUG();
++ }
++ return &dev->blockInfo[blk - dev->internalStartBlock];
++}
++
++#endif
+--- a/fs/yaffs2/yaffs_guts.c
++++ b/fs/yaffs2/yaffs_guts.c
+@@ -12,16 +12,17 @@
+ */
+
+ const char *yaffs_guts_c_version =
+- "$Id: yaffs_guts.c,v 1.49 2007-05-15 20:07:40 charles Exp $";
++ "$Id: yaffs_guts.c,v 1.82 2009-03-09 04:24:17 charles Exp $";
+
+ #include "yportenv.h"
+
+ #include "yaffsinterface.h"
+ #include "yaffs_guts.h"
+ #include "yaffs_tagsvalidity.h"
++#include "yaffs_getblockinfo.h"
+
+ #include "yaffs_tagscompat.h"
+-#ifndef CONFIG_YAFFS_USE_OWN_SORT
++#ifndef CONFIG_YAFFS_USE_OWN_SORT
+ #include "yaffs_qsort.h"
+ #endif
+ #include "yaffs_nand.h"
+@@ -32,116 +33,116 @@ const char *yaffs_guts_c_version =
+ #include "yaffs_packedtags2.h"
+
+
+-#ifdef CONFIG_YAFFS_WINCE
+-void yfsd_LockYAFFS(BOOL fsLockOnly);
+-void yfsd_UnlockYAFFS(BOOL fsLockOnly);
+-#endif
+-
+ #define YAFFS_PASSIVE_GC_CHUNKS 2
+
+ #include "yaffs_ecc.h"
+
+
+ /* Robustification (if it ever comes about...) */
+-static void yaffs_RetireBlock(yaffs_Device * dev, int blockInNAND);
+-static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND, int erasedOk);
+-static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data,
+- const yaffs_ExtendedTags * tags);
+-static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
+- const yaffs_ExtendedTags * tags);
++static void yaffs_RetireBlock(yaffs_Device *dev, int blockInNAND);
++static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND,
++ int erasedOk);
++static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data,
++ const yaffs_ExtendedTags *tags);
++static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
++ const yaffs_ExtendedTags *tags);
+
+ /* Other local prototypes */
+-static int yaffs_UnlinkObject( yaffs_Object *obj);
++static int yaffs_UnlinkObject(yaffs_Object *obj);
+ static int yaffs_ObjectHasCachedWriteData(yaffs_Object *obj);
+
+ static void yaffs_HardlinkFixup(yaffs_Device *dev, yaffs_Object *hardList);
+
+-static int yaffs_WriteNewChunkWithTagsToNAND(yaffs_Device * dev,
+- const __u8 * buffer,
+- yaffs_ExtendedTags * tags,
+- int useReserve);
+-static int yaffs_PutChunkIntoFile(yaffs_Object * in, int chunkInInode,
+- int chunkInNAND, int inScan);
+-
+-static yaffs_Object *yaffs_CreateNewObject(yaffs_Device * dev, int number,
+- yaffs_ObjectType type);
+-static void yaffs_AddObjectToDirectory(yaffs_Object * directory,
+- yaffs_Object * obj);
+-static int yaffs_UpdateObjectHeader(yaffs_Object * in, const YCHAR * name,
+- int force, int isShrink, int shadows);
+-static void yaffs_RemoveObjectFromDirectory(yaffs_Object * obj);
++static int yaffs_WriteNewChunkWithTagsToNAND(yaffs_Device *dev,
++ const __u8 *buffer,
++ yaffs_ExtendedTags *tags,
++ int useReserve);
++static int yaffs_PutChunkIntoFile(yaffs_Object *in, int chunkInInode,
++ int chunkInNAND, int inScan);
++
++static yaffs_Object *yaffs_CreateNewObject(yaffs_Device *dev, int number,
++ yaffs_ObjectType type);
++static void yaffs_AddObjectToDirectory(yaffs_Object *directory,
++ yaffs_Object *obj);
++static int yaffs_UpdateObjectHeader(yaffs_Object *in, const YCHAR *name,
++ int force, int isShrink, int shadows);
++static void yaffs_RemoveObjectFromDirectory(yaffs_Object *obj);
+ static int yaffs_CheckStructures(void);
+-static int yaffs_DeleteWorker(yaffs_Object * in, yaffs_Tnode * tn, __u32 level,
+- int chunkOffset, int *limit);
+-static int yaffs_DoGenericObjectDeletion(yaffs_Object * in);
+-
+-static yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device * dev, int blockNo);
+-
+-static __u8 *yaffs_GetTempBuffer(yaffs_Device * dev, int lineNo);
+-static void yaffs_ReleaseTempBuffer(yaffs_Device * dev, __u8 * buffer,
+- int lineNo);
++static int yaffs_DeleteWorker(yaffs_Object *in, yaffs_Tnode *tn, __u32 level,
++ int chunkOffset, int *limit);
++static int yaffs_DoGenericObjectDeletion(yaffs_Object *in);
++
++static yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device *dev, int blockNo);
+
+-static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND);
+
+-static int yaffs_UnlinkWorker(yaffs_Object * obj);
+-static void yaffs_DestroyObject(yaffs_Object * obj);
++static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
++ int chunkInNAND);
+
+-static int yaffs_TagsMatch(const yaffs_ExtendedTags * tags, int objectId,
+- int chunkInObject);
++static int yaffs_UnlinkWorker(yaffs_Object *obj);
+
+-loff_t yaffs_GetFileSize(yaffs_Object * obj);
++static int yaffs_TagsMatch(const yaffs_ExtendedTags *tags, int objectId,
++ int chunkInObject);
+
+-static int yaffs_AllocateChunk(yaffs_Device * dev, int useReserve, yaffs_BlockInfo **blockUsedPtr);
++static int yaffs_AllocateChunk(yaffs_Device *dev, int useReserve,
++ yaffs_BlockInfo **blockUsedPtr);
+
+-static void yaffs_VerifyFreeChunks(yaffs_Device * dev);
++static void yaffs_VerifyFreeChunks(yaffs_Device *dev);
+
+ static void yaffs_CheckObjectDetailsLoaded(yaffs_Object *in);
+
++static void yaffs_VerifyDirectory(yaffs_Object *directory);
+ #ifdef YAFFS_PARANOID
+-static int yaffs_CheckFileSanity(yaffs_Object * in);
++static int yaffs_CheckFileSanity(yaffs_Object *in);
+ #else
+ #define yaffs_CheckFileSanity(in)
+ #endif
+
+-static void yaffs_InvalidateWholeChunkCache(yaffs_Object * in);
+-static void yaffs_InvalidateChunkCache(yaffs_Object * object, int chunkId);
++static void yaffs_InvalidateWholeChunkCache(yaffs_Object *in);
++static void yaffs_InvalidateChunkCache(yaffs_Object *object, int chunkId);
+
+ static void yaffs_InvalidateCheckpoint(yaffs_Device *dev);
+
+-static int yaffs_FindChunkInFile(yaffs_Object * in, int chunkInInode,
+- yaffs_ExtendedTags * tags);
++static int yaffs_FindChunkInFile(yaffs_Object *in, int chunkInInode,
++ yaffs_ExtendedTags *tags);
+
+-static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos);
+-static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device * dev,
+- yaffs_FileStructure * fStruct,
+- __u32 chunkId);
++static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn,
++ unsigned pos);
++static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device *dev,
++ yaffs_FileStructure *fStruct,
++ __u32 chunkId);
+
+
+ /* Function to calculate chunk and offset */
+
+-static void yaffs_AddrToChunk(yaffs_Device *dev, loff_t addr, __u32 *chunk, __u32 *offset)
++static void yaffs_AddrToChunk(yaffs_Device *dev, loff_t addr, int *chunkOut,
++ __u32 *offsetOut)
+ {
+- if(dev->chunkShift){
+- /* Easy-peasy power of 2 case */
+- *chunk = (__u32)(addr >> dev->chunkShift);
+- *offset = (__u32)(addr & dev->chunkMask);
+- }
+- else if(dev->crumbsPerChunk)
+- {
+- /* Case where we're using "crumbs" */
+- *offset = (__u32)(addr & dev->crumbMask);
+- addr >>= dev->crumbShift;
+- *chunk = ((__u32)addr)/dev->crumbsPerChunk;
+- *offset += ((addr - (*chunk * dev->crumbsPerChunk)) << dev->crumbShift);
++ int chunk;
++ __u32 offset;
++
++ chunk = (__u32)(addr >> dev->chunkShift);
++
++ if (dev->chunkDiv == 1) {
++ /* easy power of 2 case */
++ offset = (__u32)(addr & dev->chunkMask);
++ } else {
++ /* Non power-of-2 case */
++
++ loff_t chunkBase;
++
++ chunk /= dev->chunkDiv;
++
++ chunkBase = ((loff_t)chunk) * dev->nDataBytesPerChunk;
++ offset = (__u32)(addr - chunkBase);
+ }
+- else
+- YBUG();
++
++ *chunkOut = chunk;
++ *offsetOut = offset;
+ }
+
+-/* Function to return the number of shifts for a power of 2 greater than or equal
+- * to the given number
++/* Function to return the number of shifts for a power of 2 greater than or
++ * equal to the given number
+ * Note we don't try to cater for all possible numbers and this does not have to
+ * be hellishly efficient.
+ */
+@@ -153,13 +154,14 @@ static __u32 ShiftsGE(__u32 x)
+
+ nShifts = extraBits = 0;
+
+- while(x>1){
+- if(x & 1) extraBits++;
+- x>>=1;
++ while (x > 1) {
++ if (x & 1)
++ extraBits++;
++ x >>= 1;
+ nShifts++;
+ }
+
+- if(extraBits)
++ if (extraBits)
+ nShifts++;
+
+ return nShifts;
+@@ -168,16 +170,17 @@ static __u32 ShiftsGE(__u32 x)
+ /* Function to return the number of shifts to get a 1 in bit 0
+ */
+
+-static __u32 ShiftDiv(__u32 x)
++static __u32 Shifts(__u32 x)
+ {
+ int nShifts;
+
+ nShifts = 0;
+
+- if(!x) return 0;
++ if (!x)
++ return 0;
+
+- while( !(x&1)){
+- x>>=1;
++ while (!(x&1)) {
++ x >>= 1;
+ nShifts++;
+ }
+
+@@ -195,21 +198,25 @@ static int yaffs_InitialiseTempBuffers(y
+ int i;
+ __u8 *buf = (__u8 *)1;
+
+- memset(dev->tempBuffer,0,sizeof(dev->tempBuffer));
++ memset(dev->tempBuffer, 0, sizeof(dev->tempBuffer));
+
+ for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
+ dev->tempBuffer[i].line = 0; /* not in use */
+ dev->tempBuffer[i].buffer = buf =
+- YMALLOC_DMA(dev->nDataBytesPerChunk);
++ YMALLOC_DMA(dev->totalBytesPerChunk);
+ }
+
+ return buf ? YAFFS_OK : YAFFS_FAIL;
+-
+ }
+
+-static __u8 *yaffs_GetTempBuffer(yaffs_Device * dev, int lineNo)
++__u8 *yaffs_GetTempBuffer(yaffs_Device *dev, int lineNo)
+ {
+ int i, j;
++
++ dev->tempInUse++;
++ if (dev->tempInUse > dev->maxTemp)
++ dev->maxTemp = dev->tempInUse;
++
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->tempBuffer[i].line == 0) {
+ dev->tempBuffer[i].line = lineNo;
+@@ -227,9 +234,9 @@ static __u8 *yaffs_GetTempBuffer(yaffs_D
+ T(YAFFS_TRACE_BUFFERS,
+ (TSTR("Out of temp buffers at line %d, other held by lines:"),
+ lineNo));
+- for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+ T(YAFFS_TRACE_BUFFERS, (TSTR(" %d "), dev->tempBuffer[i].line));
+- }
++
+ T(YAFFS_TRACE_BUFFERS, (TSTR(" " TENDSTR)));
+
+ /*
+@@ -242,10 +249,13 @@ static __u8 *yaffs_GetTempBuffer(yaffs_D
+
+ }
+
+-static void yaffs_ReleaseTempBuffer(yaffs_Device * dev, __u8 * buffer,
++void yaffs_ReleaseTempBuffer(yaffs_Device *dev, __u8 *buffer,
+ int lineNo)
+ {
+ int i;
++
++ dev->tempInUse--;
++
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->tempBuffer[i].buffer == buffer) {
+ dev->tempBuffer[i].line = 0;
+@@ -267,27 +277,26 @@ static void yaffs_ReleaseTempBuffer(yaff
+ /*
+ * Determine if we have a managed buffer.
+ */
+-int yaffs_IsManagedTempBuffer(yaffs_Device * dev, const __u8 * buffer)
++int yaffs_IsManagedTempBuffer(yaffs_Device *dev, const __u8 *buffer)
+ {
+ int i;
++
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->tempBuffer[i].buffer == buffer)
+ return 1;
++ }
+
++ for (i = 0; i < dev->nShortOpCaches; i++) {
++ if (dev->srCache[i].data == buffer)
++ return 1;
+ }
+
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if( dev->srCache[i].data == buffer )
+- return 1;
+-
+- }
+-
+- if (buffer == dev->checkpointBuffer)
+- return 1;
+-
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR("yaffs: unmaged buffer detected.\n" TENDSTR)));
+- return 0;
++ if (buffer == dev->checkpointBuffer)
++ return 1;
++
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: unmaged buffer detected.\n" TENDSTR)));
++ return 0;
+ }
+
+
+@@ -296,62 +305,63 @@ int yaffs_IsManagedTempBuffer(yaffs_Devi
+ * Chunk bitmap manipulations
+ */
+
+-static Y_INLINE __u8 *yaffs_BlockBits(yaffs_Device * dev, int blk)
++static Y_INLINE __u8 *yaffs_BlockBits(yaffs_Device *dev, int blk)
+ {
+ if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
+ T(YAFFS_TRACE_ERROR,
+- (TSTR("**>> yaffs: BlockBits block %d is not valid" TENDSTR),
+- blk));
++ (TSTR("**>> yaffs: BlockBits block %d is not valid" TENDSTR),
++ blk));
+ YBUG();
+ }
+ return dev->chunkBits +
+- (dev->chunkBitmapStride * (blk - dev->internalStartBlock));
++ (dev->chunkBitmapStride * (blk - dev->internalStartBlock));
+ }
+
+ static Y_INLINE void yaffs_VerifyChunkBitId(yaffs_Device *dev, int blk, int chunk)
+ {
+- if(blk < dev->internalStartBlock || blk > dev->internalEndBlock ||
+- chunk < 0 || chunk >= dev->nChunksPerBlock) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("**>> yaffs: Chunk Id (%d:%d) invalid"TENDSTR),blk,chunk));
+- YBUG();
++ if (blk < dev->internalStartBlock || blk > dev->internalEndBlock ||
++ chunk < 0 || chunk >= dev->nChunksPerBlock) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("**>> yaffs: Chunk Id (%d:%d) invalid"TENDSTR),
++ blk, chunk));
++ YBUG();
+ }
+ }
+
+-static Y_INLINE void yaffs_ClearChunkBits(yaffs_Device * dev, int blk)
++static Y_INLINE void yaffs_ClearChunkBits(yaffs_Device *dev, int blk)
+ {
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+
+ memset(blkBits, 0, dev->chunkBitmapStride);
+ }
+
+-static Y_INLINE void yaffs_ClearChunkBit(yaffs_Device * dev, int blk, int chunk)
++static Y_INLINE void yaffs_ClearChunkBit(yaffs_Device *dev, int blk, int chunk)
+ {
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+
+- yaffs_VerifyChunkBitId(dev,blk,chunk);
++ yaffs_VerifyChunkBitId(dev, blk, chunk);
+
+ blkBits[chunk / 8] &= ~(1 << (chunk & 7));
+ }
+
+-static Y_INLINE void yaffs_SetChunkBit(yaffs_Device * dev, int blk, int chunk)
++static Y_INLINE void yaffs_SetChunkBit(yaffs_Device *dev, int blk, int chunk)
+ {
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+
+- yaffs_VerifyChunkBitId(dev,blk,chunk);
++ yaffs_VerifyChunkBitId(dev, blk, chunk);
+
+ blkBits[chunk / 8] |= (1 << (chunk & 7));
+ }
+
+-static Y_INLINE int yaffs_CheckChunkBit(yaffs_Device * dev, int blk, int chunk)
++static Y_INLINE int yaffs_CheckChunkBit(yaffs_Device *dev, int blk, int chunk)
+ {
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+- yaffs_VerifyChunkBitId(dev,blk,chunk);
++ yaffs_VerifyChunkBitId(dev, blk, chunk);
+
+ return (blkBits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
+ }
+
+-static Y_INLINE int yaffs_StillSomeChunkBits(yaffs_Device * dev, int blk)
++static Y_INLINE int yaffs_StillSomeChunkBits(yaffs_Device *dev, int blk)
+ {
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+ int i;
+@@ -363,17 +373,17 @@ static Y_INLINE int yaffs_StillSomeChunk
+ return 0;
+ }
+
+-static int yaffs_CountChunkBits(yaffs_Device * dev, int blk)
++static int yaffs_CountChunkBits(yaffs_Device *dev, int blk)
+ {
+ __u8 *blkBits = yaffs_BlockBits(dev, blk);
+ int i;
+ int n = 0;
+ for (i = 0; i < dev->chunkBitmapStride; i++) {
+ __u8 x = *blkBits;
+- while(x){
+- if(x & 1)
++ while (x) {
++ if (x & 1)
+ n++;
+- x >>=1;
++ x >>= 1;
+ }
+
+ blkBits++;
+@@ -400,7 +410,7 @@ static int yaffs_SkipNANDVerification(ya
+ return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_NAND));
+ }
+
+-static const char * blockStateName[] = {
++static const char *blockStateName[] = {
+ "Unknown",
+ "Needs scanning",
+ "Scanning",
+@@ -413,64 +423,65 @@ static const char * blockStateName[] = {
+ "Dead"
+ };
+
+-static void yaffs_VerifyBlock(yaffs_Device *dev,yaffs_BlockInfo *bi,int n)
++static void yaffs_VerifyBlock(yaffs_Device *dev, yaffs_BlockInfo *bi, int n)
+ {
+ int actuallyUsed;
+ int inUse;
+
+- if(yaffs_SkipVerification(dev))
++ if (yaffs_SkipVerification(dev))
+ return;
+
+ /* Report illegal runtime states */
+- if(bi->blockState <0 || bi->blockState >= YAFFS_NUMBER_OF_BLOCK_STATES)
+- T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has undefined state %d"TENDSTR),n,bi->blockState));
++ if (bi->blockState >= YAFFS_NUMBER_OF_BLOCK_STATES)
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has undefined state %d"TENDSTR), n, bi->blockState));
+
+- switch(bi->blockState){
+- case YAFFS_BLOCK_STATE_UNKNOWN:
+- case YAFFS_BLOCK_STATE_SCANNING:
+- case YAFFS_BLOCK_STATE_NEEDS_SCANNING:
+- T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has bad run-state %s"TENDSTR),
+- n,blockStateName[bi->blockState]));
++ switch (bi->blockState) {
++ case YAFFS_BLOCK_STATE_UNKNOWN:
++ case YAFFS_BLOCK_STATE_SCANNING:
++ case YAFFS_BLOCK_STATE_NEEDS_SCANNING:
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has bad run-state %s"TENDSTR),
++ n, blockStateName[bi->blockState]));
+ }
+
+ /* Check pages in use and soft deletions are legal */
+
+ actuallyUsed = bi->pagesInUse - bi->softDeletions;
+
+- if(bi->pagesInUse < 0 || bi->pagesInUse > dev->nChunksPerBlock ||
++ if (bi->pagesInUse < 0 || bi->pagesInUse > dev->nChunksPerBlock ||
+ bi->softDeletions < 0 || bi->softDeletions > dev->nChunksPerBlock ||
+ actuallyUsed < 0 || actuallyUsed > dev->nChunksPerBlock)
+- T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has illegal values pagesInUsed %d softDeletions %d"TENDSTR),
+- n,bi->pagesInUse,bi->softDeletions));
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has illegal values pagesInUsed %d softDeletions %d"TENDSTR),
++ n, bi->pagesInUse, bi->softDeletions));
+
+
+ /* Check chunk bitmap legal */
+- inUse = yaffs_CountChunkBits(dev,n);
+- if(inUse != bi->pagesInUse)
+- T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has inconsistent values pagesInUse %d counted chunk bits %d"TENDSTR),
+- n,bi->pagesInUse,inUse));
++ inUse = yaffs_CountChunkBits(dev, n);
++ if (inUse != bi->pagesInUse)
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has inconsistent values pagesInUse %d counted chunk bits %d"TENDSTR),
++ n, bi->pagesInUse, inUse));
+
+ /* Check that the sequence number is valid.
+ * Ten million is legal, but is very unlikely
+ */
+- if(dev->isYaffs2 &&
++ if (dev->isYaffs2 &&
+ (bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING || bi->blockState == YAFFS_BLOCK_STATE_FULL) &&
+- (bi->sequenceNumber < YAFFS_LOWEST_SEQUENCE_NUMBER || bi->sequenceNumber > 10000000 ))
+- T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has suspect sequence number of %d"TENDSTR),
+- n,bi->sequenceNumber));
+-
++ (bi->sequenceNumber < YAFFS_LOWEST_SEQUENCE_NUMBER || bi->sequenceNumber > 10000000))
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has suspect sequence number of %d"TENDSTR),
++ n, bi->sequenceNumber));
+ }
+
+-static void yaffs_VerifyCollectedBlock(yaffs_Device *dev,yaffs_BlockInfo *bi,int n)
++static void yaffs_VerifyCollectedBlock(yaffs_Device *dev, yaffs_BlockInfo *bi,
++ int n)
+ {
+- yaffs_VerifyBlock(dev,bi,n);
++ yaffs_VerifyBlock(dev, bi, n);
+
+ /* After collection the block should be in the erased state */
+- /* TODO: This will need to change if we do partial gc */
++ /* This will need to change if we do partial gc */
+
+- if(bi->blockState != YAFFS_BLOCK_STATE_EMPTY){
+- T(YAFFS_TRACE_ERROR,(TSTR("Block %d is in state %d after gc, should be erased"TENDSTR),
+- n,bi->blockState));
++ if (bi->blockState != YAFFS_BLOCK_STATE_COLLECTING &&
++ bi->blockState != YAFFS_BLOCK_STATE_EMPTY) {
++ T(YAFFS_TRACE_ERROR, (TSTR("Block %d is in state %d after gc, should be erased"TENDSTR),
++ n, bi->blockState));
+ }
+ }
+
+@@ -480,52 +491,49 @@ static void yaffs_VerifyBlocks(yaffs_Dev
+ int nBlocksPerState[YAFFS_NUMBER_OF_BLOCK_STATES];
+ int nIllegalBlockStates = 0;
+
+-
+- if(yaffs_SkipVerification(dev))
++ if (yaffs_SkipVerification(dev))
+ return;
+
+- memset(nBlocksPerState,0,sizeof(nBlocksPerState));
+-
++ memset(nBlocksPerState, 0, sizeof(nBlocksPerState));
+
+- for(i = dev->internalStartBlock; i <= dev->internalEndBlock; i++){
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,i);
+- yaffs_VerifyBlock(dev,bi,i);
++ for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
++ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i);
++ yaffs_VerifyBlock(dev, bi, i);
+
+- if(bi->blockState >=0 && bi->blockState < YAFFS_NUMBER_OF_BLOCK_STATES)
++ if (bi->blockState < YAFFS_NUMBER_OF_BLOCK_STATES)
+ nBlocksPerState[bi->blockState]++;
+ else
+ nIllegalBlockStates++;
+-
+ }
+
+- T(YAFFS_TRACE_VERIFY,(TSTR(""TENDSTR)));
+- T(YAFFS_TRACE_VERIFY,(TSTR("Block summary"TENDSTR)));
++ T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR)));
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block summary"TENDSTR)));
+
+- T(YAFFS_TRACE_VERIFY,(TSTR("%d blocks have illegal states"TENDSTR),nIllegalBlockStates));
+- if(nBlocksPerState[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
+- T(YAFFS_TRACE_VERIFY,(TSTR("Too many allocating blocks"TENDSTR)));
++ T(YAFFS_TRACE_VERIFY, (TSTR("%d blocks have illegal states"TENDSTR), nIllegalBlockStates));
++ if (nBlocksPerState[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
++ T(YAFFS_TRACE_VERIFY, (TSTR("Too many allocating blocks"TENDSTR)));
+
+- for(i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
++ for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("%s %d blocks"TENDSTR),
+- blockStateName[i],nBlocksPerState[i]));
++ blockStateName[i], nBlocksPerState[i]));
+
+- if(dev->blocksInCheckpoint != nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT])
++ if (dev->blocksInCheckpoint != nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT])
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Checkpoint block count wrong dev %d count %d"TENDSTR),
+ dev->blocksInCheckpoint, nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT]));
+
+- if(dev->nErasedBlocks != nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY])
++ if (dev->nErasedBlocks != nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY])
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Erased block count wrong dev %d count %d"TENDSTR),
+ dev->nErasedBlocks, nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY]));
+
+- if(nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING] > 1)
++ if (nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING] > 1)
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Too many collecting blocks %d (max is 1)"TENDSTR),
+ nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING]));
+
+- T(YAFFS_TRACE_VERIFY,(TSTR(""TENDSTR)));
++ T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR)));
+
+ }
+
+@@ -535,26 +543,26 @@ static void yaffs_VerifyBlocks(yaffs_Dev
+ */
+ static void yaffs_VerifyObjectHeader(yaffs_Object *obj, yaffs_ObjectHeader *oh, yaffs_ExtendedTags *tags, int parentCheck)
+ {
+- if(yaffs_SkipVerification(obj->myDev))
++ if (obj && yaffs_SkipVerification(obj->myDev))
+ return;
+
+- if(!(tags && obj && oh)){
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Verifying object header tags %x obj %x oh %x"TENDSTR),
+- (__u32)tags,(__u32)obj,(__u32)oh));
++ if (!(tags && obj && oh)) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Verifying object header tags %x obj %x oh %x"TENDSTR),
++ (__u32)tags, (__u32)obj, (__u32)oh));
+ return;
+ }
+
+- if(oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
+- oh->type > YAFFS_OBJECT_TYPE_MAX)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header type is illegal value 0x%x"TENDSTR),
+- tags->objectId, oh->type));
+-
+- if(tags->objectId != obj->objectId)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header mismatch objectId %d"TENDSTR),
+- tags->objectId, obj->objectId));
++ if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
++ oh->type > YAFFS_OBJECT_TYPE_MAX)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header type is illegal value 0x%x"TENDSTR),
++ tags->objectId, oh->type));
++
++ if (tags->objectId != obj->objectId)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header mismatch objectId %d"TENDSTR),
++ tags->objectId, obj->objectId));
+
+
+ /*
+@@ -563,46 +571,43 @@ static void yaffs_VerifyObjectHeader(yaf
+ * Tests do not apply to the root object.
+ */
+
+- if(parentCheck && tags->objectId > 1 && !obj->parent)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header mismatch parentId %d obj->parent is NULL"TENDSTR),
+- tags->objectId, oh->parentObjectId));
+-
+-
+- if(parentCheck && obj->parent &&
+- oh->parentObjectId != obj->parent->objectId &&
+- (oh->parentObjectId != YAFFS_OBJECTID_UNLINKED ||
+- obj->parent->objectId != YAFFS_OBJECTID_DELETED))
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header mismatch parentId %d parentObjectId %d"TENDSTR),
+- tags->objectId, oh->parentObjectId, obj->parent->objectId));
++ if (parentCheck && tags->objectId > 1 && !obj->parent)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header mismatch parentId %d obj->parent is NULL"TENDSTR),
++ tags->objectId, oh->parentObjectId));
+
++ if (parentCheck && obj->parent &&
++ oh->parentObjectId != obj->parent->objectId &&
++ (oh->parentObjectId != YAFFS_OBJECTID_UNLINKED ||
++ obj->parent->objectId != YAFFS_OBJECTID_DELETED))
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header mismatch parentId %d parentObjectId %d"TENDSTR),
++ tags->objectId, oh->parentObjectId, obj->parent->objectId));
+
+- if(tags->objectId > 1 && oh->name[0] == 0) /* Null name */
++ if (tags->objectId > 1 && oh->name[0] == 0) /* Null name */
+ T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header name is NULL"TENDSTR),
+- obj->objectId));
++ (TSTR("Obj %d header name is NULL"TENDSTR),
++ obj->objectId));
+
+- if(tags->objectId > 1 && ((__u8)(oh->name[0])) == 0xff) /* Trashed name */
++ if (tags->objectId > 1 && ((__u8)(oh->name[0])) == 0xff) /* Trashed name */
+ T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header name is 0xFF"TENDSTR),
+- obj->objectId));
++ (TSTR("Obj %d header name is 0xFF"TENDSTR),
++ obj->objectId));
+ }
+
+
+
+-static int yaffs_VerifyTnodeWorker(yaffs_Object * obj, yaffs_Tnode * tn,
+- __u32 level, int chunkOffset)
++static int yaffs_VerifyTnodeWorker(yaffs_Object *obj, yaffs_Tnode *tn,
++ __u32 level, int chunkOffset)
+ {
+ int i;
+ yaffs_Device *dev = obj->myDev;
+ int ok = 1;
+- int nTnodeBytes = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+
+ if (tn) {
+ if (level > 0) {
+
+- for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++){
++ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+ if (tn->internal[i]) {
+ ok = yaffs_VerifyTnodeWorker(obj,
+ tn->internal[i],
+@@ -611,20 +616,19 @@ static int yaffs_VerifyTnodeWorker(yaffs
+ }
+ }
+ } else if (level == 0) {
+- int i;
+ yaffs_ExtendedTags tags;
+ __u32 objectId = obj->objectId;
+
+ chunkOffset <<= YAFFS_TNODES_LEVEL0_BITS;
+
+- for(i = 0; i < YAFFS_NTNODES_LEVEL0; i++){
+- __u32 theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
++ for (i = 0; i < YAFFS_NTNODES_LEVEL0; i++) {
++ __u32 theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
+
+- if(theChunk > 0){
++ if (theChunk > 0) {
+ /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),tags.objectId,tags.chunkId,theChunk)); */
+- yaffs_ReadChunkWithTagsFromNAND(dev,theChunk,NULL, &tags);
+- if(tags.objectId != objectId || tags.chunkId != chunkOffset){
+- T(~0,(TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
++ yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL, &tags);
++ if (tags.objectId != objectId || tags.chunkId != chunkOffset) {
++ T(~0, (TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
+ objectId, chunkOffset, theChunk,
+ tags.objectId, tags.chunkId));
+ }
+@@ -646,13 +650,15 @@ static void yaffs_VerifyFile(yaffs_Objec
+ __u32 lastChunk;
+ __u32 x;
+ __u32 i;
+- int ok;
+ yaffs_Device *dev;
+ yaffs_ExtendedTags tags;
+ yaffs_Tnode *tn;
+ __u32 objectId;
+
+- if(obj && yaffs_SkipVerification(obj->myDev))
++ if (!obj)
++ return;
++
++ if (yaffs_SkipVerification(obj->myDev))
+ return;
+
+ dev = obj->myDev;
+@@ -662,17 +668,17 @@ static void yaffs_VerifyFile(yaffs_Objec
+ lastChunk = obj->variant.fileVariant.fileSize / dev->nDataBytesPerChunk + 1;
+ x = lastChunk >> YAFFS_TNODES_LEVEL0_BITS;
+ requiredTallness = 0;
+- while (x> 0) {
++ while (x > 0) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ requiredTallness++;
+ }
+
+ actualTallness = obj->variant.fileVariant.topLevel;
+
+- if(requiredTallness > actualTallness )
++ if (requiredTallness > actualTallness)
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Obj %d had tnode tallness %d, needs to be %d"TENDSTR),
+- obj->objectId,actualTallness, requiredTallness));
++ obj->objectId, actualTallness, requiredTallness));
+
+
+ /* Check that the chunks in the tnode tree are all correct.
+@@ -680,39 +686,31 @@ static void yaffs_VerifyFile(yaffs_Objec
+ * checking the tags for every chunk match.
+ */
+
+- if(yaffs_SkipNANDVerification(dev))
++ if (yaffs_SkipNANDVerification(dev))
+ return;
+
+- for(i = 1; i <= lastChunk; i++){
+- tn = yaffs_FindLevel0Tnode(dev, &obj->variant.fileVariant,i);
++ for (i = 1; i <= lastChunk; i++) {
++ tn = yaffs_FindLevel0Tnode(dev, &obj->variant.fileVariant, i);
+
+ if (tn) {
+- __u32 theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
+- if(theChunk > 0){
++ __u32 theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
++ if (theChunk > 0) {
+ /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),objectId,i,theChunk)); */
+- yaffs_ReadChunkWithTagsFromNAND(dev,theChunk,NULL, &tags);
+- if(tags.objectId != objectId || tags.chunkId != i){
+- T(~0,(TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
++ yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL, &tags);
++ if (tags.objectId != objectId || tags.chunkId != i) {
++ T(~0, (TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
+ objectId, i, theChunk,
+ tags.objectId, tags.chunkId));
+ }
+ }
+ }
+-
+ }
+-
+ }
+
+-static void yaffs_VerifyDirectory(yaffs_Object *obj)
+-{
+- if(obj && yaffs_SkipVerification(obj->myDev))
+- return;
+-
+-}
+
+ static void yaffs_VerifyHardLink(yaffs_Object *obj)
+ {
+- if(obj && yaffs_SkipVerification(obj->myDev))
++ if (obj && yaffs_SkipVerification(obj->myDev))
+ return;
+
+ /* Verify sane equivalent object */
+@@ -720,7 +718,7 @@ static void yaffs_VerifyHardLink(yaffs_O
+
+ static void yaffs_VerifySymlink(yaffs_Object *obj)
+ {
+- if(obj && yaffs_SkipVerification(obj->myDev))
++ if (obj && yaffs_SkipVerification(obj->myDev))
+ return;
+
+ /* Verify symlink string */
+@@ -728,7 +726,7 @@ static void yaffs_VerifySymlink(yaffs_Ob
+
+ static void yaffs_VerifySpecial(yaffs_Object *obj)
+ {
+- if(obj && yaffs_SkipVerification(obj->myDev))
++ if (obj && yaffs_SkipVerification(obj->myDev))
+ return;
+ }
+
+@@ -740,14 +738,19 @@ static void yaffs_VerifyObject(yaffs_Obj
+ __u32 chunkMax;
+
+ __u32 chunkIdOk;
+- __u32 chunkIsLive;
++ __u32 chunkInRange;
++ __u32 chunkShouldNotBeDeleted;
++ __u32 chunkValid;
++
++ if (!obj)
++ return;
+
+- if(!obj)
++ if (obj->beingCreated)
+ return;
+
+ dev = obj->myDev;
+
+- if(yaffs_SkipVerification(dev))
++ if (yaffs_SkipVerification(dev))
+ return;
+
+ /* Check sane object header chunk */
+@@ -755,50 +758,54 @@ static void yaffs_VerifyObject(yaffs_Obj
+ chunkMin = dev->internalStartBlock * dev->nChunksPerBlock;
+ chunkMax = (dev->internalEndBlock+1) * dev->nChunksPerBlock - 1;
+
+- chunkIdOk = (obj->chunkId >= chunkMin && obj->chunkId <= chunkMax);
+- chunkIsLive = chunkIdOk &&
++ chunkInRange = (((unsigned)(obj->hdrChunk)) >= chunkMin && ((unsigned)(obj->hdrChunk)) <= chunkMax);
++ chunkIdOk = chunkInRange || obj->hdrChunk == 0;
++ chunkValid = chunkInRange &&
+ yaffs_CheckChunkBit(dev,
+- obj->chunkId / dev->nChunksPerBlock,
+- obj->chunkId % dev->nChunksPerBlock);
+- if(!obj->fake &&
+- (!chunkIdOk || !chunkIsLive)) {
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d has chunkId %d %s %s"TENDSTR),
+- obj->objectId,obj->chunkId,
+- chunkIdOk ? "" : ",out of range",
+- chunkIsLive || !chunkIdOk ? "" : ",marked as deleted"));
++ obj->hdrChunk / dev->nChunksPerBlock,
++ obj->hdrChunk % dev->nChunksPerBlock);
++ chunkShouldNotBeDeleted = chunkInRange && !chunkValid;
++
++ if (!obj->fake &&
++ (!chunkIdOk || chunkShouldNotBeDeleted)) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d has chunkId %d %s %s"TENDSTR),
++ obj->objectId, obj->hdrChunk,
++ chunkIdOk ? "" : ",out of range",
++ chunkShouldNotBeDeleted ? ",marked as deleted" : ""));
+ }
+
+- if(chunkIdOk && chunkIsLive &&!yaffs_SkipNANDVerification(dev)) {
++ if (chunkValid && !yaffs_SkipNANDVerification(dev)) {
+ yaffs_ExtendedTags tags;
+ yaffs_ObjectHeader *oh;
+- __u8 *buffer = yaffs_GetTempBuffer(dev,__LINE__);
++ __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
+
+ oh = (yaffs_ObjectHeader *)buffer;
+
+- yaffs_ReadChunkWithTagsFromNAND(dev, obj->chunkId,buffer, &tags);
++ yaffs_ReadChunkWithTagsFromNAND(dev, obj->hdrChunk, buffer,
++ &tags);
+
+- yaffs_VerifyObjectHeader(obj,oh,&tags,1);
++ yaffs_VerifyObjectHeader(obj, oh, &tags, 1);
+
+- yaffs_ReleaseTempBuffer(dev,buffer,__LINE__);
++ yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
+ }
+
+ /* Verify it has a parent */
+- if(obj && !obj->fake &&
+- (!obj->parent || obj->parent->myDev != dev)){
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d has parent pointer %p which does not look like an object"TENDSTR),
+- obj->objectId,obj->parent));
++ if (obj && !obj->fake &&
++ (!obj->parent || obj->parent->myDev != dev)) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d has parent pointer %p which does not look like an object"TENDSTR),
++ obj->objectId, obj->parent));
+ }
+
+ /* Verify parent is a directory */
+- if(obj->parent && obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY){
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d's parent is not a directory (type %d)"TENDSTR),
+- obj->objectId,obj->parent->variantType));
++ if (obj->parent && obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d's parent is not a directory (type %d)"TENDSTR),
++ obj->objectId, obj->parent->variantType));
+ }
+
+- switch(obj->variantType){
++ switch (obj->variantType) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ yaffs_VerifyFile(obj);
+ break;
+@@ -818,33 +825,30 @@ static void yaffs_VerifyObject(yaffs_Obj
+ default:
+ T(YAFFS_TRACE_VERIFY,
+ (TSTR("Obj %d has illegaltype %d"TENDSTR),
+- obj->objectId,obj->variantType));
++ obj->objectId, obj->variantType));
+ break;
+ }
+-
+-
+ }
+
+ static void yaffs_VerifyObjects(yaffs_Device *dev)
+ {
+ yaffs_Object *obj;
+ int i;
+- struct list_head *lh;
++ struct ylist_head *lh;
+
+- if(yaffs_SkipVerification(dev))
++ if (yaffs_SkipVerification(dev))
+ return;
+
+ /* Iterate through the objects in each hash entry */
+
+- for(i = 0; i < YAFFS_NOBJECT_BUCKETS; i++){
+- list_for_each(lh, &dev->objectBucket[i].list) {
++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++ ylist_for_each(lh, &dev->objectBucket[i].list) {
+ if (lh) {
+- obj = list_entry(lh, yaffs_Object, hashLink);
++ obj = ylist_entry(lh, yaffs_Object, hashLink);
+ yaffs_VerifyObject(obj);
+ }
+ }
+- }
+-
++ }
+ }
+
+
+@@ -855,19 +859,20 @@ static void yaffs_VerifyObjects(yaffs_De
+ static Y_INLINE int yaffs_HashFunction(int n)
+ {
+ n = abs(n);
+- return (n % YAFFS_NOBJECT_BUCKETS);
++ return n % YAFFS_NOBJECT_BUCKETS;
+ }
+
+ /*
+- * Access functions to useful fake objects
++ * Access functions to useful fake objects.
++ * Note that root might have a presence in NAND if permissions are set.
+ */
+
+-yaffs_Object *yaffs_Root(yaffs_Device * dev)
++yaffs_Object *yaffs_Root(yaffs_Device *dev)
+ {
+ return dev->rootDir;
+ }
+
+-yaffs_Object *yaffs_LostNFound(yaffs_Device * dev)
++yaffs_Object *yaffs_LostNFound(yaffs_Device *dev)
+ {
+ return dev->lostNFoundDir;
+ }
+@@ -877,7 +882,7 @@ yaffs_Object *yaffs_LostNFound(yaffs_Dev
+ * Erased NAND checking functions
+ */
+
+-int yaffs_CheckFF(__u8 * buffer, int nBytes)
++int yaffs_CheckFF(__u8 *buffer, int nBytes)
+ {
+ /* Horrible, slow implementation */
+ while (nBytes--) {
+@@ -889,9 +894,8 @@ int yaffs_CheckFF(__u8 * buffer, int nBy
+ }
+
+ static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND)
++ int chunkInNAND)
+ {
+-
+ int retval = YAFFS_OK;
+ __u8 *data = yaffs_GetTempBuffer(dev, __LINE__);
+ yaffs_ExtendedTags tags;
+@@ -899,10 +903,9 @@ static int yaffs_CheckChunkErased(struct
+
+ result = yaffs_ReadChunkWithTagsFromNAND(dev, chunkInNAND, data, &tags);
+
+- if(tags.eccResult > YAFFS_ECC_RESULT_NO_ERROR)
++ if (tags.eccResult > YAFFS_ECC_RESULT_NO_ERROR)
+ retval = YAFFS_FAIL;
+
+-
+ if (!yaffs_CheckFF(data, dev->nDataBytesPerChunk) || tags.chunkUsed) {
+ T(YAFFS_TRACE_NANDACCESS,
+ (TSTR("Chunk %d not erased" TENDSTR), chunkInNAND));
+@@ -915,11 +918,10 @@ static int yaffs_CheckChunkErased(struct
+
+ }
+
+-
+ static int yaffs_WriteNewChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev,
+- const __u8 * data,
+- yaffs_ExtendedTags * tags,
+- int useReserve)
++ const __u8 *data,
++ yaffs_ExtendedTags *tags,
++ int useReserve)
+ {
+ int attempts = 0;
+ int writeOk = 0;
+@@ -972,7 +974,7 @@ static int yaffs_WriteNewChunkWithTagsTo
+ erasedOk = yaffs_CheckChunkErased(dev, chunk);
+ if (erasedOk != YAFFS_OK) {
+ T(YAFFS_TRACE_ERROR,
+- (TSTR ("**>> yaffs chunk %d was not erased"
++ (TSTR("**>> yaffs chunk %d was not erased"
+ TENDSTR), chunk));
+
+ /* try another chunk */
+@@ -992,7 +994,11 @@ static int yaffs_WriteNewChunkWithTagsTo
+ /* Copy the data into the robustification buffer */
+ yaffs_HandleWriteChunkOk(dev, chunk, data, tags);
+
+- } while (writeOk != YAFFS_OK && attempts < yaffs_wr_attempts);
++ } while (writeOk != YAFFS_OK &&
++ (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
++
++ if (!writeOk)
++ chunk = -1;
+
+ if (attempts > 1) {
+ T(YAFFS_TRACE_ERROR,
+@@ -1009,13 +1015,35 @@ static int yaffs_WriteNewChunkWithTagsTo
+ * Block retiring for handling a broken block.
+ */
+
+-static void yaffs_RetireBlock(yaffs_Device * dev, int blockInNAND)
++static void yaffs_RetireBlock(yaffs_Device *dev, int blockInNAND)
+ {
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
+
+ yaffs_InvalidateCheckpoint(dev);
+
+- yaffs_MarkBlockBad(dev, blockInNAND);
++ if (yaffs_MarkBlockBad(dev, blockInNAND) != YAFFS_OK) {
++ if (yaffs_EraseBlockInNAND(dev, blockInNAND) != YAFFS_OK) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR(
++ "yaffs: Failed to mark bad and erase block %d"
++ TENDSTR), blockInNAND));
++ } else {
++ yaffs_ExtendedTags tags;
++ int chunkId = blockInNAND * dev->nChunksPerBlock;
++
++ __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
++
++ memset(buffer, 0xff, dev->nDataBytesPerChunk);
++ yaffs_InitialiseTags(&tags);
++ tags.sequenceNumber = YAFFS_SEQUENCE_BAD_BLOCK;
++ if (dev->writeChunkWithTagsToNAND(dev, chunkId -
++ dev->chunkOffset, buffer, &tags) != YAFFS_OK)
++ T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Failed to "
++ TCONT("write bad block marker to block %d")
++ TENDSTR), blockInNAND));
++
++ yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
++ }
++ }
+
+ bi->blockState = YAFFS_BLOCK_STATE_DEAD;
+ bi->gcPrioritise = 0;
+@@ -1029,49 +1057,45 @@ static void yaffs_RetireBlock(yaffs_Devi
+ *
+ */
+
+-static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data,
+- const yaffs_ExtendedTags * tags)
++static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data,
++ const yaffs_ExtendedTags *tags)
+ {
+ }
+
+-static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
+- const yaffs_ExtendedTags * tags)
++static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
++ const yaffs_ExtendedTags *tags)
+ {
+ }
+
+ void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi)
+ {
+- if(!bi->gcPrioritise){
++ if (!bi->gcPrioritise) {
+ bi->gcPrioritise = 1;
+ dev->hasPendingPrioritisedGCs = 1;
+- bi->chunkErrorStrikes ++;
++ bi->chunkErrorStrikes++;
+
+- if(bi->chunkErrorStrikes > 3){
++ if (bi->chunkErrorStrikes > 3) {
+ bi->needsRetiring = 1; /* Too many strikes, so retire this */
+ T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Block struck out" TENDSTR)));
+
+ }
+-
+ }
+ }
+
+-static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND, int erasedOk)
++static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND,
++ int erasedOk)
+ {
+-
+ int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
+
+- yaffs_HandleChunkError(dev,bi);
++ yaffs_HandleChunkError(dev, bi);
+
+-
+- if(erasedOk ) {
++ if (erasedOk) {
+ /* Was an actual write failure, so mark the block for retirement */
+ bi->needsRetiring = 1;
+ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ (TSTR("**>> Block %d needs retiring" TENDSTR), blockInNAND));
+-
+-
+ }
+
+ /* Delete the chunk */
+@@ -1081,12 +1105,12 @@ static void yaffs_HandleWriteChunkError(
+
+ /*---------------- Name handling functions ------------*/
+
+-static __u16 yaffs_CalcNameSum(const YCHAR * name)
++static __u16 yaffs_CalcNameSum(const YCHAR *name)
+ {
+ __u16 sum = 0;
+ __u16 i = 1;
+
+- YUCHAR *bname = (YUCHAR *) name;
++ const YUCHAR *bname = (const YUCHAR *) name;
+ if (bname) {
+ while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH/2))) {
+
+@@ -1102,14 +1126,14 @@ static __u16 yaffs_CalcNameSum(const YCH
+ return sum;
+ }
+
+-static void yaffs_SetObjectName(yaffs_Object * obj, const YCHAR * name)
++static void yaffs_SetObjectName(yaffs_Object *obj, const YCHAR *name)
+ {
+ #ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+- if (name && yaffs_strlen(name) <= YAFFS_SHORT_NAME_LENGTH) {
++ memset(obj->shortName, 0, sizeof(YCHAR) * (YAFFS_SHORT_NAME_LENGTH+1));
++ if (name && yaffs_strlen(name) <= YAFFS_SHORT_NAME_LENGTH)
+ yaffs_strcpy(obj->shortName, name);
+- } else {
++ else
+ obj->shortName[0] = _Y('\0');
+- }
+ #endif
+ obj->sum = yaffs_CalcNameSum(name);
+ }
+@@ -1126,7 +1150,7 @@ static void yaffs_SetObjectName(yaffs_Ob
+ * Don't use this function directly
+ */
+
+-static int yaffs_CreateTnodes(yaffs_Device * dev, int nTnodes)
++static int yaffs_CreateTnodes(yaffs_Device *dev, int nTnodes)
+ {
+ int i;
+ int tnodeSize;
+@@ -1143,6 +1167,9 @@ static int yaffs_CreateTnodes(yaffs_Devi
+ * Must be a multiple of 32-bits */
+ tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+
++ if (tnodeSize < sizeof(yaffs_Tnode))
++ tnodeSize = sizeof(yaffs_Tnode);
++
+ /* make these things */
+
+ newTnodes = YMALLOC(nTnodes * tnodeSize);
+@@ -1150,7 +1177,7 @@ static int yaffs_CreateTnodes(yaffs_Devi
+
+ if (!newTnodes) {
+ T(YAFFS_TRACE_ERROR,
+- (TSTR("yaffs: Could not allocate Tnodes" TENDSTR)));
++ (TSTR("yaffs: Could not allocate Tnodes" TENDSTR)));
+ return YAFFS_FAIL;
+ }
+
+@@ -1170,7 +1197,7 @@ static int yaffs_CreateTnodes(yaffs_Devi
+ dev->freeTnodes = newTnodes;
+ #else
+ /* New hookup for wide tnodes */
+- for(i = 0; i < nTnodes -1; i++) {
++ for (i = 0; i < nTnodes - 1; i++) {
+ curr = (yaffs_Tnode *) &mem[i * tnodeSize];
+ next = (yaffs_Tnode *) &mem[(i+1) * tnodeSize];
+ curr->internal[0] = next;
+@@ -1197,7 +1224,6 @@ static int yaffs_CreateTnodes(yaffs_Devi
+ (TSTR
+ ("yaffs: Could not add tnodes to management list" TENDSTR)));
+ return YAFFS_FAIL;
+-
+ } else {
+ tnl->tnodes = newTnodes;
+ tnl->next = dev->allocatedTnodeList;
+@@ -1211,14 +1237,13 @@ static int yaffs_CreateTnodes(yaffs_Devi
+
+ /* GetTnode gets us a clean tnode. Tries to allocate more if we run out */
+
+-static yaffs_Tnode *yaffs_GetTnodeRaw(yaffs_Device * dev)
++static yaffs_Tnode *yaffs_GetTnodeRaw(yaffs_Device *dev)
+ {
+ yaffs_Tnode *tn = NULL;
+
+ /* If there are none left make more */
+- if (!dev->freeTnodes) {
++ if (!dev->freeTnodes)
+ yaffs_CreateTnodes(dev, YAFFS_ALLOCATION_NTNODES);
+- }
+
+ if (dev->freeTnodes) {
+ tn = dev->freeTnodes;
+@@ -1233,21 +1258,27 @@ static yaffs_Tnode *yaffs_GetTnodeRaw(ya
+ dev->nFreeTnodes--;
+ }
+
++ dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
++
+ return tn;
+ }
+
+-static yaffs_Tnode *yaffs_GetTnode(yaffs_Device * dev)
++static yaffs_Tnode *yaffs_GetTnode(yaffs_Device *dev)
+ {
+ yaffs_Tnode *tn = yaffs_GetTnodeRaw(dev);
++ int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+
+- if(tn)
+- memset(tn, 0, (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
++ if (tnodeSize < sizeof(yaffs_Tnode))
++ tnodeSize = sizeof(yaffs_Tnode);
++
++ if (tn)
++ memset(tn, 0, tnodeSize);
+
+ return tn;
+ }
+
+ /* FreeTnode frees up a tnode and puts it back on the free list */
+-static void yaffs_FreeTnode(yaffs_Device * dev, yaffs_Tnode * tn)
++static void yaffs_FreeTnode(yaffs_Device *dev, yaffs_Tnode *tn)
+ {
+ if (tn) {
+ #ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+@@ -1262,9 +1293,10 @@ static void yaffs_FreeTnode(yaffs_Device
+ dev->freeTnodes = tn;
+ dev->nFreeTnodes++;
+ }
++ dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
+ }
+
+-static void yaffs_DeinitialiseTnodes(yaffs_Device * dev)
++static void yaffs_DeinitialiseTnodes(yaffs_Device *dev)
+ {
+ /* Free the list of allocated tnodes */
+ yaffs_TnodeList *tmp;
+@@ -1282,71 +1314,72 @@ static void yaffs_DeinitialiseTnodes(yaf
+ dev->nFreeTnodes = 0;
+ }
+
+-static void yaffs_InitialiseTnodes(yaffs_Device * dev)
++static void yaffs_InitialiseTnodes(yaffs_Device *dev)
+ {
+ dev->allocatedTnodeList = NULL;
+ dev->freeTnodes = NULL;
+ dev->nFreeTnodes = 0;
+ dev->nTnodesCreated = 0;
+-
+ }
+
+
+-void yaffs_PutLevel0Tnode(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos, unsigned val)
++void yaffs_PutLevel0Tnode(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos,
++ unsigned val)
+ {
+- __u32 *map = (__u32 *)tn;
+- __u32 bitInMap;
+- __u32 bitInWord;
+- __u32 wordInMap;
+- __u32 mask;
++ __u32 *map = (__u32 *)tn;
++ __u32 bitInMap;
++ __u32 bitInWord;
++ __u32 wordInMap;
++ __u32 mask;
+
+- pos &= YAFFS_TNODES_LEVEL0_MASK;
+- val >>= dev->chunkGroupBits;
++ pos &= YAFFS_TNODES_LEVEL0_MASK;
++ val >>= dev->chunkGroupBits;
+
+- bitInMap = pos * dev->tnodeWidth;
+- wordInMap = bitInMap /32;
+- bitInWord = bitInMap & (32 -1);
++ bitInMap = pos * dev->tnodeWidth;
++ wordInMap = bitInMap / 32;
++ bitInWord = bitInMap & (32 - 1);
+
+- mask = dev->tnodeMask << bitInWord;
++ mask = dev->tnodeMask << bitInWord;
+
+- map[wordInMap] &= ~mask;
+- map[wordInMap] |= (mask & (val << bitInWord));
++ map[wordInMap] &= ~mask;
++ map[wordInMap] |= (mask & (val << bitInWord));
+
+- if(dev->tnodeWidth > (32-bitInWord)) {
+- bitInWord = (32 - bitInWord);
+- wordInMap++;;
+- mask = dev->tnodeMask >> (/*dev->tnodeWidth -*/ bitInWord);
+- map[wordInMap] &= ~mask;
+- map[wordInMap] |= (mask & (val >> bitInWord));
+- }
++ if (dev->tnodeWidth > (32 - bitInWord)) {
++ bitInWord = (32 - bitInWord);
++ wordInMap++;
++ mask = dev->tnodeMask >> (/*dev->tnodeWidth -*/ bitInWord);
++ map[wordInMap] &= ~mask;
++ map[wordInMap] |= (mask & (val >> bitInWord));
++ }
+ }
+
+-static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos)
++static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn,
++ unsigned pos)
+ {
+- __u32 *map = (__u32 *)tn;
+- __u32 bitInMap;
+- __u32 bitInWord;
+- __u32 wordInMap;
+- __u32 val;
++ __u32 *map = (__u32 *)tn;
++ __u32 bitInMap;
++ __u32 bitInWord;
++ __u32 wordInMap;
++ __u32 val;
+
+- pos &= YAFFS_TNODES_LEVEL0_MASK;
++ pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+- bitInMap = pos * dev->tnodeWidth;
+- wordInMap = bitInMap /32;
+- bitInWord = bitInMap & (32 -1);
++ bitInMap = pos * dev->tnodeWidth;
++ wordInMap = bitInMap / 32;
++ bitInWord = bitInMap & (32 - 1);
+
+- val = map[wordInMap] >> bitInWord;
++ val = map[wordInMap] >> bitInWord;
+
+- if(dev->tnodeWidth > (32-bitInWord)) {
+- bitInWord = (32 - bitInWord);
+- wordInMap++;;
+- val |= (map[wordInMap] << bitInWord);
+- }
++ if (dev->tnodeWidth > (32 - bitInWord)) {
++ bitInWord = (32 - bitInWord);
++ wordInMap++;
++ val |= (map[wordInMap] << bitInWord);
++ }
+
+- val &= dev->tnodeMask;
+- val <<= dev->chunkGroupBits;
++ val &= dev->tnodeMask;
++ val <<= dev->chunkGroupBits;
+
+- return val;
++ return val;
+ }
+
+ /* ------------------- End of individual tnode manipulation -----------------*/
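As an aside on the wide-tnode handling shown in yaffs_PutLevel0Tnode() and yaffs_GetChunkGroupBase() above: each level-0 entry is a tnodeWidth-bit value packed into an array of 32-bit words, so an entry can straddle a word boundary. The standalone sketch below is not part of the patch; WIDTH and ENTRIES are made-up example values, and the real code additionally shifts the stored value by dev->chunkGroupBits. It only illustrates the same put/get bit arithmetic.

#include <stdio.h>
#include <stdint.h>

#define WIDTH   18u                    /* bits per entry (example value only) */
#define ENTRIES 16u                    /* entries per level-0 tnode (example) */
#define MASK    ((1u << WIDTH) - 1u)

static uint32_t map[(WIDTH * ENTRIES + 31u) / 32u];

static void put_entry(unsigned pos, uint32_t val)
{
	unsigned bit  = pos * WIDTH;
	unsigned word = bit / 32u;
	unsigned off  = bit & 31u;

	map[word] &= ~(MASK << off);
	map[word] |= (val & MASK) << off;

	if (WIDTH > 32u - off) {           /* entry spills into the next word */
		unsigned spill = 32u - off;
		map[word + 1] &= ~(MASK >> spill);
		map[word + 1] |= (val & MASK) >> spill;
	}
}

static uint32_t get_entry(unsigned pos)
{
	unsigned bit  = pos * WIDTH;
	unsigned word = bit / 32u;
	unsigned off  = bit & 31u;
	uint32_t val  = map[word] >> off;

	if (WIDTH > 32u - off)             /* pull in the spilled high bits */
		val |= map[word + 1] << (32u - off);

	return val & MASK;
}

int main(void)
{
	put_entry(3, 0x2ABCD);
	printf("entry 3 = 0x%X\n", get_entry(3));   /* prints 0x2ABCD */
	return 0;
}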
+@@ -1357,24 +1390,21 @@ static __u32 yaffs_GetChunkGroupBase(yaf
+ */
+
+ /* FindLevel0Tnode finds the level 0 tnode, if one exists. */
+-static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device * dev,
+- yaffs_FileStructure * fStruct,
+- __u32 chunkId)
++static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device *dev,
++ yaffs_FileStructure *fStruct,
++ __u32 chunkId)
+ {
+-
+ yaffs_Tnode *tn = fStruct->top;
+ __u32 i;
+ int requiredTallness;
+ int level = fStruct->topLevel;
+
+ /* Check sane level and chunk Id */
+- if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL) {
++ if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+- }
+
+- if (chunkId > YAFFS_MAX_CHUNK_ID) {
++ if (chunkId > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+- }
+
+ /* First check we're tall enough (ie enough topLevel) */
+
+@@ -1385,22 +1415,17 @@ static yaffs_Tnode *yaffs_FindLevel0Tnod
+ requiredTallness++;
+ }
+
+- if (requiredTallness > fStruct->topLevel) {
+- /* Not tall enough, so we can't find it, return NULL. */
+- return NULL;
+- }
++ if (requiredTallness > fStruct->topLevel)
++ return NULL; /* Not tall enough, so we can't find it */
+
+ /* Traverse down to level 0 */
+ while (level > 0 && tn) {
+- tn = tn->
+- internal[(chunkId >>
+- ( YAFFS_TNODES_LEVEL0_BITS +
+- (level - 1) *
+- YAFFS_TNODES_INTERNAL_BITS)
+- ) &
+- YAFFS_TNODES_INTERNAL_MASK];
++ tn = tn->internal[(chunkId >>
++ (YAFFS_TNODES_LEVEL0_BITS +
++ (level - 1) *
++ YAFFS_TNODES_INTERNAL_BITS)) &
++ YAFFS_TNODES_INTERNAL_MASK];
+ level--;
+-
+ }
+
+ return tn;
+@@ -1417,12 +1442,11 @@ static yaffs_Tnode *yaffs_FindLevel0Tnod
+ * be plugged into the ttree.
+ */
+
+-static yaffs_Tnode *yaffs_AddOrFindLevel0Tnode(yaffs_Device * dev,
+- yaffs_FileStructure * fStruct,
+- __u32 chunkId,
+- yaffs_Tnode *passedTn)
++static yaffs_Tnode *yaffs_AddOrFindLevel0Tnode(yaffs_Device *dev,
++ yaffs_FileStructure *fStruct,
++ __u32 chunkId,
++ yaffs_Tnode *passedTn)
+ {
+-
+ int requiredTallness;
+ int i;
+ int l;
+@@ -1432,13 +1456,11 @@ static yaffs_Tnode *yaffs_AddOrFindLevel
+
+
+ /* Check sane level and page Id */
+- if (fStruct->topLevel < 0 || fStruct->topLevel > YAFFS_TNODES_MAX_LEVEL) {
++ if (fStruct->topLevel < 0 || fStruct->topLevel > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+- }
+
+- if (chunkId > YAFFS_MAX_CHUNK_ID) {
++ if (chunkId > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+- }
+
+ /* First check we're tall enough (ie enough topLevel) */
+
+@@ -1451,7 +1473,7 @@ static yaffs_Tnode *yaffs_AddOrFindLevel
+
+
+ if (requiredTallness > fStruct->topLevel) {
+- /* Not tall enough,gotta make the tree taller */
++ /* Not tall enough, gotta make the tree taller */
+ for (i = fStruct->topLevel; i < requiredTallness; i++) {
+
+ tn = yaffs_GetTnode(dev);
+@@ -1473,27 +1495,27 @@ static yaffs_Tnode *yaffs_AddOrFindLevel
+ l = fStruct->topLevel;
+ tn = fStruct->top;
+
+- if(l > 0) {
++ if (l > 0) {
+ while (l > 0 && tn) {
+ x = (chunkId >>
+- ( YAFFS_TNODES_LEVEL0_BITS +
++ (YAFFS_TNODES_LEVEL0_BITS +
+ (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK;
+
+
+- if((l>1) && !tn->internal[x]){
++ if ((l > 1) && !tn->internal[x]) {
+ /* Add missing non-level-zero tnode */
+ tn->internal[x] = yaffs_GetTnode(dev);
+
+- } else if(l == 1) {
++ } else if (l == 1) {
+ /* Looking from level 1 at level 0 */
+- if (passedTn) {
++ if (passedTn) {
+ /* If we already have one, then release it.*/
+- if(tn->internal[x])
+- yaffs_FreeTnode(dev,tn->internal[x]);
++ if (tn->internal[x])
++ yaffs_FreeTnode(dev, tn->internal[x]);
+ tn->internal[x] = passedTn;
+
+- } else if(!tn->internal[x]) {
++ } else if (!tn->internal[x]) {
+ /* Don't have one, none passed in */
+ tn->internal[x] = yaffs_GetTnode(dev);
+ }
+@@ -1504,31 +1526,29 @@ static yaffs_Tnode *yaffs_AddOrFindLevel
+ }
+ } else {
+ /* top is level 0 */
+- if(passedTn) {
+- memcpy(tn,passedTn,(dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
+- yaffs_FreeTnode(dev,passedTn);
++ if (passedTn) {
++ memcpy(tn, passedTn, (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
++ yaffs_FreeTnode(dev, passedTn);
+ }
+ }
+
+ return tn;
+ }
+
+-static int yaffs_FindChunkInGroup(yaffs_Device * dev, int theChunk,
+- yaffs_ExtendedTags * tags, int objectId,
+- int chunkInInode)
++static int yaffs_FindChunkInGroup(yaffs_Device *dev, int theChunk,
++ yaffs_ExtendedTags *tags, int objectId,
++ int chunkInInode)
+ {
+ int j;
+
+ for (j = 0; theChunk && j < dev->chunkGroupSize; j++) {
+- if (yaffs_CheckChunkBit
+- (dev, theChunk / dev->nChunksPerBlock,
+- theChunk % dev->nChunksPerBlock)) {
++ if (yaffs_CheckChunkBit(dev, theChunk / dev->nChunksPerBlock,
++ theChunk % dev->nChunksPerBlock)) {
+ yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL,
+ tags);
+ if (yaffs_TagsMatch(tags, objectId, chunkInInode)) {
+ /* found it; */
+ return theChunk;
+-
+ }
+ }
+ theChunk++;
+@@ -1543,7 +1563,7 @@ static int yaffs_FindChunkInGroup(yaffs_
+ * Returns 0 if it stopped early due to hitting the limit and the delete is incomplete.
+ */
+
+-static int yaffs_DeleteWorker(yaffs_Object * in, yaffs_Tnode * tn, __u32 level,
++static int yaffs_DeleteWorker(yaffs_Object *in, yaffs_Tnode *tn, __u32 level,
+ int chunkOffset, int *limit)
+ {
+ int i;
+@@ -1557,7 +1577,6 @@ static int yaffs_DeleteWorker(yaffs_Obje
+
+ if (tn) {
+ if (level > 0) {
+-
+ for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
+ i--) {
+ if (tn->internal[i]) {
+@@ -1565,17 +1584,17 @@ static int yaffs_DeleteWorker(yaffs_Obje
+ allDone = 0;
+ } else {
+ allDone =
+- yaffs_DeleteWorker(in,
+- tn->
+- internal
+- [i],
+- level -
+- 1,
+- (chunkOffset
++ yaffs_DeleteWorker(in,
++ tn->
++ internal
++ [i],
++ level -
++ 1,
++ (chunkOffset
+ <<
+ YAFFS_TNODES_INTERNAL_BITS)
+- + i,
+- limit);
++ + i,
++ limit);
+ }
+ if (allDone) {
+ yaffs_FreeTnode(dev,
+@@ -1584,27 +1603,25 @@ static int yaffs_DeleteWorker(yaffs_Obje
+ tn->internal[i] = NULL;
+ }
+ }
+-
+ }
+ return (allDone) ? 1 : 0;
+ } else if (level == 0) {
+ int hitLimit = 0;
+
+ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0 && !hitLimit;
+- i--) {
+- theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
++ i--) {
++ theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
+ if (theChunk) {
+
+- chunkInInode =
+- (chunkOffset <<
+- YAFFS_TNODES_LEVEL0_BITS) + i;
++ chunkInInode = (chunkOffset <<
++ YAFFS_TNODES_LEVEL0_BITS) + i;
+
+ foundChunk =
+- yaffs_FindChunkInGroup(dev,
+- theChunk,
+- &tags,
+- in->objectId,
+- chunkInInode);
++ yaffs_FindChunkInGroup(dev,
++ theChunk,
++ &tags,
++ in->objectId,
++ chunkInInode);
+
+ if (foundChunk > 0) {
+ yaffs_DeleteChunk(dev,
+@@ -1613,14 +1630,13 @@ static int yaffs_DeleteWorker(yaffs_Obje
+ in->nDataChunks--;
+ if (limit) {
+ *limit = *limit - 1;
+- if (*limit <= 0) {
++ if (*limit <= 0)
+ hitLimit = 1;
+- }
+ }
+
+ }
+
+- yaffs_PutLevel0Tnode(dev,tn,i,0);
++ yaffs_PutLevel0Tnode(dev, tn, i, 0);
+ }
+
+ }
+@@ -1634,9 +1650,8 @@ static int yaffs_DeleteWorker(yaffs_Obje
+
+ }
+
+-static void yaffs_SoftDeleteChunk(yaffs_Device * dev, int chunk)
++static void yaffs_SoftDeleteChunk(yaffs_Device *dev, int chunk)
+ {
+-
+ yaffs_BlockInfo *theBlock;
+
+ T(YAFFS_TRACE_DELETION, (TSTR("soft delete chunk %d" TENDSTR), chunk));
+@@ -1654,7 +1669,7 @@ static void yaffs_SoftDeleteChunk(yaffs_
+ * Thus, essentially this is the same as DeleteWorker except that the chunks are soft deleted.
+ */
+
+-static int yaffs_SoftDeleteWorker(yaffs_Object * in, yaffs_Tnode * tn,
++static int yaffs_SoftDeleteWorker(yaffs_Object *in, yaffs_Tnode *tn,
+ __u32 level, int chunkOffset)
+ {
+ int i;
+@@ -1691,14 +1706,14 @@ static int yaffs_SoftDeleteWorker(yaffs_
+ } else if (level == 0) {
+
+ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
+- theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
++ theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
+ if (theChunk) {
+ /* Note this does not find the real chunk, only the chunk group.
+ * We make an assumption that a chunk group is not larger than
+ * a block.
+ */
+ yaffs_SoftDeleteChunk(dev, theChunk);
+- yaffs_PutLevel0Tnode(dev,tn,i,0);
++ yaffs_PutLevel0Tnode(dev, tn, i, 0);
+ }
+
+ }
+@@ -1712,7 +1727,7 @@ static int yaffs_SoftDeleteWorker(yaffs_
+
+ }
+
+-static void yaffs_SoftDeleteFile(yaffs_Object * obj)
++static void yaffs_SoftDeleteFile(yaffs_Object *obj)
+ {
+ if (obj->deleted &&
+ obj->variantType == YAFFS_OBJECT_TYPE_FILE && !obj->softDeleted) {
+@@ -1746,8 +1761,8 @@ static void yaffs_SoftDeleteFile(yaffs_O
+ * by a special case.
+ */
+
+-static yaffs_Tnode *yaffs_PruneWorker(yaffs_Device * dev, yaffs_Tnode * tn,
+- __u32 level, int del0)
++static yaffs_Tnode *yaffs_PruneWorker(yaffs_Device *dev, yaffs_Tnode *tn,
++ __u32 level, int del0)
+ {
+ int i;
+ int hasData;
+@@ -1763,9 +1778,8 @@ static yaffs_Tnode *yaffs_PruneWorker(ya
+ (i == 0) ? del0 : 1);
+ }
+
+- if (tn->internal[i]) {
++ if (tn->internal[i])
+ hasData++;
+- }
+ }
+
+ if (hasData == 0 && del0) {
+@@ -1781,8 +1795,8 @@ static yaffs_Tnode *yaffs_PruneWorker(ya
+
+ }
+
+-static int yaffs_PruneFileStructure(yaffs_Device * dev,
+- yaffs_FileStructure * fStruct)
++static int yaffs_PruneFileStructure(yaffs_Device *dev,
++ yaffs_FileStructure *fStruct)
+ {
+ int i;
+ int hasData;
+@@ -1805,9 +1819,8 @@ static int yaffs_PruneFileStructure(yaff
+
+ hasData = 0;
+ for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
+- if (tn->internal[i]) {
++ if (tn->internal[i])
+ hasData++;
+- }
+ }
+
+ if (!hasData) {
+@@ -1828,7 +1841,7 @@ static int yaffs_PruneFileStructure(yaff
+ /* yaffs_CreateFreeObjects creates a bunch more objects and
+ * adds them to the object free list.
+ */
+-static int yaffs_CreateFreeObjects(yaffs_Device * dev, int nObjects)
++static int yaffs_CreateFreeObjects(yaffs_Device *dev, int nObjects)
+ {
+ int i;
+ yaffs_Object *newObjects;
+@@ -1842,9 +1855,9 @@ static int yaffs_CreateFreeObjects(yaffs
+ list = YMALLOC(sizeof(yaffs_ObjectList));
+
+ if (!newObjects || !list) {
+- if(newObjects)
++ if (newObjects)
+ YFREE(newObjects);
+- if(list)
++ if (list)
+ YFREE(list);
+ T(YAFFS_TRACE_ALLOCATE,
+ (TSTR("yaffs: Could not allocate more objects" TENDSTR)));
+@@ -1854,7 +1867,7 @@ static int yaffs_CreateFreeObjects(yaffs
+ /* Hook them into the free list */
+ for (i = 0; i < nObjects - 1; i++) {
+ newObjects[i].siblings.next =
+- (struct list_head *)(&newObjects[i + 1]);
++ (struct ylist_head *)(&newObjects[i + 1]);
+ }
+
+ newObjects[nObjects - 1].siblings.next = (void *)dev->freeObjects;
+@@ -1873,85 +1886,109 @@ static int yaffs_CreateFreeObjects(yaffs
+
+
+ /* AllocateEmptyObject gets us a clean Object. Tries to allocate more if we run out */
+-static yaffs_Object *yaffs_AllocateEmptyObject(yaffs_Device * dev)
++static yaffs_Object *yaffs_AllocateEmptyObject(yaffs_Device *dev)
+ {
+ yaffs_Object *tn = NULL;
+
++#ifdef VALGRIND_TEST
++ tn = YMALLOC(sizeof(yaffs_Object));
++#else
+ /* If there are none left make more */
+- if (!dev->freeObjects) {
++ if (!dev->freeObjects)
+ yaffs_CreateFreeObjects(dev, YAFFS_ALLOCATION_NOBJECTS);
+- }
+
+ if (dev->freeObjects) {
+ tn = dev->freeObjects;
+ dev->freeObjects =
+- (yaffs_Object *) (dev->freeObjects->siblings.next);
++ (yaffs_Object *) (dev->freeObjects->siblings.next);
+ dev->nFreeObjects--;
+-
++ }
++#endif
++ if (tn) {
+ /* Now sweeten it up... */
+
+ memset(tn, 0, sizeof(yaffs_Object));
++ tn->beingCreated = 1;
++
+ tn->myDev = dev;
+- tn->chunkId = -1;
++ tn->hdrChunk = 0;
+ tn->variantType = YAFFS_OBJECT_TYPE_UNKNOWN;
+- INIT_LIST_HEAD(&(tn->hardLinks));
+- INIT_LIST_HEAD(&(tn->hashLink));
+- INIT_LIST_HEAD(&tn->siblings);
++ YINIT_LIST_HEAD(&(tn->hardLinks));
++ YINIT_LIST_HEAD(&(tn->hashLink));
++ YINIT_LIST_HEAD(&tn->siblings);
++
++
++ /* Now make the directory sane */
++ if (dev->rootDir) {
++ tn->parent = dev->rootDir;
++ ylist_add(&(tn->siblings), &dev->rootDir->variant.directoryVariant.children);
++ }
+
+ /* Add it to the lost and found directory.
+ * NB Can't put root or lostNFound in lostNFound so
+ * check if lostNFound exists first
+ */
+- if (dev->lostNFoundDir) {
++ if (dev->lostNFoundDir)
+ yaffs_AddObjectToDirectory(dev->lostNFoundDir, tn);
+- }
++
++ tn->beingCreated = 0;
+ }
+
++ dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
++
+ return tn;
+ }
+
+-static yaffs_Object *yaffs_CreateFakeDirectory(yaffs_Device * dev, int number,
++static yaffs_Object *yaffs_CreateFakeDirectory(yaffs_Device *dev, int number,
+ __u32 mode)
+ {
+
+ yaffs_Object *obj =
+ yaffs_CreateNewObject(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
+ if (obj) {
+- obj->fake = 1; /* it is fake so it has no NAND presence... */
++ obj->fake = 1; /* it is fake so it might have no NAND presence... */
+ obj->renameAllowed = 0; /* ... and we're not allowed to rename it... */
+ obj->unlinkAllowed = 0; /* ... or unlink it */
+ obj->deleted = 0;
+ obj->unlinked = 0;
+ obj->yst_mode = mode;
+ obj->myDev = dev;
+- obj->chunkId = 0; /* Not a valid chunk. */
++ obj->hdrChunk = 0; /* Not a valid chunk. */
+ }
+
+ return obj;
+
+ }
+
+-static void yaffs_UnhashObject(yaffs_Object * tn)
++static void yaffs_UnhashObject(yaffs_Object *tn)
+ {
+ int bucket;
+ yaffs_Device *dev = tn->myDev;
+
+ /* If it is still linked into the bucket list, free from the list */
+- if (!list_empty(&tn->hashLink)) {
+- list_del_init(&tn->hashLink);
++ if (!ylist_empty(&tn->hashLink)) {
++ ylist_del_init(&tn->hashLink);
+ bucket = yaffs_HashFunction(tn->objectId);
+ dev->objectBucket[bucket].count--;
+ }
+-
+ }
+
+ /* FreeObject frees up a Object and puts it back on the free list */
+-static void yaffs_FreeObject(yaffs_Object * tn)
++static void yaffs_FreeObject(yaffs_Object *tn)
+ {
+-
+ yaffs_Device *dev = tn->myDev;
+
+-#ifdef __KERNEL__
++#ifdef __KERNEL__
++ T(YAFFS_TRACE_OS, (TSTR("FreeObject %p inode %p"TENDSTR), tn, tn->myInode));
++#endif
++
++ if (tn->parent)
++ YBUG();
++ if (!ylist_empty(&tn->siblings))
++ YBUG();
++
++
++#ifdef __KERNEL__
+ if (tn->myInode) {
+ /* We're still hooked up to a cached inode.
+ * Don't delete now, but mark for later deletion
+@@ -1963,24 +2000,28 @@ static void yaffs_FreeObject(yaffs_Objec
+
+ yaffs_UnhashObject(tn);
+
++#ifdef VALGRIND_TEST
++ YFREE(tn);
++#else
+ /* Link into the free list. */
+- tn->siblings.next = (struct list_head *)(dev->freeObjects);
++ tn->siblings.next = (struct ylist_head *)(dev->freeObjects);
+ dev->freeObjects = tn;
+ dev->nFreeObjects++;
++#endif
++ dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
+ }
+
+ #ifdef __KERNEL__
+
+-void yaffs_HandleDeferedFree(yaffs_Object * obj)
++void yaffs_HandleDeferedFree(yaffs_Object *obj)
+ {
+- if (obj->deferedFree) {
++ if (obj->deferedFree)
+ yaffs_FreeObject(obj);
+- }
+ }
+
+ #endif
+
+-static void yaffs_DeinitialiseObjects(yaffs_Device * dev)
++static void yaffs_DeinitialiseObjects(yaffs_Device *dev)
+ {
+ /* Free the list of allocated Objects */
+
+@@ -1998,7 +2039,7 @@ static void yaffs_DeinitialiseObjects(ya
+ dev->nFreeObjects = 0;
+ }
+
+-static void yaffs_InitialiseObjects(yaffs_Device * dev)
++static void yaffs_InitialiseObjects(yaffs_Device *dev)
+ {
+ int i;
+
+@@ -2007,15 +2048,14 @@ static void yaffs_InitialiseObjects(yaff
+ dev->nFreeObjects = 0;
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+- INIT_LIST_HEAD(&dev->objectBucket[i].list);
++ YINIT_LIST_HEAD(&dev->objectBucket[i].list);
+ dev->objectBucket[i].count = 0;
+ }
+-
+ }
+
+-static int yaffs_FindNiceObjectBucket(yaffs_Device * dev)
++static int yaffs_FindNiceObjectBucket(yaffs_Device *dev)
+ {
+- static int x = 0;
++ static int x;
+ int i;
+ int l = 999;
+ int lowest = 999999;
+@@ -2049,7 +2089,7 @@ static int yaffs_FindNiceObjectBucket(ya
+ return l;
+ }
+
+-static int yaffs_CreateNewObjectNumber(yaffs_Device * dev)
++static int yaffs_CreateNewObjectNumber(yaffs_Device *dev)
+ {
+ int bucket = yaffs_FindNiceObjectBucket(dev);
+
+@@ -2058,7 +2098,7 @@ static int yaffs_CreateNewObjectNumber(y
+ */
+
+ int found = 0;
+- struct list_head *i;
++ struct ylist_head *i;
+
+ __u32 n = (__u32) bucket;
+
+@@ -2068,41 +2108,38 @@ static int yaffs_CreateNewObjectNumber(y
+ found = 1;
+ n += YAFFS_NOBJECT_BUCKETS;
+ if (1 || dev->objectBucket[bucket].count > 0) {
+- list_for_each(i, &dev->objectBucket[bucket].list) {
++ ylist_for_each(i, &dev->objectBucket[bucket].list) {
+ /* If there is already one in the list */
+- if (i
+- && list_entry(i, yaffs_Object,
+- hashLink)->objectId == n) {
++ if (i && ylist_entry(i, yaffs_Object,
++ hashLink)->objectId == n) {
+ found = 0;
+ }
+ }
+ }
+ }
+
+-
+ return n;
+ }
+
+-static void yaffs_HashObject(yaffs_Object * in)
++static void yaffs_HashObject(yaffs_Object *in)
+ {
+ int bucket = yaffs_HashFunction(in->objectId);
+ yaffs_Device *dev = in->myDev;
+
+- list_add(&in->hashLink, &dev->objectBucket[bucket].list);
++ ylist_add(&in->hashLink, &dev->objectBucket[bucket].list);
+ dev->objectBucket[bucket].count++;
+-
+ }
+
+-yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device * dev, __u32 number)
++yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device *dev, __u32 number)
+ {
+ int bucket = yaffs_HashFunction(number);
+- struct list_head *i;
++ struct ylist_head *i;
+ yaffs_Object *in;
+
+- list_for_each(i, &dev->objectBucket[bucket].list) {
++ ylist_for_each(i, &dev->objectBucket[bucket].list) {
+ /* Look if it is in the list */
+ if (i) {
+- in = list_entry(i, yaffs_Object, hashLink);
++ in = ylist_entry(i, yaffs_Object, hashLink);
+ if (in->objectId == number) {
+ #ifdef __KERNEL__
+ /* Don't tell the VFS about this one if it is defered free */
+@@ -2118,31 +2155,27 @@ yaffs_Object *yaffs_FindObjectByNumber(y
+ return NULL;
+ }
+
+-yaffs_Object *yaffs_CreateNewObject(yaffs_Device * dev, int number,
++yaffs_Object *yaffs_CreateNewObject(yaffs_Device *dev, int number,
+ yaffs_ObjectType type)
+ {
+-
+ yaffs_Object *theObject;
+- yaffs_Tnode *tn;
++ yaffs_Tnode *tn = NULL;
+
+- if (number < 0) {
++ if (number < 0)
+ number = yaffs_CreateNewObjectNumber(dev);
+- }
+
+ theObject = yaffs_AllocateEmptyObject(dev);
+- if(!theObject)
++ if (!theObject)
+ return NULL;
+
+- if(type == YAFFS_OBJECT_TYPE_FILE){
++ if (type == YAFFS_OBJECT_TYPE_FILE) {
+ tn = yaffs_GetTnode(dev);
+- if(!tn){
++ if (!tn) {
+ yaffs_FreeObject(theObject);
+ return NULL;
+ }
+ }
+
+-
+-
+ if (theObject) {
+ theObject->fake = 0;
+ theObject->renameAllowed = 1;
+@@ -2171,8 +2204,8 @@ yaffs_Object *yaffs_CreateNewObject(yaff
+ theObject->variant.fileVariant.top = tn;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+- INIT_LIST_HEAD(&theObject->variant.directoryVariant.
+- children);
++ YINIT_LIST_HEAD(&theObject->variant.directoryVariant.
++ children);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+@@ -2188,32 +2221,30 @@ yaffs_Object *yaffs_CreateNewObject(yaff
+ return theObject;
+ }
+
+-static yaffs_Object *yaffs_FindOrCreateObjectByNumber(yaffs_Device * dev,
++static yaffs_Object *yaffs_FindOrCreateObjectByNumber(yaffs_Device *dev,
+ int number,
+ yaffs_ObjectType type)
+ {
+ yaffs_Object *theObject = NULL;
+
+- if (number > 0) {
++ if (number > 0)
+ theObject = yaffs_FindObjectByNumber(dev, number);
+- }
+
+- if (!theObject) {
++ if (!theObject)
+ theObject = yaffs_CreateNewObject(dev, number, type);
+- }
+
+ return theObject;
+
+ }
+
+
+-static YCHAR *yaffs_CloneString(const YCHAR * str)
++static YCHAR *yaffs_CloneString(const YCHAR *str)
+ {
+ YCHAR *newStr = NULL;
+
+ if (str && *str) {
+ newStr = YMALLOC((yaffs_strlen(str) + 1) * sizeof(YCHAR));
+- if(newStr)
++ if (newStr)
+ yaffs_strcpy(newStr, str);
+ }
+
+@@ -2229,29 +2260,31 @@ static YCHAR *yaffs_CloneString(const YC
+ */
+
+ static yaffs_Object *yaffs_MknodObject(yaffs_ObjectType type,
+- yaffs_Object * parent,
+- const YCHAR * name,
++ yaffs_Object *parent,
++ const YCHAR *name,
+ __u32 mode,
+ __u32 uid,
+ __u32 gid,
+- yaffs_Object * equivalentObject,
+- const YCHAR * aliasString, __u32 rdev)
++ yaffs_Object *equivalentObject,
++ const YCHAR *aliasString, __u32 rdev)
+ {
+ yaffs_Object *in;
+- YCHAR *str;
++ YCHAR *str = NULL;
+
+ yaffs_Device *dev = parent->myDev;
+
+ /* Check if the entry exists. If it does then fail the call since we don't want a dup.*/
+- if (yaffs_FindObjectByName(parent, name)) {
++ if (yaffs_FindObjectByName(parent, name))
+ return NULL;
+- }
+
+ in = yaffs_CreateNewObject(dev, -1, type);
+
+- if(type == YAFFS_OBJECT_TYPE_SYMLINK){
++ if (!in)
++ return YAFFS_FAIL;
++
++ if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ str = yaffs_CloneString(aliasString);
+- if(!str){
++ if (!str) {
+ yaffs_FreeObject(in);
+ return NULL;
+ }
+@@ -2260,7 +2293,7 @@ static yaffs_Object *yaffs_MknodObject(y
+
+
+ if (in) {
+- in->chunkId = -1;
++ in->hdrChunk = 0;
+ in->valid = 1;
+ in->variantType = type;
+
+@@ -2293,10 +2326,10 @@ static yaffs_Object *yaffs_MknodObject(y
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.hardLinkVariant.equivalentObject =
+- equivalentObject;
++ equivalentObject;
+ in->variant.hardLinkVariant.equivalentObjectId =
+- equivalentObject->objectId;
+- list_add(&in->hardLinks, &equivalentObject->hardLinks);
++ equivalentObject->objectId;
++ ylist_add(&in->hardLinks, &equivalentObject->hardLinks);
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+@@ -2308,7 +2341,7 @@ static yaffs_Object *yaffs_MknodObject(y
+
+ if (yaffs_UpdateObjectHeader(in, name, 0, 0, 0) < 0) {
+ /* Could not create the object header, fail the creation */
+- yaffs_DestroyObject(in);
++ yaffs_DeleteObject(in);
+ in = NULL;
+ }
+
+@@ -2317,38 +2350,38 @@ static yaffs_Object *yaffs_MknodObject(y
+ return in;
+ }
+
+-yaffs_Object *yaffs_MknodFile(yaffs_Object * parent, const YCHAR * name,
+- __u32 mode, __u32 uid, __u32 gid)
++yaffs_Object *yaffs_MknodFile(yaffs_Object *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid)
+ {
+ return yaffs_MknodObject(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
+- uid, gid, NULL, NULL, 0);
++ uid, gid, NULL, NULL, 0);
+ }
+
+-yaffs_Object *yaffs_MknodDirectory(yaffs_Object * parent, const YCHAR * name,
+- __u32 mode, __u32 uid, __u32 gid)
++yaffs_Object *yaffs_MknodDirectory(yaffs_Object *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid)
+ {
+ return yaffs_MknodObject(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
+ mode, uid, gid, NULL, NULL, 0);
+ }
+
+-yaffs_Object *yaffs_MknodSpecial(yaffs_Object * parent, const YCHAR * name,
+- __u32 mode, __u32 uid, __u32 gid, __u32 rdev)
++yaffs_Object *yaffs_MknodSpecial(yaffs_Object *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid, __u32 rdev)
+ {
+ return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
+ uid, gid, NULL, NULL, rdev);
+ }
+
+-yaffs_Object *yaffs_MknodSymLink(yaffs_Object * parent, const YCHAR * name,
+- __u32 mode, __u32 uid, __u32 gid,
+- const YCHAR * alias)
++yaffs_Object *yaffs_MknodSymLink(yaffs_Object *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid,
++ const YCHAR *alias)
+ {
+ return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
+- uid, gid, NULL, alias, 0);
++ uid, gid, NULL, alias, 0);
+ }
+
+ /* yaffs_Link returns the object id of the equivalent object.*/
+-yaffs_Object *yaffs_Link(yaffs_Object * parent, const YCHAR * name,
+- yaffs_Object * equivalentObject)
++yaffs_Object *yaffs_Link(yaffs_Object *parent, const YCHAR *name,
++ yaffs_Object *equivalentObject)
+ {
+ /* Get the real object in case we were fed a hard link as an equivalent object */
+ equivalentObject = yaffs_GetEquivalentObject(equivalentObject);
+@@ -2363,33 +2396,31 @@ yaffs_Object *yaffs_Link(yaffs_Object *
+
+ }
+
+-static int yaffs_ChangeObjectName(yaffs_Object * obj, yaffs_Object * newDir,
+- const YCHAR * newName, int force, int shadows)
++static int yaffs_ChangeObjectName(yaffs_Object *obj, yaffs_Object *newDir,
++ const YCHAR *newName, int force, int shadows)
+ {
+ int unlinkOp;
+ int deleteOp;
+
+ yaffs_Object *existingTarget;
+
+- if (newDir == NULL) {
++ if (newDir == NULL)
+ newDir = obj->parent; /* use the old directory */
+- }
+
+ if (newDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("tragendy: yaffs_ChangeObjectName: newDir is not a directory"
++ ("tragedy: yaffs_ChangeObjectName: newDir is not a directory"
+ TENDSTR)));
+ YBUG();
+ }
+
+ /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
+- if (obj->myDev->isYaffs2) {
++ if (obj->myDev->isYaffs2)
+ unlinkOp = (newDir == obj->myDev->unlinkedDir);
+- } else {
++ else
+ unlinkOp = (newDir == obj->myDev->unlinkedDir
+ && obj->variantType == YAFFS_OBJECT_TYPE_FILE);
+- }
+
+ deleteOp = (newDir == obj->myDev->deletedDir);
+
+@@ -2415,40 +2446,40 @@ static int yaffs_ChangeObjectName(yaffs_
+ obj->unlinked = 1;
+
+ /* If it is a deletion then we mark it as a shrink for gc purposes. */
+- if (yaffs_UpdateObjectHeader(obj, newName, 0, deleteOp, shadows)>= 0)
++ if (yaffs_UpdateObjectHeader(obj, newName, 0, deleteOp, shadows) >= 0)
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+ }
+
+-int yaffs_RenameObject(yaffs_Object * oldDir, const YCHAR * oldName,
+- yaffs_Object * newDir, const YCHAR * newName)
++int yaffs_RenameObject(yaffs_Object *oldDir, const YCHAR *oldName,
++ yaffs_Object *newDir, const YCHAR *newName)
+ {
+- yaffs_Object *obj;
+- yaffs_Object *existingTarget;
++ yaffs_Object *obj = NULL;
++ yaffs_Object *existingTarget = NULL;
+ int force = 0;
+
++
++ if (!oldDir || oldDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY)
++ YBUG();
++ if (!newDir || newDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY)
++ YBUG();
++
+ #ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+ /* Special case for case insensitive systems (eg. WinCE).
+ * While look-up is case insensitive, the name isn't.
+ * Therefore we might want to change x.txt to X.txt
+ */
+- if (oldDir == newDir && yaffs_strcmp(oldName, newName) == 0) {
++ if (oldDir == newDir && yaffs_strcmp(oldName, newName) == 0)
+ force = 1;
+- }
+ #endif
+
++ else if (yaffs_strlen(newName) > YAFFS_MAX_NAME_LENGTH)
++ /* ENAMETOOLONG */
++ return YAFFS_FAIL;
++
+ obj = yaffs_FindObjectByName(oldDir, oldName);
+- /* Check new name to long. */
+- if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK &&
+- yaffs_strlen(newName) > YAFFS_MAX_ALIAS_LENGTH)
+- /* ENAMETOOLONG */
+- return YAFFS_FAIL;
+- else if (obj->variantType != YAFFS_OBJECT_TYPE_SYMLINK &&
+- yaffs_strlen(newName) > YAFFS_MAX_NAME_LENGTH)
+- /* ENAMETOOLONG */
+- return YAFFS_FAIL;
+
+ if (obj && obj->renameAllowed) {
+
+@@ -2456,8 +2487,8 @@ int yaffs_RenameObject(yaffs_Object * ol
+
+ existingTarget = yaffs_FindObjectByName(newDir, newName);
+ if (existingTarget &&
+- existingTarget->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
+- !list_empty(&existingTarget->variant.directoryVariant.children)) {
++ existingTarget->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
++ !ylist_empty(&existingTarget->variant.directoryVariant.children)) {
+ /* There is a target that is a non-empty directory, so we fail */
+ return YAFFS_FAIL; /* EEXIST or ENOTEMPTY */
+ } else if (existingTarget && existingTarget != obj) {
+@@ -2465,7 +2496,7 @@ int yaffs_RenameObject(yaffs_Object * ol
+ * but only if it isn't the same object
+ */
+ yaffs_ChangeObjectName(obj, newDir, newName, force,
+- existingTarget->objectId);
++ existingTarget->objectId);
+ yaffs_UnlinkObject(existingTarget);
+ }
+
+@@ -2476,7 +2507,7 @@ int yaffs_RenameObject(yaffs_Object * ol
+
+ /*------------------------- Block Management and Page Allocation ----------------*/
+
+-static int yaffs_InitialiseBlocks(yaffs_Device * dev)
++static int yaffs_InitialiseBlocks(yaffs_Device *dev)
+ {
+ int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
+
+@@ -2487,23 +2518,20 @@ static int yaffs_InitialiseBlocks(yaffs_
+
+ /* If the first allocation strategy fails, try the alternate one */
+ dev->blockInfo = YMALLOC(nBlocks * sizeof(yaffs_BlockInfo));
+- if(!dev->blockInfo){
++ if (!dev->blockInfo) {
+ dev->blockInfo = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockInfo));
+ dev->blockInfoAlt = 1;
+- }
+- else
++ } else
+ dev->blockInfoAlt = 0;
+
+- if(dev->blockInfo){
+-
++ if (dev->blockInfo) {
+ /* Set up dynamic blockinfo stuff. */
+ dev->chunkBitmapStride = (dev->nChunksPerBlock + 7) / 8; /* round up bytes */
+ dev->chunkBits = YMALLOC(dev->chunkBitmapStride * nBlocks);
+- if(!dev->chunkBits){
++ if (!dev->chunkBits) {
+ dev->chunkBits = YMALLOC_ALT(dev->chunkBitmapStride * nBlocks);
+ dev->chunkBitsAlt = 1;
+- }
+- else
++ } else
+ dev->chunkBitsAlt = 0;
+ }
+
+@@ -2514,30 +2542,29 @@ static int yaffs_InitialiseBlocks(yaffs_
+ }
+
+ return YAFFS_FAIL;
+-
+ }
+
+-static void yaffs_DeinitialiseBlocks(yaffs_Device * dev)
++static void yaffs_DeinitialiseBlocks(yaffs_Device *dev)
+ {
+- if(dev->blockInfoAlt && dev->blockInfo)
++ if (dev->blockInfoAlt && dev->blockInfo)
+ YFREE_ALT(dev->blockInfo);
+- else if(dev->blockInfo)
++ else if (dev->blockInfo)
+ YFREE(dev->blockInfo);
+
+ dev->blockInfoAlt = 0;
+
+ dev->blockInfo = NULL;
+
+- if(dev->chunkBitsAlt && dev->chunkBits)
++ if (dev->chunkBitsAlt && dev->chunkBits)
+ YFREE_ALT(dev->chunkBits);
+- else if(dev->chunkBits)
++ else if (dev->chunkBits)
+ YFREE(dev->chunkBits);
+ dev->chunkBitsAlt = 0;
+ dev->chunkBits = NULL;
+ }
+
+-static int yaffs_BlockNotDisqualifiedFromGC(yaffs_Device * dev,
+- yaffs_BlockInfo * bi)
++static int yaffs_BlockNotDisqualifiedFromGC(yaffs_Device *dev,
++ yaffs_BlockInfo *bi)
+ {
+ int i;
+ __u32 seq;
+@@ -2556,7 +2583,7 @@ static int yaffs_BlockNotDisqualifiedFro
+ seq = dev->sequenceNumber;
+
+ for (i = dev->internalStartBlock; i <= dev->internalEndBlock;
+- i++) {
++ i++) {
+ b = yaffs_GetBlockInfo(dev, i);
+ if (b->blockState == YAFFS_BLOCK_STATE_FULL &&
+ (b->pagesInUse - b->softDeletions) <
+@@ -2571,38 +2598,36 @@ static int yaffs_BlockNotDisqualifiedFro
+ * discarded pages.
+ */
+ return (bi->sequenceNumber <= dev->oldestDirtySequence);
+-
+ }
+
+ /* FindDirtiestBlock is used to select the dirtiest block (or close enough)
+ * for garbage collection.
+ */
+
+-static int yaffs_FindBlockForGarbageCollection(yaffs_Device * dev,
+- int aggressive)
++static int yaffs_FindBlockForGarbageCollection(yaffs_Device *dev,
++ int aggressive)
+ {
+-
+ int b = dev->currentDirtyChecker;
+
+ int i;
+ int iterations;
+ int dirtiest = -1;
+ int pagesInUse = 0;
+- int prioritised=0;
++ int prioritised = 0;
+ yaffs_BlockInfo *bi;
+ int pendingPrioritisedExist = 0;
+
+ /* First let's see if we need to grab a prioritised block */
+- if(dev->hasPendingPrioritisedGCs){
+- for(i = dev->internalStartBlock; i < dev->internalEndBlock && !prioritised; i++){
++ if (dev->hasPendingPrioritisedGCs) {
++ for (i = dev->internalStartBlock; i < dev->internalEndBlock && !prioritised; i++) {
+
+ bi = yaffs_GetBlockInfo(dev, i);
+- //yaffs_VerifyBlock(dev,bi,i);
++ /* yaffs_VerifyBlock(dev,bi,i); */
+
+- if(bi->gcPrioritise) {
++ if (bi->gcPrioritise) {
+ pendingPrioritisedExist = 1;
+- if(bi->blockState == YAFFS_BLOCK_STATE_FULL &&
+- yaffs_BlockNotDisqualifiedFromGC(dev, bi)){
++ if (bi->blockState == YAFFS_BLOCK_STATE_FULL &&
++ yaffs_BlockNotDisqualifiedFromGC(dev, bi)) {
+ pagesInUse = (bi->pagesInUse - bi->softDeletions);
+ dirtiest = i;
+ prioritised = 1;
+@@ -2611,7 +2636,7 @@ static int yaffs_FindBlockForGarbageColl
+ }
+ }
+
+- if(!pendingPrioritisedExist) /* None found, so we can clear this */
++ if (!pendingPrioritisedExist) /* None found, so we can clear this */
+ dev->hasPendingPrioritisedGCs = 0;
+ }
+
+@@ -2623,31 +2648,28 @@ static int yaffs_FindBlockForGarbageColl
+
+ dev->nonAggressiveSkip--;
+
+- if (!aggressive && (dev->nonAggressiveSkip > 0)) {
++ if (!aggressive && (dev->nonAggressiveSkip > 0))
+ return -1;
+- }
+
+- if(!prioritised)
++ if (!prioritised)
+ pagesInUse =
+- (aggressive) ? dev->nChunksPerBlock : YAFFS_PASSIVE_GC_CHUNKS + 1;
++ (aggressive) ? dev->nChunksPerBlock : YAFFS_PASSIVE_GC_CHUNKS + 1;
+
+- if (aggressive) {
++ if (aggressive)
+ iterations =
+ dev->internalEndBlock - dev->internalStartBlock + 1;
+- } else {
++ else {
+ iterations =
+ dev->internalEndBlock - dev->internalStartBlock + 1;
+ iterations = iterations / 16;
+- if (iterations > 200) {
++ if (iterations > 200)
+ iterations = 200;
+- }
+ }
+
+ for (i = 0; i <= iterations && pagesInUse > 0 && !prioritised; i++) {
+ b++;
+- if (b < dev->internalStartBlock || b > dev->internalEndBlock) {
++ if (b < dev->internalStartBlock || b > dev->internalEndBlock)
+ b = dev->internalStartBlock;
+- }
+
+ if (b < dev->internalStartBlock || b > dev->internalEndBlock) {
+ T(YAFFS_TRACE_ERROR,
+@@ -2657,17 +2679,9 @@ static int yaffs_FindBlockForGarbageColl
+
+ bi = yaffs_GetBlockInfo(dev, b);
+
+-#if 0
+- if (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT) {
+- dirtiest = b;
+- pagesInUse = 0;
+- }
+- else
+-#endif
+-
+ if (bi->blockState == YAFFS_BLOCK_STATE_FULL &&
+- (bi->pagesInUse - bi->softDeletions) < pagesInUse &&
+- yaffs_BlockNotDisqualifiedFromGC(dev, bi)) {
++ (bi->pagesInUse - bi->softDeletions) < pagesInUse &&
++ yaffs_BlockNotDisqualifiedFromGC(dev, bi)) {
+ dirtiest = b;
+ pagesInUse = (bi->pagesInUse - bi->softDeletions);
+ }
+@@ -2678,19 +2692,18 @@ static int yaffs_FindBlockForGarbageColl
+ if (dirtiest > 0) {
+ T(YAFFS_TRACE_GC,
+ (TSTR("GC Selected block %d with %d free, prioritised:%d" TENDSTR), dirtiest,
+- dev->nChunksPerBlock - pagesInUse,prioritised));
++ dev->nChunksPerBlock - pagesInUse, prioritised));
+ }
+
+ dev->oldestDirtySequence = 0;
+
+- if (dirtiest > 0) {
++ if (dirtiest > 0)
+ dev->nonAggressiveSkip = 4;
+- }
+
+ return dirtiest;
+ }
+
+-static void yaffs_BlockBecameDirty(yaffs_Device * dev, int blockNo)
++static void yaffs_BlockBecameDirty(yaffs_Device *dev, int blockNo)
+ {
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockNo);
+
+@@ -2752,7 +2765,7 @@ static void yaffs_BlockBecameDirty(yaffs
+ }
+ }
+
+-static int yaffs_FindBlockForAllocation(yaffs_Device * dev)
++static int yaffs_FindBlockForAllocation(yaffs_Device *dev)
+ {
+ int i;
+
+@@ -2763,7 +2776,7 @@ static int yaffs_FindBlockForAllocation(
+ * Can't get space to gc
+ */
+ T(YAFFS_TRACE_ERROR,
+- (TSTR("yaffs tragedy: no more eraased blocks" TENDSTR)));
++ (TSTR("yaffs tragedy: no more erased blocks" TENDSTR)));
+
+ return -1;
+ }
+@@ -2794,31 +2807,74 @@ static int yaffs_FindBlockForAllocation(
+
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("yaffs tragedy: no more eraased blocks, but there should have been %d"
++ ("yaffs tragedy: no more erased blocks, but there should have been %d"
+ TENDSTR), dev->nErasedBlocks));
+
+ return -1;
+ }
+
+
+-// Check if there's space to allocate...
+-// Thinks.... do we need top make this ths same as yaffs_GetFreeChunks()?
+-static int yaffs_CheckSpaceForAllocation(yaffs_Device * dev)
++
++static int yaffs_CalcCheckpointBlocksRequired(yaffs_Device *dev)
++{
++ if (!dev->nCheckpointBlocksRequired &&
++ dev->isYaffs2) {
++ /* Not a valid value so recalculate */
++ int nBytes = 0;
++ int nBlocks;
++ int devBlocks = (dev->endBlock - dev->startBlock + 1);
++ int tnodeSize;
++
++ tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++
++ if (tnodeSize < sizeof(yaffs_Tnode))
++ tnodeSize = sizeof(yaffs_Tnode);
++
++ nBytes += sizeof(yaffs_CheckpointValidity);
++ nBytes += sizeof(yaffs_CheckpointDevice);
++ nBytes += devBlocks * sizeof(yaffs_BlockInfo);
++ nBytes += devBlocks * dev->chunkBitmapStride;
++ nBytes += (sizeof(yaffs_CheckpointObject) + sizeof(__u32)) * (dev->nObjectsCreated - dev->nFreeObjects);
++ nBytes += (tnodeSize + sizeof(__u32)) * (dev->nTnodesCreated - dev->nFreeTnodes);
++ nBytes += sizeof(yaffs_CheckpointValidity);
++ nBytes += sizeof(__u32); /* checksum*/
++
++ /* Round up and add 2 blocks to allow for some bad blocks, so add 3 */
++
++ nBlocks = (nBytes/(dev->nDataBytesPerChunk * dev->nChunksPerBlock)) + 3;
++
++ dev->nCheckpointBlocksRequired = nBlocks;
++ }
++
++ return dev->nCheckpointBlocksRequired;
++}
++
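To get a feel for the new yaffs_CalcCheckpointBlocksRequired() above, here is a rough worked example. Every number in it is hypothetical: the device geometry, live object/tnode counts, and the yaffs_Checkpoint* structure sizes are placeholders chosen for illustration, not values from the real headers.

#include <stdio.h>

/* Hypothetical 2k-page NAND: 2048 data bytes per chunk, 64 chunks per block,
 * 1024 blocks, 16-byte-wide level-0 tnodes, ~500 live objects, ~2000 live
 * tnodes. Struct sizes below are stand-ins, not the real yaffs_* sizes. */
int main(void)
{
	int devBlocks = 1024, chunksPerBlock = 64, dataBytesPerChunk = 2048;
	int tnodeSize = 16, chunkBitmapStride = (chunksPerBlock + 7) / 8;
	int liveObjects = 500, liveTnodes = 2000;
	int ckptValidity = 8, ckptDevice = 64, blockInfo = 12, ckptObject = 36;

	int nBytes = 0;
	nBytes += ckptValidity;                      /* opening validity marker */
	nBytes += ckptDevice;                        /* device snapshot */
	nBytes += devBlocks * blockInfo;             /* per-block state */
	nBytes += devBlocks * chunkBitmapStride;     /* chunk-in-use bitmaps */
	nBytes += (ckptObject + 4) * liveObjects;    /* objects plus id word */
	nBytes += (tnodeSize + 4) * liveTnodes;      /* tnodes plus id word */
	nBytes += ckptValidity + 4;                  /* closing marker + checksum */

	/* Round down to whole blocks, then add 3 blocks of slack for bad
	 * blocks and rounding, mirroring the patch's "+ 3". */
	int nBlocks = nBytes / (dataBytesPerChunk * chunksPerBlock) + 3;

	printf("checkpoint needs ~%d bytes -> reserve %d blocks\n",
	       nBytes, nBlocks);
	return 0;
}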
++/*
++ * Check if there's space to allocate...
++ * Thinks.... do we need to make this the same as yaffs_GetFreeChunks()?
++ */
++static int yaffs_CheckSpaceForAllocation(yaffs_Device *dev)
+ {
+ int reservedChunks;
+ int reservedBlocks = dev->nReservedBlocks;
+ int checkpointBlocks;
+
+- checkpointBlocks = dev->nCheckpointReservedBlocks - dev->blocksInCheckpoint;
+- if(checkpointBlocks < 0)
++ if (dev->isYaffs2) {
++ checkpointBlocks = yaffs_CalcCheckpointBlocksRequired(dev) -
++ dev->blocksInCheckpoint;
++ if (checkpointBlocks < 0)
++ checkpointBlocks = 0;
++ } else {
+ checkpointBlocks = 0;
++ }
+
+ reservedChunks = ((reservedBlocks + checkpointBlocks) * dev->nChunksPerBlock);
+
+ return (dev->nFreeChunks > reservedChunks);
+ }
+
+-static int yaffs_AllocateChunk(yaffs_Device * dev, int useReserve, yaffs_BlockInfo **blockUsedPtr)
++static int yaffs_AllocateChunk(yaffs_Device *dev, int useReserve,
++ yaffs_BlockInfo **blockUsedPtr)
+ {
+ int retVal;
+ yaffs_BlockInfo *bi;
+@@ -2835,7 +2891,7 @@ static int yaffs_AllocateChunk(yaffs_Dev
+ }
+
+ if (dev->nErasedBlocks < dev->nReservedBlocks
+- && dev->allocationPage == 0) {
++ && dev->allocationPage == 0) {
+ T(YAFFS_TRACE_ALLOCATE, (TSTR("Allocating reserve" TENDSTR)));
+ }
+
+@@ -2844,10 +2900,10 @@ static int yaffs_AllocateChunk(yaffs_Dev
+ bi = yaffs_GetBlockInfo(dev, dev->allocationBlock);
+
+ retVal = (dev->allocationBlock * dev->nChunksPerBlock) +
+- dev->allocationPage;
++ dev->allocationPage;
+ bi->pagesInUse++;
+ yaffs_SetChunkBit(dev, dev->allocationBlock,
+- dev->allocationPage);
++ dev->allocationPage);
+
+ dev->allocationPage++;
+
+@@ -2859,43 +2915,43 @@ static int yaffs_AllocateChunk(yaffs_Dev
+ dev->allocationBlock = -1;
+ }
+
+- if(blockUsedPtr)
++ if (blockUsedPtr)
+ *blockUsedPtr = bi;
+
+ return retVal;
+ }
+
+ T(YAFFS_TRACE_ERROR,
+- (TSTR("!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" TENDSTR)));
++ (TSTR("!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" TENDSTR)));
+
+ return -1;
+ }
+
+-static int yaffs_GetErasedChunks(yaffs_Device * dev)
++static int yaffs_GetErasedChunks(yaffs_Device *dev)
+ {
+ int n;
+
+ n = dev->nErasedBlocks * dev->nChunksPerBlock;
+
+- if (dev->allocationBlock > 0) {
++ if (dev->allocationBlock > 0)
+ n += (dev->nChunksPerBlock - dev->allocationPage);
+- }
+
+ return n;
+
+ }
+
+-static int yaffs_GarbageCollectBlock(yaffs_Device * dev, int block)
++static int yaffs_GarbageCollectBlock(yaffs_Device *dev, int block,
++ int wholeBlock)
+ {
+ int oldChunk;
+ int newChunk;
+- int chunkInBlock;
+ int markNAND;
+ int retVal = YAFFS_OK;
+ int cleanups = 0;
+ int i;
+ int isCheckpointBlock;
+ int matchingChunk;
++ int maxCopies;
+
+ int chunksBefore = yaffs_GetErasedChunks(dev);
+ int chunksAfter;
+@@ -2911,8 +2967,11 @@ static int yaffs_GarbageCollectBlock(yaf
+ bi->blockState = YAFFS_BLOCK_STATE_COLLECTING;
+
+ T(YAFFS_TRACE_TRACING,
+- (TSTR("Collecting block %d, in use %d, shrink %d, " TENDSTR), block,
+- bi->pagesInUse, bi->hasShrinkHeader));
++ (TSTR("Collecting block %d, in use %d, shrink %d, wholeBlock %d" TENDSTR),
++ block,
++ bi->pagesInUse,
++ bi->hasShrinkHeader,
++ wholeBlock));
+
+ /*yaffs_VerifyFreeChunks(dev); */
+
+@@ -2926,26 +2985,33 @@ static int yaffs_GarbageCollectBlock(yaf
+ dev->isDoingGC = 1;
+
+ if (isCheckpointBlock ||
+- !yaffs_StillSomeChunkBits(dev, block)) {
++ !yaffs_StillSomeChunkBits(dev, block)) {
+ T(YAFFS_TRACE_TRACING,
+- (TSTR
+- ("Collecting block %d that has no chunks in use" TENDSTR),
+- block));
++ (TSTR
++ ("Collecting block %d that has no chunks in use" TENDSTR),
++ block));
+ yaffs_BlockBecameDirty(dev, block);
+ } else {
+
+ __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
+
+- yaffs_VerifyBlock(dev,bi,block);
++ yaffs_VerifyBlock(dev, bi, block);
+
+- for (chunkInBlock = 0, oldChunk = block * dev->nChunksPerBlock;
+- chunkInBlock < dev->nChunksPerBlock
+- && yaffs_StillSomeChunkBits(dev, block);
+- chunkInBlock++, oldChunk++) {
+- if (yaffs_CheckChunkBit(dev, block, chunkInBlock)) {
++ maxCopies = (wholeBlock) ? dev->nChunksPerBlock : 10;
++ oldChunk = block * dev->nChunksPerBlock + dev->gcChunk;
++
++ for (/* init already done */;
++ retVal == YAFFS_OK &&
++ dev->gcChunk < dev->nChunksPerBlock &&
++ (bi->blockState == YAFFS_BLOCK_STATE_COLLECTING) &&
++ maxCopies > 0;
++ dev->gcChunk++, oldChunk++) {
++ if (yaffs_CheckChunkBit(dev, block, dev->gcChunk)) {
+
+ /* This page is in use and might need to be copied off */
+
++ maxCopies--;
++
+ markNAND = 1;
+
+ yaffs_InitialiseTags(&tags);
+@@ -2959,22 +3025,22 @@ static int yaffs_GarbageCollectBlock(yaf
+
+ T(YAFFS_TRACE_GC_DETAIL,
+ (TSTR
+- ("Collecting page %d, %d %d %d " TENDSTR),
+- chunkInBlock, tags.objectId, tags.chunkId,
++ ("Collecting chunk in block %d, %d %d %d " TENDSTR),
++ dev->gcChunk, tags.objectId, tags.chunkId,
+ tags.byteCount));
+
+- if(object && !yaffs_SkipVerification(dev)){
+- if(tags.chunkId == 0)
+- matchingChunk = object->chunkId;
+- else if(object->softDeleted)
++ if (object && !yaffs_SkipVerification(dev)) {
++ if (tags.chunkId == 0)
++ matchingChunk = object->hdrChunk;
++ else if (object->softDeleted)
+ matchingChunk = oldChunk; /* Defeat the test */
+ else
+- matchingChunk = yaffs_FindChunkInFile(object,tags.chunkId,NULL);
++ matchingChunk = yaffs_FindChunkInFile(object, tags.chunkId, NULL);
+
+- if(oldChunk != matchingChunk)
++ if (oldChunk != matchingChunk)
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("gc: page in gc mismatch: %d %d %d %d"TENDSTR),
+- oldChunk,matchingChunk,tags.objectId, tags.chunkId));
++ oldChunk, matchingChunk, tags.objectId, tags.chunkId));
+
+ }
+
+@@ -2986,9 +3052,11 @@ static int yaffs_GarbageCollectBlock(yaf
+ tags.objectId, tags.chunkId, tags.byteCount));
+ }
+
+- if (object && object->deleted
+- && tags.chunkId != 0) {
+- /* Data chunk in a deleted file, throw it away
++ if (object &&
++ object->deleted &&
++ object->softDeleted &&
++ tags.chunkId != 0) {
++ /* Data chunk in a soft deleted file, throw it away
+ * It's a soft deleted data chunk,
+ * No need to copy this, just forget about it and
+ * fix up the object.
+@@ -3003,13 +3071,12 @@ static int yaffs_GarbageCollectBlock(yaf
+ cleanups++;
+ }
+ markNAND = 0;
+- } else if (0
+- /* Todo object && object->deleted && object->nDataChunks == 0 */
+- ) {
++ } else if (0) {
++ /* Todo object && object->deleted && object->nDataChunks == 0 */
+ /* Deleted object header with no data chunks.
+ * Can be discarded and the file deleted.
+ */
+- object->chunkId = 0;
++ object->hdrChunk = 0;
+ yaffs_FreeTnode(object->myDev,
+ object->variant.
+ fileVariant.top);
+@@ -3031,17 +3098,14 @@ static int yaffs_GarbageCollectBlock(yaf
+ * We need to nuke the shrinkheader flags first
+ * We no longer want the shrinkHeader flag since its work is done
+ * and if it is left in place it will mess up scanning.
+- * Also, clear out any shadowing stuff
+ */
+
+ yaffs_ObjectHeader *oh;
+ oh = (yaffs_ObjectHeader *)buffer;
+ oh->isShrink = 0;
+- oh->shadowsObject = -1;
+- tags.extraShadows = 0;
+ tags.extraIsShrinkHeader = 0;
+
+- yaffs_VerifyObjectHeader(object,oh,&tags,1);
++ yaffs_VerifyObjectHeader(object, oh, &tags, 1);
+ }
+
+ newChunk =
+@@ -3055,7 +3119,7 @@ static int yaffs_GarbageCollectBlock(yaf
+
+ if (tags.chunkId == 0) {
+ /* It's a header */
+- object->chunkId = newChunk;
++ object->hdrChunk = newChunk;
+ object->serial = tags.serialNumber;
+ } else {
+ /* It's a data chunk */
+@@ -3067,7 +3131,8 @@ static int yaffs_GarbageCollectBlock(yaf
+ }
+ }
+
+- yaffs_DeleteChunk(dev, oldChunk, markNAND, __LINE__);
++ if (retVal == YAFFS_OK)
++ yaffs_DeleteChunk(dev, oldChunk, markNAND, __LINE__);
+
+ }
+ }
+@@ -3098,18 +3163,25 @@ static int yaffs_GarbageCollectBlock(yaf
+
+ }
+
+- yaffs_VerifyCollectedBlock(dev,bi,block);
++ yaffs_VerifyCollectedBlock(dev, bi, block);
+
+- if (chunksBefore >= (chunksAfter = yaffs_GetErasedChunks(dev))) {
++ chunksAfter = yaffs_GetErasedChunks(dev);
++ if (chunksBefore >= chunksAfter) {
+ T(YAFFS_TRACE_GC,
+ (TSTR
+ ("gc did not increase free chunks before %d after %d"
+ TENDSTR), chunksBefore, chunksAfter));
+ }
+
++ /* If the gc completed then clear the current gcBlock so that we find another. */
++ if (bi->blockState != YAFFS_BLOCK_STATE_COLLECTING) {
++ dev->gcBlock = -1;
++ dev->gcChunk = 0;
++ }
++
+ dev->isDoingGC = 0;
+
+- return YAFFS_OK;
++ return retVal;
+ }
+
+ /* New garbage collector
+@@ -3121,7 +3193,7 @@ static int yaffs_GarbageCollectBlock(yaf
+ * The idea is to help clear out space in a more spread-out manner.
+ * Dunno if it really does anything useful.
+ */
+-static int yaffs_CheckGarbageCollection(yaffs_Device * dev)
++static int yaffs_CheckGarbageCollection(yaffs_Device *dev)
+ {
+ int block;
+ int aggressive;
+@@ -3142,8 +3214,8 @@ static int yaffs_CheckGarbageCollection(
+ do {
+ maxTries++;
+
+- checkpointBlockAdjust = (dev->nCheckpointReservedBlocks - dev->blocksInCheckpoint);
+- if(checkpointBlockAdjust < 0)
++ checkpointBlockAdjust = yaffs_CalcCheckpointBlocksRequired(dev) - dev->blocksInCheckpoint;
++ if (checkpointBlockAdjust < 0)
+ checkpointBlockAdjust = 0;
+
+ if (dev->nErasedBlocks < (dev->nReservedBlocks + checkpointBlockAdjust + 2)) {
+@@ -3154,20 +3226,24 @@ static int yaffs_CheckGarbageCollection(
+ aggressive = 0;
+ }
+
+- block = yaffs_FindBlockForGarbageCollection(dev, aggressive);
++ if (dev->gcBlock <= 0) {
++ dev->gcBlock = yaffs_FindBlockForGarbageCollection(dev, aggressive);
++ dev->gcChunk = 0;
++ }
++
++ block = dev->gcBlock;
+
+ if (block > 0) {
+ dev->garbageCollections++;
+- if (!aggressive) {
++ if (!aggressive)
+ dev->passiveGarbageCollections++;
+- }
+
+ T(YAFFS_TRACE_GC,
+ (TSTR
+ ("yaffs: GC erasedBlocks %d aggressive %d" TENDSTR),
+ dev->nErasedBlocks, aggressive));
+
+- gcOk = yaffs_GarbageCollectBlock(dev, block);
++ gcOk = yaffs_GarbageCollectBlock(dev, block, aggressive);
+ }
+
+ if (dev->nErasedBlocks < (dev->nReservedBlocks) && block > 0) {
+@@ -3176,15 +3252,16 @@ static int yaffs_CheckGarbageCollection(
+ ("yaffs: GC !!!no reclaim!!! erasedBlocks %d after try %d block %d"
+ TENDSTR), dev->nErasedBlocks, maxTries, block));
+ }
+- } while ((dev->nErasedBlocks < dev->nReservedBlocks) && (block > 0)
+- && (maxTries < 2));
++ } while ((dev->nErasedBlocks < dev->nReservedBlocks) &&
++ (block > 0) &&
++ (maxTries < 2));
+
+ return aggressive ? gcOk : YAFFS_OK;
+ }
+
+ /*------------------------- TAGS --------------------------------*/
+
+-static int yaffs_TagsMatch(const yaffs_ExtendedTags * tags, int objectId,
++static int yaffs_TagsMatch(const yaffs_ExtendedTags *tags, int objectId,
+ int chunkInObject)
+ {
+ return (tags->chunkId == chunkInObject &&
+@@ -3195,8 +3272,8 @@ static int yaffs_TagsMatch(const yaffs_E
+
+ /*-------------------- Data file manipulation -----------------*/
+
+-static int yaffs_FindChunkInFile(yaffs_Object * in, int chunkInInode,
+- yaffs_ExtendedTags * tags)
++static int yaffs_FindChunkInFile(yaffs_Object *in, int chunkInInode,
++ yaffs_ExtendedTags *tags)
+ {
+ /*Get the Tnode, then get the level 0 offset chunk offset */
+ yaffs_Tnode *tn;
+@@ -3214,7 +3291,7 @@ static int yaffs_FindChunkInFile(yaffs_O
+ tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode);
+
+ if (tn) {
+- theChunk = yaffs_GetChunkGroupBase(dev,tn,chunkInInode);
++ theChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
+
+ retVal =
+ yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId,
+@@ -3223,8 +3300,8 @@ static int yaffs_FindChunkInFile(yaffs_O
+ return retVal;
+ }
+
+-static int yaffs_FindAndDeleteChunkInFile(yaffs_Object * in, int chunkInInode,
+- yaffs_ExtendedTags * tags)
++static int yaffs_FindAndDeleteChunkInFile(yaffs_Object *in, int chunkInInode,
++ yaffs_ExtendedTags *tags)
+ {
+ /* Get the Tnode, then get the level 0 offset chunk offset */
+ yaffs_Tnode *tn;
+@@ -3243,29 +3320,23 @@ static int yaffs_FindAndDeleteChunkInFil
+
+ if (tn) {
+
+- theChunk = yaffs_GetChunkGroupBase(dev,tn,chunkInInode);
++ theChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
+
+ retVal =
+ yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId,
+ chunkInInode);
+
+ /* Delete the entry in the filestructure (if found) */
+- if (retVal != -1) {
+- yaffs_PutLevel0Tnode(dev,tn,chunkInInode,0);
+- }
+- } else {
+- /*T(("No level 0 found for %d\n", chunkInInode)); */
++ if (retVal != -1)
++ yaffs_PutLevel0Tnode(dev, tn, chunkInInode, 0);
+ }
+
+- if (retVal == -1) {
+- /* T(("Could not find %d to delete\n",chunkInInode)); */
+- }
+ return retVal;
+ }
+
+ #ifdef YAFFS_PARANOID
+
+-static int yaffs_CheckFileSanity(yaffs_Object * in)
++static int yaffs_CheckFileSanity(yaffs_Object *in)
+ {
+ int chunk;
+ int nChunks;
+@@ -3278,10 +3349,8 @@ static int yaffs_CheckFileSanity(yaffs_O
+ int theChunk;
+ int chunkDeleted;
+
+- if (in->variantType != YAFFS_OBJECT_TYPE_FILE) {
+- /* T(("Object not a file\n")); */
++ if (in->variantType != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+- }
+
+ objId = in->objectId;
+ fSize = in->variant.fileVariant.fileSize;
+@@ -3294,7 +3363,7 @@ static int yaffs_CheckFileSanity(yaffs_O
+
+ if (tn) {
+
+- theChunk = yaffs_GetChunkGroupBase(dev,tn,chunk);
++ theChunk = yaffs_GetChunkGroupBase(dev, tn, chunk);
+
+ if (yaffs_CheckChunkBits
+ (dev, theChunk / dev->nChunksPerBlock,
+@@ -3323,7 +3392,7 @@ static int yaffs_CheckFileSanity(yaffs_O
+
+ #endif
+
+-static int yaffs_PutChunkIntoFile(yaffs_Object * in, int chunkInInode,
++static int yaffs_PutChunkIntoFile(yaffs_Object *in, int chunkInInode,
+ int chunkInNAND, int inScan)
+ {
+ /* NB inScan is zero unless scanning.
+@@ -3358,11 +3427,10 @@ static int yaffs_PutChunkIntoFile(yaffs_
+ &in->variant.fileVariant,
+ chunkInInode,
+ NULL);
+- if (!tn) {
++ if (!tn)
+ return YAFFS_FAIL;
+- }
+
+- existingChunk = yaffs_GetChunkGroupBase(dev,tn,chunkInInode);
++ existingChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
+
+ if (inScan != 0) {
+ /* If we're scanning then we need to test for duplicates
+@@ -3374,7 +3442,7 @@ static int yaffs_PutChunkIntoFile(yaffs_
+ * Update: For backward scanning we don't need to re-read tags so this is quite cheap.
+ */
+
+- if (existingChunk != 0) {
++ if (existingChunk > 0) {
+ /* NB Right now existing chunk will not be real chunkId if the device >= 32MB
+ * thus we have to do a FindChunkInFile to get the real chunk id.
+ *
+@@ -3411,8 +3479,10 @@ static int yaffs_PutChunkIntoFile(yaffs_
+ * not be loaded during a scan
+ */
+
+- newSerial = newTags.serialNumber;
+- existingSerial = existingTags.serialNumber;
++ if (inScan > 0) {
++ newSerial = newTags.serialNumber;
++ existingSerial = existingTags.serialNumber;
++ }
+
+ if ((inScan > 0) &&
+ (in->myDev->isYaffs2 ||
+@@ -3437,24 +3507,23 @@ static int yaffs_PutChunkIntoFile(yaffs_
+
+ }
+
+- if (existingChunk == 0) {
++ if (existingChunk == 0)
+ in->nDataChunks++;
+- }
+
+- yaffs_PutLevel0Tnode(dev,tn,chunkInInode,chunkInNAND);
++ yaffs_PutLevel0Tnode(dev, tn, chunkInInode, chunkInNAND);
+
+ return YAFFS_OK;
+ }
+
+-static int yaffs_ReadChunkDataFromObject(yaffs_Object * in, int chunkInInode,
+- __u8 * buffer)
++static int yaffs_ReadChunkDataFromObject(yaffs_Object *in, int chunkInInode,
++ __u8 *buffer)
+ {
+ int chunkInNAND = yaffs_FindChunkInFile(in, chunkInInode, NULL);
+
+- if (chunkInNAND >= 0) {
++ if (chunkInNAND >= 0)
+ return yaffs_ReadChunkWithTagsFromNAND(in->myDev, chunkInNAND,
+- buffer,NULL);
+- } else {
++ buffer, NULL);
++ else {
+ T(YAFFS_TRACE_NANDACCESS,
+ (TSTR("Chunk %d not found zero instead" TENDSTR),
+ chunkInNAND));
+@@ -3465,7 +3534,7 @@ static int yaffs_ReadChunkDataFromObject
+
+ }
+
+-void yaffs_DeleteChunk(yaffs_Device * dev, int chunkId, int markNAND, int lyn)
++void yaffs_DeleteChunk(yaffs_Device *dev, int chunkId, int markNAND, int lyn)
+ {
+ int block;
+ int page;
+@@ -3475,16 +3544,15 @@ void yaffs_DeleteChunk(yaffs_Device * de
+ if (chunkId <= 0)
+ return;
+
+-
+ dev->nDeletions++;
+ block = chunkId / dev->nChunksPerBlock;
+ page = chunkId % dev->nChunksPerBlock;
+
+
+- if(!yaffs_CheckChunkBit(dev,block,page))
++ if (!yaffs_CheckChunkBit(dev, block, page))
+ T(YAFFS_TRACE_VERIFY,
+- (TSTR("Deleting invalid chunk %d"TENDSTR),
+- chunkId));
++ (TSTR("Deleting invalid chunk %d"TENDSTR),
++ chunkId));
+
+ bi = yaffs_GetBlockInfo(dev, block);
+
+@@ -3524,14 +3592,12 @@ void yaffs_DeleteChunk(yaffs_Device * de
+ yaffs_BlockBecameDirty(dev, block);
+ }
+
+- } else {
+- /* T(("Bad news deleting chunk %d\n",chunkId)); */
+ }
+
+ }
+
+-static int yaffs_WriteChunkDataToObject(yaffs_Object * in, int chunkInInode,
+- const __u8 * buffer, int nBytes,
++static int yaffs_WriteChunkDataToObject(yaffs_Object *in, int chunkInInode,
++ const __u8 *buffer, int nBytes,
+ int useReserve)
+ {
+ /* Find old chunk Need to do this to get serial number
+@@ -3561,6 +3627,12 @@ static int yaffs_WriteChunkDataToObject(
+ (prevChunkId >= 0) ? prevTags.serialNumber + 1 : 1;
+ newTags.byteCount = nBytes;
+
++ if (nBytes < 1 || nBytes > dev->totalBytesPerChunk) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("Writing %d bytes to chunk!!!!!!!!!" TENDSTR), nBytes));
++ YBUG();
++ }
++
+ newChunkId =
+ yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &newTags,
+ useReserve);
+@@ -3568,11 +3640,9 @@ static int yaffs_WriteChunkDataToObject(
+ if (newChunkId >= 0) {
+ yaffs_PutChunkIntoFile(in, chunkInInode, newChunkId, 0);
+
+- if (prevChunkId >= 0) {
++ if (prevChunkId >= 0)
+ yaffs_DeleteChunk(dev, prevChunkId, 1, __LINE__);
+
+- }
+-
+ yaffs_CheckFileSanity(in);
+ }
+ return newChunkId;
+@@ -3582,7 +3652,7 @@ static int yaffs_WriteChunkDataToObject(
+ /* UpdateObjectHeader updates the header on NAND for an object.
+ * If name is not NULL, then that new name is used.
+ */
+-int yaffs_UpdateObjectHeader(yaffs_Object * in, const YCHAR * name, int force,
++int yaffs_UpdateObjectHeader(yaffs_Object *in, const YCHAR *name, int force,
+ int isShrink, int shadows)
+ {
+
+@@ -3603,9 +3673,12 @@ int yaffs_UpdateObjectHeader(yaffs_Objec
+
+ yaffs_ObjectHeader *oh = NULL;
+
+- yaffs_strcpy(oldName,"silly old name");
++ yaffs_strcpy(oldName, _Y("silly old name"));
+
+- if (!in->fake || force) {
++
++ if (!in->fake ||
++ in == dev->rootDir || /* The rootDir should also be saved */
++ force) {
+
+ yaffs_CheckGarbageCollection(dev);
+ yaffs_CheckObjectDetailsLoaded(in);
+@@ -3613,13 +3686,13 @@ int yaffs_UpdateObjectHeader(yaffs_Objec
+ buffer = yaffs_GetTempBuffer(in->myDev, __LINE__);
+ oh = (yaffs_ObjectHeader *) buffer;
+
+- prevChunkId = in->chunkId;
++ prevChunkId = in->hdrChunk;
+
+- if (prevChunkId >= 0) {
++ if (prevChunkId > 0) {
+ result = yaffs_ReadChunkWithTagsFromNAND(dev, prevChunkId,
+ buffer, &oldTags);
+
+- yaffs_VerifyObjectHeader(in,oh,&oldTags,0);
++ yaffs_VerifyObjectHeader(in, oh, &oldTags, 0);
+
+ memcpy(oldName, oh->name, sizeof(oh->name));
+ }
+@@ -3628,7 +3701,7 @@ int yaffs_UpdateObjectHeader(yaffs_Objec
+
+ oh->type = in->variantType;
+ oh->yst_mode = in->yst_mode;
+- oh->shadowsObject = shadows;
++ oh->shadowsObject = oh->inbandShadowsObject = shadows;
+
+ #ifdef CONFIG_YAFFS_WINCE
+ oh->win_atime[0] = in->win_atime[0];
+@@ -3645,20 +3718,18 @@ int yaffs_UpdateObjectHeader(yaffs_Objec
+ oh->yst_ctime = in->yst_ctime;
+ oh->yst_rdev = in->yst_rdev;
+ #endif
+- if (in->parent) {
++ if (in->parent)
+ oh->parentObjectId = in->parent->objectId;
+- } else {
++ else
+ oh->parentObjectId = 0;
+- }
+
+ if (name && *name) {
+ memset(oh->name, 0, sizeof(oh->name));
+ yaffs_strncpy(oh->name, name, YAFFS_MAX_NAME_LENGTH);
+- } else if (prevChunkId>=0) {
++ } else if (prevChunkId >= 0)
+ memcpy(oh->name, oldName, sizeof(oh->name));
+- } else {
++ else
+ memset(oh->name, 0, sizeof(oh->name));
+- }
+
+ oh->isShrink = isShrink;
+
+@@ -3708,7 +3779,7 @@ int yaffs_UpdateObjectHeader(yaffs_Objec
+ newTags.extraShadows = (oh->shadowsObject > 0) ? 1 : 0;
+ newTags.extraObjectType = in->variantType;
+
+- yaffs_VerifyObjectHeader(in,oh,&newTags,1);
++ yaffs_VerifyObjectHeader(in, oh, &newTags, 1);
+
+ /* Create new chunk in NAND */
+ newChunkId =
+@@ -3717,20 +3788,20 @@ int yaffs_UpdateObjectHeader(yaffs_Objec
+
+ if (newChunkId >= 0) {
+
+- in->chunkId = newChunkId;
++ in->hdrChunk = newChunkId;
+
+ if (prevChunkId >= 0) {
+ yaffs_DeleteChunk(dev, prevChunkId, 1,
+ __LINE__);
+ }
+
+- if(!yaffs_ObjectHasCachedWriteData(in))
++ if (!yaffs_ObjectHasCachedWriteData(in))
+ in->dirty = 0;
+
+ /* If this was a shrink, then mark the block that the chunk lives on */
+ if (isShrink) {
+ bi = yaffs_GetBlockInfo(in->myDev,
+- newChunkId /in->myDev-> nChunksPerBlock);
++ newChunkId / in->myDev->nChunksPerBlock);
+ bi->hasShrinkHeader = 1;
+ }
+
+@@ -3766,7 +3837,7 @@ static int yaffs_ObjectHasCachedWriteDat
+ yaffs_ChunkCache *cache;
+ int nCaches = obj->myDev->nShortOpCaches;
+
+- for(i = 0; i < nCaches; i++){
++ for (i = 0; i < nCaches; i++) {
+ cache = &dev->srCache[i];
+ if (cache->object == obj &&
+ cache->dirty)
+@@ -3777,7 +3848,7 @@ static int yaffs_ObjectHasCachedWriteDat
+ }
+
+
+-static void yaffs_FlushFilesChunkCache(yaffs_Object * obj)
++static void yaffs_FlushFilesChunkCache(yaffs_Object *obj)
+ {
+ yaffs_Device *dev = obj->myDev;
+ int lowest = -99; /* Stop compiler whining. */
+@@ -3844,16 +3915,16 @@ void yaffs_FlushEntireDeviceCache(yaffs_
+ */
+ do {
+ obj = NULL;
+- for( i = 0; i < nCaches && !obj; i++) {
++ for (i = 0; i < nCaches && !obj; i++) {
+ if (dev->srCache[i].object &&
+ dev->srCache[i].dirty)
+ obj = dev->srCache[i].object;
+
+ }
+- if(obj)
++ if (obj)
+ yaffs_FlushFilesChunkCache(obj);
+
+- } while(obj);
++ } while (obj);
+
+ }
+
+@@ -3863,41 +3934,21 @@ void yaffs_FlushEntireDeviceCache(yaffs_
+ * Then look for the least recently used non-dirty one.
+ * Then look for the least recently used dirty one...., flush and look again.
+ */
+-static yaffs_ChunkCache *yaffs_GrabChunkCacheWorker(yaffs_Device * dev)
++static yaffs_ChunkCache *yaffs_GrabChunkCacheWorker(yaffs_Device *dev)
+ {
+ int i;
+- int usage;
+- int theOne;
+
+ if (dev->nShortOpCaches > 0) {
+ for (i = 0; i < dev->nShortOpCaches; i++) {
+ if (!dev->srCache[i].object)
+ return &dev->srCache[i];
+ }
++ }
+
+- return NULL;
++ return NULL;
++}
+
+- theOne = -1;
+- usage = 0; /* just to stop the compiler grizzling */
+-
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (!dev->srCache[i].dirty &&
+- ((dev->srCache[i].lastUse < usage && theOne >= 0) ||
+- theOne < 0)) {
+- usage = dev->srCache[i].lastUse;
+- theOne = i;
+- }
+- }
+-
+-
+- return theOne >= 0 ? &dev->srCache[theOne] : NULL;
+- } else {
+- return NULL;
+- }
+-
+-}
+-
+-static yaffs_ChunkCache *yaffs_GrabChunkCache(yaffs_Device * dev)
++static yaffs_ChunkCache *yaffs_GrabChunkCache(yaffs_Device *dev)
+ {
+ yaffs_ChunkCache *cache;
+ yaffs_Object *theObj;
+@@ -3927,8 +3978,7 @@ static yaffs_ChunkCache *yaffs_GrabChunk
+ for (i = 0; i < dev->nShortOpCaches; i++) {
+ if (dev->srCache[i].object &&
+ !dev->srCache[i].locked &&
+- (dev->srCache[i].lastUse < usage || !cache))
+- {
++ (dev->srCache[i].lastUse < usage || !cache)) {
+ usage = dev->srCache[i].lastUse;
+ theObj = dev->srCache[i].object;
+ cache = &dev->srCache[i];
+@@ -3950,7 +4000,7 @@ static yaffs_ChunkCache *yaffs_GrabChunk
+ }
+
+ /* Find a cached chunk */
+-static yaffs_ChunkCache *yaffs_FindChunkCache(const yaffs_Object * obj,
++static yaffs_ChunkCache *yaffs_FindChunkCache(const yaffs_Object *obj,
+ int chunkId)
+ {
+ yaffs_Device *dev = obj->myDev;
+@@ -3969,7 +4019,7 @@ static yaffs_ChunkCache *yaffs_FindChunk
+ }
+
+ /* Mark the chunk for the least recently used algorithym */
+-static void yaffs_UseChunkCache(yaffs_Device * dev, yaffs_ChunkCache * cache,
++static void yaffs_UseChunkCache(yaffs_Device *dev, yaffs_ChunkCache *cache,
+ int isAWrite)
+ {
+
+@@ -3977,9 +4027,9 @@ static void yaffs_UseChunkCache(yaffs_De
+ if (dev->srLastUse < 0 || dev->srLastUse > 100000000) {
+ /* Reset the cache usages */
+ int i;
+- for (i = 1; i < dev->nShortOpCaches; i++) {
++ for (i = 1; i < dev->nShortOpCaches; i++)
+ dev->srCache[i].lastUse = 0;
+- }
++
+ dev->srLastUse = 0;
+ }
+
+@@ -3987,9 +4037,8 @@ static void yaffs_UseChunkCache(yaffs_De
+
+ cache->lastUse = dev->srLastUse;
+
+- if (isAWrite) {
++ if (isAWrite)
+ cache->dirty = 1;
+- }
+ }
+ }
+
+@@ -3997,21 +4046,20 @@ static void yaffs_UseChunkCache(yaffs_De
+ * Do this when a whole page gets written,
+ * ie the short cache for this page is no longer valid.
+ */
+-static void yaffs_InvalidateChunkCache(yaffs_Object * object, int chunkId)
++static void yaffs_InvalidateChunkCache(yaffs_Object *object, int chunkId)
+ {
+ if (object->myDev->nShortOpCaches > 0) {
+ yaffs_ChunkCache *cache = yaffs_FindChunkCache(object, chunkId);
+
+- if (cache) {
++ if (cache)
+ cache->object = NULL;
+- }
+ }
+ }
+
+ /* Invalidate all the cache pages associated with this object
+ * Do this whenever ther file is deleted or resized.
+ */
+-static void yaffs_InvalidateWholeChunkCache(yaffs_Object * in)
++static void yaffs_InvalidateWholeChunkCache(yaffs_Object *in)
+ {
+ int i;
+ yaffs_Device *dev = in->myDev;
+@@ -4019,9 +4067,8 @@ static void yaffs_InvalidateWholeChunkCa
+ if (dev->nShortOpCaches > 0) {
+ /* Invalidate it. */
+ for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].object == in) {
++ if (dev->srCache[i].object == in)
+ dev->srCache[i].object = NULL;
+- }
+ }
+ }
+ }
+@@ -4029,18 +4076,18 @@ static void yaffs_InvalidateWholeChunkCa
+ /*--------------------- Checkpointing --------------------*/
+
+
+-static int yaffs_WriteCheckpointValidityMarker(yaffs_Device *dev,int head)
++static int yaffs_WriteCheckpointValidityMarker(yaffs_Device *dev, int head)
+ {
+ yaffs_CheckpointValidity cp;
+
+- memset(&cp,0,sizeof(cp));
++ memset(&cp, 0, sizeof(cp));
+
+ cp.structType = sizeof(cp);
+ cp.magic = YAFFS_MAGIC;
+ cp.version = YAFFS_CHECKPOINT_VERSION;
+ cp.head = (head) ? 1 : 0;
+
+- return (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp))?
++ return (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp)) ?
+ 1 : 0;
+ }
+
+@@ -4049,9 +4096,9 @@ static int yaffs_ReadCheckpointValidityM
+ yaffs_CheckpointValidity cp;
+ int ok;
+
+- ok = (yaffs_CheckpointRead(dev,&cp,sizeof(cp)) == sizeof(cp));
++ ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+- if(ok)
++ if (ok)
+ ok = (cp.structType == sizeof(cp)) &&
+ (cp.magic == YAFFS_MAGIC) &&
+ (cp.version == YAFFS_CHECKPOINT_VERSION) &&
+@@ -4100,21 +4147,21 @@ static int yaffs_WriteCheckpointDevice(y
+ int ok;
+
+ /* Write device runtime values*/
+- yaffs_DeviceToCheckpointDevice(&cp,dev);
++ yaffs_DeviceToCheckpointDevice(&cp, dev);
+ cp.structType = sizeof(cp);
+
+- ok = (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp));
++ ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ /* Write block info */
+- if(ok) {
++ if (ok) {
+ nBytes = nBlocks * sizeof(yaffs_BlockInfo);
+- ok = (yaffs_CheckpointWrite(dev,dev->blockInfo,nBytes) == nBytes);
++ ok = (yaffs_CheckpointWrite(dev, dev->blockInfo, nBytes) == nBytes);
+ }
+
+ /* Write chunk bits */
+- if(ok) {
++ if (ok) {
+ nBytes = nBlocks * dev->chunkBitmapStride;
+- ok = (yaffs_CheckpointWrite(dev,dev->chunkBits,nBytes) == nBytes);
++ ok = (yaffs_CheckpointWrite(dev, dev->chunkBits, nBytes) == nBytes);
+ }
+ return ok ? 1 : 0;
+
+@@ -4128,25 +4175,25 @@ static int yaffs_ReadCheckpointDevice(ya
+
+ int ok;
+
+- ok = (yaffs_CheckpointRead(dev,&cp,sizeof(cp)) == sizeof(cp));
+- if(!ok)
++ ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (!ok)
+ return 0;
+
+- if(cp.structType != sizeof(cp))
++ if (cp.structType != sizeof(cp))
+ return 0;
+
+
+- yaffs_CheckpointDeviceToDevice(dev,&cp);
++ yaffs_CheckpointDeviceToDevice(dev, &cp);
+
+ nBytes = nBlocks * sizeof(yaffs_BlockInfo);
+
+- ok = (yaffs_CheckpointRead(dev,dev->blockInfo,nBytes) == nBytes);
++ ok = (yaffs_CheckpointRead(dev, dev->blockInfo, nBytes) == nBytes);
+
+- if(!ok)
++ if (!ok)
+ return 0;
+ nBytes = nBlocks * dev->chunkBitmapStride;
+
+- ok = (yaffs_CheckpointRead(dev,dev->chunkBits,nBytes) == nBytes);
++ ok = (yaffs_CheckpointRead(dev, dev->chunkBits, nBytes) == nBytes);
+
+ return ok ? 1 : 0;
+ }
+@@ -4157,7 +4204,7 @@ static void yaffs_ObjectToCheckpointObje
+
+ cp->objectId = obj->objectId;
+ cp->parentId = (obj->parent) ? obj->parent->objectId : 0;
+- cp->chunkId = obj->chunkId;
++ cp->hdrChunk = obj->hdrChunk;
+ cp->variantType = obj->variantType;
+ cp->deleted = obj->deleted;
+ cp->softDeleted = obj->softDeleted;
+@@ -4168,20 +4215,28 @@ static void yaffs_ObjectToCheckpointObje
+ cp->serial = obj->serial;
+ cp->nDataChunks = obj->nDataChunks;
+
+- if(obj->variantType == YAFFS_OBJECT_TYPE_FILE)
++ if (obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+ cp->fileSizeOrEquivalentObjectId = obj->variant.fileVariant.fileSize;
+- else if(obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
++ else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
+ cp->fileSizeOrEquivalentObjectId = obj->variant.hardLinkVariant.equivalentObjectId;
+ }
+
+-static void yaffs_CheckpointObjectToObject( yaffs_Object *obj,yaffs_CheckpointObject *cp)
++static int yaffs_CheckpointObjectToObject(yaffs_Object *obj, yaffs_CheckpointObject *cp)
+ {
+
+ yaffs_Object *parent;
+
++ if (obj->variantType != cp->variantType) {
++ T(YAFFS_TRACE_ERROR, (TSTR("Checkpoint read object %d type %d "
++ TCONT("chunk %d does not match existing object type %d")
++ TENDSTR), cp->objectId, cp->variantType, cp->hdrChunk,
++ obj->variantType));
++ return 0;
++ }
++
+ obj->objectId = cp->objectId;
+
+- if(cp->parentId)
++ if (cp->parentId)
+ parent = yaffs_FindOrCreateObjectByNumber(
+ obj->myDev,
+ cp->parentId,
+@@ -4189,10 +4244,19 @@ static void yaffs_CheckpointObjectToObje
+ else
+ parent = NULL;
+
+- if(parent)
++ if (parent) {
++ if (parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Checkpoint read object %d parent %d type %d"
++ TCONT(" chunk %d Parent type, %d, not directory")
++ TENDSTR),
++ cp->objectId, cp->parentId, cp->variantType,
++ cp->hdrChunk, parent->variantType));
++ return 0;
++ }
+ yaffs_AddObjectToDirectory(parent, obj);
++ }
+
+- obj->chunkId = cp->chunkId;
++ obj->hdrChunk = cp->hdrChunk;
+ obj->variantType = cp->variantType;
+ obj->deleted = cp->deleted;
+ obj->softDeleted = cp->softDeleted;
+@@ -4203,29 +4267,34 @@ static void yaffs_CheckpointObjectToObje
+ obj->serial = cp->serial;
+ obj->nDataChunks = cp->nDataChunks;
+
+- if(obj->variantType == YAFFS_OBJECT_TYPE_FILE)
++ if (obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+ obj->variant.fileVariant.fileSize = cp->fileSizeOrEquivalentObjectId;
+- else if(obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
++ else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
+ obj->variant.hardLinkVariant.equivalentObjectId = cp->fileSizeOrEquivalentObjectId;
+
+- if(obj->objectId >= YAFFS_NOBJECT_BUCKETS)
++ if (obj->hdrChunk > 0)
+ obj->lazyLoaded = 1;
++ return 1;
+ }
+
+
+
+-static int yaffs_CheckpointTnodeWorker(yaffs_Object * in, yaffs_Tnode * tn,
+- __u32 level, int chunkOffset)
++static int yaffs_CheckpointTnodeWorker(yaffs_Object *in, yaffs_Tnode *tn,
++ __u32 level, int chunkOffset)
+ {
+ int i;
+ yaffs_Device *dev = in->myDev;
+ int ok = 1;
+- int nTnodeBytes = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++ int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++
++ if (tnodeSize < sizeof(yaffs_Tnode))
++ tnodeSize = sizeof(yaffs_Tnode);
++
+
+ if (tn) {
+ if (level > 0) {
+
+- for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++){
++ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+ if (tn->internal[i]) {
+ ok = yaffs_CheckpointTnodeWorker(in,
+ tn->internal[i],
+@@ -4235,10 +4304,9 @@ static int yaffs_CheckpointTnodeWorker(y
+ }
+ } else if (level == 0) {
+ __u32 baseOffset = chunkOffset << YAFFS_TNODES_LEVEL0_BITS;
+- /* printf("write tnode at %d\n",baseOffset); */
+- ok = (yaffs_CheckpointWrite(dev,&baseOffset,sizeof(baseOffset)) == sizeof(baseOffset));
+- if(ok)
+- ok = (yaffs_CheckpointWrite(dev,tn,nTnodeBytes) == nTnodeBytes);
++ ok = (yaffs_CheckpointWrite(dev, &baseOffset, sizeof(baseOffset)) == sizeof(baseOffset));
++ if (ok)
++ ok = (yaffs_CheckpointWrite(dev, tn, tnodeSize) == tnodeSize);
+ }
+ }
+
+@@ -4251,13 +4319,13 @@ static int yaffs_WriteCheckpointTnodes(y
+ __u32 endMarker = ~0;
+ int ok = 1;
+
+- if(obj->variantType == YAFFS_OBJECT_TYPE_FILE){
++ if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
+ ok = yaffs_CheckpointTnodeWorker(obj,
+ obj->variant.fileVariant.top,
+ obj->variant.fileVariant.topLevel,
+ 0);
+- if(ok)
+- ok = (yaffs_CheckpointWrite(obj->myDev,&endMarker,sizeof(endMarker)) ==
++ if (ok)
++ ok = (yaffs_CheckpointWrite(obj->myDev, &endMarker, sizeof(endMarker)) ==
+ sizeof(endMarker));
+ }
+
+@@ -4272,38 +4340,38 @@ static int yaffs_ReadCheckpointTnodes(ya
+ yaffs_FileStructure *fileStructPtr = &obj->variant.fileVariant;
+ yaffs_Tnode *tn;
+ int nread = 0;
++ int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+
+- ok = (yaffs_CheckpointRead(dev,&baseChunk,sizeof(baseChunk)) == sizeof(baseChunk));
++ if (tnodeSize < sizeof(yaffs_Tnode))
++ tnodeSize = sizeof(yaffs_Tnode);
+
+- while(ok && (~baseChunk)){
++ ok = (yaffs_CheckpointRead(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk));
++
++ while (ok && (~baseChunk)) {
+ nread++;
+ /* Read level 0 tnode */
+
+
+- /* printf("read tnode at %d\n",baseChunk); */
+ tn = yaffs_GetTnodeRaw(dev);
+- if(tn)
+- ok = (yaffs_CheckpointRead(dev,tn,(dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8) ==
+- (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
++ if (tn)
++ ok = (yaffs_CheckpointRead(dev, tn, tnodeSize) == tnodeSize);
+ else
+ ok = 0;
+
+- if(tn && ok){
++ if (tn && ok)
+ ok = yaffs_AddOrFindLevel0Tnode(dev,
+- fileStructPtr,
+- baseChunk,
+- tn) ? 1 : 0;
++ fileStructPtr,
++ baseChunk,
++ tn) ? 1 : 0;
+
+- }
+-
+- if(ok)
+- ok = (yaffs_CheckpointRead(dev,&baseChunk,sizeof(baseChunk)) == sizeof(baseChunk));
++ if (ok)
++ ok = (yaffs_CheckpointRead(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk));
+
+ }
+
+- T(YAFFS_TRACE_CHECKPOINT,(
++ T(YAFFS_TRACE_CHECKPOINT, (
+ TSTR("Checkpoint read tnodes %d records, last %d. ok %d" TENDSTR),
+- nread,baseChunk,ok));
++ nread, baseChunk, ok));
+
+ return ok ? 1 : 0;
+ }
+@@ -4315,41 +4383,40 @@ static int yaffs_WriteCheckpointObjects(
+ yaffs_CheckpointObject cp;
+ int i;
+ int ok = 1;
+- struct list_head *lh;
++ struct ylist_head *lh;
+
+
+ /* Iterate through the objects in each hash entry,
+ * dumping them to the checkpointing stream.
+ */
+
+- for(i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++){
+- list_for_each(lh, &dev->objectBucket[i].list) {
++ for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
++ ylist_for_each(lh, &dev->objectBucket[i].list) {
+ if (lh) {
+- obj = list_entry(lh, yaffs_Object, hashLink);
++ obj = ylist_entry(lh, yaffs_Object, hashLink);
+ if (!obj->deferedFree) {
+- yaffs_ObjectToCheckpointObject(&cp,obj);
++ yaffs_ObjectToCheckpointObject(&cp, obj);
+ cp.structType = sizeof(cp);
+
+- T(YAFFS_TRACE_CHECKPOINT,(
++ T(YAFFS_TRACE_CHECKPOINT, (
+ TSTR("Checkpoint write object %d parent %d type %d chunk %d obj addr %x" TENDSTR),
+- cp.objectId,cp.parentId,cp.variantType,cp.chunkId,(unsigned) obj));
++ cp.objectId, cp.parentId, cp.variantType, cp.hdrChunk, (unsigned) obj));
+
+- ok = (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp));
++ ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+- if(ok && obj->variantType == YAFFS_OBJECT_TYPE_FILE){
++ if (ok && obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+ ok = yaffs_WriteCheckpointTnodes(obj);
+- }
+ }
+ }
+ }
+- }
++ }
+
+- /* Dump end of list */
+- memset(&cp,0xFF,sizeof(yaffs_CheckpointObject));
++ /* Dump end of list */
++ memset(&cp, 0xFF, sizeof(yaffs_CheckpointObject));
+ cp.structType = sizeof(cp);
+
+- if(ok)
+- ok = (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp));
++ if (ok)
++ ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ return ok ? 1 : 0;
+ }
+@@ -4362,38 +4429,39 @@ static int yaffs_ReadCheckpointObjects(y
+ int done = 0;
+ yaffs_Object *hardList = NULL;
+
+- while(ok && !done) {
+- ok = (yaffs_CheckpointRead(dev,&cp,sizeof(cp)) == sizeof(cp));
+- if(cp.structType != sizeof(cp)) {
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("struct size %d instead of %d ok %d"TENDSTR),
+- cp.structType,sizeof(cp),ok));
++ while (ok && !done) {
++ ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (cp.structType != sizeof(cp)) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("struct size %d instead of %d ok %d"TENDSTR),
++ cp.structType, sizeof(cp), ok));
+ ok = 0;
+ }
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("Checkpoint read object %d parent %d type %d chunk %d " TENDSTR),
+- cp.objectId,cp.parentId,cp.variantType,cp.chunkId));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("Checkpoint read object %d parent %d type %d chunk %d " TENDSTR),
++ cp.objectId, cp.parentId, cp.variantType, cp.hdrChunk));
+
+- if(ok && cp.objectId == ~0)
++ if (ok && cp.objectId == ~0)
+ done = 1;
+- else if(ok){
+- obj = yaffs_FindOrCreateObjectByNumber(dev,cp.objectId, cp.variantType);
+- if(obj) {
+- yaffs_CheckpointObjectToObject(obj,&cp);
+- if(obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
++ else if (ok) {
++ obj = yaffs_FindOrCreateObjectByNumber(dev, cp.objectId, cp.variantType);
++ if (obj) {
++ ok = yaffs_CheckpointObjectToObject(obj, &cp);
++ if (!ok)
++ break;
++ if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
+ ok = yaffs_ReadCheckpointTnodes(obj);
+- } else if(obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
++ } else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
+ obj->hardLinks.next =
+- (struct list_head *)
+- hardList;
++ (struct ylist_head *) hardList;
+ hardList = obj;
+ }
+-
+- }
++ } else
++ ok = 0;
+ }
+ }
+
+- if(ok)
+- yaffs_HardlinkFixup(dev,hardList);
++ if (ok)
++ yaffs_HardlinkFixup(dev, hardList);
+
+ return ok ? 1 : 0;
+ }
+@@ -4403,11 +4471,11 @@ static int yaffs_WriteCheckpointSum(yaff
+ __u32 checkpointSum;
+ int ok;
+
+- yaffs_GetCheckpointSum(dev,&checkpointSum);
++ yaffs_GetCheckpointSum(dev, &checkpointSum);
+
+- ok = (yaffs_CheckpointWrite(dev,&checkpointSum,sizeof(checkpointSum)) == sizeof(checkpointSum));
++ ok = (yaffs_CheckpointWrite(dev, &checkpointSum, sizeof(checkpointSum)) == sizeof(checkpointSum));
+
+- if(!ok)
++ if (!ok)
+ return 0;
+
+ return 1;
+@@ -4419,14 +4487,14 @@ static int yaffs_ReadCheckpointSum(yaffs
+ __u32 checkpointSum1;
+ int ok;
+
+- yaffs_GetCheckpointSum(dev,&checkpointSum0);
++ yaffs_GetCheckpointSum(dev, &checkpointSum0);
+
+- ok = (yaffs_CheckpointRead(dev,&checkpointSum1,sizeof(checkpointSum1)) == sizeof(checkpointSum1));
++ ok = (yaffs_CheckpointRead(dev, &checkpointSum1, sizeof(checkpointSum1)) == sizeof(checkpointSum1));
+
+- if(!ok)
++ if (!ok)
+ return 0;
+
+- if(checkpointSum0 != checkpointSum1)
++ if (checkpointSum0 != checkpointSum1)
+ return 0;
+
+ return 1;
+@@ -4435,46 +4503,43 @@ static int yaffs_ReadCheckpointSum(yaffs
+
+ static int yaffs_WriteCheckpointData(yaffs_Device *dev)
+ {
+-
+ int ok = 1;
+
+- if(dev->skipCheckpointWrite || !dev->isYaffs2){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("skipping checkpoint write" TENDSTR)));
++ if (dev->skipCheckpointWrite || !dev->isYaffs2) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint write" TENDSTR)));
+ ok = 0;
+ }
+
+- if(ok)
+- ok = yaffs_CheckpointOpen(dev,1);
++ if (ok)
++ ok = yaffs_CheckpointOpen(dev, 1);
+
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint validity" TENDSTR)));
+- ok = yaffs_WriteCheckpointValidityMarker(dev,1);
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR)));
++ ok = yaffs_WriteCheckpointValidityMarker(dev, 1);
+ }
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint device" TENDSTR)));
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint device" TENDSTR)));
+ ok = yaffs_WriteCheckpointDevice(dev);
+ }
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint objects" TENDSTR)));
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint objects" TENDSTR)));
+ ok = yaffs_WriteCheckpointObjects(dev);
+ }
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint validity" TENDSTR)));
+- ok = yaffs_WriteCheckpointValidityMarker(dev,0);
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR)));
++ ok = yaffs_WriteCheckpointValidityMarker(dev, 0);
+ }
+
+- if(ok){
++ if (ok)
+ ok = yaffs_WriteCheckpointSum(dev);
+- }
+-
+
+- if(!yaffs_CheckpointClose(dev))
+- ok = 0;
++ if (!yaffs_CheckpointClose(dev))
++ ok = 0;
+
+- if(ok)
+- dev->isCheckpointed = 1;
+- else
+- dev->isCheckpointed = 0;
++ if (ok)
++ dev->isCheckpointed = 1;
++ else
++ dev->isCheckpointed = 0;
+
+ return dev->isCheckpointed;
+ }
+@@ -4483,43 +4548,43 @@ static int yaffs_ReadCheckpointData(yaff
+ {
+ int ok = 1;
+
+- if(dev->skipCheckpointRead || !dev->isYaffs2){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("skipping checkpoint read" TENDSTR)));
++ if (dev->skipCheckpointRead || !dev->isYaffs2) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint read" TENDSTR)));
+ ok = 0;
+ }
+
+- if(ok)
+- ok = yaffs_CheckpointOpen(dev,0); /* open for read */
++ if (ok)
++ ok = yaffs_CheckpointOpen(dev, 0); /* open for read */
+
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint validity" TENDSTR)));
+- ok = yaffs_ReadCheckpointValidityMarker(dev,1);
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR)));
++ ok = yaffs_ReadCheckpointValidityMarker(dev, 1);
+ }
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint device" TENDSTR)));
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint device" TENDSTR)));
+ ok = yaffs_ReadCheckpointDevice(dev);
+ }
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint objects" TENDSTR)));
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint objects" TENDSTR)));
+ ok = yaffs_ReadCheckpointObjects(dev);
+ }
+- if(ok){
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint validity" TENDSTR)));
+- ok = yaffs_ReadCheckpointValidityMarker(dev,0);
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR)));
++ ok = yaffs_ReadCheckpointValidityMarker(dev, 0);
+ }
+
+- if(ok){
++ if (ok) {
+ ok = yaffs_ReadCheckpointSum(dev);
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint checksum %d" TENDSTR),ok));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint checksum %d" TENDSTR), ok));
+ }
+
+- if(!yaffs_CheckpointClose(dev))
++ if (!yaffs_CheckpointClose(dev))
+ ok = 0;
+
+- if(ok)
+- dev->isCheckpointed = 1;
+- else
+- dev->isCheckpointed = 0;
++ if (ok)
++ dev->isCheckpointed = 1;
++ else
++ dev->isCheckpointed = 0;
+
+ return ok ? 1 : 0;
+
+@@ -4527,11 +4592,11 @@ static int yaffs_ReadCheckpointData(yaff
+
+ static void yaffs_InvalidateCheckpoint(yaffs_Device *dev)
+ {
+- if(dev->isCheckpointed ||
+- dev->blocksInCheckpoint > 0){
++ if (dev->isCheckpointed ||
++ dev->blocksInCheckpoint > 0) {
+ dev->isCheckpointed = 0;
+ yaffs_CheckpointInvalidateStream(dev);
+- if(dev->superBlock && dev->markSuperBlockDirty)
++ if (dev->superBlock && dev->markSuperBlockDirty)
+ dev->markSuperBlockDirty(dev->superBlock);
+ }
+ }
+@@ -4540,18 +4605,18 @@ static void yaffs_InvalidateCheckpoint(y
+ int yaffs_CheckpointSave(yaffs_Device *dev)
+ {
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("save entry: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("save entry: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
+
+ yaffs_VerifyObjects(dev);
+ yaffs_VerifyBlocks(dev);
+ yaffs_VerifyFreeChunks(dev);
+
+- if(!dev->isCheckpointed) {
++ if (!dev->isCheckpointed) {
+ yaffs_InvalidateCheckpoint(dev);
+ yaffs_WriteCheckpointData(dev);
+ }
+
+- T(YAFFS_TRACE_ALWAYS,(TSTR("save exit: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
++ T(YAFFS_TRACE_ALWAYS, (TSTR("save exit: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
+
+ return dev->isCheckpointed;
+ }
+@@ -4559,17 +4624,17 @@ int yaffs_CheckpointSave(yaffs_Device *d
+ int yaffs_CheckpointRestore(yaffs_Device *dev)
+ {
+ int retval;
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("restore entry: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore entry: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
+
+ retval = yaffs_ReadCheckpointData(dev);
+
+- if(dev->isCheckpointed){
++ if (dev->isCheckpointed) {
+ yaffs_VerifyObjects(dev);
+ yaffs_VerifyBlocks(dev);
+ yaffs_VerifyFreeChunks(dev);
+ }
+
+- T(YAFFS_TRACE_CHECKPOINT,(TSTR("restore exit: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore exit: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
+
+ return retval;
+ }
+@@ -4584,12 +4649,12 @@ int yaffs_CheckpointRestore(yaffs_Device
+ * Curve-balls: the first chunk might also be the last chunk.
+ */
+
+-int yaffs_ReadDataFromFile(yaffs_Object * in, __u8 * buffer, loff_t offset,
+- int nBytes)
++int yaffs_ReadDataFromFile(yaffs_Object *in, __u8 *buffer, loff_t offset,
++ int nBytes)
+ {
+
+ int chunk;
+- int start;
++ __u32 start;
+ int nToCopy;
+ int n = nBytes;
+ int nDone = 0;
+@@ -4600,27 +4665,26 @@ int yaffs_ReadDataFromFile(yaffs_Object
+ dev = in->myDev;
+
+ while (n > 0) {
+- //chunk = offset / dev->nDataBytesPerChunk + 1;
+- //start = offset % dev->nDataBytesPerChunk;
+- yaffs_AddrToChunk(dev,offset,&chunk,&start);
++ /* chunk = offset / dev->nDataBytesPerChunk + 1; */
++ /* start = offset % dev->nDataBytesPerChunk; */
++ yaffs_AddrToChunk(dev, offset, &chunk, &start);
+ chunk++;
+
+ /* OK now check for the curveball where the start and end are in
+ * the same chunk.
+ */
+- if ((start + n) < dev->nDataBytesPerChunk) {
++ if ((start + n) < dev->nDataBytesPerChunk)
+ nToCopy = n;
+- } else {
++ else
+ nToCopy = dev->nDataBytesPerChunk - start;
+- }
+
+ cache = yaffs_FindChunkCache(in, chunk);
+
+ /* If the chunk is already in the cache or it is less than a whole chunk
+- * then use the cache (if there is caching)
++ * or we're using inband tags then use the cache (if there is caching)
+ * else bypass the cache.
+ */
+- if (cache || nToCopy != dev->nDataBytesPerChunk) {
++ if (cache || nToCopy != dev->nDataBytesPerChunk || dev->inbandTags) {
+ if (dev->nShortOpCaches > 0) {
+
+ /* If we can't find the data in the cache, then load it up. */
+@@ -4641,14 +4705,9 @@ int yaffs_ReadDataFromFile(yaffs_Object
+
+ cache->locked = 1;
+
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_UnlockYAFFS(TRUE);
+-#endif
++
+ memcpy(buffer, &cache->data[start], nToCopy);
+
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_LockYAFFS(TRUE);
+-#endif
+ cache->locked = 0;
+ } else {
+ /* Read into the local buffer then copy..*/
+@@ -4657,41 +4716,19 @@ int yaffs_ReadDataFromFile(yaffs_Object
+ yaffs_GetTempBuffer(dev, __LINE__);
+ yaffs_ReadChunkDataFromObject(in, chunk,
+ localBuffer);
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_UnlockYAFFS(TRUE);
+-#endif
++
+ memcpy(buffer, &localBuffer[start], nToCopy);
+
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_LockYAFFS(TRUE);
+-#endif
++
+ yaffs_ReleaseTempBuffer(dev, localBuffer,
+ __LINE__);
+ }
+
+ } else {
+-#ifdef CONFIG_YAFFS_WINCE
+- __u8 *localBuffer = yaffs_GetTempBuffer(dev, __LINE__);
+-
+- /* Under WinCE can't do direct transfer. Need to use a local buffer.
+- * This is because we otherwise screw up WinCE's memory mapper
+- */
+- yaffs_ReadChunkDataFromObject(in, chunk, localBuffer);
+-
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_UnlockYAFFS(TRUE);
+-#endif
+- memcpy(buffer, localBuffer, dev->nDataBytesPerChunk);
+
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_LockYAFFS(TRUE);
+- yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__);
+-#endif
+-
+-#else
+ /* A full chunk. Read directly into the supplied buffer. */
+ yaffs_ReadChunkDataFromObject(in, chunk, buffer);
+-#endif
++
+ }
+
+ n -= nToCopy;
+@@ -4704,28 +4741,37 @@ int yaffs_ReadDataFromFile(yaffs_Object
+ return nDone;
+ }
+
+-int yaffs_WriteDataToFile(yaffs_Object * in, const __u8 * buffer, loff_t offset,
+- int nBytes, int writeThrough)
++int yaffs_WriteDataToFile(yaffs_Object *in, const __u8 *buffer, loff_t offset,
++ int nBytes, int writeThrough)
+ {
+
+ int chunk;
+- int start;
++ __u32 start;
+ int nToCopy;
+ int n = nBytes;
+ int nDone = 0;
+ int nToWriteBack;
+ int startOfWrite = offset;
+ int chunkWritten = 0;
+- int nBytesRead;
++ __u32 nBytesRead;
++ __u32 chunkStart;
+
+ yaffs_Device *dev;
+
+ dev = in->myDev;
+
+ while (n > 0 && chunkWritten >= 0) {
+- //chunk = offset / dev->nDataBytesPerChunk + 1;
+- //start = offset % dev->nDataBytesPerChunk;
+- yaffs_AddrToChunk(dev,offset,&chunk,&start);
++ /* chunk = offset / dev->nDataBytesPerChunk + 1; */
++ /* start = offset % dev->nDataBytesPerChunk; */
++ yaffs_AddrToChunk(dev, offset, &chunk, &start);
++
++ if (chunk * dev->nDataBytesPerChunk + start != offset ||
++ start >= dev->nDataBytesPerChunk) {
++ T(YAFFS_TRACE_ERROR, (
++ TSTR("AddrToChunk of offset %d gives chunk %d start %d"
++ TENDSTR),
++ (int)offset, chunk, start));
++ }
+ chunk++;
+
+ /* OK now check for the curveball where the start and end are in
+@@ -4740,25 +4786,32 @@ int yaffs_WriteDataToFile(yaffs_Object *
+ * we need to write back as much as was there before.
+ */
+
+- nBytesRead =
+- in->variant.fileVariant.fileSize -
+- ((chunk - 1) * dev->nDataBytesPerChunk);
++ chunkStart = ((chunk - 1) * dev->nDataBytesPerChunk);
++
++ if (chunkStart > in->variant.fileVariant.fileSize)
++ nBytesRead = 0; /* Past end of file */
++ else
++ nBytesRead = in->variant.fileVariant.fileSize - chunkStart;
+
+- if (nBytesRead > dev->nDataBytesPerChunk) {
++ if (nBytesRead > dev->nDataBytesPerChunk)
+ nBytesRead = dev->nDataBytesPerChunk;
+- }
+
+ nToWriteBack =
+ (nBytesRead >
+ (start + n)) ? nBytesRead : (start + n);
+
++ if (nToWriteBack < 0 || nToWriteBack > dev->nDataBytesPerChunk)
++ YBUG();
++
+ } else {
+ nToCopy = dev->nDataBytesPerChunk - start;
+ nToWriteBack = dev->nDataBytesPerChunk;
+ }
+
+- if (nToCopy != dev->nDataBytesPerChunk) {
+- /* An incomplete start or end chunk (or maybe both start and end chunk) */
++ if (nToCopy != dev->nDataBytesPerChunk || dev->inbandTags) {
++ /* An incomplete start or end chunk (or maybe both start and end chunk),
++ * or we're using inband tags, so we want to use the cache buffers.
++ */
+ if (dev->nShortOpCaches > 0) {
+ yaffs_ChunkCache *cache;
+ /* If we can't find the data in the cache, then load the cache */
+@@ -4775,10 +4828,9 @@ int yaffs_WriteDataToFile(yaffs_Object *
+ yaffs_ReadChunkDataFromObject(in, chunk,
+ cache->
+ data);
+- }
+- else if(cache &&
+- !cache->dirty &&
+- !yaffs_CheckSpaceForAllocation(in->myDev)){
++ } else if (cache &&
++ !cache->dirty &&
++ !yaffs_CheckSpaceForAllocation(in->myDev)) {
+ /* Drop the cache if it was a read cache item and
+ * no space check has been made for it.
+ */
+@@ -4788,16 +4840,12 @@ int yaffs_WriteDataToFile(yaffs_Object *
+ if (cache) {
+ yaffs_UseChunkCache(dev, cache, 1);
+ cache->locked = 1;
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_UnlockYAFFS(TRUE);
+-#endif
++
+
+ memcpy(&cache->data[start], buffer,
+ nToCopy);
+
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_LockYAFFS(TRUE);
+-#endif
++
+ cache->locked = 0;
+ cache->nBytes = nToWriteBack;
+
+@@ -4825,15 +4873,10 @@ int yaffs_WriteDataToFile(yaffs_Object *
+ yaffs_ReadChunkDataFromObject(in, chunk,
+ localBuffer);
+
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_UnlockYAFFS(TRUE);
+-#endif
++
+
+ memcpy(&localBuffer[start], buffer, nToCopy);
+
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_LockYAFFS(TRUE);
+-#endif
+ chunkWritten =
+ yaffs_WriteChunkDataToObject(in, chunk,
+ localBuffer,
+@@ -4846,31 +4889,15 @@ int yaffs_WriteDataToFile(yaffs_Object *
+ }
+
+ } else {
+-
+-#ifdef CONFIG_YAFFS_WINCE
+- /* Under WinCE can't do direct transfer. Need to use a local buffer.
+- * This is because we otherwise screw up WinCE's memory mapper
+- */
+- __u8 *localBuffer = yaffs_GetTempBuffer(dev, __LINE__);
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_UnlockYAFFS(TRUE);
+-#endif
+- memcpy(localBuffer, buffer, dev->nDataBytesPerChunk);
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_LockYAFFS(TRUE);
+-#endif
+- chunkWritten =
+- yaffs_WriteChunkDataToObject(in, chunk, localBuffer,
+- dev->nDataBytesPerChunk,
+- 0);
+- yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__);
+-#else
+ /* A full chunk. Write directly from the supplied buffer. */
++
++
++
+ chunkWritten =
+ yaffs_WriteChunkDataToObject(in, chunk, buffer,
+ dev->nDataBytesPerChunk,
+ 0);
+-#endif
++
+ /* Since we've overwritten the cached data, we better invalidate it. */
+ yaffs_InvalidateChunkCache(in, chunk);
+ }
+@@ -4886,9 +4913,8 @@ int yaffs_WriteDataToFile(yaffs_Object *
+
+ /* Update file object */
+
+- if ((startOfWrite + nDone) > in->variant.fileVariant.fileSize) {
++ if ((startOfWrite + nDone) > in->variant.fileVariant.fileSize)
+ in->variant.fileVariant.fileSize = (startOfWrite + nDone);
+- }
+
+ in->dirty = 1;
+
+@@ -4898,7 +4924,7 @@ int yaffs_WriteDataToFile(yaffs_Object *
+
+ /* ---------------------- File resizing stuff ------------------ */
+
+-static void yaffs_PruneResizedChunks(yaffs_Object * in, int newSize)
++static void yaffs_PruneResizedChunks(yaffs_Object *in, int newSize)
+ {
+
+ yaffs_Device *dev = in->myDev;
+@@ -4939,11 +4965,11 @@ static void yaffs_PruneResizedChunks(yaf
+
+ }
+
+-int yaffs_ResizeFile(yaffs_Object * in, loff_t newSize)
++int yaffs_ResizeFile(yaffs_Object *in, loff_t newSize)
+ {
+
+ int oldFileSize = in->variant.fileVariant.fileSize;
+- int newSizeOfPartialChunk;
++ __u32 newSizeOfPartialChunk;
+ int newFullChunks;
+
+ yaffs_Device *dev = in->myDev;
+@@ -4955,13 +4981,11 @@ int yaffs_ResizeFile(yaffs_Object * in,
+
+ yaffs_CheckGarbageCollection(dev);
+
+- if (in->variantType != YAFFS_OBJECT_TYPE_FILE) {
+- return yaffs_GetFileSize(in);
+- }
++ if (in->variantType != YAFFS_OBJECT_TYPE_FILE)
++ return YAFFS_FAIL;
+
+- if (newSize == oldFileSize) {
+- return oldFileSize;
+- }
++ if (newSize == oldFileSize)
++ return YAFFS_OK;
+
+ if (newSize < oldFileSize) {
+
+@@ -4994,21 +5018,20 @@ int yaffs_ResizeFile(yaffs_Object * in,
+ }
+
+
+-
+ /* Write a new object header.
+ * show we've shrunk the file, if need be
+ * Do this only if the file is not in the deleted directories.
+ */
+- if (in->parent->objectId != YAFFS_OBJECTID_UNLINKED &&
+- in->parent->objectId != YAFFS_OBJECTID_DELETED) {
++ if (in->parent &&
++ in->parent->objectId != YAFFS_OBJECTID_UNLINKED &&
++ in->parent->objectId != YAFFS_OBJECTID_DELETED)
+ yaffs_UpdateObjectHeader(in, NULL, 0,
+ (newSize < oldFileSize) ? 1 : 0, 0);
+- }
+
+- return newSize;
++ return YAFFS_OK;
+ }
+
+-loff_t yaffs_GetFileSize(yaffs_Object * obj)
++loff_t yaffs_GetFileSize(yaffs_Object *obj)
+ {
+ obj = yaffs_GetEquivalentObject(obj);
+
+@@ -5024,7 +5047,7 @@ loff_t yaffs_GetFileSize(yaffs_Object *
+
+
+
+-int yaffs_FlushFile(yaffs_Object * in, int updateTime)
++int yaffs_FlushFile(yaffs_Object *in, int updateTime)
+ {
+ int retVal;
+ if (in->dirty) {
+@@ -5039,9 +5062,8 @@ int yaffs_FlushFile(yaffs_Object * in, i
+ #endif
+ }
+
+- retVal =
+- (yaffs_UpdateObjectHeader(in, NULL, 0, 0, 0) >=
+- 0) ? YAFFS_OK : YAFFS_FAIL;
++ retVal = (yaffs_UpdateObjectHeader(in, NULL, 0, 0, 0) >=
++ 0) ? YAFFS_OK : YAFFS_FAIL;
+ } else {
+ retVal = YAFFS_OK;
+ }
+@@ -5050,7 +5072,7 @@ int yaffs_FlushFile(yaffs_Object * in, i
+
+ }
+
+-static int yaffs_DoGenericObjectDeletion(yaffs_Object * in)
++static int yaffs_DoGenericObjectDeletion(yaffs_Object *in)
+ {
+
+ /* First off, invalidate the file's data in the cache, without flushing. */
+@@ -5058,13 +5080,13 @@ static int yaffs_DoGenericObjectDeletion
+
+ if (in->myDev->isYaffs2 && (in->parent != in->myDev->deletedDir)) {
+ /* Move to the unlinked directory so we have a record that it was deleted. */
+- yaffs_ChangeObjectName(in, in->myDev->deletedDir,"deleted", 0, 0);
++ yaffs_ChangeObjectName(in, in->myDev->deletedDir, _Y("deleted"), 0, 0);
+
+ }
+
+ yaffs_RemoveObjectFromDirectory(in);
+- yaffs_DeleteChunk(in->myDev, in->chunkId, 1, __LINE__);
+- in->chunkId = -1;
++ yaffs_DeleteChunk(in->myDev, in->hdrChunk, 1, __LINE__);
++ in->hdrChunk = 0;
+
+ yaffs_FreeObject(in);
+ return YAFFS_OK;
+@@ -5075,62 +5097,63 @@ static int yaffs_DoGenericObjectDeletion
+ * and the inode associated with the file.
+ * It does not delete the links associated with the file.
+ */
+-static int yaffs_UnlinkFile(yaffs_Object * in)
++static int yaffs_UnlinkFileIfNeeded(yaffs_Object *in)
+ {
+
+ int retVal;
+ int immediateDeletion = 0;
+
+- if (1) {
+ #ifdef __KERNEL__
+- if (!in->myInode) {
+- immediateDeletion = 1;
+-
+- }
++ if (!in->myInode)
++ immediateDeletion = 1;
+ #else
+- if (in->inUse <= 0) {
+- immediateDeletion = 1;
+-
+- }
++ if (in->inUse <= 0)
++ immediateDeletion = 1;
+ #endif
+- if (immediateDeletion) {
+- retVal =
+- yaffs_ChangeObjectName(in, in->myDev->deletedDir,
+- "deleted", 0, 0);
+- T(YAFFS_TRACE_TRACING,
+- (TSTR("yaffs: immediate deletion of file %d" TENDSTR),
+- in->objectId));
+- in->deleted = 1;
+- in->myDev->nDeletedFiles++;
+- if (0 && in->myDev->isYaffs2) {
+- yaffs_ResizeFile(in, 0);
+- }
+- yaffs_SoftDeleteFile(in);
+- } else {
+- retVal =
+- yaffs_ChangeObjectName(in, in->myDev->unlinkedDir,
+- "unlinked", 0, 0);
+- }
+
++ if (immediateDeletion) {
++ retVal =
++ yaffs_ChangeObjectName(in, in->myDev->deletedDir,
++ _Y("deleted"), 0, 0);
++ T(YAFFS_TRACE_TRACING,
++ (TSTR("yaffs: immediate deletion of file %d" TENDSTR),
++ in->objectId));
++ in->deleted = 1;
++ in->myDev->nDeletedFiles++;
++ if (1 || in->myDev->isYaffs2)
++ yaffs_ResizeFile(in, 0);
++ yaffs_SoftDeleteFile(in);
++ } else {
++ retVal =
++ yaffs_ChangeObjectName(in, in->myDev->unlinkedDir,
++ _Y("unlinked"), 0, 0);
+ }
++
++
+ return retVal;
+ }
+
+-int yaffs_DeleteFile(yaffs_Object * in)
++int yaffs_DeleteFile(yaffs_Object *in)
+ {
+ int retVal = YAFFS_OK;
++ int deleted = in->deleted;
++
++ yaffs_ResizeFile(in, 0);
+
+ if (in->nDataChunks > 0) {
+- /* Use soft deletion if there is data in the file */
+- if (!in->unlinked) {
+- retVal = yaffs_UnlinkFile(in);
+- }
++ /* Use soft deletion if there is data in the file.
++ * That won't be the case if it has been resized to zero.
++ */
++ if (!in->unlinked)
++ retVal = yaffs_UnlinkFileIfNeeded(in);
++
+ if (retVal == YAFFS_OK && in->unlinked && !in->deleted) {
+ in->deleted = 1;
++ deleted = 1;
+ in->myDev->nDeletedFiles++;
+ yaffs_SoftDeleteFile(in);
+ }
+- return in->deleted ? YAFFS_OK : YAFFS_FAIL;
++ return deleted ? YAFFS_OK : YAFFS_FAIL;
+ } else {
+ /* The file has no data chunks so we toss it immediately */
+ yaffs_FreeTnode(in->myDev, in->variant.fileVariant.top);
+@@ -5141,62 +5164,75 @@ int yaffs_DeleteFile(yaffs_Object * in)
+ }
+ }
+
+-static int yaffs_DeleteDirectory(yaffs_Object * in)
++static int yaffs_DeleteDirectory(yaffs_Object *in)
+ {
+ /* First check that the directory is empty. */
+- if (list_empty(&in->variant.directoryVariant.children)) {
++ if (ylist_empty(&in->variant.directoryVariant.children))
+ return yaffs_DoGenericObjectDeletion(in);
+- }
+
+ return YAFFS_FAIL;
+
+ }
+
+-static int yaffs_DeleteSymLink(yaffs_Object * in)
++static int yaffs_DeleteSymLink(yaffs_Object *in)
+ {
+ YFREE(in->variant.symLinkVariant.alias);
+
+ return yaffs_DoGenericObjectDeletion(in);
+ }
+
+-static int yaffs_DeleteHardLink(yaffs_Object * in)
++static int yaffs_DeleteHardLink(yaffs_Object *in)
+ {
+ /* remove this hardlink from the list associated with the equivalent
+ * object
+ */
+- list_del(&in->hardLinks);
++ ylist_del_init(&in->hardLinks);
+ return yaffs_DoGenericObjectDeletion(in);
+ }
+
+-static void yaffs_DestroyObject(yaffs_Object * obj)
++int yaffs_DeleteObject(yaffs_Object *obj)
+ {
++int retVal = -1;
+ switch (obj->variantType) {
+ case YAFFS_OBJECT_TYPE_FILE:
+- yaffs_DeleteFile(obj);
++ retVal = yaffs_DeleteFile(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+- yaffs_DeleteDirectory(obj);
++ return yaffs_DeleteDirectory(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+- yaffs_DeleteSymLink(obj);
++ retVal = yaffs_DeleteSymLink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+- yaffs_DeleteHardLink(obj);
++ retVal = yaffs_DeleteHardLink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+- yaffs_DoGenericObjectDeletion(obj);
++ retVal = yaffs_DoGenericObjectDeletion(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ retVal = 0;
+ break; /* should not happen. */
+ }
++
++ return retVal;
+ }
+
+-static int yaffs_UnlinkWorker(yaffs_Object * obj)
++static int yaffs_UnlinkWorker(yaffs_Object *obj)
+ {
+
++ int immediateDeletion = 0;
++
++#ifdef __KERNEL__
++ if (!obj->myInode)
++ immediateDeletion = 1;
++#else
++ if (obj->inUse <= 0)
++ immediateDeletion = 1;
++#endif
++
+ if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
+ return yaffs_DeleteHardLink(obj);
+- } else if (!list_empty(&obj->hardLinks)) {
++ } else if (!ylist_empty(&obj->hardLinks)) {
+ /* Curve ball: We're unlinking an object that has a hardlink.
+ *
+ * This problem arises because we are not strictly following
+@@ -5215,24 +5251,24 @@ static int yaffs_UnlinkWorker(yaffs_Obje
+ int retVal;
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+- hl = list_entry(obj->hardLinks.next, yaffs_Object, hardLinks);
++ hl = ylist_entry(obj->hardLinks.next, yaffs_Object, hardLinks);
+
+- list_del_init(&hl->hardLinks);
+- list_del_init(&hl->siblings);
++ ylist_del_init(&hl->hardLinks);
++ ylist_del_init(&hl->siblings);
+
+ yaffs_GetObjectName(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
+
+ retVal = yaffs_ChangeObjectName(obj, hl->parent, name, 0, 0);
+
+- if (retVal == YAFFS_OK) {
++ if (retVal == YAFFS_OK)
+ retVal = yaffs_DoGenericObjectDeletion(hl);
+- }
++
+ return retVal;
+
+- } else {
++ } else if (immediateDeletion) {
+ switch (obj->variantType) {
+ case YAFFS_OBJECT_TYPE_FILE:
+- return yaffs_UnlinkFile(obj);
++ return yaffs_DeleteFile(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ return yaffs_DeleteDirectory(obj);
+@@ -5248,21 +5284,22 @@ static int yaffs_UnlinkWorker(yaffs_Obje
+ default:
+ return YAFFS_FAIL;
+ }
+- }
++ } else
++ return yaffs_ChangeObjectName(obj, obj->myDev->unlinkedDir,
++ _Y("unlinked"), 0, 0);
+ }
+
+
+-static int yaffs_UnlinkObject( yaffs_Object *obj)
++static int yaffs_UnlinkObject(yaffs_Object *obj)
+ {
+
+- if (obj && obj->unlinkAllowed) {
++ if (obj && obj->unlinkAllowed)
+ return yaffs_UnlinkWorker(obj);
+- }
+
+ return YAFFS_FAIL;
+
+ }
+-int yaffs_Unlink(yaffs_Object * dir, const YCHAR * name)
++int yaffs_Unlink(yaffs_Object *dir, const YCHAR *name)
+ {
+ yaffs_Object *obj;
+
+@@ -5272,8 +5309,8 @@ int yaffs_Unlink(yaffs_Object * dir, con
+
+ /*----------------------- Initialisation Scanning ---------------------- */
+
+-static void yaffs_HandleShadowedObject(yaffs_Device * dev, int objId,
+- int backwardScanning)
++static void yaffs_HandleShadowedObject(yaffs_Device *dev, int objId,
++ int backwardScanning)
+ {
+ yaffs_Object *obj;
+
+@@ -5286,9 +5323,8 @@ static void yaffs_HandleShadowedObject(y
+ /* Handle YAFFS2 case (backward scanning)
+ * If the shadowed object exists then ignore.
+ */
+- if (yaffs_FindObjectByNumber(dev, objId)) {
++ if (yaffs_FindObjectByNumber(dev, objId))
+ return;
+- }
+ }
+
+ /* Let's create it (if it does not exist) assuming it is a file so that it can do shrinking etc.
+@@ -5297,6 +5333,8 @@ static void yaffs_HandleShadowedObject(y
+ obj =
+ yaffs_FindOrCreateObjectByNumber(dev, objId,
+ YAFFS_OBJECT_TYPE_FILE);
++ if (!obj)
++ return;
+ yaffs_AddObjectToDirectory(dev->unlinkedDir, obj);
+ obj->variant.fileVariant.shrinkSize = 0;
+ obj->valid = 1; /* So that we don't read any other info for this file */
+@@ -5325,44 +5363,77 @@ static void yaffs_HardlinkFixup(yaffs_De
+ if (in) {
+ /* Add the hardlink pointers */
+ hl->variant.hardLinkVariant.equivalentObject = in;
+- list_add(&hl->hardLinks, &in->hardLinks);
++ ylist_add(&hl->hardLinks, &in->hardLinks);
+ } else {
+ /* Todo Need to report/handle this better.
+ * Got a problem... hardlink to a non-existent object
+ */
+ hl->variant.hardLinkVariant.equivalentObject = NULL;
+- INIT_LIST_HEAD(&hl->hardLinks);
++ YINIT_LIST_HEAD(&hl->hardLinks);
+
+ }
+-
+ }
++}
++
++
+
++
++
++static int ybicmp(const void *a, const void *b)
++{
++ register int aseq = ((yaffs_BlockIndex *)a)->seq;
++ register int bseq = ((yaffs_BlockIndex *)b)->seq;
++ register int ablock = ((yaffs_BlockIndex *)a)->block;
++ register int bblock = ((yaffs_BlockIndex *)b)->block;
++ if (aseq == bseq)
++ return ablock - bblock;
++ else
++ return aseq - bseq;
+ }
+
+
++struct yaffs_ShadowFixerStruct {
++ int objectId;
++ int shadowedId;
++ struct yaffs_ShadowFixerStruct *next;
++};
++
+
++static void yaffs_StripDeletedObjects(yaffs_Device *dev)
++{
++ /*
++ * Sort out state of unlinked and deleted objects after scanning.
++ */
++ struct ylist_head *i;
++ struct ylist_head *n;
++ yaffs_Object *l;
+
++ /* Soft delete all the unlinked files */
++ ylist_for_each_safe(i, n,
++ &dev->unlinkedDir->variant.directoryVariant.children) {
++ if (i) {
++ l = ylist_entry(i, yaffs_Object, siblings);
++ yaffs_DeleteObject(l);
++ }
++ }
+
+-static int ybicmp(const void *a, const void *b){
+- register int aseq = ((yaffs_BlockIndex *)a)->seq;
+- register int bseq = ((yaffs_BlockIndex *)b)->seq;
+- register int ablock = ((yaffs_BlockIndex *)a)->block;
+- register int bblock = ((yaffs_BlockIndex *)b)->block;
+- if( aseq == bseq )
+- return ablock - bblock;
+- else
+- return aseq - bseq;
++ ylist_for_each_safe(i, n,
++ &dev->deletedDir->variant.directoryVariant.children) {
++ if (i) {
++ l = ylist_entry(i, yaffs_Object, siblings);
++ yaffs_DeleteObject(l);
++ }
++ }
+
+ }
+
+-static int yaffs_Scan(yaffs_Device * dev)
++static int yaffs_Scan(yaffs_Device *dev)
+ {
+ yaffs_ExtendedTags tags;
+ int blk;
+ int blockIterator;
+ int startIterator;
+ int endIterator;
+- int nBlocksToScan = 0;
+ int result;
+
+ int chunk;
+@@ -5371,26 +5442,19 @@ static int yaffs_Scan(yaffs_Device * dev
+ yaffs_BlockState state;
+ yaffs_Object *hardList = NULL;
+ yaffs_BlockInfo *bi;
+- int sequenceNumber;
++ __u32 sequenceNumber;
+ yaffs_ObjectHeader *oh;
+ yaffs_Object *in;
+ yaffs_Object *parent;
+- int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
+
+ int alloc_failed = 0;
+
++ struct yaffs_ShadowFixerStruct *shadowFixerList = NULL;
++
+
+ __u8 *chunkData;
+
+- yaffs_BlockIndex *blockIndex = NULL;
+
+- if (dev->isYaffs2) {
+- T(YAFFS_TRACE_SCAN,
+- (TSTR("yaffs_Scan is not for YAFFS2!" TENDSTR)));
+- return YAFFS_FAIL;
+- }
+-
+- //TODO Throw all the yaffs2 stuuf out of yaffs_Scan since it is only for yaffs1 format.
+
+ T(YAFFS_TRACE_SCAN,
+ (TSTR("yaffs_Scan starts intstartblk %d intendblk %d..." TENDSTR),
+@@ -5400,12 +5464,6 @@ static int yaffs_Scan(yaffs_Device * dev
+
+ dev->sequenceNumber = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+- if (dev->isYaffs2) {
+- blockIndex = YMALLOC(nBlocks * sizeof(yaffs_BlockIndex));
+- if(!blockIndex)
+- return YAFFS_FAIL;
+- }
+-
+ /* Scan all the blocks to determine their state */
+ for (blk = dev->internalStartBlock; blk <= dev->internalEndBlock; blk++) {
+ bi = yaffs_GetBlockInfo(dev, blk);
+@@ -5418,6 +5476,9 @@ static int yaffs_Scan(yaffs_Device * dev
+ bi->blockState = state;
+ bi->sequenceNumber = sequenceNumber;
+
++ if (bi->sequenceNumber == YAFFS_SEQUENCE_BAD_BLOCK)
++ bi->blockState = state = YAFFS_BLOCK_STATE_DEAD;
++
+ T(YAFFS_TRACE_SCAN_DEBUG,
+ (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
+ state, sequenceNumber));
+@@ -5430,70 +5491,21 @@ static int yaffs_Scan(yaffs_Device * dev
+ (TSTR("Block empty " TENDSTR)));
+ dev->nErasedBlocks++;
+ dev->nFreeChunks += dev->nChunksPerBlock;
+- } else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+-
+- /* Determine the highest sequence number */
+- if (dev->isYaffs2 &&
+- sequenceNumber >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+- sequenceNumber < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
+-
+- blockIndex[nBlocksToScan].seq = sequenceNumber;
+- blockIndex[nBlocksToScan].block = blk;
+-
+- nBlocksToScan++;
+-
+- if (sequenceNumber >= dev->sequenceNumber) {
+- dev->sequenceNumber = sequenceNumber;
+- }
+- } else if (dev->isYaffs2) {
+- /* TODO: Nasty sequence number! */
+- T(YAFFS_TRACE_SCAN,
+- (TSTR
+- ("Block scanning block %d has bad sequence number %d"
+- TENDSTR), blk, sequenceNumber));
+-
+- }
+ }
+ }
+
+- /* Sort the blocks
+- * Dungy old bubble sort for now...
+- */
+- if (dev->isYaffs2) {
+- yaffs_BlockIndex temp;
+- int i;
+- int j;
+-
+- for (i = 0; i < nBlocksToScan; i++)
+- for (j = i + 1; j < nBlocksToScan; j++)
+- if (blockIndex[i].seq > blockIndex[j].seq) {
+- temp = blockIndex[j];
+- blockIndex[j] = blockIndex[i];
+- blockIndex[i] = temp;
+- }
+- }
+-
+- /* Now scan the blocks looking at the data. */
+- if (dev->isYaffs2) {
+- startIterator = 0;
+- endIterator = nBlocksToScan - 1;
+- T(YAFFS_TRACE_SCAN_DEBUG,
+- (TSTR("%d blocks to be scanned" TENDSTR), nBlocksToScan));
+- } else {
+- startIterator = dev->internalStartBlock;
+- endIterator = dev->internalEndBlock;
+- }
++ startIterator = dev->internalStartBlock;
++ endIterator = dev->internalEndBlock;
+
+ /* For each block.... */
+ for (blockIterator = startIterator; !alloc_failed && blockIterator <= endIterator;
+ blockIterator++) {
+
+- if (dev->isYaffs2) {
+- /* get the block to scan in the correct order */
+- blk = blockIndex[blockIterator].block;
+- } else {
+- blk = blockIterator;
+- }
++ YYIELD();
++
++ YYIELD();
++
++ blk = blockIterator;
+
+ bi = yaffs_GetBlockInfo(dev, blk);
+ state = bi->blockState;
+@@ -5511,7 +5523,7 @@ static int yaffs_Scan(yaffs_Device * dev
+
+ /* Let's have a good look at this chunk... */
+
+- if (!dev->isYaffs2 && tags.chunkDeleted) {
++ if (tags.eccResult == YAFFS_ECC_RESULT_UNFIXED || tags.chunkDeleted) {
+ /* YAFFS1 only...
+ * A deleted chunk
+ */
+@@ -5540,18 +5552,6 @@ static int yaffs_Scan(yaffs_Device * dev
+ dev->allocationBlockFinder = blk;
+ /* Set it to here to encourage the allocator to go forth from here. */
+
+- /* Yaffs2 sanity check:
+- * This should be the one with the highest sequence number
+- */
+- if (dev->isYaffs2
+- && (dev->sequenceNumber !=
+- bi->sequenceNumber)) {
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR
+- ("yaffs: Allocation block %d was not highest sequence id:"
+- " block seq = %d, dev seq = %d"
+- TENDSTR), blk,bi->sequenceNumber,dev->sequenceNumber));
+- }
+ }
+
+ dev->nFreeChunks += (dev->nChunksPerBlock - c);
+@@ -5570,11 +5570,11 @@ static int yaffs_Scan(yaffs_Device * dev
+ * the same chunkId).
+ */
+
+- if(!in)
++ if (!in)
+ alloc_failed = 1;
+
+- if(in){
+- if(!yaffs_PutChunkIntoFile(in, tags.chunkId, chunk,1))
++ if (in) {
++ if (!yaffs_PutChunkIntoFile(in, tags.chunkId, chunk, 1))
+ alloc_failed = 1;
+ }
+
+@@ -5617,7 +5617,7 @@ static int yaffs_Scan(yaffs_Device * dev
+ * deleted, and worse still it has changed type. Delete the old object.
+ */
+
+- yaffs_DestroyObject(in);
++ yaffs_DeleteObject(in);
+
+ in = 0;
+ }
+@@ -5627,14 +5627,20 @@ static int yaffs_Scan(yaffs_Device * dev
+ objectId,
+ oh->type);
+
+- if(!in)
++ if (!in)
+ alloc_failed = 1;
+
+ if (in && oh->shadowsObject > 0) {
+- yaffs_HandleShadowedObject(dev,
+- oh->
+- shadowsObject,
+- 0);
++
++ struct yaffs_ShadowFixerStruct *fixer;
++ fixer = YMALLOC(sizeof(struct yaffs_ShadowFixerStruct));
++ if (fixer) {
++ fixer->next = shadowFixerList;
++ shadowFixerList = fixer;
++ fixer->objectId = tags.objectId;
++ fixer->shadowedId = oh->shadowsObject;
++ }
++
+ }
+
+ if (in && in->valid) {
+@@ -5643,12 +5649,10 @@ static int yaffs_Scan(yaffs_Device * dev
+ unsigned existingSerial = in->serial;
+ unsigned newSerial = tags.serialNumber;
+
+- if (dev->isYaffs2 ||
+- ((existingSerial + 1) & 3) ==
+- newSerial) {
++ if (((existingSerial + 1) & 3) == newSerial) {
+ /* Use new one - destroy the existing one */
+ yaffs_DeleteChunk(dev,
+- in->chunkId,
++ in->hdrChunk,
+ 1, __LINE__);
+ in->valid = 0;
+ } else {
+@@ -5681,7 +5685,8 @@ static int yaffs_Scan(yaffs_Device * dev
+ in->yst_ctime = oh->yst_ctime;
+ in->yst_rdev = oh->yst_rdev;
+ #endif
+- in->chunkId = chunk;
++ in->hdrChunk = chunk;
++ in->serial = tags.serialNumber;
+
+ } else if (in && !in->valid) {
+ /* we need to load this info */
+@@ -5705,7 +5710,8 @@ static int yaffs_Scan(yaffs_Device * dev
+ in->yst_ctime = oh->yst_ctime;
+ in->yst_rdev = oh->yst_rdev;
+ #endif
+- in->chunkId = chunk;
++ in->hdrChunk = chunk;
++ in->serial = tags.serialNumber;
+
+ yaffs_SetObjectName(in, oh->name);
+ in->dirty = 0;
+@@ -5718,25 +5724,25 @@ static int yaffs_Scan(yaffs_Device * dev
+ yaffs_FindOrCreateObjectByNumber
+ (dev, oh->parentObjectId,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+- if (parent->variantType ==
++ if (!parent)
++ alloc_failed = 1;
++ if (parent && parent->variantType ==
+ YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variantType =
+- YAFFS_OBJECT_TYPE_DIRECTORY;
+- INIT_LIST_HEAD(&parent->variant.
+- directoryVariant.
+- children);
+- } else if (parent->variantType !=
+- YAFFS_OBJECT_TYPE_DIRECTORY)
+- {
++ YAFFS_OBJECT_TYPE_DIRECTORY;
++ YINIT_LIST_HEAD(&parent->variant.
++ directoryVariant.
++ children);
++ } else if (!parent || parent->variantType !=
++ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, another problem....
+ * We're trying to use a non-directory as a directory
+ */
+
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+- ("yaffs tragedy: attempting to use non-directory as"
+- " a directory in scan. Put in lost+found."
++ ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ TENDSTR)));
+ parent = dev->lostNFoundDir;
+ }
+@@ -5760,15 +5766,6 @@ static int yaffs_Scan(yaffs_Device * dev
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+- if (dev->isYaffs2
+- && oh->isShrink) {
+- /* Prune back the shrunken chunks */
+- yaffs_PruneResizedChunks
+- (in, oh->fileSize);
+- /* Mark the block as having a shrinkHeader */
+- bi->hasShrinkHeader = 1;
+- }
+-
+ if (dev->useHeaderFileSize)
+
+ in->variant.fileVariant.
+@@ -5778,11 +5775,11 @@ static int yaffs_Scan(yaffs_Device * dev
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.hardLinkVariant.
+- equivalentObjectId =
+- oh->equivalentObjectId;
++ equivalentObjectId =
++ oh->equivalentObjectId;
+ in->hardLinks.next =
+- (struct list_head *)
+- hardList;
++ (struct ylist_head *)
++ hardList;
+ hardList = in;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+@@ -5794,15 +5791,17 @@ static int yaffs_Scan(yaffs_Device * dev
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symLinkVariant.alias =
+ yaffs_CloneString(oh->alias);
+- if(!in->variant.symLinkVariant.alias)
++ if (!in->variant.symLinkVariant.alias)
+ alloc_failed = 1;
+ break;
+ }
+
++/*
+ if (parent == dev->deletedDir) {
+ yaffs_DestroyObject(in);
+ bi->hasShrinkHeader = 1;
+ }
++*/
+ }
+ }
+ }
+@@ -5823,10 +5822,6 @@ static int yaffs_Scan(yaffs_Device * dev
+
+ }
+
+- if (blockIndex) {
+- YFREE(blockIndex);
+- }
+-
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+@@ -5834,32 +5829,36 @@ static int yaffs_Scan(yaffs_Device * dev
+ * hardlinks.
+ */
+
+- yaffs_HardlinkFixup(dev,hardList);
++ yaffs_HardlinkFixup(dev, hardList);
+
+- /* Handle the unlinked files. Since they were left in an unlinked state we should
+- * just delete them.
+- */
++ /* Fix up any shadowed objects */
+ {
+- struct list_head *i;
+- struct list_head *n;
++ struct yaffs_ShadowFixerStruct *fixer;
++ yaffs_Object *obj;
+
+- yaffs_Object *l;
+- /* Soft delete all the unlinked files */
+- list_for_each_safe(i, n,
+- &dev->unlinkedDir->variant.directoryVariant.
+- children) {
+- if (i) {
+- l = list_entry(i, yaffs_Object, siblings);
+- yaffs_DestroyObject(l);
+- }
++ while (shadowFixerList) {
++ fixer = shadowFixerList;
++ shadowFixerList = fixer->next;
++ /* Complete the rename transaction by deleting the shadowed object
++ * then setting the object header to unshadowed.
++ */
++ obj = yaffs_FindObjectByNumber(dev, fixer->shadowedId);
++ if (obj)
++ yaffs_DeleteObject(obj);
++
++ obj = yaffs_FindObjectByNumber(dev, fixer->objectId);
++
++ if (obj)
++ yaffs_UpdateObjectHeader(obj, NULL, 1, 0, 0);
++
++ YFREE(fixer);
+ }
+ }
+
+ yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
+
+- if(alloc_failed){
++ if (alloc_failed)
+ return YAFFS_FAIL;
+- }
+
+ T(YAFFS_TRACE_SCAN, (TSTR("yaffs_Scan ends" TENDSTR)));
+
+@@ -5871,25 +5870,27 @@ static void yaffs_CheckObjectDetailsLoad
+ {
+ __u8 *chunkData;
+ yaffs_ObjectHeader *oh;
+- yaffs_Device *dev = in->myDev;
++ yaffs_Device *dev;
+ yaffs_ExtendedTags tags;
+ int result;
+ int alloc_failed = 0;
+
+- if(!in)
++ if (!in)
+ return;
+
++ dev = in->myDev;
++
+ #if 0
+- T(YAFFS_TRACE_SCAN,(TSTR("details for object %d %s loaded" TENDSTR),
++ T(YAFFS_TRACE_SCAN, (TSTR("details for object %d %s loaded" TENDSTR),
+ in->objectId,
+ in->lazyLoaded ? "not yet" : "already"));
+ #endif
+
+- if(in->lazyLoaded){
++ if (in->lazyLoaded && in->hdrChunk > 0) {
+ in->lazyLoaded = 0;
+ chunkData = yaffs_GetTempBuffer(dev, __LINE__);
+
+- result = yaffs_ReadChunkWithTagsFromNAND(dev,in->chunkId,chunkData,&tags);
++ result = yaffs_ReadChunkWithTagsFromNAND(dev, in->hdrChunk, chunkData, &tags);
+ oh = (yaffs_ObjectHeader *) chunkData;
+
+ in->yst_mode = oh->yst_mode;
+@@ -5911,18 +5912,18 @@ static void yaffs_CheckObjectDetailsLoad
+ #endif
+ yaffs_SetObjectName(in, oh->name);
+
+- if(in->variantType == YAFFS_OBJECT_TYPE_SYMLINK){
+- in->variant.symLinkVariant.alias =
++ if (in->variantType == YAFFS_OBJECT_TYPE_SYMLINK) {
++ in->variant.symLinkVariant.alias =
+ yaffs_CloneString(oh->alias);
+- if(!in->variant.symLinkVariant.alias)
++ if (!in->variant.symLinkVariant.alias)
+ alloc_failed = 1; /* Not returned to caller */
+ }
+
+- yaffs_ReleaseTempBuffer(dev,chunkData, __LINE__);
++ yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
+ }
+ }
+
+-static int yaffs_ScanBackwards(yaffs_Device * dev)
++static int yaffs_ScanBackwards(yaffs_Device *dev)
+ {
+ yaffs_ExtendedTags tags;
+ int blk;
+@@ -5938,7 +5939,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ yaffs_BlockState state;
+ yaffs_Object *hardList = NULL;
+ yaffs_BlockInfo *bi;
+- int sequenceNumber;
++ __u32 sequenceNumber;
+ yaffs_ObjectHeader *oh;
+ yaffs_Object *in;
+ yaffs_Object *parent;
+@@ -5972,12 +5973,12 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ blockIndex = YMALLOC(nBlocks * sizeof(yaffs_BlockIndex));
+
+- if(!blockIndex) {
++ if (!blockIndex) {
+ blockIndex = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockIndex));
+ altBlockIndex = 1;
+ }
+
+- if(!blockIndex) {
++ if (!blockIndex) {
+ T(YAFFS_TRACE_SCAN,
+ (TSTR("yaffs_Scan() could not allocate block index!" TENDSTR)));
+ return YAFFS_FAIL;
+@@ -5999,15 +6000,17 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ bi->blockState = state;
+ bi->sequenceNumber = sequenceNumber;
+
+- if(bi->sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA)
++ if (bi->sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ bi->blockState = state = YAFFS_BLOCK_STATE_CHECKPOINT;
++ if (bi->sequenceNumber == YAFFS_SEQUENCE_BAD_BLOCK)
++ bi->blockState = state = YAFFS_BLOCK_STATE_DEAD;
+
+ T(YAFFS_TRACE_SCAN_DEBUG,
+ (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
+ state, sequenceNumber));
+
+
+- if(state == YAFFS_BLOCK_STATE_CHECKPOINT){
++ if (state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ dev->blocksInCheckpoint++;
+
+ } else if (state == YAFFS_BLOCK_STATE_DEAD) {
+@@ -6021,8 +6024,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ } else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+
+ /* Determine the highest sequence number */
+- if (dev->isYaffs2 &&
+- sequenceNumber >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
++ if (sequenceNumber >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+ sequenceNumber < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
+
+ blockIndex[nBlocksToScan].seq = sequenceNumber;
+@@ -6030,10 +6032,9 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ nBlocksToScan++;
+
+- if (sequenceNumber >= dev->sequenceNumber) {
++ if (sequenceNumber >= dev->sequenceNumber)
+ dev->sequenceNumber = sequenceNumber;
+- }
+- } else if (dev->isYaffs2) {
++ } else {
+ /* TODO: Nasty sequence number! */
+ T(YAFFS_TRACE_SCAN,
+ (TSTR
+@@ -6053,11 +6054,13 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ /* Sort the blocks */
+ #ifndef CONFIG_YAFFS_USE_OWN_SORT
+- yaffs_qsort(blockIndex, nBlocksToScan,
+- sizeof(yaffs_BlockIndex), ybicmp);
++ {
++ /* Use qsort now. */
++ yaffs_qsort(blockIndex, nBlocksToScan, sizeof(yaffs_BlockIndex), ybicmp);
++ }
+ #else
+ {
+- /* Dungy old bubble sort... */
++ /* Dungy old bubble sort... */
+
+ yaffs_BlockIndex temp;
+ int i;
+@@ -6075,7 +6078,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ YYIELD();
+
+- T(YAFFS_TRACE_SCAN, (TSTR("...done" TENDSTR)));
++ T(YAFFS_TRACE_SCAN, (TSTR("...done" TENDSTR)));
+
+ /* Now scan the blocks looking at the data. */
+ startIterator = 0;
+@@ -6085,10 +6088,10 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ /* For each block.... backwards */
+ for (blockIterator = endIterator; !alloc_failed && blockIterator >= startIterator;
+- blockIterator--) {
+- /* Cooperative multitasking! This loop can run for so
++ blockIterator--) {
++ /* Cooperative multitasking! This loop can run for so
+ long that watchdog timers expire. */
+- YYIELD();
++ YYIELD();
+
+ /* get the block to scan in the correct order */
+ blk = blockIndex[blockIterator].block;
+@@ -6127,10 +6130,8 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ * this is the one being allocated from
+ */
+
+- if(foundChunksInBlock)
+- {
++ if (foundChunksInBlock) {
+ /* This is a chunk that was skipped due to failing the erased check */
+-
+ } else if (c == 0) {
+ /* We're looking at the first chunk in the block so the block is unused */
+ state = YAFFS_BLOCK_STATE_EMPTY;
+@@ -6138,7 +6139,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ } else {
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+ state == YAFFS_BLOCK_STATE_ALLOCATING) {
+- if(dev->sequenceNumber == bi->sequenceNumber) {
++ if (dev->sequenceNumber == bi->sequenceNumber) {
+ /* this is the block being allocated from */
+
+ T(YAFFS_TRACE_SCAN,
+@@ -6150,27 +6151,31 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ dev->allocationBlock = blk;
+ dev->allocationPage = c;
+ dev->allocationBlockFinder = blk;
+- }
+- else {
++ } else {
+ /* This is a partially written block that is not
+ * the current allocation block. This block must have
+ * had a write failure, so set up for retirement.
+ */
+
+- bi->needsRetiring = 1;
++ /* bi->needsRetiring = 1; ??? TODO */
+ bi->gcPrioritise = 1;
+
+ T(YAFFS_TRACE_ALWAYS,
+- (TSTR("Partially written block %d being set for retirement" TENDSTR),
++ (TSTR("Partially written block %d detected" TENDSTR),
+ blk));
+ }
+-
+ }
+-
+ }
+
+ dev->nFreeChunks++;
+
++ } else if (tags.eccResult == YAFFS_ECC_RESULT_UNFIXED) {
++ T(YAFFS_TRACE_SCAN,
++ (TSTR(" Unfixed ECC in chunk(%d:%d), chunk ignored"TENDSTR),
++ blk, c));
++
++ dev->nFreeChunks++;
++
+ } else if (tags.chunkId > 0) {
+ /* chunkId > 0 so it is a data chunk... */
+ unsigned int endpos;
+@@ -6187,7 +6192,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ tags.
+ objectId,
+ YAFFS_OBJECT_TYPE_FILE);
+- if(!in){
++ if (!in) {
+ /* Out of memory */
+ alloc_failed = 1;
+ }
+@@ -6197,8 +6202,8 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ && chunkBase <
+ in->variant.fileVariant.shrinkSize) {
+ /* This has not been invalidated by a resize */
+- if(!yaffs_PutChunkIntoFile(in, tags.chunkId,
+- chunk, -1)){
++ if (!yaffs_PutChunkIntoFile(in, tags.chunkId,
++ chunk, -1)) {
+ alloc_failed = 1;
+ }
+
+@@ -6221,7 +6226,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ scannedFileSize;
+ }
+
+- } else if(in) {
++ } else if (in) {
+ /* This chunk has been invalidated by a resize, so delete */
+ yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
+
+@@ -6242,6 +6247,8 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ in = yaffs_FindOrCreateObjectByNumber
+ (dev, tags.objectId,
+ tags.extraObjectType);
++ if (!in)
++ alloc_failed = 1;
+ }
+
+ if (!in ||
+@@ -6251,8 +6258,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ tags.extraShadows ||
+ (!in->valid &&
+ (tags.objectId == YAFFS_OBJECTID_ROOT ||
+- tags.objectId == YAFFS_OBJECTID_LOSTNFOUND))
+- ) {
++ tags.objectId == YAFFS_OBJECTID_LOSTNFOUND))) {
+
+ /* If we don't have valid info then we need to read the chunk
+ * TODO In future we can probably defer reading the chunk and
+@@ -6266,8 +6272,17 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ oh = (yaffs_ObjectHeader *) chunkData;
+
+- if (!in)
++ if (dev->inbandTags) {
++ /* Fix up the header if they got corrupted by inband tags */
++ oh->shadowsObject = oh->inbandShadowsObject;
++ oh->isShrink = oh->inbandIsShrink;
++ }
++
++ if (!in) {
+ in = yaffs_FindOrCreateObjectByNumber(dev, tags.objectId, oh->type);
++ if (!in)
++ alloc_failed = 1;
++ }
+
+ }
+
+@@ -6275,10 +6290,9 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ /* TODO Hoosterman we have a problem! */
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+- ("yaffs tragedy: Could not make object for object %d "
+- "at chunk %d during scan"
++ ("yaffs tragedy: Could not make object for object %d at chunk %d during scan"
+ TENDSTR), tags.objectId, chunk));
+-
++ continue;
+ }
+
+ if (in->valid) {
+@@ -6289,10 +6303,9 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ if ((in->variantType == YAFFS_OBJECT_TYPE_FILE) &&
+ ((oh &&
+- oh-> type == YAFFS_OBJECT_TYPE_FILE)||
++ oh->type == YAFFS_OBJECT_TYPE_FILE) ||
+ (tags.extraHeaderInfoAvailable &&
+- tags.extraObjectType == YAFFS_OBJECT_TYPE_FILE))
+- ) {
++ tags.extraObjectType == YAFFS_OBJECT_TYPE_FILE))) {
+ __u32 thisSize =
+ (oh) ? oh->fileSize : tags.
+ extraFileLength;
+@@ -6300,7 +6313,9 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ (oh) ? oh->
+ parentObjectId : tags.
+ extraParentObjectId;
+- unsigned isShrink =
++
++
++ isShrink =
+ (oh) ? oh->isShrink : tags.
+ extraIsShrinkHeader;
+
+@@ -6323,9 +6338,8 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ thisSize;
+ }
+
+- if (isShrink) {
++ if (isShrink)
+ bi->hasShrinkHeader = 1;
+- }
+
+ }
+ /* Use existing - destroy this one. */
+@@ -6333,6 +6347,17 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ }
+
++ if (!in->valid && in->variantType !=
++ (oh ? oh->type : tags.extraObjectType))
++ T(YAFFS_TRACE_ERROR, (
++ TSTR("yaffs tragedy: Bad object type, "
++ TCONT("%d != %d, for object %d at chunk ")
++ TCONT("%d during scan")
++ TENDSTR), oh ?
++ oh->type : tags.extraObjectType,
++ in->variantType, tags.objectId,
++ chunk));
++
+ if (!in->valid &&
+ (tags.objectId == YAFFS_OBJECTID_ROOT ||
+ tags.objectId ==
+@@ -6340,7 +6365,7 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ /* We only load some info, don't fiddle with directory structure */
+ in->valid = 1;
+
+- if(oh) {
++ if (oh) {
+ in->variantType = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+@@ -6365,15 +6390,15 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ in->lazyLoaded = 1;
+ }
+
+- in->chunkId = chunk;
++ in->hdrChunk = chunk;
+
+ } else if (!in->valid) {
+ /* we need to load this info */
+
+ in->valid = 1;
+- in->chunkId = chunk;
++ in->hdrChunk = chunk;
+
+- if(oh) {
++ if (oh) {
+ in->variantType = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+@@ -6403,20 +6428,19 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ yaffs_SetObjectName(in, oh->name);
+ parent =
+ yaffs_FindOrCreateObjectByNumber
+- (dev, oh->parentObjectId,
+- YAFFS_OBJECT_TYPE_DIRECTORY);
++ (dev, oh->parentObjectId,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
+
+ fileSize = oh->fileSize;
+- isShrink = oh->isShrink;
++ isShrink = oh->isShrink;
+ equivalentObjectId = oh->equivalentObjectId;
+
+- }
+- else {
++ } else {
+ in->variantType = tags.extraObjectType;
+ parent =
+ yaffs_FindOrCreateObjectByNumber
+- (dev, tags.extraParentObjectId,
+- YAFFS_OBJECT_TYPE_DIRECTORY);
++ (dev, tags.extraParentObjectId,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
+ fileSize = tags.extraFileLength;
+ isShrink = tags.extraIsShrinkHeader;
+ equivalentObjectId = tags.extraEquivalentObjectId;
+@@ -6425,29 +6449,30 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ }
+ in->dirty = 0;
+
++ if (!parent)
++ alloc_failed = 1;
++
+ /* directory stuff...
+ * hook up to parent
+ */
+
+- if (parent->variantType ==
++ if (parent && parent->variantType ==
+ YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variantType =
+- YAFFS_OBJECT_TYPE_DIRECTORY;
+- INIT_LIST_HEAD(&parent->variant.
+- directoryVariant.
+- children);
+- } else if (parent->variantType !=
+- YAFFS_OBJECT_TYPE_DIRECTORY)
+- {
++ YAFFS_OBJECT_TYPE_DIRECTORY;
++ YINIT_LIST_HEAD(&parent->variant.
++ directoryVariant.
++ children);
++ } else if (!parent || parent->variantType !=
++ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, another problem....
+ * We're trying to use a non-directory as a directory
+ */
+
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+- ("yaffs tragedy: attempting to use non-directory as"
+- " a directory in scan. Put in lost+found."
++ ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ TENDSTR)));
+ parent = dev->lostNFoundDir;
+ }
+@@ -6494,12 +6519,12 @@ static int yaffs_ScanBackwards(yaffs_Dev
+
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+- if(!itsUnlinked) {
+- in->variant.hardLinkVariant.equivalentObjectId =
+- equivalentObjectId;
+- in->hardLinks.next =
+- (struct list_head *) hardList;
+- hardList = in;
++ if (!itsUnlinked) {
++ in->variant.hardLinkVariant.equivalentObjectId =
++ equivalentObjectId;
++ in->hardLinks.next =
++ (struct ylist_head *) hardList;
++ hardList = in;
+ }
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+@@ -6509,12 +6534,11 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+- if(oh){
+- in->variant.symLinkVariant.alias =
+- yaffs_CloneString(oh->
+- alias);
+- if(!in->variant.symLinkVariant.alias)
+- alloc_failed = 1;
++ if (oh) {
++ in->variant.symLinkVariant.alias =
++ yaffs_CloneString(oh->alias);
++ if (!in->variant.symLinkVariant.alias)
++ alloc_failed = 1;
+ }
+ break;
+ }
+@@ -6551,75 +6575,129 @@ static int yaffs_ScanBackwards(yaffs_Dev
+ * We should now have scanned all the objects, now it's time to add these
+ * hardlinks.
+ */
+- yaffs_HardlinkFixup(dev,hardList);
++ yaffs_HardlinkFixup(dev, hardList);
+
+
+- /*
+- * Sort out state of unlinked and deleted objects.
+- */
+- {
+- struct list_head *i;
+- struct list_head *n;
++ yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
+
+- yaffs_Object *l;
++ if (alloc_failed)
++ return YAFFS_FAIL;
+
+- /* Soft delete all the unlinked files */
+- list_for_each_safe(i, n,
+- &dev->unlinkedDir->variant.directoryVariant.
+- children) {
+- if (i) {
+- l = list_entry(i, yaffs_Object, siblings);
+- yaffs_DestroyObject(l);
+- }
+- }
++ T(YAFFS_TRACE_SCAN, (TSTR("yaffs_ScanBackwards ends" TENDSTR)));
+
+- /* Soft delete all the deletedDir files */
+- list_for_each_safe(i, n,
+- &dev->deletedDir->variant.directoryVariant.
+- children) {
+- if (i) {
+- l = list_entry(i, yaffs_Object, siblings);
+- yaffs_DestroyObject(l);
++ return YAFFS_OK;
++}
+
+- }
++/*------------------------------ Directory Functions ----------------------------- */
++
++static void yaffs_VerifyObjectInDirectory(yaffs_Object *obj)
++{
++ struct ylist_head *lh;
++ yaffs_Object *listObj;
++
++ int count = 0;
++
++ if (!obj) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("No object to verify" TENDSTR)));
++ YBUG();
++ return;
++ }
++
++ if (yaffs_SkipVerification(obj->myDev))
++ return;
++
++ if (!obj->parent) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Object does not have parent" TENDSTR)));
++ YBUG();
++ return;
++ }
++
++ if (obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Parent is not directory" TENDSTR)));
++ YBUG();
++ }
++
++ /* Iterate through the objects in each hash entry */
++
++ ylist_for_each(lh, &obj->parent->variant.directoryVariant.children) {
++ if (lh) {
++ listObj = ylist_entry(lh, yaffs_Object, siblings);
++ yaffs_VerifyObject(listObj);
++ if (obj == listObj)
++ count++;
+ }
++ }
++
++ if (count != 1) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory %d times" TENDSTR), count));
++ YBUG();
+ }
++}
+
+- yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
++static void yaffs_VerifyDirectory(yaffs_Object *directory)
++{
++ struct ylist_head *lh;
++ yaffs_Object *listObj;
+
+- if(alloc_failed){
+- return YAFFS_FAIL;
++ if (!directory) {
++ YBUG();
++ return;
+ }
+
+- T(YAFFS_TRACE_SCAN, (TSTR("yaffs_ScanBackwards ends" TENDSTR)));
++ if (yaffs_SkipFullVerification(directory->myDev))
++ return;
+
+- return YAFFS_OK;
++ if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Directory has wrong type: %d" TENDSTR), directory->variantType));
++ YBUG();
++ }
++
++ /* Iterate through the objects in each hash entry */
++
++ ylist_for_each(lh, &directory->variant.directoryVariant.children) {
++ if (lh) {
++ listObj = ylist_entry(lh, yaffs_Object, siblings);
++ if (listObj->parent != directory) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory list has wrong parent %p" TENDSTR), listObj->parent));
++ YBUG();
++ }
++ yaffs_VerifyObjectInDirectory(listObj);
++ }
++ }
+ }
+
+-/*------------------------------ Directory Functions ----------------------------- */
+
+-static void yaffs_RemoveObjectFromDirectory(yaffs_Object * obj)
++static void yaffs_RemoveObjectFromDirectory(yaffs_Object *obj)
+ {
+ yaffs_Device *dev = obj->myDev;
++ yaffs_Object *parent;
++
++ yaffs_VerifyObjectInDirectory(obj);
++ parent = obj->parent;
++
++ yaffs_VerifyDirectory(parent);
+
+- if(dev && dev->removeObjectCallback)
++ if (dev && dev->removeObjectCallback)
+ dev->removeObjectCallback(obj);
+
+- list_del_init(&obj->siblings);
++
++ ylist_del_init(&obj->siblings);
+ obj->parent = NULL;
++
++ yaffs_VerifyDirectory(parent);
+ }
+
+
+-static void yaffs_AddObjectToDirectory(yaffs_Object * directory,
+- yaffs_Object * obj)
++static void yaffs_AddObjectToDirectory(yaffs_Object *directory,
++ yaffs_Object *obj)
+ {
+-
+ if (!directory) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("tragedy: Trying to add an object to a null pointer directory"
+ TENDSTR)));
+ YBUG();
++ return;
+ }
+ if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+@@ -6631,37 +6709,42 @@ static void yaffs_AddObjectToDirectory(y
+
+ if (obj->siblings.prev == NULL) {
+ /* Not initialised */
+- INIT_LIST_HEAD(&obj->siblings);
+-
+- } else if (!list_empty(&obj->siblings)) {
+- /* If it is holed up somewhere else, un hook it */
+- yaffs_RemoveObjectFromDirectory(obj);
++ YBUG();
+ }
++
++
++ yaffs_VerifyDirectory(directory);
++
++ yaffs_RemoveObjectFromDirectory(obj);
++
++
+ /* Now add it */
+- list_add(&obj->siblings, &directory->variant.directoryVariant.children);
++ ylist_add(&obj->siblings, &directory->variant.directoryVariant.children);
+ obj->parent = directory;
+
+ if (directory == obj->myDev->unlinkedDir
+- || directory == obj->myDev->deletedDir) {
++ || directory == obj->myDev->deletedDir) {
+ obj->unlinked = 1;
+ obj->myDev->nUnlinkedFiles++;
+ obj->renameAllowed = 0;
+ }
++
++ yaffs_VerifyDirectory(directory);
++ yaffs_VerifyObjectInDirectory(obj);
+ }
+
+-yaffs_Object *yaffs_FindObjectByName(yaffs_Object * directory,
+- const YCHAR * name)
++yaffs_Object *yaffs_FindObjectByName(yaffs_Object *directory,
++ const YCHAR *name)
+ {
+ int sum;
+
+- struct list_head *i;
++ struct ylist_head *i;
+ YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
+
+ yaffs_Object *l;
+
+- if (!name) {
++ if (!name)
+ return NULL;
+- }
+
+ if (!directory) {
+ T(YAFFS_TRACE_ALWAYS,
+@@ -6669,6 +6752,7 @@ yaffs_Object *yaffs_FindObjectByName(yaf
+ ("tragedy: yaffs_FindObjectByName: null pointer directory"
+ TENDSTR)));
+ YBUG();
++ return NULL;
+ }
+ if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+@@ -6679,28 +6763,27 @@ yaffs_Object *yaffs_FindObjectByName(yaf
+
+ sum = yaffs_CalcNameSum(name);
+
+- list_for_each(i, &directory->variant.directoryVariant.children) {
++ ylist_for_each(i, &directory->variant.directoryVariant.children) {
+ if (i) {
+- l = list_entry(i, yaffs_Object, siblings);
++ l = ylist_entry(i, yaffs_Object, siblings);
++
++ if (l->parent != directory)
++ YBUG();
+
+ yaffs_CheckObjectDetailsLoaded(l);
+
+ /* Special case for lost-n-found */
+ if (l->objectId == YAFFS_OBJECTID_LOSTNFOUND) {
+- if (yaffs_strcmp(name, YAFFS_LOSTNFOUND_NAME) == 0) {
++ if (yaffs_strcmp(name, YAFFS_LOSTNFOUND_NAME) == 0)
+ return l;
+- }
+- } else if (yaffs_SumCompare(l->sum, sum) || l->chunkId <= 0)
+- {
+- /* LostnFound cunk called Objxxx
++ } else if (yaffs_SumCompare(l->sum, sum) || l->hdrChunk <= 0) {
++ /* LostnFound chunk called Objxxx
+ * Do a real check
+ */
+ yaffs_GetObjectName(l, buffer,
+ YAFFS_MAX_NAME_LENGTH);
+- if (yaffs_strncmp(name, buffer,YAFFS_MAX_NAME_LENGTH) == 0) {
++ if (yaffs_strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH) == 0)
+ return l;
+- }
+-
+ }
+ }
+ }
+@@ -6710,10 +6793,10 @@ yaffs_Object *yaffs_FindObjectByName(yaf
+
+
+ #if 0
+-int yaffs_ApplyToDirectoryChildren(yaffs_Object * theDir,
+- int (*fn) (yaffs_Object *))
++int yaffs_ApplyToDirectoryChildren(yaffs_Object *theDir,
++ int (*fn) (yaffs_Object *))
+ {
+- struct list_head *i;
++ struct ylist_head *i;
+ yaffs_Object *l;
+
+ if (!theDir) {
+@@ -6722,20 +6805,21 @@ int yaffs_ApplyToDirectoryChildren(yaffs
+ ("tragedy: yaffs_FindObjectByName: null pointer directory"
+ TENDSTR)));
+ YBUG();
++ return YAFFS_FAIL;
+ }
+ if (theDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("tragedy: yaffs_FindObjectByName: non-directory" TENDSTR)));
+ YBUG();
++ return YAFFS_FAIL;
+ }
+
+- list_for_each(i, &theDir->variant.directoryVariant.children) {
++ ylist_for_each(i, &theDir->variant.directoryVariant.children) {
+ if (i) {
+- l = list_entry(i, yaffs_Object, siblings);
+- if (l && !fn(l)) {
++ l = ylist_entry(i, yaffs_Object, siblings);
++ if (l && !fn(l))
+ return YAFFS_FAIL;
+- }
+ }
+ }
+
+@@ -6748,7 +6832,7 @@ int yaffs_ApplyToDirectoryChildren(yaffs
+ * actual object.
+ */
+
+-yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object * obj)
++yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object *obj)
+ {
+ if (obj && obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
+ /* We want the object id of the equivalent object, not this one */
+@@ -6756,10 +6840,9 @@ yaffs_Object *yaffs_GetEquivalentObject(
+ yaffs_CheckObjectDetailsLoaded(obj);
+ }
+ return obj;
+-
+ }
+
+-int yaffs_GetObjectName(yaffs_Object * obj, YCHAR * name, int buffSize)
++int yaffs_GetObjectName(yaffs_Object *obj, YCHAR *name, int buffSize)
+ {
+ memset(name, 0, buffSize * sizeof(YCHAR));
+
+@@ -6767,18 +6850,26 @@ int yaffs_GetObjectName(yaffs_Object * o
+
+ if (obj->objectId == YAFFS_OBJECTID_LOSTNFOUND) {
+ yaffs_strncpy(name, YAFFS_LOSTNFOUND_NAME, buffSize - 1);
+- } else if (obj->chunkId <= 0) {
++ } else if (obj->hdrChunk <= 0) {
+ YCHAR locName[20];
++ YCHAR numString[20];
++ YCHAR *x = &numString[19];
++ unsigned v = obj->objectId;
++ numString[19] = 0;
++ while (v > 0) {
++ x--;
++ *x = '0' + (v % 10);
++ v /= 10;
++ }
+ /* make up a name */
+- yaffs_sprintf(locName, _Y("%s%d"), YAFFS_LOSTNFOUND_PREFIX,
+- obj->objectId);
++ yaffs_strcpy(locName, YAFFS_LOSTNFOUND_PREFIX);
++ yaffs_strcat(locName, x);
+ yaffs_strncpy(name, locName, buffSize - 1);
+
+ }
+ #ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+- else if (obj->shortName[0]) {
++ else if (obj->shortName[0])
+ yaffs_strcpy(name, obj->shortName);
+- }
+ #endif
+ else {
+ int result;
+@@ -6788,9 +6879,9 @@ int yaffs_GetObjectName(yaffs_Object * o
+
+ memset(buffer, 0, obj->myDev->nDataBytesPerChunk);
+
+- if (obj->chunkId >= 0) {
++ if (obj->hdrChunk > 0) {
+ result = yaffs_ReadChunkWithTagsFromNAND(obj->myDev,
+- obj->chunkId, buffer,
++ obj->hdrChunk, buffer,
+ NULL);
+ }
+ yaffs_strncpy(name, oh->name, buffSize - 1);
+@@ -6801,46 +6892,43 @@ int yaffs_GetObjectName(yaffs_Object * o
+ return yaffs_strlen(name);
+ }
+
+-int yaffs_GetObjectFileLength(yaffs_Object * obj)
++int yaffs_GetObjectFileLength(yaffs_Object *obj)
+ {
+-
+ /* Dereference any hard linking */
+ obj = yaffs_GetEquivalentObject(obj);
+
+- if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
++ if (obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+ return obj->variant.fileVariant.fileSize;
+- }
+- if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK) {
++ if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK)
+ return yaffs_strlen(obj->variant.symLinkVariant.alias);
+- } else {
++ else {
+ /* Only a directory should drop through to here */
+ return obj->myDev->nDataBytesPerChunk;
+ }
+ }
+
+-int yaffs_GetObjectLinkCount(yaffs_Object * obj)
++int yaffs_GetObjectLinkCount(yaffs_Object *obj)
+ {
+ int count = 0;
+- struct list_head *i;
++ struct ylist_head *i;
+
+- if (!obj->unlinked) {
+- count++; /* the object itself */
+- }
+- list_for_each(i, &obj->hardLinks) {
+- count++; /* add the hard links; */
+- }
+- return count;
++ if (!obj->unlinked)
++ count++; /* the object itself */
++
++ ylist_for_each(i, &obj->hardLinks)
++ count++; /* add the hard links; */
+
++ return count;
+ }
+
+-int yaffs_GetObjectInode(yaffs_Object * obj)
++int yaffs_GetObjectInode(yaffs_Object *obj)
+ {
+ obj = yaffs_GetEquivalentObject(obj);
+
+ return obj->objectId;
+ }
+
+-unsigned yaffs_GetObjectType(yaffs_Object * obj)
++unsigned yaffs_GetObjectType(yaffs_Object *obj)
+ {
+ obj = yaffs_GetEquivalentObject(obj);
+
+@@ -6872,19 +6960,18 @@ unsigned yaffs_GetObjectType(yaffs_Objec
+ }
+ }
+
+-YCHAR *yaffs_GetSymlinkAlias(yaffs_Object * obj)
++YCHAR *yaffs_GetSymlinkAlias(yaffs_Object *obj)
+ {
+ obj = yaffs_GetEquivalentObject(obj);
+- if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK) {
++ if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK)
+ return yaffs_CloneString(obj->variant.symLinkVariant.alias);
+- } else {
++ else
+ return yaffs_CloneString(_Y(""));
+- }
+ }
+
+ #ifndef CONFIG_YAFFS_WINCE
+
+-int yaffs_SetAttributes(yaffs_Object * obj, struct iattr *attr)
++int yaffs_SetAttributes(yaffs_Object *obj, struct iattr *attr)
+ {
+ unsigned int valid = attr->ia_valid;
+
+@@ -6910,7 +6997,7 @@ int yaffs_SetAttributes(yaffs_Object * o
+ return YAFFS_OK;
+
+ }
+-int yaffs_GetAttributes(yaffs_Object * obj, struct iattr *attr)
++int yaffs_GetAttributes(yaffs_Object *obj, struct iattr *attr)
+ {
+ unsigned int valid = 0;
+
+@@ -6934,13 +7021,12 @@ int yaffs_GetAttributes(yaffs_Object * o
+ attr->ia_valid = valid;
+
+ return YAFFS_OK;
+-
+ }
+
+ #endif
+
+ #if 0
+-int yaffs_DumpObject(yaffs_Object * obj)
++int yaffs_DumpObject(yaffs_Object *obj)
+ {
+ YCHAR name[257];
+
+@@ -6951,7 +7037,7 @@ int yaffs_DumpObject(yaffs_Object * obj)
+ ("Object %d, inode %d \"%s\"\n dirty %d valid %d serial %d sum %d"
+ " chunk %d type %d size %d\n"
+ TENDSTR), obj->objectId, yaffs_GetObjectInode(obj), name,
+- obj->dirty, obj->valid, obj->serial, obj->sum, obj->chunkId,
++ obj->dirty, obj->valid, obj->serial, obj->sum, obj->hdrChunk,
+ yaffs_GetObjectType(obj), yaffs_GetObjectFileLength(obj)));
+
+ return YAFFS_OK;
+@@ -6960,7 +7046,7 @@ int yaffs_DumpObject(yaffs_Object * obj)
+
+ /*---------------------------- Initialisation code -------------------------------------- */
+
+-static int yaffs_CheckDevFunctions(const yaffs_Device * dev)
++static int yaffs_CheckDevFunctions(const yaffs_Device *dev)
+ {
+
+ /* Common functions, gotta have */
+@@ -7011,7 +7097,7 @@ static int yaffs_CreateInitialDirectorie
+ yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_LOSTNFOUND,
+ YAFFS_LOSTNFOUND_MODE | S_IFDIR);
+
+- if(dev->lostNFoundDir && dev->rootDir && dev->unlinkedDir && dev->deletedDir){
++ if (dev->lostNFoundDir && dev->rootDir && dev->unlinkedDir && dev->deletedDir) {
+ yaffs_AddObjectToDirectory(dev->rootDir, dev->lostNFoundDir);
+ return YAFFS_OK;
+ }
+@@ -7019,7 +7105,7 @@ static int yaffs_CreateInitialDirectorie
+ return YAFFS_FAIL;
+ }
+
+-int yaffs_GutsInitialise(yaffs_Device * dev)
++int yaffs_GutsInitialise(yaffs_Device *dev)
+ {
+ int init_failed = 0;
+ unsigned x;
+@@ -7040,6 +7126,8 @@ int yaffs_GutsInitialise(yaffs_Device *
+ dev->chunkOffset = 0;
+ dev->nFreeChunks = 0;
+
++ dev->gcBlock = -1;
++
+ if (dev->startBlock == 0) {
+ dev->internalStartBlock = dev->startBlock + 1;
+ dev->internalEndBlock = dev->endBlock + 1;
+@@ -7049,18 +7137,18 @@ int yaffs_GutsInitialise(yaffs_Device *
+
+ /* Check geometry parameters. */
+
+- if ((dev->isYaffs2 && dev->nDataBytesPerChunk < 1024) ||
+- (!dev->isYaffs2 && dev->nDataBytesPerChunk != 512) ||
++ if ((!dev->inbandTags && dev->isYaffs2 && dev->totalBytesPerChunk < 1024) ||
++ (!dev->isYaffs2 && dev->totalBytesPerChunk < 512) ||
++ (dev->inbandTags && !dev->isYaffs2) ||
+ dev->nChunksPerBlock < 2 ||
+ dev->nReservedBlocks < 2 ||
+ dev->internalStartBlock <= 0 ||
+ dev->internalEndBlock <= 0 ||
+- dev->internalEndBlock <= (dev->internalStartBlock + dev->nReservedBlocks + 2) // otherwise it is too small
+- ) {
++ dev->internalEndBlock <= (dev->internalStartBlock + dev->nReservedBlocks + 2)) { /* otherwise it is too small */
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("yaffs: NAND geometry problems: chunk size %d, type is yaffs%s "
+- TENDSTR), dev->nDataBytesPerChunk, dev->isYaffs2 ? "2" : ""));
++ ("yaffs: NAND geometry problems: chunk size %d, type is yaffs%s, inbandTags %d "
++ TENDSTR), dev->totalBytesPerChunk, dev->isYaffs2 ? "2" : "", dev->inbandTags));
+ return YAFFS_FAIL;
+ }
+
+@@ -7070,6 +7158,12 @@ int yaffs_GutsInitialise(yaffs_Device *
+ return YAFFS_FAIL;
+ }
+
++ /* Sort out space for inband tags, if required */
++ if (dev->inbandTags)
++ dev->nDataBytesPerChunk = dev->totalBytesPerChunk - sizeof(yaffs_PackedTags2TagsPart);
++ else
++ dev->nDataBytesPerChunk = dev->totalBytesPerChunk;
++
+ /* Got the right mix of functions? */
+ if (!yaffs_CheckDevFunctions(dev)) {
+ /* Function missing */
+@@ -7097,31 +7191,18 @@ int yaffs_GutsInitialise(yaffs_Device *
+
+ dev->isMounted = 1;
+
+-
+-
+ /* OK now calculate a few things for the device */
+
+ /*
+ * Calculate all the chunk size manipulation numbers:
+ */
+- /* Start off assuming it is a power of 2 */
+- dev->chunkShift = ShiftDiv(dev->nDataBytesPerChunk);
+- dev->chunkMask = (1<<dev->chunkShift) - 1;
+-
+- if(dev->nDataBytesPerChunk == (dev->chunkMask + 1)){
+- /* Yes it is a power of 2, disable crumbs */
+- dev->crumbMask = 0;
+- dev->crumbShift = 0;
+- dev->crumbsPerChunk = 0;
+- } else {
+- /* Not a power of 2, use crumbs instead */
+- dev->crumbShift = ShiftDiv(sizeof(yaffs_PackedTags2TagsPart));
+- dev->crumbMask = (1<<dev->crumbShift)-1;
+- dev->crumbsPerChunk = dev->nDataBytesPerChunk/(1 << dev->crumbShift);
+- dev->chunkShift = 0;
+- dev->chunkMask = 0;
+- }
+-
++ x = dev->nDataBytesPerChunk;
++ /* We always use dev->chunkShift and dev->chunkDiv */
++ dev->chunkShift = Shifts(x);
++ x >>= dev->chunkShift;
++ dev->chunkDiv = x;
++ /* We only use chunk mask if chunkDiv is 1 */
++ dev->chunkMask = (1<<dev->chunkShift) - 1;
+
+ /*
+ * Calculate chunkGroupBits.
+@@ -7133,16 +7214,15 @@ int yaffs_GutsInitialise(yaffs_Device *
+ bits = ShiftsGE(x);
+
+ /* Set up tnode width if wide tnodes are enabled. */
+- if(!dev->wideTnodesDisabled){
++ if (!dev->wideTnodesDisabled) {
+ /* bits must be even so that we end up with 32-bit words */
+- if(bits & 1)
++ if (bits & 1)
+ bits++;
+- if(bits < 16)
++ if (bits < 16)
+ dev->tnodeWidth = 16;
+ else
+ dev->tnodeWidth = bits;
+- }
+- else
++ } else
+ dev->tnodeWidth = 16;
+
+ dev->tnodeMask = (1<<dev->tnodeWidth)-1;
+@@ -7193,7 +7273,7 @@ int yaffs_GutsInitialise(yaffs_Device *
+ dev->hasPendingPrioritisedGCs = 1; /* Assume the worst for now, will get fixed on first GC */
+
+ /* Initialise temporary buffers and caches. */
+- if(!yaffs_InitialiseTempBuffers(dev))
++ if (!yaffs_InitialiseTempBuffers(dev))
+ init_failed = 1;
+
+ dev->srCache = NULL;
+@@ -7203,25 +7283,26 @@ int yaffs_GutsInitialise(yaffs_Device *
+ if (!init_failed &&
+ dev->nShortOpCaches > 0) {
+ int i;
+- __u8 *buf;
++ void *buf;
+ int srCacheBytes = dev->nShortOpCaches * sizeof(yaffs_ChunkCache);
+
+- if (dev->nShortOpCaches > YAFFS_MAX_SHORT_OP_CACHES) {
++ if (dev->nShortOpCaches > YAFFS_MAX_SHORT_OP_CACHES)
+ dev->nShortOpCaches = YAFFS_MAX_SHORT_OP_CACHES;
+- }
+
+- buf = dev->srCache = YMALLOC(srCacheBytes);
++ dev->srCache = YMALLOC(srCacheBytes);
+
+- if(dev->srCache)
+- memset(dev->srCache,0,srCacheBytes);
++ buf = (__u8 *) dev->srCache;
++
++ if (dev->srCache)
++ memset(dev->srCache, 0, srCacheBytes);
+
+ for (i = 0; i < dev->nShortOpCaches && buf; i++) {
+ dev->srCache[i].object = NULL;
+ dev->srCache[i].lastUse = 0;
+ dev->srCache[i].dirty = 0;
+- dev->srCache[i].data = buf = YMALLOC_DMA(dev->nDataBytesPerChunk);
++ dev->srCache[i].data = buf = YMALLOC_DMA(dev->totalBytesPerChunk);
+ }
+- if(!buf)
++ if (!buf)
+ init_failed = 1;
+
+ dev->srLastUse = 0;
+@@ -7229,29 +7310,30 @@ int yaffs_GutsInitialise(yaffs_Device *
+
+ dev->cacheHits = 0;
+
+- if(!init_failed){
++ if (!init_failed) {
+ dev->gcCleanupList = YMALLOC(dev->nChunksPerBlock * sizeof(__u32));
+- if(!dev->gcCleanupList)
++ if (!dev->gcCleanupList)
+ init_failed = 1;
+ }
+
+- if (dev->isYaffs2) {
++ if (dev->isYaffs2)
+ dev->useHeaderFileSize = 1;
+- }
+- if(!init_failed && !yaffs_InitialiseBlocks(dev))
++
++ if (!init_failed && !yaffs_InitialiseBlocks(dev))
+ init_failed = 1;
+
+ yaffs_InitialiseTnodes(dev);
+ yaffs_InitialiseObjects(dev);
+
+- if(!init_failed && !yaffs_CreateInitialDirectories(dev))
++ if (!init_failed && !yaffs_CreateInitialDirectories(dev))
+ init_failed = 1;
+
+
+- if(!init_failed){
++ if (!init_failed) {
+ /* Now scan the flash. */
+ if (dev->isYaffs2) {
+- if(yaffs_CheckpointRestore(dev)) {
++ if (yaffs_CheckpointRestore(dev)) {
++ yaffs_CheckObjectDetailsLoaded(dev->rootDir);
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs: restored from checkpoint" TENDSTR)));
+ } else {
+@@ -7273,24 +7355,25 @@ int yaffs_GutsInitialise(yaffs_Device *
+ dev->nBackgroundDeletions = 0;
+ dev->oldestDirtySequence = 0;
+
+- if(!init_failed && !yaffs_InitialiseBlocks(dev))
++ if (!init_failed && !yaffs_InitialiseBlocks(dev))
+ init_failed = 1;
+
+ yaffs_InitialiseTnodes(dev);
+ yaffs_InitialiseObjects(dev);
+
+- if(!init_failed && !yaffs_CreateInitialDirectories(dev))
++ if (!init_failed && !yaffs_CreateInitialDirectories(dev))
+ init_failed = 1;
+
+- if(!init_failed && !yaffs_ScanBackwards(dev))
++ if (!init_failed && !yaffs_ScanBackwards(dev))
+ init_failed = 1;
+ }
+- }else
+- if(!yaffs_Scan(dev))
++ } else if (!yaffs_Scan(dev))
+ init_failed = 1;
++
++ yaffs_StripDeletedObjects(dev);
+ }
+
+- if(init_failed){
++ if (init_failed) {
+ /* Clean up the mess */
+ T(YAFFS_TRACE_TRACING,
+ (TSTR("yaffs: yaffs_GutsInitialise() aborted.\n" TENDSTR)));
+@@ -7318,7 +7401,7 @@ int yaffs_GutsInitialise(yaffs_Device *
+
+ }
+
+-void yaffs_Deinitialise(yaffs_Device * dev)
++void yaffs_Deinitialise(yaffs_Device *dev)
+ {
+ if (dev->isMounted) {
+ int i;
+@@ -7330,7 +7413,7 @@ void yaffs_Deinitialise(yaffs_Device * d
+ dev->srCache) {
+
+ for (i = 0; i < dev->nShortOpCaches; i++) {
+- if(dev->srCache[i].data)
++ if (dev->srCache[i].data)
+ YFREE(dev->srCache[i].data);
+ dev->srCache[i].data = NULL;
+ }
+@@ -7341,16 +7424,17 @@ void yaffs_Deinitialise(yaffs_Device * d
+
+ YFREE(dev->gcCleanupList);
+
+- for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+ YFREE(dev->tempBuffer[i].buffer);
+- }
+
+ dev->isMounted = 0;
+- }
+
++ if (dev->deinitialiseNAND)
++ dev->deinitialiseNAND(dev);
++ }
+ }
+
+-static int yaffs_CountFreeChunks(yaffs_Device * dev)
++static int yaffs_CountFreeChunks(yaffs_Device *dev)
+ {
+ int nFree;
+ int b;
+@@ -7358,7 +7442,7 @@ static int yaffs_CountFreeChunks(yaffs_D
+ yaffs_BlockInfo *blk;
+
+ for (nFree = 0, b = dev->internalStartBlock; b <= dev->internalEndBlock;
+- b++) {
++ b++) {
+ blk = yaffs_GetBlockInfo(dev, b);
+
+ switch (blk->blockState) {
+@@ -7373,19 +7457,19 @@ static int yaffs_CountFreeChunks(yaffs_D
+ default:
+ break;
+ }
+-
+ }
+
+ return nFree;
+ }
+
+-int yaffs_GetNumberOfFreeChunks(yaffs_Device * dev)
++int yaffs_GetNumberOfFreeChunks(yaffs_Device *dev)
+ {
+ /* This is what we report to the outside world */
+
+ int nFree;
+ int nDirtyCacheChunks;
+ int blocksForCheckpoint;
++ int i;
+
+ #if 1
+ nFree = dev->nFreeChunks;
+@@ -7397,12 +7481,9 @@ int yaffs_GetNumberOfFreeChunks(yaffs_De
+
+ /* Now count the number of dirty chunks in the cache and subtract those */
+
+- {
+- int i;
+- for (nDirtyCacheChunks = 0, i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].dirty)
+- nDirtyCacheChunks++;
+- }
++ for (nDirtyCacheChunks = 0, i = 0; i < dev->nShortOpCaches; i++) {
++ if (dev->srCache[i].dirty)
++ nDirtyCacheChunks++;
+ }
+
+ nFree -= nDirtyCacheChunks;
+@@ -7410,8 +7491,8 @@ int yaffs_GetNumberOfFreeChunks(yaffs_De
+ nFree -= ((dev->nReservedBlocks + 1) * dev->nChunksPerBlock);
+
+ /* Now we figure out how much to reserve for the checkpoint and report that... */
+- blocksForCheckpoint = dev->nCheckpointReservedBlocks - dev->blocksInCheckpoint;
+- if(blocksForCheckpoint < 0)
++ blocksForCheckpoint = yaffs_CalcCheckpointBlocksRequired(dev) - dev->blocksInCheckpoint;
++ if (blocksForCheckpoint < 0)
+ blocksForCheckpoint = 0;
+
+ nFree -= (blocksForCheckpoint * dev->nChunksPerBlock);
+@@ -7425,12 +7506,12 @@ int yaffs_GetNumberOfFreeChunks(yaffs_De
+
+ static int yaffs_freeVerificationFailures;
+
+-static void yaffs_VerifyFreeChunks(yaffs_Device * dev)
++static void yaffs_VerifyFreeChunks(yaffs_Device *dev)
+ {
+ int counted;
+ int difference;
+
+- if(yaffs_SkipVerification(dev))
++ if (yaffs_SkipVerification(dev))
+ return;
+
+ counted = yaffs_CountFreeChunks(dev);
+@@ -7447,23 +7528,25 @@ static void yaffs_VerifyFreeChunks(yaffs
+
+ /*---------------------------------------- YAFFS test code ----------------------*/
+
+-#define yaffs_CheckStruct(structure,syze, name) \
+- if(sizeof(structure) != syze) \
+- { \
+- T(YAFFS_TRACE_ALWAYS,(TSTR("%s should be %d but is %d\n" TENDSTR),\
+- name,syze,sizeof(structure))); \
+- return YAFFS_FAIL; \
+- }
++#define yaffs_CheckStruct(structure, syze, name) \
++ do { \
++ if (sizeof(structure) != syze) { \
++ T(YAFFS_TRACE_ALWAYS, (TSTR("%s should be %d but is %d\n" TENDSTR),\
++ name, syze, sizeof(structure))); \
++ return YAFFS_FAIL; \
++ } \
++ } while (0)
+
+ static int yaffs_CheckStructures(void)
+ {
+-/* yaffs_CheckStruct(yaffs_Tags,8,"yaffs_Tags") */
+-/* yaffs_CheckStruct(yaffs_TagsUnion,8,"yaffs_TagsUnion") */
+-/* yaffs_CheckStruct(yaffs_Spare,16,"yaffs_Spare") */
++/* yaffs_CheckStruct(yaffs_Tags,8,"yaffs_Tags"); */
++/* yaffs_CheckStruct(yaffs_TagsUnion,8,"yaffs_TagsUnion"); */
++/* yaffs_CheckStruct(yaffs_Spare,16,"yaffs_Spare"); */
+ #ifndef CONFIG_YAFFS_TNODE_LIST_DEBUG
+- yaffs_CheckStruct(yaffs_Tnode, 2 * YAFFS_NTNODES_LEVEL0, "yaffs_Tnode")
++ yaffs_CheckStruct(yaffs_Tnode, 2 * YAFFS_NTNODES_LEVEL0, "yaffs_Tnode");
+ #endif
+- yaffs_CheckStruct(yaffs_ObjectHeader, 512, "yaffs_ObjectHeader")
+-
+- return YAFFS_OK;
++#ifndef CONFIG_YAFFS_WINCE
++ yaffs_CheckStruct(yaffs_ObjectHeader, 512, "yaffs_ObjectHeader");
++#endif
++ return YAFFS_OK;
+ }
+--- a/fs/yaffs2/yaffs_guts.h
++++ b/fs/yaffs2/yaffs_guts.h
+@@ -90,7 +90,7 @@
+
+ #define YAFFS_MAX_SHORT_OP_CACHES 20
+
+-#define YAFFS_N_TEMP_BUFFERS 4
++#define YAFFS_N_TEMP_BUFFERS 6
+
+ /* We limit the number of attempts at successfully saving a chunk of data.
+ * Small-page devices have 32 pages per block; large-page devices have 64.
+@@ -108,6 +108,9 @@
+ #define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000
+ #define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xEFFFFF00
+
++/* Special sequence number for bad block that failed to be marked bad */
++#define YAFFS_SEQUENCE_BAD_BLOCK 0xFFFF0000
++
+ /* ChunkCache is used for short read/write operations.*/
+ typedef struct {
+ struct yaffs_ObjectStruct *object;
+@@ -134,11 +137,10 @@ typedef struct {
+ typedef struct {
+ unsigned chunkId:20;
+ unsigned serialNumber:2;
+- unsigned byteCount:10;
++ unsigned byteCountLSB:10;
+ unsigned objectId:18;
+ unsigned ecc:12;
+- unsigned unusedStuff:2;
+-
++ unsigned byteCountMSB:2;
+ } yaffs_Tags;
+
+ typedef union {
+@@ -277,13 +279,13 @@ typedef struct {
+
+ int softDeletions:10; /* number of soft deleted pages */
+ int pagesInUse:10; /* number of pages in use */
+- yaffs_BlockState blockState:4; /* One of the above block states */
++ unsigned blockState:4; /* One of the above block states. NB use unsigned because enum is sometimes an int */
+ __u32 needsRetiring:1; /* Data has failed on this block, need to get valid data off */
+- /* and retire the block. */
+- __u32 skipErasedCheck: 1; /* If this is set we can skip the erased check on this block */
+- __u32 gcPrioritise: 1; /* An ECC check or blank check has failed on this block.
++ /* and retire the block. */
++ __u32 skipErasedCheck:1; /* If this is set we can skip the erased check on this block */
++ __u32 gcPrioritise:1; /* An ECC check or blank check has failed on this block.
+ It should be prioritised for GC */
+- __u32 chunkErrorStrikes:3; /* How many times we've had ecc etc failures on this block and tried to reuse it */
++ __u32 chunkErrorStrikes:3; /* How many times we've had ecc etc failures on this block and tried to reuse it */
+
+ #ifdef CONFIG_YAFFS_YAFFS2
+ __u32 hasShrinkHeader:1; /* This block has at least one shrink object header */
+@@ -300,11 +302,11 @@ typedef struct {
+
+ /* Apply to everything */
+ int parentObjectId;
+- __u16 sum__NoLongerUsed; /* checksum of name. No longer used */
++ __u16 sum__NoLongerUsed; /* checksum of name. No longer used */
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+- /* Thes following apply to directories, files, symlinks - not hard links */
+- __u32 yst_mode; /* protection */
++ /* The following apply to directories, files, symlinks - not hard links */
++ __u32 yst_mode; /* protection */
+
+ #ifdef CONFIG_YAFFS_WINCE
+ __u32 notForWinCE[5];
+@@ -331,11 +333,14 @@ typedef struct {
+ __u32 win_ctime[2];
+ __u32 win_atime[2];
+ __u32 win_mtime[2];
+- __u32 roomToGrow[4];
+ #else
+- __u32 roomToGrow[10];
++ __u32 roomToGrow[6];
++
+ #endif
++ __u32 inbandShadowsObject;
++ __u32 inbandIsShrink;
+
++ __u32 reservedSpace[2];
+ int shadowsObject; /* This object header shadows the specified object if > 0 */
+
+ /* isShrink applies to object headers written when we shrink the file (ie resize) */
+@@ -381,7 +386,7 @@ typedef struct {
+ } yaffs_FileStructure;
+
+ typedef struct {
+- struct list_head children; /* list of child links */
++ struct ylist_head children; /* list of child links */
+ } yaffs_DirectoryStructure;
+
+ typedef struct {
+@@ -418,23 +423,24 @@ struct yaffs_ObjectStruct {
+ * still in the inode cache. Free of object is defered.
+ * until the inode is released.
+ */
++ __u8 beingCreated:1; /* This object is still being created so skip some checks. */
+
+ __u8 serial; /* serial number of chunk in NAND. Cached here */
+ __u16 sum; /* sum of the name to speed searching */
+
+- struct yaffs_DeviceStruct *myDev; /* The device I'm on */
++ struct yaffs_DeviceStruct *myDev; /* The device I'm on */
+
+- struct list_head hashLink; /* list of objects in this hash bucket */
++ struct ylist_head hashLink; /* list of objects in this hash bucket */
+
+- struct list_head hardLinks; /* all the equivalent hard linked objects */
++ struct ylist_head hardLinks; /* all the equivalent hard linked objects */
+
+ /* directory structure stuff */
+ /* also used for linking up the free list */
+ struct yaffs_ObjectStruct *parent;
+- struct list_head siblings;
++ struct ylist_head siblings;
+
+ /* Where's my object header in NAND? */
+- int chunkId;
++ int hdrChunk;
+
+ int nDataChunks; /* Number of data chunks attached to the file. */
+
+@@ -485,7 +491,7 @@ struct yaffs_ObjectList_struct {
+ typedef struct yaffs_ObjectList_struct yaffs_ObjectList;
+
+ typedef struct {
+- struct list_head list;
++ struct ylist_head list;
+ int count;
+ } yaffs_ObjectBucket;
+
+@@ -495,11 +501,10 @@ typedef struct {
+ */
+
+ typedef struct {
+- int structType;
++ int structType;
+ __u32 objectId;
+ __u32 parentId;
+- int chunkId;
+-
++ int hdrChunk;
+ yaffs_ObjectType variantType:3;
+ __u8 deleted:1;
+ __u8 softDeleted:1;
+@@ -511,8 +516,7 @@ typedef struct {
+
+ int nDataChunks;
+ __u32 fileSizeOrEquivalentObjectId;
+-
+-}yaffs_CheckpointObject;
++} yaffs_CheckpointObject;
+
+ /*--------------------- Temporary buffers ----------------
+ *
+@@ -528,13 +532,13 @@ typedef struct {
+ /*----------------- Device ---------------------------------*/
+
+ struct yaffs_DeviceStruct {
+- struct list_head devList;
++ struct ylist_head devList;
+ const char *name;
+
+ /* Entry parameters set up way early. Yaffs sets up the rest.*/
+ int nDataBytesPerChunk; /* Should be a power of 2 >= 512 */
+ int nChunksPerBlock; /* does not need to be a power of 2 */
+- int nBytesPerSpare; /* spare area size */
++ int spareBytesPerChunk; /* spare area size */
+ int startBlock; /* Start block we're allowed to use */
+ int endBlock; /* End block we're allowed to use */
+ int nReservedBlocks; /* We want this tuneable so that we can reduce */
+@@ -544,9 +548,7 @@ struct yaffs_DeviceStruct {
+ /* Stuff used by the shared space checkpointing mechanism */
+ /* If this value is zero, then this mechanism is disabled */
+
+- int nCheckpointReservedBlocks; /* Blocks to reserve for checkpoint data */
+-
+-
++/* int nCheckpointReservedBlocks; */ /* Blocks to reserve for checkpoint data */
+
+
+ int nShortOpCaches; /* If <= 0, then short op caching is disabled, else
+@@ -560,30 +562,31 @@ struct yaffs_DeviceStruct {
+ void *genericDevice; /* Pointer to device context
+ * On an mtd this holds the mtd pointer.
+ */
+- void *superBlock;
++ void *superBlock;
+
+ /* NAND access functions (Must be set before calling YAFFS)*/
+
+- int (*writeChunkToNAND) (struct yaffs_DeviceStruct * dev,
+- int chunkInNAND, const __u8 * data,
+- const yaffs_Spare * spare);
+- int (*readChunkFromNAND) (struct yaffs_DeviceStruct * dev,
+- int chunkInNAND, __u8 * data,
+- yaffs_Spare * spare);
+- int (*eraseBlockInNAND) (struct yaffs_DeviceStruct * dev,
+- int blockInNAND);
+- int (*initialiseNAND) (struct yaffs_DeviceStruct * dev);
++ int (*writeChunkToNAND) (struct yaffs_DeviceStruct *dev,
++ int chunkInNAND, const __u8 *data,
++ const yaffs_Spare *spare);
++ int (*readChunkFromNAND) (struct yaffs_DeviceStruct *dev,
++ int chunkInNAND, __u8 *data,
++ yaffs_Spare *spare);
++ int (*eraseBlockInNAND) (struct yaffs_DeviceStruct *dev,
++ int blockInNAND);
++ int (*initialiseNAND) (struct yaffs_DeviceStruct *dev);
++ int (*deinitialiseNAND) (struct yaffs_DeviceStruct *dev);
+
+ #ifdef CONFIG_YAFFS_YAFFS2
+- int (*writeChunkWithTagsToNAND) (struct yaffs_DeviceStruct * dev,
+- int chunkInNAND, const __u8 * data,
+- const yaffs_ExtendedTags * tags);
+- int (*readChunkWithTagsFromNAND) (struct yaffs_DeviceStruct * dev,
+- int chunkInNAND, __u8 * data,
+- yaffs_ExtendedTags * tags);
+- int (*markNANDBlockBad) (struct yaffs_DeviceStruct * dev, int blockNo);
+- int (*queryNANDBlock) (struct yaffs_DeviceStruct * dev, int blockNo,
+- yaffs_BlockState * state, int *sequenceNumber);
++ int (*writeChunkWithTagsToNAND) (struct yaffs_DeviceStruct *dev,
++ int chunkInNAND, const __u8 *data,
++ const yaffs_ExtendedTags *tags);
++ int (*readChunkWithTagsFromNAND) (struct yaffs_DeviceStruct *dev,
++ int chunkInNAND, __u8 *data,
++ yaffs_ExtendedTags *tags);
++ int (*markNANDBlockBad) (struct yaffs_DeviceStruct *dev, int blockNo);
++ int (*queryNANDBlock) (struct yaffs_DeviceStruct *dev, int blockNo,
++ yaffs_BlockState *state, __u32 *sequenceNumber);
+ #endif
+
+ int isYaffs2;
+@@ -595,10 +598,12 @@ struct yaffs_DeviceStruct {
+ void (*removeObjectCallback)(struct yaffs_ObjectStruct *obj);
+
+ /* Callback to mark the superblock dirsty */
+- void (*markSuperBlockDirty)(void * superblock);
++ void (*markSuperBlockDirty)(void *superblock);
+
+ int wideTnodesDisabled; /* Set to disable wide tnodes */
+
++ YCHAR *pathDividers; /* String of legal path dividers */
++
+
+ /* End of stuff that must be set before initialisation. */
+
+@@ -615,16 +620,14 @@ struct yaffs_DeviceStruct {
+ __u32 tnodeWidth;
+ __u32 tnodeMask;
+
+- /* Stuff to support various file offses to chunk/offset translations */
+- /* "Crumbs" for nDataBytesPerChunk not being a power of 2 */
+- __u32 crumbMask;
+- __u32 crumbShift;
+- __u32 crumbsPerChunk;
+-
+- /* Straight shifting for nDataBytesPerChunk being a power of 2 */
+- __u32 chunkShift;
+- __u32 chunkMask;
+-
++ /* Stuff for figuring out file offset to chunk conversions */
++ __u32 chunkShift; /* Shift value */
++ __u32 chunkDiv; /* Divisor after shifting: 1 for power-of-2 sizes */
++ __u32 chunkMask; /* Mask to use for power-of-2 case */
++
++ /* Stuff to handle inband tags */
++ int inbandTags;
++ __u32 totalBytesPerChunk;
+
+ #ifdef __KERNEL__
+
+@@ -633,7 +636,7 @@ struct yaffs_DeviceStruct {
+ __u8 *spareBuffer; /* For mtdif2 use. Don't know the size of the buffer
+ * at compile time so we have to allocate it.
+ */
+- void (*putSuperFunc) (struct super_block * sb);
++ void (*putSuperFunc) (struct super_block *sb);
+ #endif
+
+ int isMounted;
+@@ -663,6 +666,8 @@ struct yaffs_DeviceStruct {
+ __u32 checkpointSum;
+ __u32 checkpointXor;
+
++ int nCheckpointBlocksRequired; /* Number of blocks needed to store current checkpoint set */
++
+ /* Block Info */
+ yaffs_BlockInfo *blockInfo;
+ __u8 *chunkBits; /* bitmap of chunks in use */
+@@ -684,11 +689,15 @@ struct yaffs_DeviceStruct {
+ yaffs_TnodeList *allocatedTnodeList;
+
+ int isDoingGC;
++ int gcBlock;
++ int gcChunk;
+
+ int nObjectsCreated;
+ yaffs_Object *freeObjects;
+ int nFreeObjects;
+
++ int nHardLinks;
++
+ yaffs_ObjectList *allocatedObjectList;
+
+ yaffs_ObjectBucket objectBucket[YAFFS_NOBJECT_BUCKETS];
+@@ -745,8 +754,10 @@ struct yaffs_DeviceStruct {
+ int nBackgroundDeletions; /* Count of background deletions. */
+
+
++ /* Temporary buffer management */
+ yaffs_TempBuffer tempBuffer[YAFFS_N_TEMP_BUFFERS];
+ int maxTemp;
++ int tempInUse;
+ int unmanagedTempAllocations;
+ int unmanagedTempDeallocations;
+
+@@ -758,9 +769,9 @@ struct yaffs_DeviceStruct {
+
+ typedef struct yaffs_DeviceStruct yaffs_Device;
+
+-/* The static layout of bllock usage etc is stored in the super block header */
++/* The static layout of block usage etc is stored in the super block header */
+ typedef struct {
+- int StructType;
++ int StructType;
+ int version;
+ int checkpointStartBlock;
+ int checkpointEndBlock;
+@@ -773,7 +784,7 @@ typedef struct {
+ * must be preserved over unmount/mount cycles.
+ */
+ typedef struct {
+- int structType;
++ int structType;
+ int nErasedBlocks;
+ int allocationBlock; /* Current block being allocated off */
+ __u32 allocationPage;
+@@ -791,57 +802,45 @@ typedef struct {
+
+
+ typedef struct {
+- int structType;
+- __u32 magic;
+- __u32 version;
+- __u32 head;
++ int structType;
++ __u32 magic;
++ __u32 version;
++ __u32 head;
+ } yaffs_CheckpointValidity;
+
+-/* Function to manipulate block info */
+-static Y_INLINE yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device * dev, int blk)
+-{
+- if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("**>> yaffs: getBlockInfo block %d is not valid" TENDSTR),
+- blk));
+- YBUG();
+- }
+- return &dev->blockInfo[blk - dev->internalStartBlock];
+-}
+
+ /*----------------------- YAFFS Functions -----------------------*/
+
+-int yaffs_GutsInitialise(yaffs_Device * dev);
+-void yaffs_Deinitialise(yaffs_Device * dev);
++int yaffs_GutsInitialise(yaffs_Device *dev);
++void yaffs_Deinitialise(yaffs_Device *dev);
+
+-int yaffs_GetNumberOfFreeChunks(yaffs_Device * dev);
++int yaffs_GetNumberOfFreeChunks(yaffs_Device *dev);
+
+-int yaffs_RenameObject(yaffs_Object * oldDir, const YCHAR * oldName,
+- yaffs_Object * newDir, const YCHAR * newName);
++int yaffs_RenameObject(yaffs_Object *oldDir, const YCHAR *oldName,
++ yaffs_Object *newDir, const YCHAR *newName);
+
+-int yaffs_Unlink(yaffs_Object * dir, const YCHAR * name);
+-int yaffs_DeleteFile(yaffs_Object * obj);
++int yaffs_Unlink(yaffs_Object *dir, const YCHAR *name);
++int yaffs_DeleteObject(yaffs_Object *obj);
+
+-int yaffs_GetObjectName(yaffs_Object * obj, YCHAR * name, int buffSize);
+-int yaffs_GetObjectFileLength(yaffs_Object * obj);
+-int yaffs_GetObjectInode(yaffs_Object * obj);
+-unsigned yaffs_GetObjectType(yaffs_Object * obj);
+-int yaffs_GetObjectLinkCount(yaffs_Object * obj);
++int yaffs_GetObjectName(yaffs_Object *obj, YCHAR *name, int buffSize);
++int yaffs_GetObjectFileLength(yaffs_Object *obj);
++int yaffs_GetObjectInode(yaffs_Object *obj);
++unsigned yaffs_GetObjectType(yaffs_Object *obj);
++int yaffs_GetObjectLinkCount(yaffs_Object *obj);
+
+-int yaffs_SetAttributes(yaffs_Object * obj, struct iattr *attr);
+-int yaffs_GetAttributes(yaffs_Object * obj, struct iattr *attr);
++int yaffs_SetAttributes(yaffs_Object *obj, struct iattr *attr);
++int yaffs_GetAttributes(yaffs_Object *obj, struct iattr *attr);
+
+ /* File operations */
+-int yaffs_ReadDataFromFile(yaffs_Object * obj, __u8 * buffer, loff_t offset,
+- int nBytes);
+-int yaffs_WriteDataToFile(yaffs_Object * obj, const __u8 * buffer, loff_t offset,
+- int nBytes, int writeThrough);
+-int yaffs_ResizeFile(yaffs_Object * obj, loff_t newSize);
+-
+-yaffs_Object *yaffs_MknodFile(yaffs_Object * parent, const YCHAR * name,
+- __u32 mode, __u32 uid, __u32 gid);
+-int yaffs_FlushFile(yaffs_Object * obj, int updateTime);
++int yaffs_ReadDataFromFile(yaffs_Object *obj, __u8 *buffer, loff_t offset,
++ int nBytes);
++int yaffs_WriteDataToFile(yaffs_Object *obj, const __u8 *buffer, loff_t offset,
++ int nBytes, int writeThrough);
++int yaffs_ResizeFile(yaffs_Object *obj, loff_t newSize);
++
++yaffs_Object *yaffs_MknodFile(yaffs_Object *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid);
++int yaffs_FlushFile(yaffs_Object *obj, int updateTime);
+
+ /* Flushing and checkpointing */
+ void yaffs_FlushEntireDeviceCache(yaffs_Device *dev);
+@@ -850,33 +849,33 @@ int yaffs_CheckpointSave(yaffs_Device *d
+ int yaffs_CheckpointRestore(yaffs_Device *dev);
+
+ /* Directory operations */
+-yaffs_Object *yaffs_MknodDirectory(yaffs_Object * parent, const YCHAR * name,
+- __u32 mode, __u32 uid, __u32 gid);
+-yaffs_Object *yaffs_FindObjectByName(yaffs_Object * theDir, const YCHAR * name);
+-int yaffs_ApplyToDirectoryChildren(yaffs_Object * theDir,
++yaffs_Object *yaffs_MknodDirectory(yaffs_Object *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid);
++yaffs_Object *yaffs_FindObjectByName(yaffs_Object *theDir, const YCHAR *name);
++int yaffs_ApplyToDirectoryChildren(yaffs_Object *theDir,
+ int (*fn) (yaffs_Object *));
+
+-yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device * dev, __u32 number);
++yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device *dev, __u32 number);
+
+ /* Link operations */
+-yaffs_Object *yaffs_Link(yaffs_Object * parent, const YCHAR * name,
+- yaffs_Object * equivalentObject);
++yaffs_Object *yaffs_Link(yaffs_Object *parent, const YCHAR *name,
++ yaffs_Object *equivalentObject);
+
+-yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object * obj);
++yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object *obj);
+
+ /* Symlink operations */
+-yaffs_Object *yaffs_MknodSymLink(yaffs_Object * parent, const YCHAR * name,
++yaffs_Object *yaffs_MknodSymLink(yaffs_Object *parent, const YCHAR *name,
+ __u32 mode, __u32 uid, __u32 gid,
+- const YCHAR * alias);
+-YCHAR *yaffs_GetSymlinkAlias(yaffs_Object * obj);
++ const YCHAR *alias);
++YCHAR *yaffs_GetSymlinkAlias(yaffs_Object *obj);
+
+ /* Special inodes (fifos, sockets and devices) */
+-yaffs_Object *yaffs_MknodSpecial(yaffs_Object * parent, const YCHAR * name,
++yaffs_Object *yaffs_MknodSpecial(yaffs_Object *parent, const YCHAR *name,
+ __u32 mode, __u32 uid, __u32 gid, __u32 rdev);
+
+ /* Special directories */
+-yaffs_Object *yaffs_Root(yaffs_Device * dev);
+-yaffs_Object *yaffs_LostNFound(yaffs_Device * dev);
++yaffs_Object *yaffs_Root(yaffs_Device *dev);
++yaffs_Object *yaffs_LostNFound(yaffs_Device *dev);
+
+ #ifdef CONFIG_YAFFS_WINCE
+ /* CONFIG_YAFFS_WINCE special stuff */
+@@ -885,18 +884,21 @@ void yfsd_WinFileTimeNow(__u32 target[2]
+
+ #ifdef __KERNEL__
+
+-void yaffs_HandleDeferedFree(yaffs_Object * obj);
++void yaffs_HandleDeferedFree(yaffs_Object *obj);
+ #endif
+
+ /* Debug dump */
+-int yaffs_DumpObject(yaffs_Object * obj);
++int yaffs_DumpObject(yaffs_Object *obj);
+
+-void yaffs_GutsTest(yaffs_Device * dev);
++void yaffs_GutsTest(yaffs_Device *dev);
+
+ /* A few useful functions */
+-void yaffs_InitialiseTags(yaffs_ExtendedTags * tags);
+-void yaffs_DeleteChunk(yaffs_Device * dev, int chunkId, int markNAND, int lyn);
+-int yaffs_CheckFF(__u8 * buffer, int nBytes);
++void yaffs_InitialiseTags(yaffs_ExtendedTags *tags);
++void yaffs_DeleteChunk(yaffs_Device *dev, int chunkId, int markNAND, int lyn);
++int yaffs_CheckFF(__u8 *buffer, int nBytes);
+ void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi);
+
++__u8 *yaffs_GetTempBuffer(yaffs_Device *dev, int lineNo);
++void yaffs_ReleaseTempBuffer(yaffs_Device *dev, __u8 *buffer, int lineNo);
++
+ #endif
+--- a/fs/yaffs2/yaffs_mtdif1.c
++++ b/fs/yaffs2/yaffs_mtdif1.c
+@@ -26,7 +26,7 @@
+ #include "yportenv.h"
+ #include "yaffs_guts.h"
+ #include "yaffs_packedtags1.h"
+-#include "yaffs_tagscompat.h" // for yaffs_CalcTagsECC
++#include "yaffs_tagscompat.h" /* for yaffs_CalcTagsECC */
+
+ #include "linux/kernel.h"
+ #include "linux/version.h"
+@@ -34,9 +34,9 @@
+ #include "linux/mtd/mtd.h"
+
+ /* Don't compile this module if we don't have MTD's mtd_oob_ops interface */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+
+-const char *yaffs_mtdif1_c_version = "$Id: yaffs_mtdif1.c,v 1.3 2007/05/15 20:16:11 ian Exp $";
++const char *yaffs_mtdif1_c_version = "$Id: yaffs_mtdif1.c,v 1.10 2009-03-09 07:41:10 charles Exp $";
+
+ #ifndef CONFIG_YAFFS_9BYTE_TAGS
+ # define YTAG1_SIZE 8
+@@ -89,9 +89,9 @@ static struct nand_ecclayout nand_oob_16
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+ int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev,
+- int chunkInNAND, const __u8 * data, const yaffs_ExtendedTags * etags)
++ int chunkInNAND, const __u8 *data, const yaffs_ExtendedTags *etags)
+ {
+- struct mtd_info * mtd = dev->genericDevice;
++ struct mtd_info *mtd = dev->genericDevice;
+ int chunkBytes = dev->nDataBytesPerChunk;
+ loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
+ struct mtd_oob_ops ops;
+@@ -146,7 +146,7 @@ int nandmtd1_WriteChunkWithTagsToNAND(ya
+
+ /* Return with empty ExtendedTags but add eccResult.
+ */
+-static int rettags(yaffs_ExtendedTags * etags, int eccResult, int retval)
++static int rettags(yaffs_ExtendedTags *etags, int eccResult, int retval)
+ {
+ if (etags) {
+ memset(etags, 0, sizeof(*etags));
+@@ -169,9 +169,9 @@ static int rettags(yaffs_ExtendedTags *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+ int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev,
+- int chunkInNAND, __u8 * data, yaffs_ExtendedTags * etags)
++ int chunkInNAND, __u8 *data, yaffs_ExtendedTags *etags)
+ {
+- struct mtd_info * mtd = dev->genericDevice;
++ struct mtd_info *mtd = dev->genericDevice;
+ int chunkBytes = dev->nDataBytesPerChunk;
+ loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
+ int eccres = YAFFS_ECC_RESULT_NO_ERROR;
+@@ -189,7 +189,7 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
+ ops.datbuf = data;
+ ops.oobbuf = (__u8 *)&pt1;
+
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 20))
+ /* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
+ * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
+ */
+@@ -284,11 +284,11 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
+ */
+ int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
+ {
+- struct mtd_info * mtd = dev->genericDevice;
++ struct mtd_info *mtd = dev->genericDevice;
+ int blocksize = dev->nChunksPerBlock * dev->nDataBytesPerChunk;
+ int retval;
+
+- yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", blockNo);
++ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad\n", blockNo);
+
+ retval = mtd->block_markbad(mtd, (loff_t)blocksize * blockNo);
+ return (retval) ? YAFFS_FAIL : YAFFS_OK;
+@@ -298,7 +298,7 @@ int nandmtd1_MarkNANDBlockBad(struct yaf
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+-static int nandmtd1_TestPrerequists(struct mtd_info * mtd)
++static int nandmtd1_TestPrerequists(struct mtd_info *mtd)
+ {
+ /* 2.6.18 has mtd->ecclayout->oobavail */
+ /* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */
+@@ -323,10 +323,11 @@ static int nandmtd1_TestPrerequists(stru
+ * Always returns YAFFS_OK.
+ */
+ int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState * pState, int *pSequenceNumber)
++ yaffs_BlockState *pState, __u32 *pSequenceNumber)
+ {
+- struct mtd_info * mtd = dev->genericDevice;
++ struct mtd_info *mtd = dev->genericDevice;
+ int chunkNo = blockNo * dev->nChunksPerBlock;
++ loff_t addr = (loff_t)chunkNo * dev->nDataBytesPerChunk;
+ yaffs_ExtendedTags etags;
+ int state = YAFFS_BLOCK_STATE_DEAD;
+ int seqnum = 0;
+@@ -335,21 +336,22 @@ int nandmtd1_QueryNANDBlock(struct yaffs
+ /* We don't yet have a good place to test for MTD config prerequists.
+ * Do it here as we are called during the initial scan.
+ */
+- if (nandmtd1_TestPrerequists(mtd) != YAFFS_OK) {
++ if (nandmtd1_TestPrerequists(mtd) != YAFFS_OK)
+ return YAFFS_FAIL;
+- }
+
+ retval = nandmtd1_ReadChunkWithTagsFromNAND(dev, chunkNo, NULL, &etags);
++ etags.blockBad = (mtd->block_isbad)(mtd, addr);
+ if (etags.blockBad) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+- "block %d is marked bad", blockNo);
++ "block %d is marked bad\n", blockNo);
+ state = YAFFS_BLOCK_STATE_DEAD;
+- }
+- else if (etags.chunkUsed) {
++ } else if (etags.eccResult != YAFFS_ECC_RESULT_NO_ERROR) {
++ /* bad tags, need to look more closely */
++ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
++ } else if (etags.chunkUsed) {
+ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ seqnum = etags.sequenceNumber;
+- }
+- else {
++ } else {
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+
+@@ -360,4 +362,4 @@ int nandmtd1_QueryNANDBlock(struct yaffs
+ return YAFFS_OK;
+ }
+
+-#endif /*KERNEL_VERSION*/
++#endif /*MTD_VERSION*/
+--- a/fs/yaffs2/yaffs_mtdif1.h
++++ b/fs/yaffs2/yaffs_mtdif1.h
+@@ -14,15 +14,15 @@
+ #ifndef __YAFFS_MTDIF1_H__
+ #define __YAFFS_MTDIF1_H__
+
+-int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data, const yaffs_ExtendedTags * tags);
++int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data, const yaffs_ExtendedTags *tags);
+
+-int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
+- __u8 * data, yaffs_ExtendedTags * tags);
++int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++ __u8 *data, yaffs_ExtendedTags *tags);
+
+ int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
+
+ int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState * state, int *sequenceNumber);
++ yaffs_BlockState *state, __u32 *sequenceNumber);
+
+ #endif
+--- a/fs/yaffs2/yaffs_mtdif2.c
++++ b/fs/yaffs2/yaffs_mtdif2.c
+@@ -14,7 +14,7 @@
+ /* mtd interface for YAFFS2 */
+
+ const char *yaffs_mtdif2_c_version =
+- "$Id: yaffs_mtdif2.c,v 1.17 2007-02-14 01:09:06 wookey Exp $";
++ "$Id: yaffs_mtdif2.c,v 1.23 2009-03-06 17:20:53 wookey Exp $";
+
+ #include "yportenv.h"
+
+@@ -27,19 +27,23 @@ const char *yaffs_mtdif2_c_version =
+
+ #include "yaffs_packedtags2.h"
+
+-int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data,
+- const yaffs_ExtendedTags * tags)
++/* NB For use with inband tags....
++ * We assume that the data buffer is of size totalBytesPerChunk so that we can also
++ * use it to load the tags.
++ */
++int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data,
++ const yaffs_ExtendedTags *tags)
+ {
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ struct mtd_oob_ops ops;
+ #else
+ size_t dummy;
+ #endif
+ int retval = 0;
+
+- loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
++ loff_t addr;
+
+ yaffs_PackedTags2 pt;
+
+@@ -48,46 +52,40 @@ int nandmtd2_WriteChunkWithTagsToNAND(ya
+ ("nandmtd2_WriteChunkWithTagsToNAND chunk %d data %p tags %p"
+ TENDSTR), chunkInNAND, data, tags));
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+- if (tags)
+- yaffs_PackTags2(&pt, tags);
+- else
+- BUG(); /* both tags and data should always be present */
+
+- if (data) {
+- ops.mode = MTD_OOB_AUTO;
+- ops.ooblen = sizeof(pt);
+- ops.len = dev->nDataBytesPerChunk;
+- ops.ooboffs = 0;
+- ops.datbuf = (__u8 *)data;
+- ops.oobbuf = (void *)&pt;
+- retval = mtd->write_oob(mtd, addr, &ops);
++ addr = ((loff_t) chunkInNAND) * dev->totalBytesPerChunk;
++
++ /* For yaffs2 writing there must be both data and tags.
++ * If we're using inband tags, then the tags are stuffed into
++ * the end of the data buffer.
++ */
++ if (!data || !tags)
++ BUG();
++ else if (dev->inbandTags) {
++ yaffs_PackedTags2TagsPart *pt2tp;
++ pt2tp = (yaffs_PackedTags2TagsPart *)(data + dev->nDataBytesPerChunk);
++ yaffs_PackTags2TagsPart(pt2tp, tags);
+ } else
+- BUG(); /* both tags and data should always be present */
+-#else
+- if (tags) {
+ yaffs_PackTags2(&pt, tags);
+- }
+
+- if (data && tags) {
+- if (dev->useNANDECC)
+- retval =
+- mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, (__u8 *) & pt, NULL);
+- else
+- retval =
+- mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, (__u8 *) & pt, NULL);
+- } else {
+- if (data)
+- retval =
+- mtd->write(mtd, addr, dev->nDataBytesPerChunk, &dummy,
+- data);
+- if (tags)
+- retval =
+- mtd->write_oob(mtd, addr, mtd->oobsize, &dummy,
+- (__u8 *) & pt);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ ops.mode = MTD_OOB_AUTO;
++ ops.ooblen = (dev->inbandTags) ? 0 : sizeof(pt);
++ ops.len = dev->totalBytesPerChunk;
++ ops.ooboffs = 0;
++ ops.datbuf = (__u8 *)data;
++ ops.oobbuf = (dev->inbandTags) ? NULL : (void *)&pt;
++ retval = mtd->write_oob(mtd, addr, &ops);
+
++#else
++ if (!dev->inbandTags) {
++ retval =
++ mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
++ &dummy, data, (__u8 *) &pt, NULL);
++ } else {
++ retval =
++ mtd->write(mtd, addr, dev->totalBytesPerChunk, &dummy,
++ data);
+ }
+ #endif
+
+@@ -97,17 +95,18 @@ int nandmtd2_WriteChunkWithTagsToNAND(ya
+ return YAFFS_FAIL;
+ }
+
+-int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
+- __u8 * data, yaffs_ExtendedTags * tags)
++int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++ __u8 *data, yaffs_ExtendedTags *tags)
+ {
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ struct mtd_oob_ops ops;
+ #endif
+ size_t dummy;
+ int retval = 0;
++ int localData = 0;
+
+- loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
++ loff_t addr = ((loff_t) chunkInNAND) * dev->totalBytesPerChunk;
+
+ yaffs_PackedTags2 pt;
+
+@@ -116,9 +115,20 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y
+ ("nandmtd2_ReadChunkWithTagsFromNAND chunk %d data %p tags %p"
+ TENDSTR), chunkInNAND, data, tags));
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+- if (data && !tags)
+- retval = mtd->read(mtd, addr, dev->nDataBytesPerChunk,
++ if (dev->inbandTags) {
++
++ if (!data) {
++ localData = 1;
++ data = yaffs_GetTempBuffer(dev, __LINE__);
++ }
++
++
++ }
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ if (dev->inbandTags || (data && !tags))
++ retval = mtd->read(mtd, addr, dev->totalBytesPerChunk,
+ &dummy, data);
+ else if (tags) {
+ ops.mode = MTD_OOB_AUTO;
+@@ -130,38 +140,42 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y
+ retval = mtd->read_oob(mtd, addr, &ops);
+ }
+ #else
+- if (data && tags) {
+- if (dev->useNANDECC) {
+- retval =
+- mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, dev->spareBuffer,
+- NULL);
+- } else {
+- retval =
+- mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
++ if (!dev->inbandTags && data && tags) {
++
++ retval = mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
+ &dummy, data, dev->spareBuffer,
+ NULL);
+- }
+ } else {
+ if (data)
+ retval =
+ mtd->read(mtd, addr, dev->nDataBytesPerChunk, &dummy,
+ data);
+- if (tags)
++ if (!dev->inbandTags && tags)
+ retval =
+ mtd->read_oob(mtd, addr, mtd->oobsize, &dummy,
+ dev->spareBuffer);
+ }
+ #endif
+
+- memcpy(&pt, dev->spareBuffer, sizeof(pt));
+
+- if (tags)
+- yaffs_UnpackTags2(tags, &pt);
++ if (dev->inbandTags) {
++ if (tags) {
++ yaffs_PackedTags2TagsPart *pt2tp;
++ pt2tp = (yaffs_PackedTags2TagsPart *)&data[dev->nDataBytesPerChunk];
++ yaffs_UnpackTags2TagsPart(tags, pt2tp);
++ }
++ } else {
++ if (tags) {
++ memcpy(&pt, dev->spareBuffer, sizeof(pt));
++ yaffs_UnpackTags2(tags, &pt);
++ }
++ }
++
++ if (localData)
++ yaffs_ReleaseTempBuffer(dev, data, __LINE__);
+
+- if(tags && retval == -EBADMSG && tags->eccResult == YAFFS_ECC_RESULT_NO_ERROR)
++ if (tags && retval == -EBADMSG && tags->eccResult == YAFFS_ECC_RESULT_NO_ERROR)
+ tags->eccResult = YAFFS_ECC_RESULT_UNFIXED;
+-
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+@@ -178,7 +192,7 @@ int nandmtd2_MarkNANDBlockBad(struct yaf
+ retval =
+ mtd->block_markbad(mtd,
+ blockNo * dev->nChunksPerBlock *
+- dev->nDataBytesPerChunk);
++ dev->totalBytesPerChunk);
+
+ if (retval == 0)
+ return YAFFS_OK;
+@@ -188,7 +202,7 @@ int nandmtd2_MarkNANDBlockBad(struct yaf
+ }
+
+ int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState * state, int *sequenceNumber)
++ yaffs_BlockState *state, __u32 *sequenceNumber)
+ {
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+ int retval;
+@@ -198,7 +212,7 @@ int nandmtd2_QueryNANDBlock(struct yaffs
+ retval =
+ mtd->block_isbad(mtd,
+ blockNo * dev->nChunksPerBlock *
+- dev->nDataBytesPerChunk);
++ dev->totalBytesPerChunk);
+
+ if (retval) {
+ T(YAFFS_TRACE_MTD, (TSTR("block is bad" TENDSTR)));
+--- a/fs/yaffs2/yaffs_mtdif2.h
++++ b/fs/yaffs2/yaffs_mtdif2.h
+@@ -17,13 +17,13 @@
+ #define __YAFFS_MTDIF2_H__
+
+ #include "yaffs_guts.h"
+-int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data,
+- const yaffs_ExtendedTags * tags);
+-int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
+- __u8 * data, yaffs_ExtendedTags * tags);
++int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data,
++ const yaffs_ExtendedTags *tags);
++int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++ __u8 *data, yaffs_ExtendedTags *tags);
+ int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
+ int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState * state, int *sequenceNumber);
++ yaffs_BlockState *state, __u32 *sequenceNumber);
+
+ #endif
+--- a/fs/yaffs2/yaffs_mtdif.c
++++ b/fs/yaffs2/yaffs_mtdif.c
+@@ -12,7 +12,7 @@
+ */
+
+ const char *yaffs_mtdif_c_version =
+- "$Id: yaffs_mtdif.c,v 1.19 2007-02-14 01:09:06 wookey Exp $";
++ "$Id: yaffs_mtdif.c,v 1.22 2009-03-06 17:20:51 wookey Exp $";
+
+ #include "yportenv.h"
+
+@@ -24,7 +24,7 @@ const char *yaffs_mtdif_c_version =
+ #include "linux/time.h"
+ #include "linux/mtd/nand.h"
+
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
++#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 18))
+ static struct nand_oobinfo yaffs_oobinfo = {
+ .useecc = 1,
+ .eccbytes = 6,
+@@ -36,7 +36,7 @@ static struct nand_oobinfo yaffs_noeccin
+ };
+ #endif
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ static inline void translate_spare2oob(const yaffs_Spare *spare, __u8 *oob)
+ {
+ oob[0] = spare->tagByte0;
+@@ -45,8 +45,8 @@ static inline void translate_spare2oob(c
+ oob[3] = spare->tagByte3;
+ oob[4] = spare->tagByte4;
+ oob[5] = spare->tagByte5 & 0x3f;
+- oob[5] |= spare->blockStatus == 'Y' ? 0: 0x80;
+- oob[5] |= spare->pageStatus == 0 ? 0: 0x40;
++ oob[5] |= spare->blockStatus == 'Y' ? 0 : 0x80;
++ oob[5] |= spare->pageStatus == 0 ? 0 : 0x40;
+ oob[6] = spare->tagByte6;
+ oob[7] = spare->tagByte7;
+ }
+@@ -71,18 +71,18 @@ static inline void translate_oob2spare(y
+ }
+ #endif
+
+-int nandmtd_WriteChunkToNAND(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data, const yaffs_Spare * spare)
++int nandmtd_WriteChunkToNAND(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data, const yaffs_Spare *spare)
+ {
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ struct mtd_oob_ops ops;
+ #endif
+ size_t dummy;
+ int retval = 0;
+
+ loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ __u8 spareAsBytes[8]; /* OOB */
+
+ if (data && !spare)
+@@ -135,18 +135,18 @@ int nandmtd_WriteChunkToNAND(yaffs_Devic
+ return YAFFS_FAIL;
+ }
+
+-int nandmtd_ReadChunkFromNAND(yaffs_Device * dev, int chunkInNAND, __u8 * data,
+- yaffs_Spare * spare)
++int nandmtd_ReadChunkFromNAND(yaffs_Device *dev, int chunkInNAND, __u8 *data,
++ yaffs_Spare *spare)
+ {
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ struct mtd_oob_ops ops;
+ #endif
+ size_t dummy;
+ int retval = 0;
+
+ loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ __u8 spareAsBytes[8]; /* OOB */
+
+ if (data && !spare)
+@@ -205,7 +205,7 @@ int nandmtd_ReadChunkFromNAND(yaffs_Devi
+ return YAFFS_FAIL;
+ }
+
+-int nandmtd_EraseBlockInNAND(yaffs_Device * dev, int blockNumber)
++int nandmtd_EraseBlockInNAND(yaffs_Device *dev, int blockNumber)
+ {
+ struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+ __u32 addr =
+@@ -234,7 +234,7 @@ int nandmtd_EraseBlockInNAND(yaffs_Devic
+ return YAFFS_FAIL;
+ }
+
+-int nandmtd_InitialiseNAND(yaffs_Device * dev)
++int nandmtd_InitialiseNAND(yaffs_Device *dev)
+ {
+ return YAFFS_OK;
+ }
+--- a/fs/yaffs2/yaffs_mtdif.h
++++ b/fs/yaffs2/yaffs_mtdif.h
+@@ -18,10 +18,15 @@
+
+ #include "yaffs_guts.h"
+
+-int nandmtd_WriteChunkToNAND(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data, const yaffs_Spare * spare);
+-int nandmtd_ReadChunkFromNAND(yaffs_Device * dev, int chunkInNAND, __u8 * data,
+- yaffs_Spare * spare);
+-int nandmtd_EraseBlockInNAND(yaffs_Device * dev, int blockNumber);
+-int nandmtd_InitialiseNAND(yaffs_Device * dev);
++#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 18))
++extern struct nand_oobinfo yaffs_oobinfo;
++extern struct nand_oobinfo yaffs_noeccinfo;
++#endif
++
++int nandmtd_WriteChunkToNAND(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data, const yaffs_Spare *spare);
++int nandmtd_ReadChunkFromNAND(yaffs_Device *dev, int chunkInNAND, __u8 *data,
++ yaffs_Spare *spare);
++int nandmtd_EraseBlockInNAND(yaffs_Device *dev, int blockNumber);
++int nandmtd_InitialiseNAND(yaffs_Device *dev);
+ #endif
+--- a/fs/yaffs2/yaffs_nand.c
++++ b/fs/yaffs2/yaffs_nand.c
+@@ -12,16 +12,17 @@
+ */
+
+ const char *yaffs_nand_c_version =
+- "$Id: yaffs_nand.c,v 1.7 2007-02-14 01:09:06 wookey Exp $";
++ "$Id: yaffs_nand.c,v 1.10 2009-03-06 17:20:54 wookey Exp $";
+
+ #include "yaffs_nand.h"
+ #include "yaffs_tagscompat.h"
+ #include "yaffs_tagsvalidity.h"
+
++#include "yaffs_getblockinfo.h"
+
+-int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
+- __u8 * buffer,
+- yaffs_ExtendedTags * tags)
++int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++ __u8 *buffer,
++ yaffs_ExtendedTags *tags)
+ {
+ int result;
+ yaffs_ExtendedTags localTags;
+@@ -29,7 +30,7 @@ int yaffs_ReadChunkWithTagsFromNAND(yaff
+ int realignedChunkInNAND = chunkInNAND - dev->chunkOffset;
+
+ /* If there are no tags provided, use local tags to get prioritised gc working */
+- if(!tags)
++ if (!tags)
+ tags = &localTags;
+
+ if (dev->readChunkWithTagsFromNAND)
+@@ -40,20 +41,20 @@ int yaffs_ReadChunkWithTagsFromNAND(yaff
+ realignedChunkInNAND,
+ buffer,
+ tags);
+- if(tags &&
+- tags->eccResult > YAFFS_ECC_RESULT_NO_ERROR){
++ if (tags &&
++ tags->eccResult > YAFFS_ECC_RESULT_NO_ERROR) {
+
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, chunkInNAND/dev->nChunksPerBlock);
+- yaffs_HandleChunkError(dev,bi);
++ yaffs_HandleChunkError(dev, bi);
+ }
+
+ return result;
+ }
+
+-int yaffs_WriteChunkWithTagsToNAND(yaffs_Device * dev,
++int yaffs_WriteChunkWithTagsToNAND(yaffs_Device *dev,
+ int chunkInNAND,
+- const __u8 * buffer,
+- yaffs_ExtendedTags * tags)
++ const __u8 *buffer,
++ yaffs_ExtendedTags *tags)
+ {
+ chunkInNAND -= dev->chunkOffset;
+
+@@ -84,7 +85,7 @@ int yaffs_WriteChunkWithTagsToNAND(yaffs
+ tags);
+ }
+
+-int yaffs_MarkBlockBad(yaffs_Device * dev, int blockNo)
++int yaffs_MarkBlockBad(yaffs_Device *dev, int blockNo)
+ {
+ blockNo -= dev->blockOffset;
+
+@@ -95,10 +96,10 @@ int yaffs_MarkBlockBad(yaffs_Device * de
+ return yaffs_TagsCompatabilityMarkNANDBlockBad(dev, blockNo);
+ }
+
+-int yaffs_QueryInitialBlockState(yaffs_Device * dev,
++int yaffs_QueryInitialBlockState(yaffs_Device *dev,
+ int blockNo,
+- yaffs_BlockState * state,
+- unsigned *sequenceNumber)
++ yaffs_BlockState *state,
++ __u32 *sequenceNumber)
+ {
+ blockNo -= dev->blockOffset;
+
+--- a/fs/yaffs2/yaffs_nandemul2k.h
++++ b/fs/yaffs2/yaffs_nandemul2k.h
+@@ -21,14 +21,14 @@
+ #include "yaffs_guts.h"
+
+ int nandemul2k_WriteChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, const __u8 * data,
+- yaffs_ExtendedTags * tags);
++ int chunkInNAND, const __u8 *data,
++ const yaffs_ExtendedTags *tags);
+ int nandemul2k_ReadChunkWithTagsFromNAND(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, __u8 * data,
+- yaffs_ExtendedTags * tags);
++ int chunkInNAND, __u8 *data,
++ yaffs_ExtendedTags *tags);
+ int nandemul2k_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
+ int nandemul2k_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState * state, int *sequenceNumber);
++ yaffs_BlockState *state, __u32 *sequenceNumber);
+ int nandemul2k_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
+ int blockInNAND);
+ int nandemul2k_InitialiseNAND(struct yaffs_DeviceStruct *dev);
+--- a/fs/yaffs2/yaffs_nand.h
++++ b/fs/yaffs2/yaffs_nand.h
+@@ -19,21 +19,21 @@
+
+
+
+-int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
+- __u8 * buffer,
+- yaffs_ExtendedTags * tags);
+-
+-int yaffs_WriteChunkWithTagsToNAND(yaffs_Device * dev,
+- int chunkInNAND,
+- const __u8 * buffer,
+- yaffs_ExtendedTags * tags);
+-
+-int yaffs_MarkBlockBad(yaffs_Device * dev, int blockNo);
+-
+-int yaffs_QueryInitialBlockState(yaffs_Device * dev,
+- int blockNo,
+- yaffs_BlockState * state,
+- unsigned *sequenceNumber);
++int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++ __u8 *buffer,
++ yaffs_ExtendedTags *tags);
++
++int yaffs_WriteChunkWithTagsToNAND(yaffs_Device *dev,
++ int chunkInNAND,
++ const __u8 *buffer,
++ yaffs_ExtendedTags *tags);
++
++int yaffs_MarkBlockBad(yaffs_Device *dev, int blockNo);
++
++int yaffs_QueryInitialBlockState(yaffs_Device *dev,
++ int blockNo,
++ yaffs_BlockState *state,
++ unsigned *sequenceNumber);
+
+ int yaffs_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
+ int blockInNAND);
+--- a/fs/yaffs2/yaffs_packedtags1.c
++++ b/fs/yaffs2/yaffs_packedtags1.c
+@@ -14,7 +14,7 @@
+ #include "yaffs_packedtags1.h"
+ #include "yportenv.h"
+
+-void yaffs_PackTags1(yaffs_PackedTags1 * pt, const yaffs_ExtendedTags * t)
++void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ExtendedTags *t)
+ {
+ pt->chunkId = t->chunkId;
+ pt->serialNumber = t->serialNumber;
+@@ -27,7 +27,7 @@ void yaffs_PackTags1(yaffs_PackedTags1 *
+
+ }
+
+-void yaffs_UnpackTags1(yaffs_ExtendedTags * t, const yaffs_PackedTags1 * pt)
++void yaffs_UnpackTags1(yaffs_ExtendedTags *t, const yaffs_PackedTags1 *pt)
+ {
+ static const __u8 allFF[] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+@@ -35,9 +35,8 @@ void yaffs_UnpackTags1(yaffs_ExtendedTag
+
+ if (memcmp(allFF, pt, sizeof(yaffs_PackedTags1))) {
+ t->blockBad = 0;
+- if (pt->shouldBeFF != 0xFFFFFFFF) {
++ if (pt->shouldBeFF != 0xFFFFFFFF)
+ t->blockBad = 1;
+- }
+ t->chunkUsed = 1;
+ t->objectId = pt->objectId;
+ t->chunkId = pt->chunkId;
+@@ -47,6 +46,5 @@ void yaffs_UnpackTags1(yaffs_ExtendedTag
+ t->serialNumber = pt->serialNumber;
+ } else {
+ memset(t, 0, sizeof(yaffs_ExtendedTags));
+-
+ }
+ }
+--- a/fs/yaffs2/yaffs_packedtags1.h
++++ b/fs/yaffs2/yaffs_packedtags1.h
+@@ -32,6 +32,6 @@ typedef struct {
+
+ } yaffs_PackedTags1;
+
+-void yaffs_PackTags1(yaffs_PackedTags1 * pt, const yaffs_ExtendedTags * t);
+-void yaffs_UnpackTags1(yaffs_ExtendedTags * t, const yaffs_PackedTags1 * pt);
++void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ExtendedTags *t);
++void yaffs_UnpackTags1(yaffs_ExtendedTags *t, const yaffs_PackedTags1 *pt);
+ #endif
+--- a/fs/yaffs2/yaffs_packedtags2.c
++++ b/fs/yaffs2/yaffs_packedtags2.c
+@@ -37,60 +37,68 @@
+ #define EXTRA_OBJECT_TYPE_SHIFT (28)
+ #define EXTRA_OBJECT_TYPE_MASK ((0x0F) << EXTRA_OBJECT_TYPE_SHIFT)
+
+-static void yaffs_DumpPackedTags2(const yaffs_PackedTags2 * pt)
++
++static void yaffs_DumpPackedTags2TagsPart(const yaffs_PackedTags2TagsPart *ptt)
+ {
+ T(YAFFS_TRACE_MTD,
+ (TSTR("packed tags obj %d chunk %d byte %d seq %d" TENDSTR),
+- pt->t.objectId, pt->t.chunkId, pt->t.byteCount,
+- pt->t.sequenceNumber));
++ ptt->objectId, ptt->chunkId, ptt->byteCount,
++ ptt->sequenceNumber));
++}
++static void yaffs_DumpPackedTags2(const yaffs_PackedTags2 *pt)
++{
++ yaffs_DumpPackedTags2TagsPart(&pt->t);
+ }
+
+-static void yaffs_DumpTags2(const yaffs_ExtendedTags * t)
++static void yaffs_DumpTags2(const yaffs_ExtendedTags *t)
+ {
+ T(YAFFS_TRACE_MTD,
+ (TSTR
+- ("ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte "
+- "%d del %d ser %d seq %d"
++ ("ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d"
+ TENDSTR), t->eccResult, t->blockBad, t->chunkUsed, t->objectId,
+ t->chunkId, t->byteCount, t->chunkDeleted, t->serialNumber,
+ t->sequenceNumber));
+
+ }
+
+-void yaffs_PackTags2(yaffs_PackedTags2 * pt, const yaffs_ExtendedTags * t)
++void yaffs_PackTags2TagsPart(yaffs_PackedTags2TagsPart *ptt,
++ const yaffs_ExtendedTags *t)
+ {
+- pt->t.chunkId = t->chunkId;
+- pt->t.sequenceNumber = t->sequenceNumber;
+- pt->t.byteCount = t->byteCount;
+- pt->t.objectId = t->objectId;
++ ptt->chunkId = t->chunkId;
++ ptt->sequenceNumber = t->sequenceNumber;
++ ptt->byteCount = t->byteCount;
++ ptt->objectId = t->objectId;
+
+ if (t->chunkId == 0 && t->extraHeaderInfoAvailable) {
+ /* Store the extra header info instead */
+ /* We save the parent object in the chunkId */
+- pt->t.chunkId = EXTRA_HEADER_INFO_FLAG
++ ptt->chunkId = EXTRA_HEADER_INFO_FLAG
+ | t->extraParentObjectId;
+- if (t->extraIsShrinkHeader) {
+- pt->t.chunkId |= EXTRA_SHRINK_FLAG;
+- }
+- if (t->extraShadows) {
+- pt->t.chunkId |= EXTRA_SHADOWS_FLAG;
+- }
++ if (t->extraIsShrinkHeader)
++ ptt->chunkId |= EXTRA_SHRINK_FLAG;
++ if (t->extraShadows)
++ ptt->chunkId |= EXTRA_SHADOWS_FLAG;
+
+- pt->t.objectId &= ~EXTRA_OBJECT_TYPE_MASK;
+- pt->t.objectId |=
++ ptt->objectId &= ~EXTRA_OBJECT_TYPE_MASK;
++ ptt->objectId |=
+ (t->extraObjectType << EXTRA_OBJECT_TYPE_SHIFT);
+
+- if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK) {
+- pt->t.byteCount = t->extraEquivalentObjectId;
+- } else if (t->extraObjectType == YAFFS_OBJECT_TYPE_FILE) {
+- pt->t.byteCount = t->extraFileLength;
+- } else {
+- pt->t.byteCount = 0;
+- }
++ if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK)
++ ptt->byteCount = t->extraEquivalentObjectId;
++ else if (t->extraObjectType == YAFFS_OBJECT_TYPE_FILE)
++ ptt->byteCount = t->extraFileLength;
++ else
++ ptt->byteCount = 0;
+ }
+
+- yaffs_DumpPackedTags2(pt);
++ yaffs_DumpPackedTags2TagsPart(ptt);
+ yaffs_DumpTags2(t);
++}
++
++
++void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ExtendedTags *t)
++{
++ yaffs_PackTags2TagsPart(&pt->t, t);
+
+ #ifndef YAFFS_IGNORE_TAGS_ECC
+ {
+@@ -101,82 +109,98 @@ void yaffs_PackTags2(yaffs_PackedTags2 *
+ #endif
+ }
+
+-void yaffs_UnpackTags2(yaffs_ExtendedTags * t, yaffs_PackedTags2 * pt)
++
++void yaffs_UnpackTags2TagsPart(yaffs_ExtendedTags *t,
++ yaffs_PackedTags2TagsPart *ptt)
+ {
+
+ memset(t, 0, sizeof(yaffs_ExtendedTags));
+
+ yaffs_InitialiseTags(t);
+
+- if (pt->t.sequenceNumber != 0xFFFFFFFF) {
+- /* Page is in use */
+-#ifdef YAFFS_IGNORE_TAGS_ECC
+- {
+- t->eccResult = YAFFS_ECC_RESULT_NO_ERROR;
+- }
+-#else
+- {
+- yaffs_ECCOther ecc;
+- int result;
+- yaffs_ECCCalculateOther((unsigned char *)&pt->t,
+- sizeof
+- (yaffs_PackedTags2TagsPart),
+- &ecc);
+- result =
+- yaffs_ECCCorrectOther((unsigned char *)&pt->t,
+- sizeof
+- (yaffs_PackedTags2TagsPart),
+- &pt->ecc, &ecc);
+- switch(result){
+- case 0:
+- t->eccResult = YAFFS_ECC_RESULT_NO_ERROR;
+- break;
+- case 1:
+- t->eccResult = YAFFS_ECC_RESULT_FIXED;
+- break;
+- case -1:
+- t->eccResult = YAFFS_ECC_RESULT_UNFIXED;
+- break;
+- default:
+- t->eccResult = YAFFS_ECC_RESULT_UNKNOWN;
+- }
+- }
+-#endif
++ if (ptt->sequenceNumber != 0xFFFFFFFF) {
+ t->blockBad = 0;
+ t->chunkUsed = 1;
+- t->objectId = pt->t.objectId;
+- t->chunkId = pt->t.chunkId;
+- t->byteCount = pt->t.byteCount;
++ t->objectId = ptt->objectId;
++ t->chunkId = ptt->chunkId;
++ t->byteCount = ptt->byteCount;
+ t->chunkDeleted = 0;
+ t->serialNumber = 0;
+- t->sequenceNumber = pt->t.sequenceNumber;
++ t->sequenceNumber = ptt->sequenceNumber;
+
+ /* Do extra header info stuff */
+
+- if (pt->t.chunkId & EXTRA_HEADER_INFO_FLAG) {
++ if (ptt->chunkId & EXTRA_HEADER_INFO_FLAG) {
+ t->chunkId = 0;
+ t->byteCount = 0;
+
+ t->extraHeaderInfoAvailable = 1;
+ t->extraParentObjectId =
+- pt->t.chunkId & (~(ALL_EXTRA_FLAGS));
++ ptt->chunkId & (~(ALL_EXTRA_FLAGS));
+ t->extraIsShrinkHeader =
+- (pt->t.chunkId & EXTRA_SHRINK_FLAG) ? 1 : 0;
++ (ptt->chunkId & EXTRA_SHRINK_FLAG) ? 1 : 0;
+ t->extraShadows =
+- (pt->t.chunkId & EXTRA_SHADOWS_FLAG) ? 1 : 0;
++ (ptt->chunkId & EXTRA_SHADOWS_FLAG) ? 1 : 0;
+ t->extraObjectType =
+- pt->t.objectId >> EXTRA_OBJECT_TYPE_SHIFT;
++ ptt->objectId >> EXTRA_OBJECT_TYPE_SHIFT;
+ t->objectId &= ~EXTRA_OBJECT_TYPE_MASK;
+
+- if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK) {
+- t->extraEquivalentObjectId = pt->t.byteCount;
+- } else {
+- t->extraFileLength = pt->t.byteCount;
++ if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK)
++ t->extraEquivalentObjectId = ptt->byteCount;
++ else
++ t->extraFileLength = ptt->byteCount;
++ }
++ }
++
++ yaffs_DumpPackedTags2TagsPart(ptt);
++ yaffs_DumpTags2(t);
++
++}
++
++
++void yaffs_UnpackTags2(yaffs_ExtendedTags *t, yaffs_PackedTags2 *pt)
++{
++
++ yaffs_ECCResult eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++
++ if (pt->t.sequenceNumber != 0xFFFFFFFF) {
++ /* Page is in use */
++#ifndef YAFFS_IGNORE_TAGS_ECC
++ {
++ yaffs_ECCOther ecc;
++ int result;
++ yaffs_ECCCalculateOther((unsigned char *)&pt->t,
++ sizeof
++ (yaffs_PackedTags2TagsPart),
++ &ecc);
++ result =
++ yaffs_ECCCorrectOther((unsigned char *)&pt->t,
++ sizeof
++ (yaffs_PackedTags2TagsPart),
++ &pt->ecc, &ecc);
++ switch (result) {
++ case 0:
++ eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++ break;
++ case 1:
++ eccResult = YAFFS_ECC_RESULT_FIXED;
++ break;
++ case -1:
++ eccResult = YAFFS_ECC_RESULT_UNFIXED;
++ break;
++ default:
++ eccResult = YAFFS_ECC_RESULT_UNKNOWN;
+ }
+ }
++#endif
+ }
+
++ yaffs_UnpackTags2TagsPart(t, &pt->t);
++
++ t->eccResult = eccResult;
++
+ yaffs_DumpPackedTags2(pt);
+ yaffs_DumpTags2(t);
+
+ }
++
+--- a/fs/yaffs2/yaffs_packedtags2.h
++++ b/fs/yaffs2/yaffs_packedtags2.h
+@@ -33,6 +33,11 @@ typedef struct {
+ yaffs_ECCOther ecc;
+ } yaffs_PackedTags2;
+
+-void yaffs_PackTags2(yaffs_PackedTags2 * pt, const yaffs_ExtendedTags * t);
+-void yaffs_UnpackTags2(yaffs_ExtendedTags * t, yaffs_PackedTags2 * pt);
++/* Full packed tags with ECC, used for oob tags */
++void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ExtendedTags *t);
++void yaffs_UnpackTags2(yaffs_ExtendedTags *t, yaffs_PackedTags2 *pt);
++
++/* Only the tags part (no ECC), for use with inband tags */
++void yaffs_PackTags2TagsPart(yaffs_PackedTags2TagsPart *pt, const yaffs_ExtendedTags *t);
++void yaffs_UnpackTags2TagsPart(yaffs_ExtendedTags *t, yaffs_PackedTags2TagsPart *pt);
+ #endif
+--- a/fs/yaffs2/yaffs_qsort.c
++++ b/fs/yaffs2/yaffs_qsort.c
+@@ -28,12 +28,12 @@
+ */
+
+ #include "yportenv.h"
+-//#include <linux/string.h>
++/* #include <linux/string.h> */
+
+ /*
+ * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
+ */
+-#define swapcode(TYPE, parmi, parmj, n) { \
++#define swapcode(TYPE, parmi, parmj, n) do { \
+ long i = (n) / sizeof (TYPE); \
+ register TYPE *pi = (TYPE *) (parmi); \
+ register TYPE *pj = (TYPE *) (parmj); \
+@@ -41,28 +41,29 @@
+ register TYPE t = *pi; \
+ *pi++ = *pj; \
+ *pj++ = t; \
+- } while (--i > 0); \
+-}
++ } while (--i > 0); \
++} while (0)
+
+ #define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
+- es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1;
++ es % sizeof(long) ? 2 : es == sizeof(long) ? 0 : 1;
+
+ static __inline void
+ swapfunc(char *a, char *b, int n, int swaptype)
+ {
+ if (swaptype <= 1)
+- swapcode(long, a, b, n)
++ swapcode(long, a, b, n);
+ else
+- swapcode(char, a, b, n)
++ swapcode(char, a, b, n);
+ }
+
+-#define swap(a, b) \
++#define yswap(a, b) do { \
+ if (swaptype == 0) { \
+ long t = *(long *)(a); \
+ *(long *)(a) = *(long *)(b); \
+ *(long *)(b) = t; \
+ } else \
+- swapfunc(a, b, es, swaptype)
++ swapfunc(a, b, es, swaptype); \
++} while (0)
+
+ #define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype)
+
+@@ -70,12 +71,12 @@ static __inline char *
+ med3(char *a, char *b, char *c, int (*cmp)(const void *, const void *))
+ {
+ return cmp(a, b) < 0 ?
+- (cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a ))
+- :(cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? a : c ));
++ (cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a))
++ : (cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? a : c));
+ }
+
+ #ifndef min
+-#define min(a,b) (((a) < (b)) ? (a) : (b))
++#define min(a, b) (((a) < (b)) ? (a) : (b))
+ #endif
+
+ void
+@@ -92,7 +93,7 @@ loop: SWAPINIT(a, es);
+ for (pm = (char *)a + es; pm < (char *) a + n * es; pm += es)
+ for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0;
+ pl -= es)
+- swap(pl, pl - es);
++ yswap(pl, pl - es);
+ return;
+ }
+ pm = (char *)a + (n / 2) * es;
+@@ -107,7 +108,7 @@ loop: SWAPINIT(a, es);
+ }
+ pm = med3(pl, pm, pn, cmp);
+ }
+- swap(a, pm);
++ yswap(a, pm);
+ pa = pb = (char *)a + es;
+
+ pc = pd = (char *)a + (n - 1) * es;
+@@ -115,7 +116,7 @@ loop: SWAPINIT(a, es);
+ while (pb <= pc && (r = cmp(pb, a)) <= 0) {
+ if (r == 0) {
+ swap_cnt = 1;
+- swap(pa, pb);
++ yswap(pa, pb);
+ pa += es;
+ }
+ pb += es;
+@@ -123,14 +124,14 @@ loop: SWAPINIT(a, es);
+ while (pb <= pc && (r = cmp(pc, a)) >= 0) {
+ if (r == 0) {
+ swap_cnt = 1;
+- swap(pc, pd);
++ yswap(pc, pd);
+ pd -= es;
+ }
+ pc -= es;
+ }
+ if (pb > pc)
+ break;
+- swap(pb, pc);
++ yswap(pb, pc);
+ swap_cnt = 1;
+ pb += es;
+ pc -= es;
+@@ -139,7 +140,7 @@ loop: SWAPINIT(a, es);
+ for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es)
+ for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0;
+ pl -= es)
+- swap(pl, pl - es);
++ yswap(pl, pl - es);
+ return;
+ }
+
+@@ -148,9 +149,11 @@ loop: SWAPINIT(a, es);
+ vecswap(a, pb - r, r);
+ r = min((long)(pd - pc), (long)(pn - pd - es));
+ vecswap(pb, pn - r, r);
+- if ((r = pb - pa) > es)
++ r = pb - pa;
++ if (r > es)
+ yaffs_qsort(a, r / es, es, cmp);
+- if ((r = pd - pc) > es) {
++ r = pd - pc;
++ if (r > es) {
+ /* Iterate rather than recurse to save stack space */
+ a = pn - r;
+ n = r / es;
+--- a/fs/yaffs2/yaffs_qsort.h
++++ b/fs/yaffs2/yaffs_qsort.h
+@@ -17,7 +17,7 @@
+ #ifndef __YAFFS_QSORT_H__
+ #define __YAFFS_QSORT_H__
+
+-extern void yaffs_qsort (void *const base, size_t total_elems, size_t size,
+- int (*cmp)(const void *, const void *));
++extern void yaffs_qsort(void *const base, size_t total_elems, size_t size,
++ int (*cmp)(const void *, const void *));
+
+ #endif
+--- a/fs/yaffs2/yaffs_tagscompat.c
++++ b/fs/yaffs2/yaffs_tagscompat.c
+@@ -14,16 +14,17 @@
+ #include "yaffs_guts.h"
+ #include "yaffs_tagscompat.h"
+ #include "yaffs_ecc.h"
++#include "yaffs_getblockinfo.h"
+
+-static void yaffs_HandleReadDataError(yaffs_Device * dev, int chunkInNAND);
++static void yaffs_HandleReadDataError(yaffs_Device *dev, int chunkInNAND);
+ #ifdef NOTYET
+-static void yaffs_CheckWrittenBlock(yaffs_Device * dev, int chunkInNAND);
+-static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data,
+- const yaffs_Spare * spare);
+-static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
+- const yaffs_Spare * spare);
+-static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND);
++static void yaffs_CheckWrittenBlock(yaffs_Device *dev, int chunkInNAND);
++static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data,
++ const yaffs_Spare *spare);
++static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
++ const yaffs_Spare *spare);
++static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND);
+ #endif
+
+ static const char yaffs_countBitsTable[256] = {
+@@ -54,13 +55,13 @@ int yaffs_CountBits(__u8 x)
+
+ /********** Tags ECC calculations *********/
+
+-void yaffs_CalcECC(const __u8 * data, yaffs_Spare * spare)
++void yaffs_CalcECC(const __u8 *data, yaffs_Spare *spare)
+ {
+ yaffs_ECCCalculate(data, spare->ecc1);
+ yaffs_ECCCalculate(&data[256], spare->ecc2);
+ }
+
+-void yaffs_CalcTagsECC(yaffs_Tags * tags)
++void yaffs_CalcTagsECC(yaffs_Tags *tags)
+ {
+ /* Calculate an ecc */
+
+@@ -74,9 +75,8 @@ void yaffs_CalcTagsECC(yaffs_Tags * tags
+ for (i = 0; i < 8; i++) {
+ for (j = 1; j & 0xff; j <<= 1) {
+ bit++;
+- if (b[i] & j) {
++ if (b[i] & j)
+ ecc ^= bit;
+- }
+ }
+ }
+
+@@ -84,7 +84,7 @@ void yaffs_CalcTagsECC(yaffs_Tags * tags
+
+ }
+
+-int yaffs_CheckECCOnTags(yaffs_Tags * tags)
++int yaffs_CheckECCOnTags(yaffs_Tags *tags)
+ {
+ unsigned ecc = tags->ecc;
+
+@@ -115,8 +115,8 @@ int yaffs_CheckECCOnTags(yaffs_Tags * ta
+
+ /********** Tags **********/
+
+-static void yaffs_LoadTagsIntoSpare(yaffs_Spare * sparePtr,
+- yaffs_Tags * tagsPtr)
++static void yaffs_LoadTagsIntoSpare(yaffs_Spare *sparePtr,
++ yaffs_Tags *tagsPtr)
+ {
+ yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr;
+
+@@ -132,8 +132,8 @@ static void yaffs_LoadTagsIntoSpare(yaff
+ sparePtr->tagByte7 = tu->asBytes[7];
+ }
+
+-static void yaffs_GetTagsFromSpare(yaffs_Device * dev, yaffs_Spare * sparePtr,
+- yaffs_Tags * tagsPtr)
++static void yaffs_GetTagsFromSpare(yaffs_Device *dev, yaffs_Spare *sparePtr,
++ yaffs_Tags *tagsPtr)
+ {
+ yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr;
+ int result;
+@@ -148,21 +148,20 @@ static void yaffs_GetTagsFromSpare(yaffs
+ tu->asBytes[7] = sparePtr->tagByte7;
+
+ result = yaffs_CheckECCOnTags(tagsPtr);
+- if (result > 0) {
++ if (result > 0)
+ dev->tagsEccFixed++;
+- } else if (result < 0) {
++ else if (result < 0)
+ dev->tagsEccUnfixed++;
+- }
+ }
+
+-static void yaffs_SpareInitialise(yaffs_Spare * spare)
++static void yaffs_SpareInitialise(yaffs_Spare *spare)
+ {
+ memset(spare, 0xFF, sizeof(yaffs_Spare));
+ }
+
+ static int yaffs_WriteChunkToNAND(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, const __u8 * data,
+- yaffs_Spare * spare)
++ int chunkInNAND, const __u8 *data,
++ yaffs_Spare *spare)
+ {
+ if (chunkInNAND < dev->startBlock * dev->nChunksPerBlock) {
+ T(YAFFS_TRACE_ERROR,
+@@ -177,9 +176,9 @@ static int yaffs_WriteChunkToNAND(struct
+
+ static int yaffs_ReadChunkFromNAND(struct yaffs_DeviceStruct *dev,
+ int chunkInNAND,
+- __u8 * data,
+- yaffs_Spare * spare,
+- yaffs_ECCResult * eccResult,
++ __u8 *data,
++ yaffs_Spare *spare,
++ yaffs_ECCResult *eccResult,
+ int doErrorCorrection)
+ {
+ int retVal;
+@@ -252,9 +251,11 @@ static int yaffs_ReadChunkFromNAND(struc
+ /* Must allocate enough memory for spare+2*sizeof(int) */
+ /* for ecc results from device. */
+ struct yaffs_NANDSpare nspare;
+- retVal =
+- dev->readChunkFromNAND(dev, chunkInNAND, data,
+- (yaffs_Spare *) & nspare);
++
++ memset(&nspare, 0, sizeof(nspare));
++
++ retVal = dev->readChunkFromNAND(dev, chunkInNAND, data,
++ (yaffs_Spare *) &nspare);
+ memcpy(spare, &nspare, sizeof(yaffs_Spare));
+ if (data && doErrorCorrection) {
+ if (nspare.eccres1 > 0) {
+@@ -302,8 +303,7 @@ static int yaffs_ReadChunkFromNAND(struc
+ static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
+ int chunkInNAND)
+ {
+-
+- static int init = 0;
++ static int init;
+ static __u8 cmpbuf[YAFFS_BYTES_PER_CHUNK];
+ static __u8 data[YAFFS_BYTES_PER_CHUNK];
+ /* Might as well always allocate the larger size for */
+@@ -331,12 +331,12 @@ static int yaffs_CheckChunkErased(struct
+ * Functions for robustisizing
+ */
+
+-static void yaffs_HandleReadDataError(yaffs_Device * dev, int chunkInNAND)
++static void yaffs_HandleReadDataError(yaffs_Device *dev, int chunkInNAND)
+ {
+ int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
+
+ /* Mark the block for retirement */
+- yaffs_GetBlockInfo(dev, blockInNAND)->needsRetiring = 1;
++ yaffs_GetBlockInfo(dev, blockInNAND + dev->blockOffset)->needsRetiring = 1;
+ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ (TSTR("**>>Block %d marked for retirement" TENDSTR), blockInNAND));
+
+@@ -348,22 +348,22 @@ static void yaffs_HandleReadDataError(ya
+ }
+
+ #ifdef NOTYET
+-static void yaffs_CheckWrittenBlock(yaffs_Device * dev, int chunkInNAND)
++static void yaffs_CheckWrittenBlock(yaffs_Device *dev, int chunkInNAND)
+ {
+ }
+
+-static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
+- const __u8 * data,
+- const yaffs_Spare * spare)
++static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++ const __u8 *data,
++ const yaffs_Spare *spare)
+ {
+ }
+
+-static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
+- const yaffs_Spare * spare)
++static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
++ const yaffs_Spare *spare)
+ {
+ }
+
+-static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND)
++static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND)
+ {
+ int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
+
+@@ -373,8 +373,8 @@ static void yaffs_HandleWriteChunkError(
+ yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
+ }
+
+-static int yaffs_VerifyCompare(const __u8 * d0, const __u8 * d1,
+- const yaffs_Spare * s0, const yaffs_Spare * s1)
++static int yaffs_VerifyCompare(const __u8 *d0, const __u8 *d1,
++ const yaffs_Spare *s0, const yaffs_Spare *s1)
+ {
+
+ if (memcmp(d0, d1, YAFFS_BYTES_PER_CHUNK) != 0 ||
+@@ -398,28 +398,35 @@ static int yaffs_VerifyCompare(const __u
+ }
+ #endif /* NOTYET */
+
+-int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device * dev,
+- int chunkInNAND,
+- const __u8 * data,
+- const yaffs_ExtendedTags *
+- eTags)
++int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device *dev,
++ int chunkInNAND,
++ const __u8 *data,
++ const yaffs_ExtendedTags *eTags)
+ {
+ yaffs_Spare spare;
+ yaffs_Tags tags;
+
+ yaffs_SpareInitialise(&spare);
+
+- if (eTags->chunkDeleted) {
++ if (eTags->chunkDeleted)
+ spare.pageStatus = 0;
+- } else {
++ else {
+ tags.objectId = eTags->objectId;
+ tags.chunkId = eTags->chunkId;
+- tags.byteCount = eTags->byteCount;
++
++ tags.byteCountLSB = eTags->byteCount & 0x3ff;
++
++ if (dev->nDataBytesPerChunk >= 1024)
++ tags.byteCountMSB = (eTags->byteCount >> 10) & 3;
++ else
++ tags.byteCountMSB = 3;
++
++
+ tags.serialNumber = eTags->serialNumber;
+
+- if (!dev->useNANDECC && data) {
++ if (!dev->useNANDECC && data)
+ yaffs_CalcECC(data, &spare);
+- }
++
+ yaffs_LoadTagsIntoSpare(&spare, &tags);
+
+ }
+@@ -427,15 +434,15 @@ int yaffs_TagsCompatabilityWriteChunkWit
+ return yaffs_WriteChunkToNAND(dev, chunkInNAND, data, &spare);
+ }
+
+-int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device * dev,
++int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device *dev,
+ int chunkInNAND,
+- __u8 * data,
+- yaffs_ExtendedTags * eTags)
++ __u8 *data,
++ yaffs_ExtendedTags *eTags)
+ {
+
+ yaffs_Spare spare;
+ yaffs_Tags tags;
+- yaffs_ECCResult eccResult;
++ yaffs_ECCResult eccResult = YAFFS_ECC_RESULT_UNKNOWN;
+
+ static yaffs_Spare spareFF;
+ static int init;
+@@ -466,7 +473,11 @@ int yaffs_TagsCompatabilityReadChunkWith
+
+ eTags->objectId = tags.objectId;
+ eTags->chunkId = tags.chunkId;
+- eTags->byteCount = tags.byteCount;
++ eTags->byteCount = tags.byteCountLSB;
++
++ if (dev->nDataBytesPerChunk >= 1024)
++ eTags->byteCount |= (((unsigned) tags.byteCountMSB) << 10);
++
+ eTags->serialNumber = tags.serialNumber;
+ }
+ }
+@@ -497,9 +508,9 @@ int yaffs_TagsCompatabilityMarkNANDBlock
+ }
+
+ int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev,
+- int blockNo, yaffs_BlockState *
+- state,
+- int *sequenceNumber)
++ int blockNo,
++ yaffs_BlockState *state,
++ __u32 *sequenceNumber)
+ {
+
+ yaffs_Spare spare0, spare1;
+--- a/fs/yaffs2/yaffs_tagscompat.h
++++ b/fs/yaffs2/yaffs_tagscompat.h
+@@ -17,24 +17,23 @@
+ #define __YAFFS_TAGSCOMPAT_H__
+
+ #include "yaffs_guts.h"
+-int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device * dev,
+- int chunkInNAND,
+- const __u8 * data,
+- const yaffs_ExtendedTags *
+- tags);
+-int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device * dev,
+- int chunkInNAND,
+- __u8 * data,
+- yaffs_ExtendedTags *
+- tags);
++int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device *dev,
++ int chunkInNAND,
++ const __u8 *data,
++ const yaffs_ExtendedTags *tags);
++int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device *dev,
++ int chunkInNAND,
++ __u8 *data,
++ yaffs_ExtendedTags *tags);
+ int yaffs_TagsCompatabilityMarkNANDBlockBad(struct yaffs_DeviceStruct *dev,
+ int blockNo);
+ int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev,
+- int blockNo, yaffs_BlockState *
+- state, int *sequenceNumber);
++ int blockNo,
++ yaffs_BlockState *state,
++ __u32 *sequenceNumber);
+
+-void yaffs_CalcTagsECC(yaffs_Tags * tags);
+-int yaffs_CheckECCOnTags(yaffs_Tags * tags);
++void yaffs_CalcTagsECC(yaffs_Tags *tags);
++int yaffs_CheckECCOnTags(yaffs_Tags *tags);
+ int yaffs_CountBits(__u8 byte);
+
+ #endif
+--- a/fs/yaffs2/yaffs_tagsvalidity.c
++++ b/fs/yaffs2/yaffs_tagsvalidity.c
+@@ -13,14 +13,14 @@
+
+ #include "yaffs_tagsvalidity.h"
+
+-void yaffs_InitialiseTags(yaffs_ExtendedTags * tags)
++void yaffs_InitialiseTags(yaffs_ExtendedTags *tags)
+ {
+ memset(tags, 0, sizeof(yaffs_ExtendedTags));
+ tags->validMarker0 = 0xAAAAAAAA;
+ tags->validMarker1 = 0x55555555;
+ }
+
+-int yaffs_ValidateTags(yaffs_ExtendedTags * tags)
++int yaffs_ValidateTags(yaffs_ExtendedTags *tags)
+ {
+ return (tags->validMarker0 == 0xAAAAAAAA &&
+ tags->validMarker1 == 0x55555555);
+--- a/fs/yaffs2/yaffs_tagsvalidity.h
++++ b/fs/yaffs2/yaffs_tagsvalidity.h
+@@ -19,6 +19,6 @@
+
+ #include "yaffs_guts.h"
+
+-void yaffs_InitialiseTags(yaffs_ExtendedTags * tags);
+-int yaffs_ValidateTags(yaffs_ExtendedTags * tags);
++void yaffs_InitialiseTags(yaffs_ExtendedTags *tags);
++int yaffs_ValidateTags(yaffs_ExtendedTags *tags);
+ #endif
+--- a/fs/yaffs2/yportenv.h
++++ b/fs/yaffs2/yportenv.h
+@@ -17,17 +17,28 @@
+ #ifndef __YPORTENV_H__
+ #define __YPORTENV_H__
+
++/*
++ * Define the MTD version in terms of Linux Kernel versions
++ * This allows yaffs to be used independently of the kernel
++ * as well as with it.
++ */
++
++#define MTD_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
++
+ #if defined CONFIG_YAFFS_WINCE
+
+ #include "ywinceenv.h"
+
+-#elif defined __KERNEL__
++#elif defined __KERNEL__
+
+ #include "moduleconfig.h"
+
+ /* Linux kernel */
++
+ #include <linux/version.h>
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++#define MTD_VERSION_CODE LINUX_VERSION_CODE
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+ #include <linux/config.h>
+ #endif
+ #include <linux/kernel.h>
+@@ -40,12 +51,13 @@
+ #define YCHAR char
+ #define YUCHAR unsigned char
+ #define _Y(x) x
+-#define yaffs_strcpy(a,b) strcpy(a,b)
+-#define yaffs_strncpy(a,b,c) strncpy(a,b,c)
+-#define yaffs_strncmp(a,b,c) strncmp(a,b,c)
+-#define yaffs_strlen(s) strlen(s)
+-#define yaffs_sprintf sprintf
+-#define yaffs_toupper(a) toupper(a)
++#define yaffs_strcat(a, b) strcat(a, b)
++#define yaffs_strcpy(a, b) strcpy(a, b)
++#define yaffs_strncpy(a, b, c) strncpy(a, b, c)
++#define yaffs_strncmp(a, b, c) strncmp(a, b, c)
++#define yaffs_strlen(s) strlen(s)
++#define yaffs_sprintf sprintf
++#define yaffs_toupper(a) toupper(a)
+
+ #define Y_INLINE inline
+
+@@ -53,19 +65,19 @@
+ #define YAFFS_LOSTNFOUND_PREFIX "obj"
+
+ /* #define YPRINTF(x) printk x */
+-#define YMALLOC(x) kmalloc(x,GFP_KERNEL)
++#define YMALLOC(x) kmalloc(x, GFP_NOFS)
+ #define YFREE(x) kfree(x)
+ #define YMALLOC_ALT(x) vmalloc(x)
+ #define YFREE_ALT(x) vfree(x)
+ #define YMALLOC_DMA(x) YMALLOC(x)
+
+-// KR - added for use in scan so processes aren't blocked indefinitely.
++/* KR - added for use in scan so processes aren't blocked indefinitely. */
+ #define YYIELD() schedule()
+
+ #define YAFFS_ROOT_MODE 0666
+ #define YAFFS_LOSTNFOUND_MODE 0666
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ #define Y_CURRENT_TIME CURRENT_TIME.tv_sec
+ #define Y_TIME_CONVERT(x) (x).tv_sec
+ #else
+@@ -73,11 +85,12 @@
+ #define Y_TIME_CONVERT(x) (x)
+ #endif
+
+-#define yaffs_SumCompare(x,y) ((x) == (y))
+-#define yaffs_strcmp(a,b) strcmp(a,b)
++#define yaffs_SumCompare(x, y) ((x) == (y))
++#define yaffs_strcmp(a, b) strcmp(a, b)
+
+ #define TENDSTR "\n"
+ #define TSTR(x) KERN_WARNING x
++#define TCONT(x) x
+ #define TOUT(p) printk p
+
+ #define yaffs_trace(mask, fmt, args...) \
+@@ -90,6 +103,8 @@
+
+ #elif defined CONFIG_YAFFS_DIRECT
+
++#define MTD_VERSION_CODE MTD_VERSION(2, 6, 22)
++
+ /* Direct interface */
+ #include "ydirectenv.h"
+
+@@ -111,11 +126,12 @@
+ #define YCHAR char
+ #define YUCHAR unsigned char
+ #define _Y(x) x
+-#define yaffs_strcpy(a,b) strcpy(a,b)
+-#define yaffs_strncpy(a,b,c) strncpy(a,b,c)
+-#define yaffs_strlen(s) strlen(s)
+-#define yaffs_sprintf sprintf
+-#define yaffs_toupper(a) toupper(a)
++#define yaffs_strcat(a, b) strcat(a, b)
++#define yaffs_strcpy(a, b) strcpy(a, b)
++#define yaffs_strncpy(a, b, c) strncpy(a, b, c)
++#define yaffs_strlen(s) strlen(s)
++#define yaffs_sprintf sprintf
++#define yaffs_toupper(a) toupper(a)
+
+ #define Y_INLINE inline
+
+@@ -133,8 +149,8 @@
+ #define YAFFS_ROOT_MODE 0666
+ #define YAFFS_LOSTNFOUND_MODE 0666
+
+-#define yaffs_SumCompare(x,y) ((x) == (y))
+-#define yaffs_strcmp(a,b) strcmp(a,b)
++#define yaffs_SumCompare(x, y) ((x) == (y))
++#define yaffs_strcmp(a, b) strcmp(a, b)
+
+ #else
+ /* Should have specified a configuration type */
+@@ -178,10 +194,10 @@ extern unsigned int yaffs_wr_attempts;
+ #define YAFFS_TRACE_ALWAYS 0xF0000000
+
+
+-#define T(mask,p) do{ if((mask) & (yaffs_traceMask | YAFFS_TRACE_ALWAYS)) TOUT(p);} while(0)
++#define T(mask, p) do { if ((mask) & (yaffs_traceMask | YAFFS_TRACE_ALWAYS)) TOUT(p); } while (0)
+
+-#ifndef CONFIG_YAFFS_WINCE
+-#define YBUG() T(YAFFS_TRACE_BUG,(TSTR("==>> yaffs bug: " __FILE__ " %d" TENDSTR),__LINE__))
++#ifndef YBUG
++#define YBUG() do {T(YAFFS_TRACE_BUG, (TSTR("==>> yaffs bug: " __FILE__ " %d" TENDSTR), __LINE__)); } while (0)
+ #endif
+
+ #endif
diff --git a/target/linux/generic/patches-3.3/502-yaffs_git_2010_10_20.patch b/target/linux/generic/patches-3.3/502-yaffs_git_2010_10_20.patch
new file mode 100644
index 000000000..f4535a6b5
--- /dev/null
+++ b/target/linux/generic/patches-3.3/502-yaffs_git_2010_10_20.patch
@@ -0,0 +1,27068 @@
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -35,7 +35,6 @@ source "fs/gfs2/Kconfig"
+ source "fs/ocfs2/Kconfig"
+ source "fs/btrfs/Kconfig"
+ source "fs/nilfs2/Kconfig"
+-source "fs/yaffs2/Kconfig"
+
+ endif # BLOCK
+
+@@ -201,6 +200,10 @@ source "fs/hfsplus/Kconfig"
+ source "fs/befs/Kconfig"
+ source "fs/bfs/Kconfig"
+ source "fs/efs/Kconfig"
++
++# Patched by YAFFS
++source "fs/yaffs2/Kconfig"
++
+ source "fs/jffs2/Kconfig"
+ # UBIFS File system configuration
+ source "fs/ubifs/Kconfig"
+--- a/fs/Makefile
++++ b/fs/Makefile
+@@ -125,5 +125,6 @@ obj-$(CONFIG_GFS2_FS) += gfs2/
+ obj-y += exofs/ # Multiple modules
+ obj-$(CONFIG_CEPH_FS) += ceph/
+ obj-$(CONFIG_PSTORE) += pstore/
+-obj-$(CONFIG_YAFFS_FS) += yaffs2/
+
++# Patched by YAFFS
++obj-$(CONFIG_YAFFS_FS) += yaffs2/
+--- a/fs/yaffs2/devextras.h
++++ b/fs/yaffs2/devextras.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -24,6 +24,8 @@
+ #define __EXTRAS_H__
+
+
++#include "yportenv.h"
++
+ #if !(defined __KERNEL__)
+
+ /* Definition of types */
+@@ -33,103 +35,6 @@ typedef unsigned __u32;
+
+ #endif
+
+-/*
+- * This is a simple doubly linked list implementation that matches the
+- * way the Linux kernel doubly linked list implementation works.
+- */
+-
+-struct ylist_head {
+- struct ylist_head *next; /* next in chain */
+- struct ylist_head *prev; /* previous in chain */
+-};
+-
+-
+-/* Initialise a static list */
+-#define YLIST_HEAD(name) \
+-struct ylist_head name = { &(name), &(name)}
+-
+-
+-
+-/* Initialise a list head to an empty list */
+-#define YINIT_LIST_HEAD(p) \
+-do { \
+- (p)->next = (p);\
+- (p)->prev = (p); \
+-} while (0)
+-
+-
+-/* Add an element to a list */
+-static __inline__ void ylist_add(struct ylist_head *newEntry,
+- struct ylist_head *list)
+-{
+- struct ylist_head *listNext = list->next;
+-
+- list->next = newEntry;
+- newEntry->prev = list;
+- newEntry->next = listNext;
+- listNext->prev = newEntry;
+-
+-}
+-
+-static __inline__ void ylist_add_tail(struct ylist_head *newEntry,
+- struct ylist_head *list)
+-{
+- struct ylist_head *listPrev = list->prev;
+-
+- list->prev = newEntry;
+- newEntry->next = list;
+- newEntry->prev = listPrev;
+- listPrev->next = newEntry;
+-
+-}
+-
+-
+-/* Take an element out of its current list, with or without
+- * reinitialising the links.of the entry*/
+-static __inline__ void ylist_del(struct ylist_head *entry)
+-{
+- struct ylist_head *listNext = entry->next;
+- struct ylist_head *listPrev = entry->prev;
+-
+- listNext->prev = listPrev;
+- listPrev->next = listNext;
+-
+-}
+-
+-static __inline__ void ylist_del_init(struct ylist_head *entry)
+-{
+- ylist_del(entry);
+- entry->next = entry->prev = entry;
+-}
+-
+-
+-/* Test if the list is empty */
+-static __inline__ int ylist_empty(struct ylist_head *entry)
+-{
+- return (entry->next == entry);
+-}
+-
+-
+-/* ylist_entry takes a pointer to a list entry and offsets it to that
+- * we can find a pointer to the object it is embedded in.
+- */
+-
+-
+-#define ylist_entry(entry, type, member) \
+- ((type *)((char *)(entry)-(unsigned long)(&((type *)NULL)->member)))
+-
+-
+-/* ylist_for_each and list_for_each_safe iterate over lists.
+- * ylist_for_each_safe uses temporary storage to make the list delete safe
+- */
+-
+-#define ylist_for_each(itervar, list) \
+- for (itervar = (list)->next; itervar != (list); itervar = itervar->next)
+-
+-#define ylist_for_each_safe(itervar, saveVar, list) \
+- for (itervar = (list)->next, saveVar = (list)->next->next; \
+- itervar != (list); itervar = saveVar, saveVar = saveVar->next)
+-
+
+ #if !(defined __KERNEL__)
+
+--- a/fs/yaffs2/Kconfig
++++ b/fs/yaffs2/Kconfig
+@@ -90,23 +90,15 @@ config YAFFS_AUTO_YAFFS2
+
+ If unsure, say Y.
+
+-config YAFFS_DISABLE_LAZY_LOAD
+- bool "Disable lazy loading"
+- depends on YAFFS_YAFFS2
++config YAFFS_DISABLE_TAGS_ECC
++ bool "Disable YAFFS from doing ECC on tags by default"
++ depends on YAFFS_FS && YAFFS_YAFFS2
+ default n
+ help
+- "Lazy loading" defers loading file details until they are
+- required. This saves mount time, but makes the first look-up
+- a bit longer.
+-
+- Lazy loading will only happen if enabled by this option being 'n'
+- and if the appropriate tags are available, else yaffs2 will
+- automatically fall back to immediate loading and do the right
+- thing.
+-
+- Lazy laoding will be required by checkpointing.
+-
+- Setting this to 'y' will disable lazy loading.
++ This defaults Yaffs to using its own ECC calculations on tags instead of
++ just relying on the MTD.
++ This behavior can also be overridden with tags_ecc_on and
++ tags_ecc_off mount options.
+
+ If unsure, say N.
+
+@@ -154,3 +146,45 @@ config YAFFS_SHORT_NAMES_IN_RAM
+ but makes look-ups faster.
+
+ If unsure, say Y.
++
++config YAFFS_EMPTY_LOST_AND_FOUND
++ bool "Empty lost and found on boot"
++ depends on YAFFS_FS
++ default n
++ help
++ If this is enabled then the contents of lost and found are
++ automatically dumped at mount.
++
++ If unsure, say N.
++
++config YAFFS_DISABLE_BLOCK_REFRESHING
++ bool "Disable yaffs2 block refreshing"
++ depends on YAFFS_FS
++ default n
++ help
++ If this is set, then block refreshing is disabled.
++ Block refreshing infrequently refreshes the oldest block in
++ a yaffs2 file system. This mechanism helps to refresh flash to
++ mitigate against data loss. This is particularly useful for MLC.
++
++ If unsure, say N.
++
++config YAFFS_DISABLE_BACKGROUND
++ bool "Disable yaffs2 background processing"
++ depends on YAFFS_FS
++ default n
++ help
++ If this is set, then background processing is disabled.
++ Background processing makes many foreground activities faster.
++
++ If unsure, say N.
++
++config YAFFS_XATTR
++ bool "Enable yaffs2 xattr support"
++ depends on YAFFS_FS
++ default y
++ help
++ If this is set then yaffs2 will provide xattr support.
++ If unsure, say Y.
++
++
+--- a/fs/yaffs2/Makefile
++++ b/fs/yaffs2/Makefile
+@@ -4,7 +4,14 @@
+
+ obj-$(CONFIG_YAFFS_FS) += yaffs.o
+
+-yaffs-y := yaffs_ecc.o yaffs_fs.o yaffs_guts.o yaffs_checkptrw.o
+-yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o yaffs_qsort.o
++yaffs-y := yaffs_ecc.o yaffs_vfs_glue.o yaffs_guts.o yaffs_checkptrw.o
++yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o
+ yaffs-y += yaffs_tagscompat.o yaffs_tagsvalidity.o
+ yaffs-y += yaffs_mtdif.o yaffs_mtdif1.o yaffs_mtdif2.o
++yaffs-y += yaffs_nameval.o
++yaffs-y += yaffs_allocator.o
++yaffs-y += yaffs_yaffs1.o
++yaffs-y += yaffs_yaffs2.o
++yaffs-y += yaffs_bitmap.o
++yaffs-y += yaffs_verify.o
++
+--- a/fs/yaffs2/moduleconfig.h
++++ b/fs/yaffs2/moduleconfig.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Martin Fouts <Martin.Fouts@palmsource.com>
+@@ -29,25 +29,46 @@
+ /* Meaning: Yaffs does its own ECC, rather than using MTD ECC */
+ /* #define CONFIG_YAFFS_DOES_ECC */
+
++/* Default: Selected */
++/* Meaning: Yaffs does its own ECC on tags for packed tags rather than use mtd */
++#define CONFIG_YAFFS_DOES_TAGS_ECC
++
+ /* Default: Not selected */
+ /* Meaning: ECC byte order is 'wrong'. Only meaningful if */
+ /* CONFIG_YAFFS_DOES_ECC is set */
+ /* #define CONFIG_YAFFS_ECC_WRONG_ORDER */
+
+-/* Default: Selected */
+-/* Meaning: Disables testing whether chunks are erased before writing to them*/
+-#define CONFIG_YAFFS_DISABLE_CHUNK_ERASED_CHECK
++/* Default: Not selected */
++/* Meaning: Always test whether chunks are erased before writing to them.
++ Use during mtd debugging and init. */
++/* #define CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED */
++
++/* Default: Not Selected */
++/* Meaning: At mount automatically empty all files from lost and found. */
++/* This is done to fix an old problem where rmdir was not checking for an */
++/* empty directory. This can also be achieved with a mount option. */
++#define CONFIG_YAFFS_EMPTY_LOST_AND_FOUND
+
+ /* Default: Selected */
+ /* Meaning: Cache short names, taking more RAM, but faster look-ups */
+ #define CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+
+-/* Default: 10 */
+-/* Meaning: set the count of blocks to reserve for checkpointing */
+-#define CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS 10
++/* Default: Unselected */
++/* Meaning: Select to disable block refreshing. */
++/* Block Refreshing periodically rewrites the oldest block. */
++/* #define CONFIG_DISABLE_BLOCK_REFRESHING */
++
++/* Default: Unselected */
++/* Meaning: Select to disable background processing */
++/* #define CONFIG_DISABLE_BACKGROUND */
++
++
++/* Default: Selected */
++/* Meaning: Enable XATTR support */
++#define CONFIG_YAFFS_XATTR
+
+ /*
+-Older-style on-NAND data format has a "pageStatus" byte to record
++Older-style on-NAND data format has a "page_status" byte to record
+ chunk/page state. This byte is zeroed when the page is discarded.
+ Choose this option if you have existing on-NAND data in this format
+ that you need to continue to support. New data written also uses the
+@@ -57,7 +78,7 @@ adjusted to use the older-style format.
+ MTD versions in yaffs_mtdif1.c.
+ */
+ /* Default: Not selected */
+-/* Meaning: Use older-style on-NAND data format with pageStatus byte */
++/* Meaning: Use older-style on-NAND data format with page_status byte */
+ /* #define CONFIG_YAFFS_9BYTE_TAGS */
+
+ #endif /* YAFFS_OUT_OF_TREE */
+--- /dev/null
++++ b/fs/yaffs2/yaffs_allocator.c
+@@ -0,0 +1,409 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++
++#include "yaffs_allocator.h"
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++#include "yportenv.h"
++
++#ifdef CONFIG_YAFFS_YMALLOC_ALLOCATOR
++
++void yaffs_deinit_raw_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ dev = dev;
++}
++
++void yaffs_init_raw_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ dev = dev;
++}
++
++yaffs_tnode_t *yaffs_alloc_raw_tnode(yaffs_dev_t *dev)
++{
++ return (yaffs_tnode_t *)YMALLOC(dev->tnode_size);
++}
++
++void yaffs_free_raw_tnode(yaffs_dev_t *dev, yaffs_tnode_t *tn)
++{
++ dev = dev;
++ YFREE(tn);
++}
++
++void yaffs_init_raw_objs(yaffs_dev_t *dev)
++{
++ dev = dev;
++}
++
++void yaffs_deinit_raw_objs(yaffs_dev_t *dev)
++{
++ dev = dev;
++}
++
++yaffs_obj_t *yaffs_alloc_raw_obj(yaffs_dev_t *dev)
++{
++ dev = dev;
++ return (yaffs_obj_t *) YMALLOC(sizeof(yaffs_obj_t));
++}
++
++
++void yaffs_free_raw_obj(yaffs_dev_t *dev, yaffs_obj_t *obj)
++{
++
++ dev = dev;
++ YFREE(obj);
++}
++
++#else
++
++struct yaffs_tnode_list {
++ struct yaffs_tnode_list *next;
++ yaffs_tnode_t *tnodes;
++};
++
++typedef struct yaffs_tnode_list yaffs_tnodelist_t;
++
++struct yaffs_obj_tList_struct {
++ yaffs_obj_t *objects;
++ struct yaffs_obj_tList_struct *next;
++};
++
++typedef struct yaffs_obj_tList_struct yaffs_obj_tList;
++
++
++struct yaffs_AllocatorStruct {
++ int n_tnodesCreated;
++ yaffs_tnode_t *freeTnodes;
++ int nFreeTnodes;
++ yaffs_tnodelist_t *allocatedTnodeList;
++
++ int n_objCreated;
++ yaffs_obj_t *freeObjects;
++ int nFreeObjects;
++
++ yaffs_obj_tList *allocatedObjectList;
++};
++
++typedef struct yaffs_AllocatorStruct yaffs_Allocator;
++
++
++static void yaffs_deinit_raw_tnodes(yaffs_dev_t *dev)
++{
++
++ yaffs_Allocator *allocator = (yaffs_Allocator *)dev->allocator;
++
++ yaffs_tnodelist_t *tmp;
++
++ if(!allocator){
++ YBUG();
++ return;
++ }
++
++ while (allocator->allocatedTnodeList) {
++ tmp = allocator->allocatedTnodeList->next;
++
++ YFREE(allocator->allocatedTnodeList->tnodes);
++ YFREE(allocator->allocatedTnodeList);
++ allocator->allocatedTnodeList = tmp;
++
++ }
++
++ allocator->freeTnodes = NULL;
++ allocator->nFreeTnodes = 0;
++ allocator->n_tnodesCreated = 0;
++}
++
++static void yaffs_init_raw_tnodes(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++
++ if(allocator){
++ allocator->allocatedTnodeList = NULL;
++ allocator->freeTnodes = NULL;
++ allocator->nFreeTnodes = 0;
++ allocator->n_tnodesCreated = 0;
++ } else
++ YBUG();
++}
++
++static int yaffs_create_tnodes(yaffs_dev_t *dev, int n_tnodes)
++{
++ yaffs_Allocator *allocator = (yaffs_Allocator *)dev->allocator;
++ int i;
++ yaffs_tnode_t *newTnodes;
++ __u8 *mem;
++ yaffs_tnode_t *curr;
++ yaffs_tnode_t *next;
++ yaffs_tnodelist_t *tnl;
++
++ if(!allocator){
++ YBUG();
++ return YAFFS_FAIL;
++ }
++
++ if (n_tnodes < 1)
++ return YAFFS_OK;
++
++
++ /* make these things */
++
++ newTnodes = YMALLOC(n_tnodes * dev->tnode_size);
++ mem = (__u8 *)newTnodes;
++
++ if (!newTnodes) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("yaffs: Could not allocate Tnodes" TENDSTR)));
++ return YAFFS_FAIL;
++ }
++
++ /* New hookup for wide tnodes */
++ for (i = 0; i < n_tnodes - 1; i++) {
++ curr = (yaffs_tnode_t *) &mem[i * dev->tnode_size];
++ next = (yaffs_tnode_t *) &mem[(i+1) * dev->tnode_size];
++ curr->internal[0] = next;
++ }
++
++ curr = (yaffs_tnode_t *) &mem[(n_tnodes - 1) * dev->tnode_size];
++ curr->internal[0] = allocator->freeTnodes;
++ allocator->freeTnodes = (yaffs_tnode_t *)mem;
++
++ allocator->nFreeTnodes += n_tnodes;
++ allocator->n_tnodesCreated += n_tnodes;
++
++ /* Now add this bunch of tnodes to a list for freeing up.
++ * NB If we can't add this to the management list it isn't fatal
++ * but it just means we can't free this bunch of tnodes later.
++ */
++
++ tnl = YMALLOC(sizeof(yaffs_tnodelist_t));
++ if (!tnl) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ ("yaffs: Could not add tnodes to management list" TENDSTR)));
++ return YAFFS_FAIL;
++ } else {
++ tnl->tnodes = newTnodes;
++ tnl->next = allocator->allocatedTnodeList;
++ allocator->allocatedTnodeList = tnl;
++ }
++
++ T(YAFFS_TRACE_ALLOCATE, (TSTR("yaffs: Tnodes added" TENDSTR)));
++
++ return YAFFS_OK;
++}
++
++
++yaffs_tnode_t *yaffs_alloc_raw_tnode(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator = (yaffs_Allocator *)dev->allocator;
++ yaffs_tnode_t *tn = NULL;
++
++ if(!allocator){
++ YBUG();
++ return NULL;
++ }
++
++ /* If there are none left make more */
++ if (!allocator->freeTnodes)
++ yaffs_create_tnodes(dev, YAFFS_ALLOCATION_NTNODES);
++
++ if (allocator->freeTnodes) {
++ tn = allocator->freeTnodes;
++ allocator->freeTnodes = allocator->freeTnodes->internal[0];
++ allocator->nFreeTnodes--;
++ }
++
++ return tn;
++}
++
++/* FreeTnode frees up a tnode and puts it back on the free list */
++void yaffs_free_raw_tnode(yaffs_dev_t *dev, yaffs_tnode_t *tn)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++
++ if(!allocator){
++ YBUG();
++ return;
++ }
++
++ if (tn) {
++ tn->internal[0] = allocator->freeTnodes;
++ allocator->freeTnodes = tn;
++ allocator->nFreeTnodes++;
++ }
++ dev->checkpoint_blocks_required = 0; /* force recalculation*/
++}
++
++
++
++static void yaffs_init_raw_objs(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++
++ if(allocator) {
++ allocator->allocatedObjectList = NULL;
++ allocator->freeObjects = NULL;
++ allocator->nFreeObjects = 0;
++ } else
++ YBUG();
++}
++
++static void yaffs_deinit_raw_objs(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++ yaffs_obj_tList *tmp;
++
++ if(!allocator){
++ YBUG();
++ return;
++ }
++
++ while (allocator->allocatedObjectList) {
++ tmp = allocator->allocatedObjectList->next;
++ YFREE(allocator->allocatedObjectList->objects);
++ YFREE(allocator->allocatedObjectList);
++
++ allocator->allocatedObjectList = tmp;
++ }
++
++ allocator->freeObjects = NULL;
++ allocator->nFreeObjects = 0;
++ allocator->n_objCreated = 0;
++}
++
++
++static int yaffs_create_free_objs(yaffs_dev_t *dev, int n_obj)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++
++ int i;
++ yaffs_obj_t *newObjects;
++ yaffs_obj_tList *list;
++
++ if(!allocator){
++ YBUG();
++ return YAFFS_FAIL;
++ }
++
++ if (n_obj < 1)
++ return YAFFS_OK;
++
++ /* make these things */
++ newObjects = YMALLOC(n_obj * sizeof(yaffs_obj_t));
++ list = YMALLOC(sizeof(yaffs_obj_tList));
++
++ if (!newObjects || !list) {
++ if (newObjects){
++ YFREE(newObjects);
++ newObjects = NULL;
++ }
++ if (list){
++ YFREE(list);
++ list = NULL;
++ }
++ T(YAFFS_TRACE_ALLOCATE,
++ (TSTR("yaffs: Could not allocate more objects" TENDSTR)));
++ return YAFFS_FAIL;
++ }
++
++ /* Hook them into the free list */
++ for (i = 0; i < n_obj - 1; i++) {
++ newObjects[i].siblings.next =
++ (struct ylist_head *)(&newObjects[i + 1]);
++ }
++
++ newObjects[n_obj - 1].siblings.next = (void *)allocator->freeObjects;
++ allocator->freeObjects = newObjects;
++ allocator->nFreeObjects += n_obj;
++ allocator->n_objCreated += n_obj;
++
++ /* Now add this bunch of Objects to a list for freeing up. */
++
++ list->objects = newObjects;
++ list->next = allocator->allocatedObjectList;
++ allocator->allocatedObjectList = list;
++
++ return YAFFS_OK;
++}
++
++yaffs_obj_t *yaffs_alloc_raw_obj(yaffs_dev_t *dev)
++{
++ yaffs_obj_t *obj = NULL;
++ yaffs_Allocator *allocator = dev->allocator;
++
++ if(!allocator) {
++ YBUG();
++ return obj;
++ }
++
++ /* If there are none left make more */
++ if (!allocator->freeObjects)
++ yaffs_create_free_objs(dev, YAFFS_ALLOCATION_NOBJECTS);
++
++ if (allocator->freeObjects) {
++ obj = allocator->freeObjects;
++ allocator->freeObjects =
++ (yaffs_obj_t *) (allocator->freeObjects->siblings.next);
++ allocator->nFreeObjects--;
++ }
++
++ return obj;
++}
++
++
++void yaffs_free_raw_obj(yaffs_dev_t *dev, yaffs_obj_t *obj)
++{
++
++ yaffs_Allocator *allocator = dev->allocator;
++
++ if(!allocator)
++ YBUG();
++ else {
++ /* Link into the free list. */
++ obj->siblings.next = (struct ylist_head *)(allocator->freeObjects);
++ allocator->freeObjects = obj;
++ allocator->nFreeObjects++;
++ }
++}
++
++void yaffs_deinit_raw_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ if(dev->allocator){
++ yaffs_deinit_raw_tnodes(dev);
++ yaffs_deinit_raw_objs(dev);
++
++ YFREE(dev->allocator);
++ dev->allocator=NULL;
++ } else
++ YBUG();
++}
++
++void yaffs_init_raw_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator;
++
++ if(!dev->allocator){
++ allocator = YMALLOC(sizeof(yaffs_Allocator));
++ if(allocator){
++ dev->allocator = allocator;
++ yaffs_init_raw_tnodes(dev);
++ yaffs_init_raw_objs(dev);
++ }
++ } else
++ YBUG();
++}
++
++
++#endif
+--- /dev/null
++++ b/fs/yaffs2/yaffs_allocator.h
+@@ -0,0 +1,30 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_ALLOCATOR_H__
++#define __YAFFS_ALLOCATOR_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_init_raw_tnodes_and_objs(yaffs_dev_t *dev);
++void yaffs_deinit_raw_tnodes_and_objs(yaffs_dev_t *dev);
++
++yaffs_tnode_t *yaffs_alloc_raw_tnode(yaffs_dev_t *dev);
++void yaffs_free_raw_tnode(yaffs_dev_t *dev, yaffs_tnode_t *tn);
++
++yaffs_obj_t *yaffs_alloc_raw_obj(yaffs_dev_t *dev);
++void yaffs_free_raw_obj(yaffs_dev_t *dev, yaffs_obj_t *obj);
++
++#endif
+--- /dev/null
++++ b/fs/yaffs2/yaffs_bitmap.c
+@@ -0,0 +1,105 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_bitmap.h"
++#include "yaffs_trace.h"
++/*
++ * Chunk bitmap manipulations
++ */
++
++static Y_INLINE __u8 *yaffs_BlockBits(yaffs_dev_t *dev, int blk)
++{
++ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("**>> yaffs: BlockBits block %d is not valid" TENDSTR),
++ blk));
++ YBUG();
++ }
++ return dev->chunk_bits +
++ (dev->chunk_bit_stride * (blk - dev->internal_start_block));
++}
++
++void yaffs_verify_chunk_bit_id(yaffs_dev_t *dev, int blk, int chunk)
++{
++ if (blk < dev->internal_start_block || blk > dev->internal_end_block ||
++ chunk < 0 || chunk >= dev->param.chunks_per_block) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("**>> yaffs: Chunk Id (%d:%d) invalid"TENDSTR),
++ blk, chunk));
++ YBUG();
++ }
++}
++
++void yaffs_clear_chunk_bits(yaffs_dev_t *dev, int blk)
++{
++ __u8 *blkBits = yaffs_BlockBits(dev, blk);
++
++ memset(blkBits, 0, dev->chunk_bit_stride);
++}
++
++void yaffs_clear_chunk_bit(yaffs_dev_t *dev, int blk, int chunk)
++{
++ __u8 *blkBits = yaffs_BlockBits(dev, blk);
++
++ yaffs_verify_chunk_bit_id(dev, blk, chunk);
++
++ blkBits[chunk / 8] &= ~(1 << (chunk & 7));
++}
++
++void yaffs_set_chunk_bit(yaffs_dev_t *dev, int blk, int chunk)
++{
++ __u8 *blkBits = yaffs_BlockBits(dev, blk);
++
++ yaffs_verify_chunk_bit_id(dev, blk, chunk);
++
++ blkBits[chunk / 8] |= (1 << (chunk & 7));
++}
++
++int yaffs_check_chunk_bit(yaffs_dev_t *dev, int blk, int chunk)
++{
++ __u8 *blkBits = yaffs_BlockBits(dev, blk);
++ yaffs_verify_chunk_bit_id(dev, blk, chunk);
++
++ return (blkBits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
++}
++
++int yaffs_still_some_chunks(yaffs_dev_t *dev, int blk)
++{
++ __u8 *blkBits = yaffs_BlockBits(dev, blk);
++ int i;
++ for (i = 0; i < dev->chunk_bit_stride; i++) {
++ if (*blkBits)
++ return 1;
++ blkBits++;
++ }
++ return 0;
++}
++
++int yaffs_count_chunk_bits(yaffs_dev_t *dev, int blk)
++{
++ __u8 *blkBits = yaffs_BlockBits(dev, blk);
++ int i;
++ int n = 0;
++ for (i = 0; i < dev->chunk_bit_stride; i++) {
++ __u8 x = *blkBits;
++ while (x) {
++ if (x & 1)
++ n++;
++ x >>= 1;
++ }
++
++ blkBits++;
++ }
++ return n;
++}
++
+--- /dev/null
++++ b/fs/yaffs2/yaffs_bitmap.h
+@@ -0,0 +1,31 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * Chunk bitmap manipulations
++ */
++
++#ifndef __YAFFS_BITMAP_H__
++#define __YAFFS_BITMAP_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_verify_chunk_bit_id(yaffs_dev_t *dev, int blk, int chunk);
++void yaffs_clear_chunk_bits(yaffs_dev_t *dev, int blk);
++void yaffs_clear_chunk_bit(yaffs_dev_t *dev, int blk, int chunk);
++void yaffs_set_chunk_bit(yaffs_dev_t *dev, int blk, int chunk);
++int yaffs_check_chunk_bit(yaffs_dev_t *dev, int blk, int chunk);
++int yaffs_still_some_chunks(yaffs_dev_t *dev, int blk);
++int yaffs_count_chunk_bits(yaffs_dev_t *dev, int blk);
++
++#endif
+--- a/fs/yaffs2/yaffs_checkptrw.c
++++ b/fs/yaffs2/yaffs_checkptrw.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -11,16 +11,12 @@
+ * published by the Free Software Foundation.
+ */
+
+-const char *yaffs_checkptrw_c_version =
+- "$Id: yaffs_checkptrw.c,v 1.18 2009-03-06 17:20:49 wookey Exp $";
+-
+-
+ #include "yaffs_checkptrw.h"
+ #include "yaffs_getblockinfo.h"
+
+-static int yaffs_CheckpointSpaceOk(yaffs_Device *dev)
++static int yaffs2_checkpt_space_ok(yaffs_dev_t *dev)
+ {
+- int blocksAvailable = dev->nErasedBlocks - dev->nReservedBlocks;
++ int blocksAvailable = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+ T(YAFFS_TRACE_CHECKPOINT,
+ (TSTR("checkpt blocks available = %d" TENDSTR),
+@@ -30,53 +26,56 @@ static int yaffs_CheckpointSpaceOk(yaffs
+ }
+
+
+-static int yaffs_CheckpointErase(yaffs_Device *dev)
++static int yaffs_checkpt_erase(yaffs_dev_t *dev)
+ {
+ int i;
+
+- if (!dev->eraseBlockInNAND)
++ if (!dev->param.erase_fn)
+ return 0;
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checking blocks %d to %d"TENDSTR),
+- dev->internalStartBlock, dev->internalEndBlock));
++ dev->internal_start_block, dev->internal_end_block));
+
+- for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i);
+- if (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT) {
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, i);
++ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("erasing checkpt block %d"TENDSTR), i));
+- if (dev->eraseBlockInNAND(dev, i - dev->blockOffset /* realign */)) {
+- bi->blockState = YAFFS_BLOCK_STATE_EMPTY;
+- dev->nErasedBlocks++;
+- dev->nFreeChunks += dev->nChunksPerBlock;
++
++ dev->n_erasures++;
++
++ if (dev->param.erase_fn(dev, i - dev->block_offset /* realign */)) {
++ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
++ dev->n_erased_blocks++;
++ dev->n_free_chunks += dev->param.chunks_per_block;
+ } else {
+- dev->markNANDBlockBad(dev, i);
+- bi->blockState = YAFFS_BLOCK_STATE_DEAD;
++ dev->param.bad_block_fn(dev, i);
++ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ }
+ }
+ }
+
+- dev->blocksInCheckpoint = 0;
++ dev->blocks_in_checkpt = 0;
+
+ return 1;
+ }
+
+
+-static void yaffs_CheckpointFindNextErasedBlock(yaffs_Device *dev)
++static void yaffs2_checkpt_find_erased_block(yaffs_dev_t *dev)
+ {
+ int i;
+- int blocksAvailable = dev->nErasedBlocks - dev->nReservedBlocks;
++ int blocksAvailable = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+ T(YAFFS_TRACE_CHECKPOINT,
+ (TSTR("allocating checkpt block: erased %d reserved %d avail %d next %d "TENDSTR),
+- dev->nErasedBlocks, dev->nReservedBlocks, blocksAvailable, dev->checkpointNextBlock));
++ dev->n_erased_blocks, dev->param.n_reserved_blocks, blocksAvailable, dev->checkpt_next_block));
+
+- if (dev->checkpointNextBlock >= 0 &&
+- dev->checkpointNextBlock <= dev->internalEndBlock &&
++ if (dev->checkpt_next_block >= 0 &&
++ dev->checkpt_next_block <= dev->internal_end_block &&
+ blocksAvailable > 0) {
+
+- for (i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++) {
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i);
+- if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY) {
+- dev->checkpointNextBlock = i + 1;
+- dev->checkpointCurrentBlock = i;
++ for (i = dev->checkpt_next_block; i <= dev->internal_end_block; i++) {
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, i);
++ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
++ dev->checkpt_next_block = i + 1;
++ dev->checkpt_cur_block = i;
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("allocating checkpt block %d"TENDSTR), i));
+ return;
+ }
+@@ -84,34 +83,34 @@ static void yaffs_CheckpointFindNextEras
+ }
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("out of checkpt blocks"TENDSTR)));
+
+- dev->checkpointNextBlock = -1;
+- dev->checkpointCurrentBlock = -1;
++ dev->checkpt_next_block = -1;
++ dev->checkpt_cur_block = -1;
+ }
+
+-static void yaffs_CheckpointFindNextCheckpointBlock(yaffs_Device *dev)
++static void yaffs2_checkpt_find_block(yaffs_dev_t *dev)
+ {
+ int i;
+- yaffs_ExtendedTags tags;
++ yaffs_ext_tags tags;
+
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("find next checkpt block: start: blocks %d next %d" TENDSTR),
+- dev->blocksInCheckpoint, dev->checkpointNextBlock));
++ dev->blocks_in_checkpt, dev->checkpt_next_block));
+
+- if (dev->blocksInCheckpoint < dev->checkpointMaxBlocks)
+- for (i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++) {
+- int chunk = i * dev->nChunksPerBlock;
+- int realignedChunk = chunk - dev->chunkOffset;
++ if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
++ for (i = dev->checkpt_next_block; i <= dev->internal_end_block; i++) {
++ int chunk = i * dev->param.chunks_per_block;
++ int realignedChunk = chunk - dev->chunk_offset;
+
+- dev->readChunkWithTagsFromNAND(dev, realignedChunk,
++ dev->param.read_chunk_tags_fn(dev, realignedChunk,
+ NULL, &tags);
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("find next checkpt block: search: block %d oid %d seq %d eccr %d" TENDSTR),
+- i, tags.objectId, tags.sequenceNumber, tags.eccResult));
++ i, tags.obj_id, tags.seq_number, tags.ecc_result));
+
+- if (tags.sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA) {
++ if (tags.seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA) {
+ /* Right kind of block */
+- dev->checkpointNextBlock = tags.objectId;
+- dev->checkpointCurrentBlock = i;
+- dev->checkpointBlockList[dev->blocksInCheckpoint] = i;
+- dev->blocksInCheckpoint++;
++ dev->checkpt_next_block = tags.obj_id;
++ dev->checkpt_cur_block = i;
++ dev->checkpt_block_list[dev->blocks_in_checkpt] = i;
++ dev->blocks_in_checkpt++;
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("found checkpt block %d"TENDSTR), i));
+ return;
+ }
+@@ -119,122 +118,127 @@ static void yaffs_CheckpointFindNextChec
+
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("found no more checkpt blocks"TENDSTR)));
+
+- dev->checkpointNextBlock = -1;
+- dev->checkpointCurrentBlock = -1;
++ dev->checkpt_next_block = -1;
++ dev->checkpt_cur_block = -1;
+ }
+
+
+-int yaffs_CheckpointOpen(yaffs_Device *dev, int forWriting)
++int yaffs2_checkpt_open(yaffs_dev_t *dev, int forWriting)
+ {
+
++
++ dev->checkpt_open_write = forWriting;
++
+ /* Got the functions we need? */
+- if (!dev->writeChunkWithTagsToNAND ||
+- !dev->readChunkWithTagsFromNAND ||
+- !dev->eraseBlockInNAND ||
+- !dev->markNANDBlockBad)
++ if (!dev->param.write_chunk_tags_fn ||
++ !dev->param.read_chunk_tags_fn ||
++ !dev->param.erase_fn ||
++ !dev->param.bad_block_fn)
+ return 0;
+
+- if (forWriting && !yaffs_CheckpointSpaceOk(dev))
++ if (forWriting && !yaffs2_checkpt_space_ok(dev))
+ return 0;
+
+- if (!dev->checkpointBuffer)
+- dev->checkpointBuffer = YMALLOC_DMA(dev->totalBytesPerChunk);
+- if (!dev->checkpointBuffer)
++ if (!dev->checkpt_buffer)
++ dev->checkpt_buffer = YMALLOC_DMA(dev->param.total_bytes_per_chunk);
++ if (!dev->checkpt_buffer)
+ return 0;
+
+
+- dev->checkpointPageSequence = 0;
+-
+- dev->checkpointOpenForWrite = forWriting;
+-
+- dev->checkpointByteCount = 0;
+- dev->checkpointSum = 0;
+- dev->checkpointXor = 0;
+- dev->checkpointCurrentBlock = -1;
+- dev->checkpointCurrentChunk = -1;
+- dev->checkpointNextBlock = dev->internalStartBlock;
++ dev->checkpt_page_seq = 0;
++ dev->checkpt_byte_count = 0;
++ dev->checkpt_sum = 0;
++ dev->checkpt_xor = 0;
++ dev->checkpt_cur_block = -1;
++ dev->checkpt_cur_chunk = -1;
++ dev->checkpt_next_block = dev->internal_start_block;
+
+ /* Erase all the blocks in the checkpoint area */
+ if (forWriting) {
+- memset(dev->checkpointBuffer, 0, dev->nDataBytesPerChunk);
+- dev->checkpointByteOffset = 0;
+- return yaffs_CheckpointErase(dev);
++ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
++ dev->checkpt_byte_offs = 0;
++ return yaffs_checkpt_erase(dev);
+ } else {
+ int i;
+ /* Set to a value that will kick off a read */
+- dev->checkpointByteOffset = dev->nDataBytesPerChunk;
++ dev->checkpt_byte_offs = dev->data_bytes_per_chunk;
+ /* A checkpoint block list of 1 checkpoint block per 16 block is (hopefully)
+ * going to be way more than we need */
+- dev->blocksInCheckpoint = 0;
+- dev->checkpointMaxBlocks = (dev->internalEndBlock - dev->internalStartBlock)/16 + 2;
+- dev->checkpointBlockList = YMALLOC(sizeof(int) * dev->checkpointMaxBlocks);
+- for (i = 0; i < dev->checkpointMaxBlocks; i++)
+- dev->checkpointBlockList[i] = -1;
++ dev->blocks_in_checkpt = 0;
++ dev->checkpt_max_blocks = (dev->internal_end_block - dev->internal_start_block)/16 + 2;
++ dev->checkpt_block_list = YMALLOC(sizeof(int) * dev->checkpt_max_blocks);
++ if(!dev->checkpt_block_list)
++ return 0;
++
++ for (i = 0; i < dev->checkpt_max_blocks; i++)
++ dev->checkpt_block_list[i] = -1;
+ }
+
+ return 1;
+ }
+
+-int yaffs_GetCheckpointSum(yaffs_Device *dev, __u32 *sum)
++int yaffs2_get_checkpt_sum(yaffs_dev_t *dev, __u32 *sum)
+ {
+ __u32 compositeSum;
+- compositeSum = (dev->checkpointSum << 8) | (dev->checkpointXor & 0xFF);
++ compositeSum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xFF);
+ *sum = compositeSum;
+ return 1;
+ }
+
+-static int yaffs_CheckpointFlushBuffer(yaffs_Device *dev)
++static int yaffs2_checkpt_flush_buffer(yaffs_dev_t *dev)
+ {
+ int chunk;
+ int realignedChunk;
+
+- yaffs_ExtendedTags tags;
++ yaffs_ext_tags tags;
+
+- if (dev->checkpointCurrentBlock < 0) {
+- yaffs_CheckpointFindNextErasedBlock(dev);
+- dev->checkpointCurrentChunk = 0;
++ if (dev->checkpt_cur_block < 0) {
++ yaffs2_checkpt_find_erased_block(dev);
++ dev->checkpt_cur_chunk = 0;
+ }
+
+- if (dev->checkpointCurrentBlock < 0)
++ if (dev->checkpt_cur_block < 0)
+ return 0;
+
+- tags.chunkDeleted = 0;
+- tags.objectId = dev->checkpointNextBlock; /* Hint to next place to look */
+- tags.chunkId = dev->checkpointPageSequence + 1;
+- tags.sequenceNumber = YAFFS_SEQUENCE_CHECKPOINT_DATA;
+- tags.byteCount = dev->nDataBytesPerChunk;
+- if (dev->checkpointCurrentChunk == 0) {
++ tags.is_deleted = 0;
++ tags.obj_id = dev->checkpt_next_block; /* Hint to next place to look */
++ tags.chunk_id = dev->checkpt_page_seq + 1;
++ tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA;
++ tags.n_bytes = dev->data_bytes_per_chunk;
++ if (dev->checkpt_cur_chunk == 0) {
+ /* First chunk we write for the block? Set block state to
+ checkpoint */
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, dev->checkpointCurrentBlock);
+- bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT;
+- dev->blocksInCheckpoint++;
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, dev->checkpt_cur_block);
++ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
++ dev->blocks_in_checkpt++;
+ }
+
+- chunk = dev->checkpointCurrentBlock * dev->nChunksPerBlock + dev->checkpointCurrentChunk;
++ chunk = dev->checkpt_cur_block * dev->param.chunks_per_block + dev->checkpt_cur_chunk;
+
+
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint wite buffer nand %d(%d:%d) objid %d chId %d" TENDSTR),
+- chunk, dev->checkpointCurrentBlock, dev->checkpointCurrentChunk, tags.objectId, tags.chunkId));
++ chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk, tags.obj_id, tags.chunk_id));
+
+- realignedChunk = chunk - dev->chunkOffset;
++ realignedChunk = chunk - dev->chunk_offset;
+
+- dev->writeChunkWithTagsToNAND(dev, realignedChunk,
+- dev->checkpointBuffer, &tags);
+- dev->checkpointByteOffset = 0;
+- dev->checkpointPageSequence++;
+- dev->checkpointCurrentChunk++;
+- if (dev->checkpointCurrentChunk >= dev->nChunksPerBlock) {
+- dev->checkpointCurrentChunk = 0;
+- dev->checkpointCurrentBlock = -1;
++ dev->n_page_writes++;
++
++ dev->param.write_chunk_tags_fn(dev, realignedChunk,
++ dev->checkpt_buffer, &tags);
++ dev->checkpt_byte_offs = 0;
++ dev->checkpt_page_seq++;
++ dev->checkpt_cur_chunk++;
++ if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
++ dev->checkpt_cur_chunk = 0;
++ dev->checkpt_cur_block = -1;
+ }
+- memset(dev->checkpointBuffer, 0, dev->nDataBytesPerChunk);
++ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+
+ return 1;
+ }
+
+
+-int yaffs_CheckpointWrite(yaffs_Device *dev, const void *data, int nBytes)
++int yaffs2_checkpt_wr(yaffs_dev_t *dev, const void *data, int n_bytes)
+ {
+ int i = 0;
+ int ok = 1;
+@@ -244,36 +248,36 @@ int yaffs_CheckpointWrite(yaffs_Device *
+
+
+
+- if (!dev->checkpointBuffer)
++ if (!dev->checkpt_buffer)
+ return 0;
+
+- if (!dev->checkpointOpenForWrite)
++ if (!dev->checkpt_open_write)
+ return -1;
+
+- while (i < nBytes && ok) {
+- dev->checkpointBuffer[dev->checkpointByteOffset] = *dataBytes;
+- dev->checkpointSum += *dataBytes;
+- dev->checkpointXor ^= *dataBytes;
++ while (i < n_bytes && ok) {
++ dev->checkpt_buffer[dev->checkpt_byte_offs] = *dataBytes;
++ dev->checkpt_sum += *dataBytes;
++ dev->checkpt_xor ^= *dataBytes;
+
+- dev->checkpointByteOffset++;
++ dev->checkpt_byte_offs++;
+ i++;
+ dataBytes++;
+- dev->checkpointByteCount++;
++ dev->checkpt_byte_count++;
+
+
+- if (dev->checkpointByteOffset < 0 ||
+- dev->checkpointByteOffset >= dev->nDataBytesPerChunk)
+- ok = yaffs_CheckpointFlushBuffer(dev);
++ if (dev->checkpt_byte_offs < 0 ||
++ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
++ ok = yaffs2_checkpt_flush_buffer(dev);
+ }
+
+ return i;
+ }
+
+-int yaffs_CheckpointRead(yaffs_Device *dev, void *data, int nBytes)
++int yaffs2_checkpt_rd(yaffs_dev_t *dev, void *data, int n_bytes)
+ {
+ int i = 0;
+ int ok = 1;
+- yaffs_ExtendedTags tags;
++ yaffs_ext_tags tags;
+
+
+ int chunk;
+@@ -281,113 +285,116 @@ int yaffs_CheckpointRead(yaffs_Device *d
+
+ __u8 *dataBytes = (__u8 *)data;
+
+- if (!dev->checkpointBuffer)
++ if (!dev->checkpt_buffer)
+ return 0;
+
+- if (dev->checkpointOpenForWrite)
++ if (dev->checkpt_open_write)
+ return -1;
+
+- while (i < nBytes && ok) {
++ while (i < n_bytes && ok) {
+
+
+- if (dev->checkpointByteOffset < 0 ||
+- dev->checkpointByteOffset >= dev->nDataBytesPerChunk) {
++ if (dev->checkpt_byte_offs < 0 ||
++ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
+
+- if (dev->checkpointCurrentBlock < 0) {
+- yaffs_CheckpointFindNextCheckpointBlock(dev);
+- dev->checkpointCurrentChunk = 0;
++ if (dev->checkpt_cur_block < 0) {
++ yaffs2_checkpt_find_block(dev);
++ dev->checkpt_cur_chunk = 0;
+ }
+
+- if (dev->checkpointCurrentBlock < 0)
++ if (dev->checkpt_cur_block < 0)
+ ok = 0;
+ else {
+- chunk = dev->checkpointCurrentBlock *
+- dev->nChunksPerBlock +
+- dev->checkpointCurrentChunk;
+-
+- realignedChunk = chunk - dev->chunkOffset;
++ chunk = dev->checkpt_cur_block *
++ dev->param.chunks_per_block +
++ dev->checkpt_cur_chunk;
++
++ realignedChunk = chunk - dev->chunk_offset;
++
++ dev->n_page_reads++;
+
+ /* read in the next chunk */
+ /* printf("read checkpoint page %d\n",dev->checkpointPage); */
+- dev->readChunkWithTagsFromNAND(dev,
++ dev->param.read_chunk_tags_fn(dev,
+ realignedChunk,
+- dev->checkpointBuffer,
++ dev->checkpt_buffer,
+ &tags);
+
+- if (tags.chunkId != (dev->checkpointPageSequence + 1) ||
+- tags.eccResult > YAFFS_ECC_RESULT_FIXED ||
+- tags.sequenceNumber != YAFFS_SEQUENCE_CHECKPOINT_DATA)
++ if (tags.chunk_id != (dev->checkpt_page_seq + 1) ||
++ tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
++ tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ ok = 0;
+
+- dev->checkpointByteOffset = 0;
+- dev->checkpointPageSequence++;
+- dev->checkpointCurrentChunk++;
++ dev->checkpt_byte_offs = 0;
++ dev->checkpt_page_seq++;
++ dev->checkpt_cur_chunk++;
+
+- if (dev->checkpointCurrentChunk >= dev->nChunksPerBlock)
+- dev->checkpointCurrentBlock = -1;
++ if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block)
++ dev->checkpt_cur_block = -1;
+ }
+ }
+
+ if (ok) {
+- *dataBytes = dev->checkpointBuffer[dev->checkpointByteOffset];
+- dev->checkpointSum += *dataBytes;
+- dev->checkpointXor ^= *dataBytes;
+- dev->checkpointByteOffset++;
++ *dataBytes = dev->checkpt_buffer[dev->checkpt_byte_offs];
++ dev->checkpt_sum += *dataBytes;
++ dev->checkpt_xor ^= *dataBytes;
++ dev->checkpt_byte_offs++;
+ i++;
+ dataBytes++;
+- dev->checkpointByteCount++;
++ dev->checkpt_byte_count++;
+ }
+ }
+
+ return i;
+ }
+
+-int yaffs_CheckpointClose(yaffs_Device *dev)
++int yaffs_checkpt_close(yaffs_dev_t *dev)
+ {
+
+- if (dev->checkpointOpenForWrite) {
+- if (dev->checkpointByteOffset != 0)
+- yaffs_CheckpointFlushBuffer(dev);
+- } else {
++ if (dev->checkpt_open_write) {
++ if (dev->checkpt_byte_offs != 0)
++ yaffs2_checkpt_flush_buffer(dev);
++ } else if(dev->checkpt_block_list){
+ int i;
+- for (i = 0; i < dev->blocksInCheckpoint && dev->checkpointBlockList[i] >= 0; i++) {
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, dev->checkpointBlockList[i]);
+- if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY)
+- bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT;
++ for (i = 0; i < dev->blocks_in_checkpt && dev->checkpt_block_list[i] >= 0; i++) {
++ int blk = dev->checkpt_block_list[i];
++ yaffs_block_info_t *bi = NULL;
++ if( dev->internal_start_block <= blk && blk <= dev->internal_end_block)
++ bi = yaffs_get_block_info(dev, blk);
++ if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY)
++ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ else {
+ /* Todo this looks odd... */
+ }
+ }
+- YFREE(dev->checkpointBlockList);
+- dev->checkpointBlockList = NULL;
++ YFREE(dev->checkpt_block_list);
++ dev->checkpt_block_list = NULL;
+ }
+
+- dev->nFreeChunks -= dev->blocksInCheckpoint * dev->nChunksPerBlock;
+- dev->nErasedBlocks -= dev->blocksInCheckpoint;
++ dev->n_free_chunks -= dev->blocks_in_checkpt * dev->param.chunks_per_block;
++ dev->n_erased_blocks -= dev->blocks_in_checkpt;
+
+
+ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint byte count %d" TENDSTR),
+- dev->checkpointByteCount));
++ dev->checkpt_byte_count));
+
+- if (dev->checkpointBuffer) {
++ if (dev->checkpt_buffer) {
+ /* free the buffer */
+- YFREE(dev->checkpointBuffer);
+- dev->checkpointBuffer = NULL;
++ YFREE(dev->checkpt_buffer);
++ dev->checkpt_buffer = NULL;
+ return 1;
+ } else
+ return 0;
+ }
+
+-int yaffs_CheckpointInvalidateStream(yaffs_Device *dev)
++int yaffs2_checkpt_invalidate_stream(yaffs_dev_t *dev)
+ {
+- /* Erase the first checksum block */
+-
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint invalidate"TENDSTR)));
++ /* Erase the checkpoint data */
+
+- if (!yaffs_CheckpointSpaceOk(dev))
+- return 0;
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint invalidate of %d blocks"TENDSTR),
++ dev->blocks_in_checkpt));
+
+- return yaffs_CheckpointErase(dev);
++ return yaffs_checkpt_erase(dev);
+ }
+
+
+--- a/fs/yaffs2/yaffs_checkptrw.h
++++ b/fs/yaffs2/yaffs_checkptrw.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -18,18 +18,17 @@
+
+ #include "yaffs_guts.h"
+
+-int yaffs_CheckpointOpen(yaffs_Device *dev, int forWriting);
++int yaffs2_checkpt_open(yaffs_dev_t *dev, int forWriting);
+
+-int yaffs_CheckpointWrite(yaffs_Device *dev, const void *data, int nBytes);
++int yaffs2_checkpt_wr(yaffs_dev_t *dev, const void *data, int n_bytes);
+
+-int yaffs_CheckpointRead(yaffs_Device *dev, void *data, int nBytes);
++int yaffs2_checkpt_rd(yaffs_dev_t *dev, void *data, int n_bytes);
+
+-int yaffs_GetCheckpointSum(yaffs_Device *dev, __u32 *sum);
++int yaffs2_get_checkpt_sum(yaffs_dev_t *dev, __u32 *sum);
+
+-int yaffs_CheckpointClose(yaffs_Device *dev);
++int yaffs_checkpt_close(yaffs_dev_t *dev);
+
+-int yaffs_CheckpointInvalidateStream(yaffs_Device *dev);
++int yaffs2_checkpt_invalidate_stream(yaffs_dev_t *dev);
+
+
+ #endif
+-
+--- a/fs/yaffs2/yaffs_ecc.c
++++ b/fs/yaffs2/yaffs_ecc.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -28,9 +28,6 @@
+ * this bytes influence on the line parity.
+ */
+
+-const char *yaffs_ecc_c_version =
+- "$Id: yaffs_ecc.c,v 1.11 2009-03-06 17:20:50 wookey Exp $";
+-
+ #include "yportenv.h"
+
+ #include "yaffs_ecc.h"
+@@ -72,7 +69,7 @@ static const unsigned char column_parity
+
+ /* Count the bits in an unsigned char or a U32 */
+
+-static int yaffs_CountBits(unsigned char x)
++static int yaffs_count_bits(unsigned char x)
+ {
+ int r = 0;
+ while (x) {
+@@ -83,7 +80,7 @@ static int yaffs_CountBits(unsigned char
+ return r;
+ }
+
+-static int yaffs_CountBits32(unsigned x)
++static int yaffs_count_bits32(unsigned x)
+ {
+ int r = 0;
+ while (x) {
+@@ -95,7 +92,7 @@ static int yaffs_CountBits32(unsigned x)
+ }
+
+ /* Calculate the ECC for a 256-byte block of data */
+-void yaffs_ECCCalculate(const unsigned char *data, unsigned char *ecc)
++void yaffs_ecc_cacl(const unsigned char *data, unsigned char *ecc)
+ {
+ unsigned int i;
+
+@@ -166,7 +163,7 @@ void yaffs_ECCCalculate(const unsigned c
+
+ /* Correct the ECC on a 256 byte block of data */
+
+-int yaffs_ECCCorrect(unsigned char *data, unsigned char *read_ecc,
++int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc)
+ {
+ unsigned char d0, d1, d2; /* deltas */
+@@ -226,9 +223,9 @@ int yaffs_ECCCorrect(unsigned char *data
+ return 1; /* Corrected the error */
+ }
+
+- if ((yaffs_CountBits(d0) +
+- yaffs_CountBits(d1) +
+- yaffs_CountBits(d2)) == 1) {
++ if ((yaffs_count_bits(d0) +
++ yaffs_count_bits(d1) +
++ yaffs_count_bits(d2)) == 1) {
+ /* Reccoverable error in ecc */
+
+ read_ecc[0] = test_ecc[0];
+@@ -248,7 +245,7 @@ int yaffs_ECCCorrect(unsigned char *data
+ /*
+ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
+ */
+-void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes,
++void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ yaffs_ECCOther *eccOther)
+ {
+ unsigned int i;
+@@ -258,7 +255,7 @@ void yaffs_ECCCalculateOther(const unsig
+ unsigned line_parity_prime = 0;
+ unsigned char b;
+
+- for (i = 0; i < nBytes; i++) {
++ for (i = 0; i < n_bytes; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+@@ -275,7 +272,7 @@ void yaffs_ECCCalculateOther(const unsig
+ eccOther->lineParityPrime = line_parity_prime;
+ }
+
+-int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes,
++int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ yaffs_ECCOther *read_ecc,
+ const yaffs_ECCOther *test_ecc)
+ {
+@@ -304,7 +301,7 @@ int yaffs_ECCCorrectOther(unsigned char
+ if (cDelta & 0x02)
+ bit |= 0x01;
+
+- if (lDelta >= nBytes)
++ if (lDelta >= n_bytes)
+ return -1;
+
+ data[lDelta] ^= (1 << bit);
+@@ -312,8 +309,8 @@ int yaffs_ECCCorrectOther(unsigned char
+ return 1; /* corrected */
+ }
+
+- if ((yaffs_CountBits32(lDelta) + yaffs_CountBits32(lDeltaPrime) +
+- yaffs_CountBits(cDelta)) == 1) {
++ if ((yaffs_count_bits32(lDelta) + yaffs_count_bits32(lDeltaPrime) +
++ yaffs_count_bits(cDelta)) == 1) {
+ /* Reccoverable error in ecc */
+
+ *read_ecc = *test_ecc;
+--- a/fs/yaffs2/yaffs_ecc.h
++++ b/fs/yaffs2/yaffs_ecc.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -32,13 +32,13 @@ typedef struct {
+ unsigned lineParityPrime;
+ } yaffs_ECCOther;
+
+-void yaffs_ECCCalculate(const unsigned char *data, unsigned char *ecc);
+-int yaffs_ECCCorrect(unsigned char *data, unsigned char *read_ecc,
++void yaffs_ecc_cacl(const unsigned char *data, unsigned char *ecc);
++int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc);
+
+-void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes,
++void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ yaffs_ECCOther *ecc);
+-int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes,
++int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ yaffs_ECCOther *read_ecc,
+ const yaffs_ECCOther *test_ecc);
+ #endif
+--- a/fs/yaffs2/yaffs_fs.c
++++ /dev/null
+@@ -1,2529 +0,0 @@
+-/*
+- * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+- *
+- * Copyright (C) 2002-2009 Aleph One Ltd.
+- * for Toby Churchill Ltd and Brightstar Engineering
+- *
+- * Created by Charles Manning <charles@aleph1.co.uk>
+- * Acknowledgements:
+- * Luc van OostenRyck for numerous patches.
+- * Nick Bane for numerous patches.
+- * Nick Bane for 2.5/2.6 integration.
+- * Andras Toth for mknod rdev issue.
+- * Michael Fischer for finding the problem with inode inconsistency.
+- * Some code bodily lifted from JFFS
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-/*
+- *
+- * This is the file system front-end to YAFFS that hooks it up to
+- * the VFS.
+- *
+- * Special notes:
+- * >> 2.4: sb->u.generic_sbp points to the yaffs_Device associated with
+- * this superblock
+- * >> 2.6: sb->s_fs_info points to the yaffs_Device associated with this
+- * superblock
+- * >> inode->u.generic_ip points to the associated yaffs_Object.
+- */
+-
+-const char *yaffs_fs_c_version =
+- "$Id: yaffs_fs.c,v 1.79 2009-03-17 01:12:00 wookey Exp $";
+-extern const char *yaffs_guts_c_version;
+-
+-#include <linux/version.h>
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+-#include <linux/config.h>
+-#endif
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/slab.h>
+-#include <linux/init.h>
+-#include <linux/fs.h>
+-#include <linux/proc_fs.h>
+-#include <linux/smp_lock.h>
+-#include <linux/pagemap.h>
+-#include <linux/mtd/mtd.h>
+-#include <linux/interrupt.h>
+-#include <linux/string.h>
+-#include <linux/ctype.h>
+-
+-#include "asm/div64.h"
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-
+-#include <linux/statfs.h> /* Added NCB 15-8-2003 */
+-#include <linux/statfs.h>
+-#define UnlockPage(p) unlock_page(p)
+-#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+-
+-/* FIXME: use sb->s_id instead ? */
+-#define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf)
+-
+-#else
+-
+-#include <linux/locks.h>
+-#define BDEVNAME_SIZE 0
+-#define yaffs_devname(sb, buf) kdevname(sb->s_dev)
+-
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
+-/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
+-#define __user
+-#endif
+-
+-#endif
+-
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
+-#define YPROC_ROOT (&proc_root)
+-#else
+-#define YPROC_ROOT NULL
+-#endif
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-#define WRITE_SIZE_STR "writesize"
+-#define WRITE_SIZE(mtd) ((mtd)->writesize)
+-#else
+-#define WRITE_SIZE_STR "oobblock"
+-#define WRITE_SIZE(mtd) ((mtd)->oobblock)
+-#endif
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27))
+-#define YAFFS_USE_WRITE_BEGIN_END 1
+-#else
+-#define YAFFS_USE_WRITE_BEGIN_END 0
+-#endif
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28))
+-static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
+-{
+- uint64_t result = partition_size;
+- do_div(result, block_size);
+- return (uint32_t)result;
+-}
+-#else
+-#define YCALCBLOCKS(s, b) ((s)/(b))
+-#endif
+-
+-#include <linux/uaccess.h>
+-
+-#include "yportenv.h"
+-#include "yaffs_guts.h"
+-
+-#include <linux/mtd/mtd.h>
+-#include "yaffs_mtdif.h"
+-#include "yaffs_mtdif1.h"
+-#include "yaffs_mtdif2.h"
+-
+-unsigned int yaffs_traceMask = YAFFS_TRACE_BAD_BLOCKS;
+-unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
+-unsigned int yaffs_auto_checkpoint = 1;
+-
+-/* Module Parameters */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-module_param(yaffs_traceMask, uint, 0644);
+-module_param(yaffs_wr_attempts, uint, 0644);
+-module_param(yaffs_auto_checkpoint, uint, 0644);
+-#else
+-MODULE_PARM(yaffs_traceMask, "i");
+-MODULE_PARM(yaffs_wr_attempts, "i");
+-MODULE_PARM(yaffs_auto_checkpoint, "i");
+-#endif
+-
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
+-/* use iget and read_inode */
+-#define Y_IGET(sb, inum) iget((sb), (inum))
+-static void yaffs_read_inode(struct inode *inode);
+-
+-#else
+-/* Call local equivalent */
+-#define YAFFS_USE_OWN_IGET
+-#define Y_IGET(sb, inum) yaffs_iget((sb), (inum))
+-
+-static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino);
+-#endif
+-
+-/*#define T(x) printk x */
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+-#define yaffs_InodeToObjectLV(iptr) ((iptr)->i_private)
+-#else
+-#define yaffs_InodeToObjectLV(iptr) ((iptr)->u.generic_ip)
+-#endif
+-
+-#define yaffs_InodeToObject(iptr) ((yaffs_Object *)(yaffs_InodeToObjectLV(iptr)))
+-#define yaffs_DentryToObject(dptr) yaffs_InodeToObject((dptr)->d_inode)
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-#define yaffs_SuperToDevice(sb) ((yaffs_Device *)sb->s_fs_info)
+-#else
+-#define yaffs_SuperToDevice(sb) ((yaffs_Device *)sb->u.generic_sbp)
+-#endif
+-
+-static void yaffs_put_super(struct super_block *sb);
+-
+-static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+- loff_t *pos);
+-static ssize_t yaffs_hold_space(struct file *f);
+-static void yaffs_release_space(struct file *f);
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_file_flush(struct file *file, fl_owner_t id);
+-#else
+-static int yaffs_file_flush(struct file *file);
+-#endif
+-
+-static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+- int datasync);
+-
+-static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir);
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+- struct nameidata *n);
+-static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+- struct nameidata *n);
+-#else
+-static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode);
+-static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry);
+-#endif
+-static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+- struct dentry *dentry);
+-static int yaffs_unlink(struct inode *dir, struct dentry *dentry);
+-static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+- const char *symname);
+-static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- dev_t dev);
+-#else
+-static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- int dev);
+-#endif
+-static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+- struct inode *new_dir, struct dentry *new_dentry);
+-static int yaffs_setattr(struct dentry *dentry, struct iattr *attr);
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_sync_fs(struct super_block *sb, int wait);
+-static void yaffs_write_super(struct super_block *sb);
+-#else
+-static int yaffs_sync_fs(struct super_block *sb);
+-static int yaffs_write_super(struct super_block *sb);
+-#endif
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf);
+-#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf);
+-#else
+-static int yaffs_statfs(struct super_block *sb, struct statfs *buf);
+-#endif
+-
+-#ifdef YAFFS_HAS_PUT_INODE
+-static void yaffs_put_inode(struct inode *inode);
+-#endif
+-
+-static void yaffs_delete_inode(struct inode *);
+-static void yaffs_clear_inode(struct inode *);
+-
+-static int yaffs_readpage(struct file *file, struct page *page);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_writepage(struct page *page, struct writeback_control *wbc);
+-#else
+-static int yaffs_writepage(struct page *page);
+-#endif
+-
+-
+-#if (YAFFS_USE_WRITE_BEGIN_END != 0)
+-static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
+- loff_t pos, unsigned len, unsigned flags,
+- struct page **pagep, void **fsdata);
+-static int yaffs_write_end(struct file *filp, struct address_space *mapping,
+- loff_t pos, unsigned len, unsigned copied,
+- struct page *pg, void *fsdadata);
+-#else
+-static int yaffs_prepare_write(struct file *f, struct page *pg,
+- unsigned offset, unsigned to);
+-static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
+- unsigned to);
+-
+-#endif
+-
+-static int yaffs_readlink(struct dentry *dentry, char __user *buffer,
+- int buflen);
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+-static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
+-#else
+-static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
+-#endif
+-
+-static struct address_space_operations yaffs_file_address_operations = {
+- .readpage = yaffs_readpage,
+- .writepage = yaffs_writepage,
+-#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+- .write_begin = yaffs_write_begin,
+- .write_end = yaffs_write_end,
+-#else
+- .prepare_write = yaffs_prepare_write,
+- .commit_write = yaffs_commit_write,
+-#endif
+-};
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
+-static const struct file_operations yaffs_file_operations = {
+- .read = do_sync_read,
+- .write = do_sync_write,
+- .aio_read = generic_file_aio_read,
+- .aio_write = generic_file_aio_write,
+- .mmap = generic_file_mmap,
+- .flush = yaffs_file_flush,
+- .fsync = yaffs_sync_object,
+- .splice_read = generic_file_splice_read,
+- .splice_write = generic_file_splice_write,
+- .llseek = generic_file_llseek,
+-};
+-
+-#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+-
+-static const struct file_operations yaffs_file_operations = {
+- .read = do_sync_read,
+- .write = do_sync_write,
+- .aio_read = generic_file_aio_read,
+- .aio_write = generic_file_aio_write,
+- .mmap = generic_file_mmap,
+- .flush = yaffs_file_flush,
+- .fsync = yaffs_sync_object,
+- .sendfile = generic_file_sendfile,
+-};
+-
+-#else
+-
+-static const struct file_operations yaffs_file_operations = {
+- .read = generic_file_read,
+- .write = generic_file_write,
+- .mmap = generic_file_mmap,
+- .flush = yaffs_file_flush,
+- .fsync = yaffs_sync_object,
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+- .sendfile = generic_file_sendfile,
+-#endif
+-};
+-#endif
+-
+-static const struct inode_operations yaffs_file_inode_operations = {
+- .setattr = yaffs_setattr,
+-};
+-
+-static const struct inode_operations yaffs_symlink_inode_operations = {
+- .readlink = yaffs_readlink,
+- .follow_link = yaffs_follow_link,
+- .setattr = yaffs_setattr,
+-};
+-
+-static const struct inode_operations yaffs_dir_inode_operations = {
+- .create = yaffs_create,
+- .lookup = yaffs_lookup,
+- .link = yaffs_link,
+- .unlink = yaffs_unlink,
+- .symlink = yaffs_symlink,
+- .mkdir = yaffs_mkdir,
+- .rmdir = yaffs_unlink,
+- .mknod = yaffs_mknod,
+- .rename = yaffs_rename,
+- .setattr = yaffs_setattr,
+-};
+-
+-static const struct file_operations yaffs_dir_operations = {
+- .read = generic_read_dir,
+- .readdir = yaffs_readdir,
+- .fsync = yaffs_sync_object,
+-};
+-
+-static const struct super_operations yaffs_super_ops = {
+- .statfs = yaffs_statfs,
+-
+-#ifndef YAFFS_USE_OWN_IGET
+- .read_inode = yaffs_read_inode,
+-#endif
+-#ifdef YAFFS_HAS_PUT_INODE
+- .put_inode = yaffs_put_inode,
+-#endif
+- .put_super = yaffs_put_super,
+- .delete_inode = yaffs_delete_inode,
+- .clear_inode = yaffs_clear_inode,
+- .sync_fs = yaffs_sync_fs,
+- .write_super = yaffs_write_super,
+-};
+-
+-static void yaffs_GrossLock(yaffs_Device *dev)
+-{
+- T(YAFFS_TRACE_OS, ("yaffs locking %p\n", current));
+- down(&dev->grossLock);
+- T(YAFFS_TRACE_OS, ("yaffs locked %p\n", current));
+-}
+-
+-static void yaffs_GrossUnlock(yaffs_Device *dev)
+-{
+- T(YAFFS_TRACE_OS, ("yaffs unlocking %p\n", current));
+- up(&dev->grossLock);
+-}
+-
+-static int yaffs_readlink(struct dentry *dentry, char __user *buffer,
+- int buflen)
+-{
+- unsigned char *alias;
+- int ret;
+-
+- yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- alias = yaffs_GetSymlinkAlias(yaffs_DentryToObject(dentry));
+-
+- yaffs_GrossUnlock(dev);
+-
+- if (!alias)
+- return -ENOMEM;
+-
+- ret = vfs_readlink(dentry, buffer, buflen, alias);
+- kfree(alias);
+- return ret;
+-}
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+-static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+-#else
+-static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+-#endif
+-{
+- unsigned char *alias;
+- int ret;
+- yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- alias = yaffs_GetSymlinkAlias(yaffs_DentryToObject(dentry));
+-
+- yaffs_GrossUnlock(dev);
+-
+- if (!alias) {
+- ret = -ENOMEM;
+- goto out;
+- }
+-
+- ret = vfs_follow_link(nd, alias);
+- kfree(alias);
+-out:
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+- return ERR_PTR(ret);
+-#else
+- return ret;
+-#endif
+-}
+-
+-struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+- yaffs_Object *obj);
+-
+-/*
+- * Lookup is used to find objects in the fs
+- */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-
+-static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+- struct nameidata *n)
+-#else
+-static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
+-#endif
+-{
+- yaffs_Object *obj;
+- struct inode *inode = NULL; /* NCB 2.5/2.6 needs NULL here */
+-
+- yaffs_Device *dev = yaffs_InodeToObject(dir)->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_lookup for %d:%s\n",
+- yaffs_InodeToObject(dir)->objectId, dentry->d_name.name));
+-
+- obj = yaffs_FindObjectByName(yaffs_InodeToObject(dir),
+- dentry->d_name.name);
+-
+- obj = yaffs_GetEquivalentObject(obj); /* in case it was a hardlink */
+-
+- /* Can't hold gross lock when calling yaffs_get_inode() */
+- yaffs_GrossUnlock(dev);
+-
+- if (obj) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_lookup found %d\n", obj->objectId));
+-
+- inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+-
+- if (inode) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_loookup dentry \n"));
+-/* #if 0 asserted by NCB for 2.5/6 compatability - falls through to
+- * d_add even if NULL inode */
+-#if 0
+- /*dget(dentry); // try to solve directory bug */
+- d_add(dentry, inode);
+-
+- /* return dentry; */
+- return NULL;
+-#endif
+- }
+-
+- } else {
+- T(YAFFS_TRACE_OS, ("yaffs_lookup not found\n"));
+-
+- }
+-
+-/* added NCB for 2.5/6 compatability - forces add even if inode is
+- * NULL which creates dentry hash */
+- d_add(dentry, inode);
+-
+- return NULL;
+-}
+-
+-
+-#ifdef YAFFS_HAS_PUT_INODE
+-
+-/* For now put inode is just for debugging
+- * Put inode is called when the inode **structure** is put.
+- */
+-static void yaffs_put_inode(struct inode *inode)
+-{
+- T(YAFFS_TRACE_OS,
+- ("yaffs_put_inode: ino %d, count %d\n", (int)inode->i_ino,
+- atomic_read(&inode->i_count)));
+-
+-}
+-#endif
+-
+-/* clear is called to tell the fs to release any per-inode data it holds */
+-static void yaffs_clear_inode(struct inode *inode)
+-{
+- yaffs_Object *obj;
+- yaffs_Device *dev;
+-
+- obj = yaffs_InodeToObject(inode);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_clear_inode: ino %d, count %d %s\n", (int)inode->i_ino,
+- atomic_read(&inode->i_count),
+- obj ? "object exists" : "null object"));
+-
+- if (obj) {
+- dev = obj->myDev;
+- yaffs_GrossLock(dev);
+-
+- /* Clear the association between the inode and
+- * the yaffs_Object.
+- */
+- obj->myInode = NULL;
+- yaffs_InodeToObjectLV(inode) = NULL;
+-
+- /* If the object freeing was deferred, then the real
+- * free happens now.
+- * This should fix the inode inconsistency problem.
+- */
+-
+- yaffs_HandleDeferedFree(obj);
+-
+- yaffs_GrossUnlock(dev);
+- }
+-
+-}
+-
+-/* delete is called when the link count is zero and the inode
+- * is put (ie. nobody wants to know about it anymore, time to
+- * delete the file).
+- * NB Must call clear_inode()
+- */
+-static void yaffs_delete_inode(struct inode *inode)
+-{
+- yaffs_Object *obj = yaffs_InodeToObject(inode);
+- yaffs_Device *dev;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_delete_inode: ino %d, count %d %s\n", (int)inode->i_ino,
+- atomic_read(&inode->i_count),
+- obj ? "object exists" : "null object"));
+-
+- if (obj) {
+- dev = obj->myDev;
+- yaffs_GrossLock(dev);
+- yaffs_DeleteObject(obj);
+- yaffs_GrossUnlock(dev);
+- }
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+- truncate_inode_pages(&inode->i_data, 0);
+-#endif
+- clear_inode(inode);
+-}
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_file_flush(struct file *file, fl_owner_t id)
+-#else
+-static int yaffs_file_flush(struct file *file)
+-#endif
+-{
+- yaffs_Object *obj = yaffs_DentryToObject(file->f_dentry);
+-
+- yaffs_Device *dev = obj->myDev;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_file_flush object %d (%s)\n", obj->objectId,
+- obj->dirty ? "dirty" : "clean"));
+-
+- yaffs_GrossLock(dev);
+-
+- yaffs_FlushFile(obj, 1);
+-
+- yaffs_GrossUnlock(dev);
+-
+- return 0;
+-}
+-
+-static int yaffs_readpage_nolock(struct file *f, struct page *pg)
+-{
+- /* Lifted from jffs2 */
+-
+- yaffs_Object *obj;
+- unsigned char *pg_buf;
+- int ret;
+-
+- yaffs_Device *dev;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_readpage at %08x, size %08x\n",
+- (unsigned)(pg->index << PAGE_CACHE_SHIFT),
+- (unsigned)PAGE_CACHE_SIZE));
+-
+- obj = yaffs_DentryToObject(f->f_dentry);
+-
+- dev = obj->myDev;
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+- BUG_ON(!PageLocked(pg));
+-#else
+- if (!PageLocked(pg))
+- PAGE_BUG(pg);
+-#endif
+-
+- pg_buf = kmap(pg);
+- /* FIXME: Can kmap fail? */
+-
+- yaffs_GrossLock(dev);
+-
+- ret = yaffs_ReadDataFromFile(obj, pg_buf,
+- pg->index << PAGE_CACHE_SHIFT,
+- PAGE_CACHE_SIZE);
+-
+- yaffs_GrossUnlock(dev);
+-
+- if (ret >= 0)
+- ret = 0;
+-
+- if (ret) {
+- ClearPageUptodate(pg);
+- SetPageError(pg);
+- } else {
+- SetPageUptodate(pg);
+- ClearPageError(pg);
+- }
+-
+- flush_dcache_page(pg);
+- kunmap(pg);
+-
+- T(YAFFS_TRACE_OS, ("yaffs_readpage done\n"));
+- return ret;
+-}
+-
+-static int yaffs_readpage_unlock(struct file *f, struct page *pg)
+-{
+- int ret = yaffs_readpage_nolock(f, pg);
+- UnlockPage(pg);
+- return ret;
+-}
+-
+-static int yaffs_readpage(struct file *f, struct page *pg)
+-{
+- return yaffs_readpage_unlock(f, pg);
+-}
+-
+-/* writepage inspired by/stolen from smbfs */
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
+-#else
+-static int yaffs_writepage(struct page *page)
+-#endif
+-{
+- struct address_space *mapping = page->mapping;
+- loff_t offset = (loff_t) page->index << PAGE_CACHE_SHIFT;
+- struct inode *inode;
+- unsigned long end_index;
+- char *buffer;
+- yaffs_Object *obj;
+- int nWritten = 0;
+- unsigned nBytes;
+-
+- if (!mapping)
+- BUG();
+- inode = mapping->host;
+- if (!inode)
+- BUG();
+-
+- if (offset > inode->i_size) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_writepage at %08x, inode size = %08x!!!\n",
+- (unsigned)(page->index << PAGE_CACHE_SHIFT),
+- (unsigned)inode->i_size));
+- T(YAFFS_TRACE_OS,
+- (" -> don't care!!\n"));
+- unlock_page(page);
+- return 0;
+- }
+-
+- end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+-
+- /* easy case */
+- if (page->index < end_index)
+- nBytes = PAGE_CACHE_SIZE;
+- else
+- nBytes = inode->i_size & (PAGE_CACHE_SIZE - 1);
+-
+- get_page(page);
+-
+- buffer = kmap(page);
+-
+- obj = yaffs_InodeToObject(inode);
+- yaffs_GrossLock(obj->myDev);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_writepage at %08x, size %08x\n",
+- (unsigned)(page->index << PAGE_CACHE_SHIFT), nBytes));
+- T(YAFFS_TRACE_OS,
+- ("writepag0: obj = %05x, ino = %05x\n",
+- (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
+-
+- nWritten = yaffs_WriteDataToFile(obj, buffer,
+- page->index << PAGE_CACHE_SHIFT, nBytes, 0);
+-
+- T(YAFFS_TRACE_OS,
+- ("writepag1: obj = %05x, ino = %05x\n",
+- (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
+-
+- yaffs_GrossUnlock(obj->myDev);
+-
+- kunmap(page);
+- SetPageUptodate(page);
+- UnlockPage(page);
+- put_page(page);
+-
+- return (nWritten == nBytes) ? 0 : -ENOSPC;
+-}
+-
+-
+-#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+-static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
+- loff_t pos, unsigned len, unsigned flags,
+- struct page **pagep, void **fsdata)
+-{
+- struct page *pg = NULL;
+- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+- uint32_t offset = pos & (PAGE_CACHE_SIZE - 1);
+- uint32_t to = offset + len;
+-
+- int ret = 0;
+- int space_held = 0;
+-
+- T(YAFFS_TRACE_OS, ("start yaffs_write_begin\n"));
+- /* Get a page */
+-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28)
+- pg = grab_cache_page_write_begin(mapping, index, flags);
+-#else
+- pg = __grab_cache_page(mapping, index);
+-#endif
+-
+- *pagep = pg;
+- if (!pg) {
+- ret = -ENOMEM;
+- goto out;
+- }
+- /* Get fs space */
+- space_held = yaffs_hold_space(filp);
+-
+- if (!space_held) {
+- ret = -ENOSPC;
+- goto out;
+- }
+-
+- /* Update page if required */
+-
+- if (!Page_Uptodate(pg) && (offset || to < PAGE_CACHE_SIZE))
+- ret = yaffs_readpage_nolock(filp, pg);
+-
+- if (ret)
+- goto out;
+-
+- /* Happy path return */
+- T(YAFFS_TRACE_OS, ("end yaffs_write_begin - ok\n"));
+-
+- return 0;
+-
+-out:
+- T(YAFFS_TRACE_OS, ("end yaffs_write_begin fail returning %d\n", ret));
+- if (space_held)
+- yaffs_release_space(filp);
+- if (pg) {
+- unlock_page(pg);
+- page_cache_release(pg);
+- }
+- return ret;
+-}
+-
+-#else
+-
+-static int yaffs_prepare_write(struct file *f, struct page *pg,
+- unsigned offset, unsigned to)
+-{
+- T(YAFFS_TRACE_OS, ("yaffs_prepair_write\n"));
+-
+- if (!Page_Uptodate(pg) && (offset || to < PAGE_CACHE_SIZE))
+- return yaffs_readpage_nolock(f, pg);
+- return 0;
+-}
+-#endif
+-
+-#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+-static int yaffs_write_end(struct file *filp, struct address_space *mapping,
+- loff_t pos, unsigned len, unsigned copied,
+- struct page *pg, void *fsdadata)
+-{
+- int ret = 0;
+- void *addr, *kva;
+- uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
+-
+- kva = kmap(pg);
+- addr = kva + offset_into_page;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_write_end addr %x pos %x nBytes %d\n",
+- (unsigned) addr,
+- (int)pos, copied));
+-
+- ret = yaffs_file_write(filp, addr, copied, &pos);
+-
+- if (ret != copied) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_write_end not same size ret %d copied %d\n",
+- ret, copied));
+- SetPageError(pg);
+- ClearPageUptodate(pg);
+- } else {
+- SetPageUptodate(pg);
+- }
+-
+- kunmap(pg);
+-
+- yaffs_release_space(filp);
+- unlock_page(pg);
+- page_cache_release(pg);
+- return ret;
+-}
+-#else
+-
+-static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
+- unsigned to)
+-{
+- void *addr, *kva;
+-
+- loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
+- int nBytes = to - offset;
+- int nWritten;
+-
+- unsigned spos = pos;
+- unsigned saddr;
+-
+- kva = kmap(pg);
+- addr = kva + offset;
+-
+- saddr = (unsigned) addr;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_commit_write addr %x pos %x nBytes %d\n",
+- saddr, spos, nBytes));
+-
+- nWritten = yaffs_file_write(f, addr, nBytes, &pos);
+-
+- if (nWritten != nBytes) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_commit_write not same size nWritten %d nBytes %d\n",
+- nWritten, nBytes));
+- SetPageError(pg);
+- ClearPageUptodate(pg);
+- } else {
+- SetPageUptodate(pg);
+- }
+-
+- kunmap(pg);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_commit_write returning %d\n",
+- nWritten == nBytes ? 0 : nWritten));
+-
+- return nWritten == nBytes ? 0 : nWritten;
+-}
+-#endif
+-
+-
+-static void yaffs_FillInodeFromObject(struct inode *inode, yaffs_Object *obj)
+-{
+- if (inode && obj) {
+-
+-
+- /* Check mode against the variant type and attempt to repair if broken. */
+- __u32 mode = obj->yst_mode;
+- switch (obj->variantType) {
+- case YAFFS_OBJECT_TYPE_FILE:
+- if (!S_ISREG(mode)) {
+- obj->yst_mode &= ~S_IFMT;
+- obj->yst_mode |= S_IFREG;
+- }
+-
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- if (!S_ISLNK(mode)) {
+- obj->yst_mode &= ~S_IFMT;
+- obj->yst_mode |= S_IFLNK;
+- }
+-
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- if (!S_ISDIR(mode)) {
+- obj->yst_mode &= ~S_IFMT;
+- obj->yst_mode |= S_IFDIR;
+- }
+-
+- break;
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- default:
+- /* TODO? */
+- break;
+- }
+-
+- inode->i_flags |= S_NOATIME;
+-
+- inode->i_ino = obj->objectId;
+- inode->i_mode = obj->yst_mode;
+- inode->i_uid = obj->yst_uid;
+- inode->i_gid = obj->yst_gid;
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+- inode->i_blksize = inode->i_sb->s_blocksize;
+-#endif
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-
+- inode->i_rdev = old_decode_dev(obj->yst_rdev);
+- inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
+- inode->i_atime.tv_nsec = 0;
+- inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
+- inode->i_mtime.tv_nsec = 0;
+- inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
+- inode->i_ctime.tv_nsec = 0;
+-#else
+- inode->i_rdev = obj->yst_rdev;
+- inode->i_atime = obj->yst_atime;
+- inode->i_mtime = obj->yst_mtime;
+- inode->i_ctime = obj->yst_ctime;
+-#endif
+- inode->i_size = yaffs_GetObjectFileLength(obj);
+- inode->i_blocks = (inode->i_size + 511) >> 9;
+-
+- inode->i_nlink = yaffs_GetObjectLinkCount(obj);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_FillInode mode %x uid %d gid %d size %d count %d\n",
+- inode->i_mode, inode->i_uid, inode->i_gid,
+- (int)inode->i_size, atomic_read(&inode->i_count)));
+-
+- switch (obj->yst_mode & S_IFMT) {
+- default: /* fifo, device or socket */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+- init_special_inode(inode, obj->yst_mode,
+- old_decode_dev(obj->yst_rdev));
+-#else
+- init_special_inode(inode, obj->yst_mode,
+- (dev_t) (obj->yst_rdev));
+-#endif
+- break;
+- case S_IFREG: /* file */
+- inode->i_op = &yaffs_file_inode_operations;
+- inode->i_fop = &yaffs_file_operations;
+- inode->i_mapping->a_ops =
+- &yaffs_file_address_operations;
+- break;
+- case S_IFDIR: /* directory */
+- inode->i_op = &yaffs_dir_inode_operations;
+- inode->i_fop = &yaffs_dir_operations;
+- break;
+- case S_IFLNK: /* symlink */
+- inode->i_op = &yaffs_symlink_inode_operations;
+- break;
+- }
+-
+- yaffs_InodeToObjectLV(inode) = obj;
+-
+- obj->myInode = inode;
+-
+- } else {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_FileInode invalid parameters\n"));
+- }
+-
+-}
+-
+-struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+- yaffs_Object *obj)
+-{
+- struct inode *inode;
+-
+- if (!sb) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_get_inode for NULL super_block!!\n"));
+- return NULL;
+-
+- }
+-
+- if (!obj) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_get_inode for NULL object!!\n"));
+- return NULL;
+-
+- }
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_get_inode for object %d\n", obj->objectId));
+-
+- inode = Y_IGET(sb, obj->objectId);
+- if (IS_ERR(inode))
+- return NULL;
+-
+- /* NB Side effect: iget calls back to yaffs_read_inode(). */
+- /* iget also increments the inode's i_count */
+- /* NB You can't be holding grossLock or deadlock will happen! */
+-
+- return inode;
+-}
+-
+-static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+- loff_t *pos)
+-{
+- yaffs_Object *obj;
+- int nWritten, ipos;
+- struct inode *inode;
+- yaffs_Device *dev;
+-
+- obj = yaffs_DentryToObject(f->f_dentry);
+-
+- dev = obj->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- inode = f->f_dentry->d_inode;
+-
+- if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
+- ipos = inode->i_size;
+- else
+- ipos = *pos;
+-
+- if (!obj)
+- T(YAFFS_TRACE_OS,
+- ("yaffs_file_write: hey obj is null!\n"));
+- else
+- T(YAFFS_TRACE_OS,
+- ("yaffs_file_write about to write writing %zu bytes"
+- "to object %d at %d\n",
+- n, obj->objectId, ipos));
+-
+- nWritten = yaffs_WriteDataToFile(obj, buf, ipos, n, 0);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_file_write writing %zu bytes, %d written at %d\n",
+- n, nWritten, ipos));
+-
+- if (nWritten > 0) {
+- ipos += nWritten;
+- *pos = ipos;
+- if (ipos > inode->i_size) {
+- inode->i_size = ipos;
+- inode->i_blocks = (ipos + 511) >> 9;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_file_write size updated to %d bytes, "
+- "%d blocks\n",
+- ipos, (int)(inode->i_blocks)));
+- }
+-
+- }
+- yaffs_GrossUnlock(dev);
+- return nWritten == 0 ? -ENOSPC : nWritten;
+-}
+-
+-/* Space holding and freeing is done to ensure we have space available for write_begin/end */
+-/* For now we just assume few parallel writes and check against a small number. */
+-/* Todo: need to do this with a counter to handle parallel reads better */
+-
+-static ssize_t yaffs_hold_space(struct file *f)
+-{
+- yaffs_Object *obj;
+- yaffs_Device *dev;
+-
+- int nFreeChunks;
+-
+-
+- obj = yaffs_DentryToObject(f->f_dentry);
+-
+- dev = obj->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- nFreeChunks = yaffs_GetNumberOfFreeChunks(dev);
+-
+- yaffs_GrossUnlock(dev);
+-
+- return (nFreeChunks > 20) ? 1 : 0;
+-}
+-
+-static void yaffs_release_space(struct file *f)
+-{
+- yaffs_Object *obj;
+- yaffs_Device *dev;
+-
+-
+- obj = yaffs_DentryToObject(f->f_dentry);
+-
+- dev = obj->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+-
+- yaffs_GrossUnlock(dev);
+-}
+-
+-static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
+-{
+- yaffs_Object *obj;
+- yaffs_Device *dev;
+- struct inode *inode = f->f_dentry->d_inode;
+- unsigned long offset, curoffs;
+- struct ylist_head *i;
+- yaffs_Object *l;
+-
+- char name[YAFFS_MAX_NAME_LENGTH + 1];
+-
+- obj = yaffs_DentryToObject(f->f_dentry);
+- dev = obj->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- offset = f->f_pos;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_readdir: starting at %d\n", (int)offset));
+-
+- if (offset == 0) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_readdir: entry . ino %d \n",
+- (int)inode->i_ino));
+- if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0)
+- goto out;
+- offset++;
+- f->f_pos++;
+- }
+- if (offset == 1) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_readdir: entry .. ino %d \n",
+- (int)f->f_dentry->d_parent->d_inode->i_ino));
+- if (filldir(dirent, "..", 2, offset,
+- f->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0)
+- goto out;
+- offset++;
+- f->f_pos++;
+- }
+-
+- curoffs = 1;
+-
+- /* If the directory has changed since the open or last call to
+- readdir, rewind to after the 2 canned entries. */
+-
+- if (f->f_version != inode->i_version) {
+- offset = 2;
+- f->f_pos = offset;
+- f->f_version = inode->i_version;
+- }
+-
+- ylist_for_each(i, &obj->variant.directoryVariant.children) {
+- curoffs++;
+- if (curoffs >= offset) {
+- l = ylist_entry(i, yaffs_Object, siblings);
+-
+- yaffs_GetObjectName(l, name,
+- YAFFS_MAX_NAME_LENGTH + 1);
+- T(YAFFS_TRACE_OS,
+- ("yaffs_readdir: %s inode %d\n", name,
+- yaffs_GetObjectInode(l)));
+-
+- if (filldir(dirent,
+- name,
+- strlen(name),
+- offset,
+- yaffs_GetObjectInode(l),
+- yaffs_GetObjectType(l)) < 0)
+- goto up_and_out;
+-
+- offset++;
+- f->f_pos++;
+- }
+- }
+-
+-up_and_out:
+-out:
+- yaffs_GrossUnlock(dev);
+-
+- return 0;
+-}
+-
+-/*
+- * File creation. Allocate an inode, and we're done..
+- */
+-
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
+-#define YCRED(x) x
+-#else
+-#define YCRED(x) (x->cred)
+-#endif
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- dev_t rdev)
+-#else
+-static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+- int rdev)
+-#endif
+-{
+- struct inode *inode;
+-
+- yaffs_Object *obj = NULL;
+- yaffs_Device *dev;
+-
+- yaffs_Object *parent = yaffs_InodeToObject(dir);
+-
+- int error = -ENOSPC;
+- uid_t uid = YCRED(current)->fsuid;
+- gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
+-
+- if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
+- mode |= S_ISGID;
+-
+- if (parent) {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_mknod: parent object %d type %d\n",
+- parent->objectId, parent->variantType));
+- } else {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_mknod: could not get parent object\n"));
+- return -EPERM;
+- }
+-
+- T(YAFFS_TRACE_OS, ("yaffs_mknod: making oject for %s, "
+- "mode %x dev %x\n",
+- dentry->d_name.name, mode, rdev));
+-
+- dev = parent->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- switch (mode & S_IFMT) {
+- default:
+- /* Special (socket, fifo, device...) */
+- T(YAFFS_TRACE_OS, ("yaffs_mknod: making special\n"));
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+- obj = yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
+- gid, old_encode_dev(rdev));
+-#else
+- obj = yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
+- gid, rdev);
+-#endif
+- break;
+- case S_IFREG: /* file */
+- T(YAFFS_TRACE_OS, ("yaffs_mknod: making file\n"));
+- obj = yaffs_MknodFile(parent, dentry->d_name.name, mode, uid,
+- gid);
+- break;
+- case S_IFDIR: /* directory */
+- T(YAFFS_TRACE_OS,
+- ("yaffs_mknod: making directory\n"));
+- obj = yaffs_MknodDirectory(parent, dentry->d_name.name, mode,
+- uid, gid);
+- break;
+- case S_IFLNK: /* symlink */
+- T(YAFFS_TRACE_OS, ("yaffs_mknod: making symlink\n"));
+- obj = NULL; /* Do we ever get here? */
+- break;
+- }
+-
+- /* Can not call yaffs_get_inode() with gross lock held */
+- yaffs_GrossUnlock(dev);
+-
+- if (obj) {
+- inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
+- d_instantiate(dentry, inode);
+- T(YAFFS_TRACE_OS,
+- ("yaffs_mknod created object %d count = %d\n",
+- obj->objectId, atomic_read(&inode->i_count)));
+- error = 0;
+- } else {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_mknod failed making object\n"));
+- error = -ENOMEM;
+- }
+-
+- return error;
+-}
+-
+-static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+-{
+- int retVal;
+- T(YAFFS_TRACE_OS, ("yaffs_mkdir\n"));
+- retVal = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
+- return retVal;
+-}
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+- struct nameidata *n)
+-#else
+-static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
+-#endif
+-{
+- T(YAFFS_TRACE_OS, ("yaffs_create\n"));
+- return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
+-}
+-
+-static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
+-{
+- int retVal;
+-
+- yaffs_Device *dev;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_unlink %d:%s\n", (int)(dir->i_ino),
+- dentry->d_name.name));
+-
+- dev = yaffs_InodeToObject(dir)->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- retVal = yaffs_Unlink(yaffs_InodeToObject(dir), dentry->d_name.name);
+-
+- if (retVal == YAFFS_OK) {
+- dentry->d_inode->i_nlink--;
+- dir->i_version++;
+- yaffs_GrossUnlock(dev);
+- mark_inode_dirty(dentry->d_inode);
+- return 0;
+- }
+- yaffs_GrossUnlock(dev);
+- return -ENOTEMPTY;
+-}
+-
+-/*
+- * Create a link...
+- */
+-static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+- struct dentry *dentry)
+-{
+- struct inode *inode = old_dentry->d_inode;
+- yaffs_Object *obj = NULL;
+- yaffs_Object *link = NULL;
+- yaffs_Device *dev;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_link\n"));
+-
+- obj = yaffs_InodeToObject(inode);
+- dev = obj->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
+- link = yaffs_Link(yaffs_InodeToObject(dir), dentry->d_name.name,
+- obj);
+-
+- if (link) {
+- old_dentry->d_inode->i_nlink = yaffs_GetObjectLinkCount(obj);
+- d_instantiate(dentry, old_dentry->d_inode);
+- atomic_inc(&old_dentry->d_inode->i_count);
+- T(YAFFS_TRACE_OS,
+- ("yaffs_link link count %d i_count %d\n",
+- old_dentry->d_inode->i_nlink,
+- atomic_read(&old_dentry->d_inode->i_count)));
+- }
+-
+- yaffs_GrossUnlock(dev);
+-
+- if (link)
+- return 0;
+-
+- return -EPERM;
+-}
+-
+-static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+- const char *symname)
+-{
+- yaffs_Object *obj;
+- yaffs_Device *dev;
+- uid_t uid = YCRED(current)->fsuid;
+- gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_symlink\n"));
+-
+- dev = yaffs_InodeToObject(dir)->myDev;
+- yaffs_GrossLock(dev);
+- obj = yaffs_MknodSymLink(yaffs_InodeToObject(dir), dentry->d_name.name,
+- S_IFLNK | S_IRWXUGO, uid, gid, symname);
+- yaffs_GrossUnlock(dev);
+-
+- if (obj) {
+- struct inode *inode;
+-
+- inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+- d_instantiate(dentry, inode);
+- T(YAFFS_TRACE_OS, ("symlink created OK\n"));
+- return 0;
+- } else {
+- T(YAFFS_TRACE_OS, ("symlink not created\n"));
+- }
+-
+- return -ENOMEM;
+-}
+-
+-static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+- int datasync)
+-{
+-
+- yaffs_Object *obj;
+- yaffs_Device *dev;
+-
+- obj = yaffs_DentryToObject(dentry);
+-
+- dev = obj->myDev;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_sync_object\n"));
+- yaffs_GrossLock(dev);
+- yaffs_FlushFile(obj, 1);
+- yaffs_GrossUnlock(dev);
+- return 0;
+-}
+-
+-/*
+- * The VFS layer already does all the dentry stuff for rename.
+- *
+- * NB: POSIX says you can rename an object over an old object of the same name
+- */
+-static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+- struct inode *new_dir, struct dentry *new_dentry)
+-{
+- yaffs_Device *dev;
+- int retVal = YAFFS_FAIL;
+- yaffs_Object *target;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_rename\n"));
+- dev = yaffs_InodeToObject(old_dir)->myDev;
+-
+- yaffs_GrossLock(dev);
+-
+- /* Check if the target is an existing directory that is not empty. */
+- target = yaffs_FindObjectByName(yaffs_InodeToObject(new_dir),
+- new_dentry->d_name.name);
+-
+-
+-
+- if (target && target->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
+- !ylist_empty(&target->variant.directoryVariant.children)) {
+-
+- T(YAFFS_TRACE_OS, ("target is non-empty dir\n"));
+-
+- retVal = YAFFS_FAIL;
+- } else {
+- /* Now does unlinking internally using shadowing mechanism */
+- T(YAFFS_TRACE_OS, ("calling yaffs_RenameObject\n"));
+-
+- retVal = yaffs_RenameObject(yaffs_InodeToObject(old_dir),
+- old_dentry->d_name.name,
+- yaffs_InodeToObject(new_dir),
+- new_dentry->d_name.name);
+- }
+- yaffs_GrossUnlock(dev);
+-
+- if (retVal == YAFFS_OK) {
+- if (target) {
+- new_dentry->d_inode->i_nlink--;
+- mark_inode_dirty(new_dentry->d_inode);
+- }
+-
+- return 0;
+- } else {
+- return -ENOTEMPTY;
+- }
+-}
+-
+-static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
+-{
+- struct inode *inode = dentry->d_inode;
+- int error;
+- yaffs_Device *dev;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_setattr of object %d\n",
+- yaffs_InodeToObject(inode)->objectId));
+-
+- error = inode_change_ok(inode, attr);
+- if (error == 0) {
+- dev = yaffs_InodeToObject(inode)->myDev;
+- yaffs_GrossLock(dev);
+- if (yaffs_SetAttributes(yaffs_InodeToObject(inode), attr) ==
+- YAFFS_OK) {
+- error = 0;
+- } else {
+- error = -EPERM;
+- }
+- yaffs_GrossUnlock(dev);
+- if (!error)
+- error = inode_setattr(inode, attr);
+- }
+- return error;
+-}
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
+-{
+- yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
+- struct super_block *sb = dentry->d_sb;
+-#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
+-{
+- yaffs_Device *dev = yaffs_SuperToDevice(sb);
+-#else
+-static int yaffs_statfs(struct super_block *sb, struct statfs *buf)
+-{
+- yaffs_Device *dev = yaffs_SuperToDevice(sb);
+-#endif
+-
+- T(YAFFS_TRACE_OS, ("yaffs_statfs\n"));
+-
+- yaffs_GrossLock(dev);
+-
+- buf->f_type = YAFFS_MAGIC;
+- buf->f_bsize = sb->s_blocksize;
+- buf->f_namelen = 255;
+-
+- if (dev->nDataBytesPerChunk & (dev->nDataBytesPerChunk - 1)) {
+- /* Do this if chunk size is not a power of 2 */
+-
+- uint64_t bytesInDev;
+- uint64_t bytesFree;
+-
+- bytesInDev = ((uint64_t)((dev->endBlock - dev->startBlock + 1))) *
+- ((uint64_t)(dev->nChunksPerBlock * dev->nDataBytesPerChunk));
+-
+- do_div(bytesInDev, sb->s_blocksize); /* bytesInDev becomes the number of blocks */
+- buf->f_blocks = bytesInDev;
+-
+- bytesFree = ((uint64_t)(yaffs_GetNumberOfFreeChunks(dev))) *
+- ((uint64_t)(dev->nDataBytesPerChunk));
+-
+- do_div(bytesFree, sb->s_blocksize);
+-
+- buf->f_bfree = bytesFree;
+-
+- } else if (sb->s_blocksize > dev->nDataBytesPerChunk) {
+-
+- buf->f_blocks =
+- (dev->endBlock - dev->startBlock + 1) *
+- dev->nChunksPerBlock /
+- (sb->s_blocksize / dev->nDataBytesPerChunk);
+- buf->f_bfree =
+- yaffs_GetNumberOfFreeChunks(dev) /
+- (sb->s_blocksize / dev->nDataBytesPerChunk);
+- } else {
+- buf->f_blocks =
+- (dev->endBlock - dev->startBlock + 1) *
+- dev->nChunksPerBlock *
+- (dev->nDataBytesPerChunk / sb->s_blocksize);
+-
+- buf->f_bfree =
+- yaffs_GetNumberOfFreeChunks(dev) *
+- (dev->nDataBytesPerChunk / sb->s_blocksize);
+- }
+-
+- buf->f_files = 0;
+- buf->f_ffree = 0;
+- buf->f_bavail = buf->f_bfree;
+-
+- yaffs_GrossUnlock(dev);
+- return 0;
+-}
+-
+-
+-static int yaffs_do_sync_fs(struct super_block *sb)
+-{
+-
+- yaffs_Device *dev = yaffs_SuperToDevice(sb);
+- T(YAFFS_TRACE_OS, ("yaffs_do_sync_fs\n"));
+-
+- if (sb->s_dirt) {
+- yaffs_GrossLock(dev);
+-
+- if (dev) {
+- yaffs_FlushEntireDeviceCache(dev);
+- yaffs_CheckpointSave(dev);
+- }
+-
+- yaffs_GrossUnlock(dev);
+-
+- sb->s_dirt = 0;
+- }
+- return 0;
+-}
+-
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static void yaffs_write_super(struct super_block *sb)
+-#else
+-static int yaffs_write_super(struct super_block *sb)
+-#endif
+-{
+-
+- T(YAFFS_TRACE_OS, ("yaffs_write_super\n"));
+- if (yaffs_auto_checkpoint >= 2)
+- yaffs_do_sync_fs(sb);
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
+- return 0;
+-#endif
+-}
+-
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_sync_fs(struct super_block *sb, int wait)
+-#else
+-static int yaffs_sync_fs(struct super_block *sb)
+-#endif
+-{
+- T(YAFFS_TRACE_OS, ("yaffs_sync_fs\n"));
+-
+- if (yaffs_auto_checkpoint >= 1)
+- yaffs_do_sync_fs(sb);
+-
+- return 0;
+-}
+-
+-#ifdef YAFFS_USE_OWN_IGET
+-
+-static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
+-{
+- struct inode *inode;
+- yaffs_Object *obj;
+- yaffs_Device *dev = yaffs_SuperToDevice(sb);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_iget for %lu\n", ino));
+-
+- inode = iget_locked(sb, ino);
+- if (!inode)
+- return ERR_PTR(-ENOMEM);
+- if (!(inode->i_state & I_NEW))
+- return inode;
+-
+- /* NB This is called as a side effect of other functions, but
+- * we had to release the lock to prevent deadlocks, so
+- * need to lock again.
+- */
+-
+- yaffs_GrossLock(dev);
+-
+- obj = yaffs_FindObjectByNumber(dev, inode->i_ino);
+-
+- yaffs_FillInodeFromObject(inode, obj);
+-
+- yaffs_GrossUnlock(dev);
+-
+- unlock_new_inode(inode);
+- return inode;
+-}
+-
+-#else
+-
+-static void yaffs_read_inode(struct inode *inode)
+-{
+- /* NB This is called as a side effect of other functions, but
+- * we had to release the lock to prevent deadlocks, so
+- * need to lock again.
+- */
+-
+- yaffs_Object *obj;
+- yaffs_Device *dev = yaffs_SuperToDevice(inode->i_sb);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_read_inode for %d\n", (int)inode->i_ino));
+-
+- yaffs_GrossLock(dev);
+-
+- obj = yaffs_FindObjectByNumber(dev, inode->i_ino);
+-
+- yaffs_FillInodeFromObject(inode, obj);
+-
+- yaffs_GrossUnlock(dev);
+-}
+-
+-#endif
+-
+-static YLIST_HEAD(yaffs_dev_list);
+-
+-#if 0 /* not used */
+-static int yaffs_remount_fs(struct super_block *sb, int *flags, char *data)
+-{
+- yaffs_Device *dev = yaffs_SuperToDevice(sb);
+-
+- if (*flags & MS_RDONLY) {
+- struct mtd_info *mtd = yaffs_SuperToDevice(sb)->genericDevice;
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_remount_fs: %s: RO\n", dev->name));
+-
+- yaffs_GrossLock(dev);
+-
+- yaffs_FlushEntireDeviceCache(dev);
+-
+- yaffs_CheckpointSave(dev);
+-
+- if (mtd->sync)
+- mtd->sync(mtd);
+-
+- yaffs_GrossUnlock(dev);
+- } else {
+- T(YAFFS_TRACE_OS,
+- ("yaffs_remount_fs: %s: RW\n", dev->name));
+- }
+-
+- return 0;
+-}
+-#endif
+-
+-static void yaffs_put_super(struct super_block *sb)
+-{
+- yaffs_Device *dev = yaffs_SuperToDevice(sb);
+-
+- T(YAFFS_TRACE_OS, ("yaffs_put_super\n"));
+-
+- yaffs_GrossLock(dev);
+-
+- yaffs_FlushEntireDeviceCache(dev);
+-
+- yaffs_CheckpointSave(dev);
+-
+- if (dev->putSuperFunc)
+- dev->putSuperFunc(sb);
+-
+- yaffs_Deinitialise(dev);
+-
+- yaffs_GrossUnlock(dev);
+-
+- /* we assume this is protected by lock_kernel() in mount/umount */
+- ylist_del(&dev->devList);
+-
+- if (dev->spareBuffer) {
+- YFREE(dev->spareBuffer);
+- dev->spareBuffer = NULL;
+- }
+-
+- kfree(dev);
+-}
+-
+-
+-static void yaffs_MTDPutSuper(struct super_block *sb)
+-{
+- struct mtd_info *mtd = yaffs_SuperToDevice(sb)->genericDevice;
+-
+- if (mtd->sync)
+- mtd->sync(mtd);
+-
+- put_mtd_device(mtd);
+-}
+-
+-
+-static void yaffs_MarkSuperBlockDirty(void *vsb)
+-{
+- struct super_block *sb = (struct super_block *)vsb;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_MarkSuperBlockDirty() sb = %p\n", sb));
+- if (sb)
+- sb->s_dirt = 1;
+-}
+-
+-typedef struct {
+- int inband_tags;
+- int skip_checkpoint_read;
+- int skip_checkpoint_write;
+- int no_cache;
+-} yaffs_options;
+-
+-#define MAX_OPT_LEN 20
+-static int yaffs_parse_options(yaffs_options *options, const char *options_str)
+-{
+- char cur_opt[MAX_OPT_LEN + 1];
+- int p;
+- int error = 0;
+-
+- /* Parse through the options which is a comma seperated list */
+-
+- while (options_str && *options_str && !error) {
+- memset(cur_opt, 0, MAX_OPT_LEN + 1);
+- p = 0;
+-
+- while (*options_str && *options_str != ',') {
+- if (p < MAX_OPT_LEN) {
+- cur_opt[p] = *options_str;
+- p++;
+- }
+- options_str++;
+- }
+-
+- if (!strcmp(cur_opt, "inband-tags"))
+- options->inband_tags = 1;
+- else if (!strcmp(cur_opt, "no-cache"))
+- options->no_cache = 1;
+- else if (!strcmp(cur_opt, "no-checkpoint-read"))
+- options->skip_checkpoint_read = 1;
+- else if (!strcmp(cur_opt, "no-checkpoint-write"))
+- options->skip_checkpoint_write = 1;
+- else if (!strcmp(cur_opt, "no-checkpoint")) {
+- options->skip_checkpoint_read = 1;
+- options->skip_checkpoint_write = 1;
+- } else {
+- printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
+- cur_opt);
+- error = 1;
+- }
+- }
+-
+- return error;
+-}
+-
+-static struct super_block *yaffs_internal_read_super(int yaffsVersion,
+- struct super_block *sb,
+- void *data, int silent)
+-{
+- int nBlocks;
+- struct inode *inode = NULL;
+- struct dentry *root;
+- yaffs_Device *dev = 0;
+- char devname_buf[BDEVNAME_SIZE + 1];
+- struct mtd_info *mtd;
+- int err;
+- char *data_str = (char *)data;
+-
+- yaffs_options options;
+-
+- sb->s_magic = YAFFS_MAGIC;
+- sb->s_op = &yaffs_super_ops;
+- sb->s_flags |= MS_NOATIME;
+-
+- if (!sb)
+- printk(KERN_INFO "yaffs: sb is NULL\n");
+- else if (!sb->s_dev)
+- printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
+- else if (!yaffs_devname(sb, devname_buf))
+- printk(KERN_INFO "yaffs: devname is NULL\n");
+- else
+- printk(KERN_INFO "yaffs: dev is %d name is \"%s\"\n",
+- sb->s_dev,
+- yaffs_devname(sb, devname_buf));
+-
+- if (!data_str)
+- data_str = "";
+-
+- printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
+-
+- memset(&options, 0, sizeof(options));
+-
+- if (yaffs_parse_options(&options, data_str)) {
+- /* Option parsing failed */
+- return NULL;
+- }
+-
+-
+- sb->s_blocksize = PAGE_CACHE_SIZE;
+- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+- T(YAFFS_TRACE_OS, ("yaffs_read_super: Using yaffs%d\n", yaffsVersion));
+- T(YAFFS_TRACE_OS,
+- ("yaffs_read_super: block size %d\n", (int)(sb->s_blocksize)));
+-
+-#ifdef CONFIG_YAFFS_DISABLE_WRITE_VERIFY
+- T(YAFFS_TRACE_OS,
+- ("yaffs: Write verification disabled. All guarantees "
+- "null and void\n"));
+-#endif
+-
+- T(YAFFS_TRACE_ALWAYS, ("yaffs: Attempting MTD mount on %u.%u, "
+- "\"%s\"\n",
+- MAJOR(sb->s_dev), MINOR(sb->s_dev),
+- yaffs_devname(sb, devname_buf)));
+-
+- /* Check it's an mtd device..... */
+- if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR)
+- return NULL; /* This isn't an mtd device */
+-
+- /* Get the device */
+- mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
+- if (!mtd) {
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs: MTD device #%u doesn't appear to exist\n",
+- MINOR(sb->s_dev)));
+- return NULL;
+- }
+- /* Check it's NAND */
+- if (mtd->type != MTD_NANDFLASH) {
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs: MTD device is not NAND it's type %d\n", mtd->type));
+- return NULL;
+- }
+-
+- T(YAFFS_TRACE_OS, (" erase %p\n", mtd->erase));
+- T(YAFFS_TRACE_OS, (" read %p\n", mtd->read));
+- T(YAFFS_TRACE_OS, (" write %p\n", mtd->write));
+- T(YAFFS_TRACE_OS, (" readoob %p\n", mtd->read_oob));
+- T(YAFFS_TRACE_OS, (" writeoob %p\n", mtd->write_oob));
+- T(YAFFS_TRACE_OS, (" block_isbad %p\n", mtd->block_isbad));
+- T(YAFFS_TRACE_OS, (" block_markbad %p\n", mtd->block_markbad));
+- T(YAFFS_TRACE_OS, (" %s %d\n", WRITE_SIZE_STR, WRITE_SIZE(mtd)));
+- T(YAFFS_TRACE_OS, (" oobsize %d\n", mtd->oobsize));
+- T(YAFFS_TRACE_OS, (" erasesize %d\n", mtd->erasesize));
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
+- T(YAFFS_TRACE_OS, (" size %u\n", mtd->size));
+-#else
+- T(YAFFS_TRACE_OS, (" size %lld\n", mtd->size));
+-#endif
+-
+-#ifdef CONFIG_YAFFS_AUTO_YAFFS2
+-
+- if (yaffsVersion == 1 && WRITE_SIZE(mtd) >= 2048) {
+- T(YAFFS_TRACE_ALWAYS, ("yaffs: auto selecting yaffs2\n"));
+- yaffsVersion = 2;
+- }
+-
+- /* Added NCB 26/5/2006 for completeness */
+- if (yaffsVersion == 2 && !options.inband_tags && WRITE_SIZE(mtd) == 512) {
+- T(YAFFS_TRACE_ALWAYS, ("yaffs: auto selecting yaffs1\n"));
+- yaffsVersion = 1;
+- }
+-
+-#endif
+-
+- if (yaffsVersion == 2) {
+- /* Check for version 2 style functions */
+- if (!mtd->erase ||
+- !mtd->block_isbad ||
+- !mtd->block_markbad ||
+- !mtd->read ||
+- !mtd->write ||
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+- !mtd->read_oob || !mtd->write_oob) {
+-#else
+- !mtd->write_ecc ||
+- !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
+-#endif
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs: MTD device does not support required "
+- "functions\n"));;
+- return NULL;
+- }
+-
+- if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+- mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
+- !options.inband_tags) {
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs: MTD device does not have the "
+- "right page sizes\n"));
+- return NULL;
+- }
+- } else {
+- /* Check for V1 style functions */
+- if (!mtd->erase ||
+- !mtd->read ||
+- !mtd->write ||
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+- !mtd->read_oob || !mtd->write_oob) {
+-#else
+- !mtd->write_ecc ||
+- !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
+-#endif
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs: MTD device does not support required "
+- "functions\n"));;
+- return NULL;
+- }
+-
+- if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
+- mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs: MTD device does not support have the "
+- "right page sizes\n"));
+- return NULL;
+- }
+- }
+-
+- /* OK, so if we got here, we have an MTD that's NAND and looks
+- * like it has the right capabilities
+- * Set the yaffs_Device up for mtd
+- */
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+- sb->s_fs_info = dev = kmalloc(sizeof(yaffs_Device), GFP_KERNEL);
+-#else
+- sb->u.generic_sbp = dev = kmalloc(sizeof(yaffs_Device), GFP_KERNEL);
+-#endif
+- if (!dev) {
+- /* Deep shit could not allocate device structure */
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs_read_super: Failed trying to allocate "
+- "yaffs_Device. \n"));
+- return NULL;
+- }
+-
+- memset(dev, 0, sizeof(yaffs_Device));
+- dev->genericDevice = mtd;
+- dev->name = mtd->name;
+-
+- /* Set up the memory size parameters.... */
+-
+- nBlocks = YCALCBLOCKS(mtd->size, (YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK));
+-
+- dev->startBlock = 0;
+- dev->endBlock = nBlocks - 1;
+- dev->nChunksPerBlock = YAFFS_CHUNKS_PER_BLOCK;
+- dev->totalBytesPerChunk = YAFFS_BYTES_PER_CHUNK;
+- dev->nReservedBlocks = 5;
+- dev->nShortOpCaches = (options.no_cache) ? 0 : 10;
+- dev->inbandTags = options.inband_tags;
+-
+- /* ... and the functions. */
+- if (yaffsVersion == 2) {
+- dev->writeChunkWithTagsToNAND =
+- nandmtd2_WriteChunkWithTagsToNAND;
+- dev->readChunkWithTagsFromNAND =
+- nandmtd2_ReadChunkWithTagsFromNAND;
+- dev->markNANDBlockBad = nandmtd2_MarkNANDBlockBad;
+- dev->queryNANDBlock = nandmtd2_QueryNANDBlock;
+- dev->spareBuffer = YMALLOC(mtd->oobsize);
+- dev->isYaffs2 = 1;
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+- dev->totalBytesPerChunk = mtd->writesize;
+- dev->nChunksPerBlock = mtd->erasesize / mtd->writesize;
+-#else
+- dev->totalBytesPerChunk = mtd->oobblock;
+- dev->nChunksPerBlock = mtd->erasesize / mtd->oobblock;
+-#endif
+- nBlocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
+-
+- dev->startBlock = 0;
+- dev->endBlock = nBlocks - 1;
+- } else {
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+- /* use the MTD interface in yaffs_mtdif1.c */
+- dev->writeChunkWithTagsToNAND =
+- nandmtd1_WriteChunkWithTagsToNAND;
+- dev->readChunkWithTagsFromNAND =
+- nandmtd1_ReadChunkWithTagsFromNAND;
+- dev->markNANDBlockBad = nandmtd1_MarkNANDBlockBad;
+- dev->queryNANDBlock = nandmtd1_QueryNANDBlock;
+-#else
+- dev->writeChunkToNAND = nandmtd_WriteChunkToNAND;
+- dev->readChunkFromNAND = nandmtd_ReadChunkFromNAND;
+-#endif
+- dev->isYaffs2 = 0;
+- }
+- /* ... and common functions */
+- dev->eraseBlockInNAND = nandmtd_EraseBlockInNAND;
+- dev->initialiseNAND = nandmtd_InitialiseNAND;
+-
+- dev->putSuperFunc = yaffs_MTDPutSuper;
+-
+- dev->superBlock = (void *)sb;
+- dev->markSuperBlockDirty = yaffs_MarkSuperBlockDirty;
+-
+-
+-#ifndef CONFIG_YAFFS_DOES_ECC
+- dev->useNANDECC = 1;
+-#endif
+-
+-#ifdef CONFIG_YAFFS_DISABLE_WIDE_TNODES
+- dev->wideTnodesDisabled = 1;
+-#endif
+-
+- dev->skipCheckpointRead = options.skip_checkpoint_read;
+- dev->skipCheckpointWrite = options.skip_checkpoint_write;
+-
+- /* we assume this is protected by lock_kernel() in mount/umount */
+- ylist_add_tail(&dev->devList, &yaffs_dev_list);
+-
+- init_MUTEX(&dev->grossLock);
+-
+- yaffs_GrossLock(dev);
+-
+- err = yaffs_GutsInitialise(dev);
+-
+- T(YAFFS_TRACE_OS,
+- ("yaffs_read_super: guts initialised %s\n",
+- (err == YAFFS_OK) ? "OK" : "FAILED"));
+-
+- /* Release lock before yaffs_get_inode() */
+- yaffs_GrossUnlock(dev);
+-
+- /* Create root inode */
+- if (err == YAFFS_OK)
+- inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0,
+- yaffs_Root(dev));
+-
+- if (!inode)
+- return NULL;
+-
+- inode->i_op = &yaffs_dir_inode_operations;
+- inode->i_fop = &yaffs_dir_operations;
+-
+- T(YAFFS_TRACE_OS, ("yaffs_read_super: got root inode\n"));
+-
+- root = d_alloc_root(inode);
+-
+- T(YAFFS_TRACE_OS, ("yaffs_read_super: d_alloc_root done\n"));
+-
+- if (!root) {
+- iput(inode);
+- return NULL;
+- }
+- sb->s_root = root;
+- sb->s_dirt = !dev->isCheckpointed;
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs_read_super: isCheckpointed %d\n", dev->isCheckpointed));
+-
+- T(YAFFS_TRACE_OS, ("yaffs_read_super: done\n"));
+- return sb;
+-}
+-
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
+- int silent)
+-{
+- return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
+-}
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_read_super(struct file_system_type *fs,
+- int flags, const char *dev_name,
+- void *data, struct vfsmount *mnt)
+-{
+-
+- return get_sb_bdev(fs, flags, dev_name, data,
+- yaffs_internal_read_super_mtd, mnt);
+-}
+-#else
+-static struct super_block *yaffs_read_super(struct file_system_type *fs,
+- int flags, const char *dev_name,
+- void *data)
+-{
+-
+- return get_sb_bdev(fs, flags, dev_name, data,
+- yaffs_internal_read_super_mtd);
+-}
+-#endif
+-
+-static struct file_system_type yaffs_fs_type = {
+- .owner = THIS_MODULE,
+- .name = "yaffs",
+- .get_sb = yaffs_read_super,
+- .kill_sb = kill_block_super,
+- .fs_flags = FS_REQUIRES_DEV,
+-};
+-#else
+-static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
+- int silent)
+-{
+- return yaffs_internal_read_super(1, sb, data, silent);
+-}
+-
+-static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
+- FS_REQUIRES_DEV);
+-#endif
+-
+-
+-#ifdef CONFIG_YAFFS_YAFFS2
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+-static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
+- int silent)
+-{
+- return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
+-}
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs2_read_super(struct file_system_type *fs,
+- int flags, const char *dev_name, void *data,
+- struct vfsmount *mnt)
+-{
+- return get_sb_bdev(fs, flags, dev_name, data,
+- yaffs2_internal_read_super_mtd, mnt);
+-}
+-#else
+-static struct super_block *yaffs2_read_super(struct file_system_type *fs,
+- int flags, const char *dev_name,
+- void *data)
+-{
+-
+- return get_sb_bdev(fs, flags, dev_name, data,
+- yaffs2_internal_read_super_mtd);
+-}
+-#endif
+-
+-static struct file_system_type yaffs2_fs_type = {
+- .owner = THIS_MODULE,
+- .name = "yaffs2",
+- .get_sb = yaffs2_read_super,
+- .kill_sb = kill_block_super,
+- .fs_flags = FS_REQUIRES_DEV,
+-};
+-#else
+-static struct super_block *yaffs2_read_super(struct super_block *sb,
+- void *data, int silent)
+-{
+- return yaffs_internal_read_super(2, sb, data, silent);
+-}
+-
+-static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
+- FS_REQUIRES_DEV);
+-#endif
+-
+-#endif /* CONFIG_YAFFS_YAFFS2 */
+-
+-static struct proc_dir_entry *my_proc_entry;
+-
+-static char *yaffs_dump_dev(char *buf, yaffs_Device * dev)
+-{
+- buf += sprintf(buf, "startBlock......... %d\n", dev->startBlock);
+- buf += sprintf(buf, "endBlock........... %d\n", dev->endBlock);
+- buf += sprintf(buf, "totalBytesPerChunk. %d\n", dev->totalBytesPerChunk);
+- buf += sprintf(buf, "nDataBytesPerChunk. %d\n", dev->nDataBytesPerChunk);
+- buf += sprintf(buf, "chunkGroupBits..... %d\n", dev->chunkGroupBits);
+- buf += sprintf(buf, "chunkGroupSize..... %d\n", dev->chunkGroupSize);
+- buf += sprintf(buf, "nErasedBlocks...... %d\n", dev->nErasedBlocks);
+- buf += sprintf(buf, "nReservedBlocks.... %d\n", dev->nReservedBlocks);
+- buf += sprintf(buf, "blocksInCheckpoint. %d\n", dev->blocksInCheckpoint);
+- buf += sprintf(buf, "nTnodesCreated..... %d\n", dev->nTnodesCreated);
+- buf += sprintf(buf, "nFreeTnodes........ %d\n", dev->nFreeTnodes);
+- buf += sprintf(buf, "nObjectsCreated.... %d\n", dev->nObjectsCreated);
+- buf += sprintf(buf, "nFreeObjects....... %d\n", dev->nFreeObjects);
+- buf += sprintf(buf, "nFreeChunks........ %d\n", dev->nFreeChunks);
+- buf += sprintf(buf, "nPageWrites........ %d\n", dev->nPageWrites);
+- buf += sprintf(buf, "nPageReads......... %d\n", dev->nPageReads);
+- buf += sprintf(buf, "nBlockErasures..... %d\n", dev->nBlockErasures);
+- buf += sprintf(buf, "nGCCopies.......... %d\n", dev->nGCCopies);
+- buf += sprintf(buf, "garbageCollections. %d\n", dev->garbageCollections);
+- buf += sprintf(buf, "passiveGCs......... %d\n",
+- dev->passiveGarbageCollections);
+- buf += sprintf(buf, "nRetriedWrites..... %d\n", dev->nRetriedWrites);
+- buf += sprintf(buf, "nShortOpCaches..... %d\n", dev->nShortOpCaches);
+- buf += sprintf(buf, "nRetireBlocks...... %d\n", dev->nRetiredBlocks);
+- buf += sprintf(buf, "eccFixed........... %d\n", dev->eccFixed);
+- buf += sprintf(buf, "eccUnfixed......... %d\n", dev->eccUnfixed);
+- buf += sprintf(buf, "tagsEccFixed....... %d\n", dev->tagsEccFixed);
+- buf += sprintf(buf, "tagsEccUnfixed..... %d\n", dev->tagsEccUnfixed);
+- buf += sprintf(buf, "cacheHits.......... %d\n", dev->cacheHits);
+- buf += sprintf(buf, "nDeletedFiles...... %d\n", dev->nDeletedFiles);
+- buf += sprintf(buf, "nUnlinkedFiles..... %d\n", dev->nUnlinkedFiles);
+- buf +=
+- sprintf(buf, "nBackgroudDeletions %d\n", dev->nBackgroundDeletions);
+- buf += sprintf(buf, "useNANDECC......... %d\n", dev->useNANDECC);
+- buf += sprintf(buf, "isYaffs2........... %d\n", dev->isYaffs2);
+- buf += sprintf(buf, "inbandTags......... %d\n", dev->inbandTags);
+-
+- return buf;
+-}
+-
+-static int yaffs_proc_read(char *page,
+- char **start,
+- off_t offset, int count, int *eof, void *data)
+-{
+- struct ylist_head *item;
+- char *buf = page;
+- int step = offset;
+- int n = 0;
+-
+- /* Get proc_file_read() to step 'offset' by one on each sucessive call.
+- * We use 'offset' (*ppos) to indicate where we are in devList.
+- * This also assumes the user has posted a read buffer large
+- * enough to hold the complete output; but that's life in /proc.
+- */
+-
+- *(int *)start = 1;
+-
+- /* Print header first */
+- if (step == 0) {
+- buf += sprintf(buf, "YAFFS built:" __DATE__ " " __TIME__
+- "\n%s\n%s\n", yaffs_fs_c_version,
+- yaffs_guts_c_version);
+- }
+-
+- /* hold lock_kernel while traversing yaffs_dev_list */
+- lock_kernel();
+-
+- /* Locate and print the Nth entry. Order N-squared but N is small. */
+- ylist_for_each(item, &yaffs_dev_list) {
+- yaffs_Device *dev = ylist_entry(item, yaffs_Device, devList);
+- if (n < step) {
+- n++;
+- continue;
+- }
+- buf += sprintf(buf, "\nDevice %d \"%s\"\n", n, dev->name);
+- buf = yaffs_dump_dev(buf, dev);
+- break;
+- }
+- unlock_kernel();
+-
+- return buf - page < count ? buf - page : count;
+-}
+-
+-/**
+- * Set the verbosity of the warnings and error messages.
+- *
+- * Note that the names can only be a..z or _ with the current code.
+- */
+-
+-static struct {
+- char *mask_name;
+- unsigned mask_bitfield;
+-} mask_flags[] = {
+- {"allocate", YAFFS_TRACE_ALLOCATE},
+- {"always", YAFFS_TRACE_ALWAYS},
+- {"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
+- {"buffers", YAFFS_TRACE_BUFFERS},
+- {"bug", YAFFS_TRACE_BUG},
+- {"checkpt", YAFFS_TRACE_CHECKPOINT},
+- {"deletion", YAFFS_TRACE_DELETION},
+- {"erase", YAFFS_TRACE_ERASE},
+- {"error", YAFFS_TRACE_ERROR},
+- {"gc_detail", YAFFS_TRACE_GC_DETAIL},
+- {"gc", YAFFS_TRACE_GC},
+- {"mtd", YAFFS_TRACE_MTD},
+- {"nandaccess", YAFFS_TRACE_NANDACCESS},
+- {"os", YAFFS_TRACE_OS},
+- {"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
+- {"scan", YAFFS_TRACE_SCAN},
+- {"tracing", YAFFS_TRACE_TRACING},
+-
+- {"verify", YAFFS_TRACE_VERIFY},
+- {"verify_nand", YAFFS_TRACE_VERIFY_NAND},
+- {"verify_full", YAFFS_TRACE_VERIFY_FULL},
+- {"verify_all", YAFFS_TRACE_VERIFY_ALL},
+-
+- {"write", YAFFS_TRACE_WRITE},
+- {"all", 0xffffffff},
+- {"none", 0},
+- {NULL, 0},
+-};
+-
+-#define MAX_MASK_NAME_LENGTH 40
+-static int yaffs_proc_write(struct file *file, const char *buf,
+- unsigned long count, void *data)
+-{
+- unsigned rg = 0, mask_bitfield;
+- char *end;
+- char *mask_name;
+- const char *x;
+- char substring[MAX_MASK_NAME_LENGTH + 1];
+- int i;
+- int done = 0;
+- int add, len = 0;
+- int pos = 0;
+-
+- rg = yaffs_traceMask;
+-
+- while (!done && (pos < count)) {
+- done = 1;
+- while ((pos < count) && isspace(buf[pos]))
+- pos++;
+-
+- switch (buf[pos]) {
+- case '+':
+- case '-':
+- case '=':
+- add = buf[pos];
+- pos++;
+- break;
+-
+- default:
+- add = ' ';
+- break;
+- }
+- mask_name = NULL;
+-
+- mask_bitfield = simple_strtoul(buf + pos, &end, 0);
+-
+- if (end > buf + pos) {
+- mask_name = "numeral";
+- len = end - (buf + pos);
+- pos += len;
+- done = 0;
+- } else {
+- for (x = buf + pos, i = 0;
+- (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
+- i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
+- substring[i] = *x;
+- substring[i] = '\0';
+-
+- for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+- if (strcmp(substring, mask_flags[i].mask_name) == 0) {
+- mask_name = mask_flags[i].mask_name;
+- mask_bitfield = mask_flags[i].mask_bitfield;
+- done = 0;
+- break;
+- }
+- }
+- }
+-
+- if (mask_name != NULL) {
+- done = 0;
+- switch (add) {
+- case '-':
+- rg &= ~mask_bitfield;
+- break;
+- case '+':
+- rg |= mask_bitfield;
+- break;
+- case '=':
+- rg = mask_bitfield;
+- break;
+- default:
+- rg |= mask_bitfield;
+- break;
+- }
+- }
+- }
+-
+- yaffs_traceMask = rg | YAFFS_TRACE_ALWAYS;
+-
+- printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_traceMask);
+-
+- if (rg & YAFFS_TRACE_ALWAYS) {
+- for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+- char flag;
+- flag = ((rg & mask_flags[i].mask_bitfield) == mask_flags[i].mask_bitfield) ? '+' : '-';
+- printk(KERN_DEBUG "%c%s\n", flag, mask_flags[i].mask_name);
+- }
+- }
+-
+- return count;
+-}
+-
+-/* Stuff to handle installation of file systems */
+-struct file_system_to_install {
+- struct file_system_type *fst;
+- int installed;
+-};
+-
+-static struct file_system_to_install fs_to_install[] = {
+- {&yaffs_fs_type, 0},
+- {&yaffs2_fs_type, 0},
+- {NULL, 0}
+-};
+-
+-static int __init init_yaffs_fs(void)
+-{
+- int error = 0;
+- struct file_system_to_install *fsinst;
+-
+- T(YAFFS_TRACE_ALWAYS,
+- ("yaffs " __DATE__ " " __TIME__ " Installing. \n"));
+-
+- /* Install the proc_fs entry */
+- my_proc_entry = create_proc_entry("yaffs",
+- S_IRUGO | S_IFREG,
+- YPROC_ROOT);
+-
+- if (my_proc_entry) {
+- my_proc_entry->write_proc = yaffs_proc_write;
+- my_proc_entry->read_proc = yaffs_proc_read;
+- my_proc_entry->data = NULL;
+- } else
+- return -ENOMEM;
+-
+- /* Now add the file system entries */
+-
+- fsinst = fs_to_install;
+-
+- while (fsinst->fst && !error) {
+- error = register_filesystem(fsinst->fst);
+- if (!error)
+- fsinst->installed = 1;
+- fsinst++;
+- }
+-
+- /* Any errors? uninstall */
+- if (error) {
+- fsinst = fs_to_install;
+-
+- while (fsinst->fst) {
+- if (fsinst->installed) {
+- unregister_filesystem(fsinst->fst);
+- fsinst->installed = 0;
+- }
+- fsinst++;
+- }
+- }
+-
+- return error;
+-}
+-
+-static void __exit exit_yaffs_fs(void)
+-{
+-
+- struct file_system_to_install *fsinst;
+-
+- T(YAFFS_TRACE_ALWAYS, ("yaffs " __DATE__ " " __TIME__
+- " removing. \n"));
+-
+- remove_proc_entry("yaffs", YPROC_ROOT);
+-
+- fsinst = fs_to_install;
+-
+- while (fsinst->fst) {
+- if (fsinst->installed) {
+- unregister_filesystem(fsinst->fst);
+- fsinst->installed = 0;
+- }
+- fsinst++;
+- }
+-}
+-
+-module_init(init_yaffs_fs)
+-module_exit(exit_yaffs_fs)
+-
+-MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
+-MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2006");
+-MODULE_LICENSE("GPL");
+--- a/fs/yaffs2/yaffs_getblockinfo.h
++++ b/fs/yaffs2/yaffs_getblockinfo.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -17,18 +17,19 @@
+ #define __YAFFS_GETBLOCKINFO_H__
+
+ #include "yaffs_guts.h"
++#include "yaffs_trace.h"
+
+ /* Function to manipulate block info */
+-static Y_INLINE yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device * dev, int blk)
++static Y_INLINE yaffs_block_info_t *yaffs_get_block_info(yaffs_dev_t * dev, int blk)
+ {
+- if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
++ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>> yaffs: getBlockInfo block %d is not valid" TENDSTR),
+ blk));
+ YBUG();
+ }
+- return &dev->blockInfo[blk - dev->internalStartBlock];
++ return &dev->block_info[blk - dev->internal_start_block];
+ }
+
+ #endif
+--- a/fs/yaffs2/yaffs_guts.c
++++ b/fs/yaffs2/yaffs_guts.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -10,11 +10,8 @@
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+-
+-const char *yaffs_guts_c_version =
+- "$Id: yaffs_guts.c,v 1.82 2009-03-09 04:24:17 charles Exp $";
+-
+ #include "yportenv.h"
++#include "yaffs_trace.h"
+
+ #include "yaffsinterface.h"
+ #include "yaffs_guts.h"
+@@ -22,118 +19,109 @@ const char *yaffs_guts_c_version =
+ #include "yaffs_getblockinfo.h"
+
+ #include "yaffs_tagscompat.h"
+-#ifndef CONFIG_YAFFS_USE_OWN_SORT
+-#include "yaffs_qsort.h"
+-#endif
++
+ #include "yaffs_nand.h"
+
+-#include "yaffs_checkptrw.h"
++#include "yaffs_yaffs1.h"
++#include "yaffs_yaffs2.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_verify.h"
+
+ #include "yaffs_nand.h"
+ #include "yaffs_packedtags2.h"
+
++#include "yaffs_nameval.h"
++#include "yaffs_allocator.h"
+
+-#define YAFFS_PASSIVE_GC_CHUNKS 2
++/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
++#define YAFFS_GC_GOOD_ENOUGH 2
++#define YAFFS_GC_PASSIVE_THRESHOLD 4
+
+ #include "yaffs_ecc.h"
+
+
++
+ /* Robustification (if it ever comes about...) */
+-static void yaffs_RetireBlock(yaffs_Device *dev, int blockInNAND);
+-static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND,
++static void yaffs_retire_block(yaffs_dev_t *dev, int flash_block);
++static void yaffs_handle_chunk_wr_error(yaffs_dev_t *dev, int nand_chunk,
+ int erasedOk);
+-static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++static void yaffs_handle_chunk_wr_ok(yaffs_dev_t *dev, int nand_chunk,
+ const __u8 *data,
+- const yaffs_ExtendedTags *tags);
+-static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
+- const yaffs_ExtendedTags *tags);
++ const yaffs_ext_tags *tags);
++static void yaffs_handle_chunk_update(yaffs_dev_t *dev, int nand_chunk,
++ const yaffs_ext_tags *tags);
+
+ /* Other local prototypes */
+-static int yaffs_UnlinkObject(yaffs_Object *obj);
+-static int yaffs_ObjectHasCachedWriteData(yaffs_Object *obj);
+-
+-static void yaffs_HardlinkFixup(yaffs_Device *dev, yaffs_Object *hardList);
++static void yaffs_update_parent(yaffs_obj_t *obj);
++static int yaffs_unlink_obj(yaffs_obj_t *obj);
++static int yaffs_obj_cache_dirty(yaffs_obj_t *obj);
+
+-static int yaffs_WriteNewChunkWithTagsToNAND(yaffs_Device *dev,
++static int yaffs_write_new_chunk(yaffs_dev_t *dev,
+ const __u8 *buffer,
+- yaffs_ExtendedTags *tags,
++ yaffs_ext_tags *tags,
+ int useReserve);
+-static int yaffs_PutChunkIntoFile(yaffs_Object *in, int chunkInInode,
+- int chunkInNAND, int inScan);
+
+-static yaffs_Object *yaffs_CreateNewObject(yaffs_Device *dev, int number,
+- yaffs_ObjectType type);
+-static void yaffs_AddObjectToDirectory(yaffs_Object *directory,
+- yaffs_Object *obj);
+-static int yaffs_UpdateObjectHeader(yaffs_Object *in, const YCHAR *name,
+- int force, int isShrink, int shadows);
+-static void yaffs_RemoveObjectFromDirectory(yaffs_Object *obj);
+-static int yaffs_CheckStructures(void);
+-static int yaffs_DeleteWorker(yaffs_Object *in, yaffs_Tnode *tn, __u32 level,
+- int chunkOffset, int *limit);
+-static int yaffs_DoGenericObjectDeletion(yaffs_Object *in);
+-
+-static yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device *dev, int blockNo);
+
++static yaffs_obj_t *yaffs_new_obj(yaffs_dev_t *dev, int number,
++ yaffs_obj_type type);
+
+-static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND);
+
+-static int yaffs_UnlinkWorker(yaffs_Object *obj);
++static int yaffs_apply_xattrib_mod(yaffs_obj_t *obj, char *buffer, yaffs_xattr_mod *xmod);
+
+-static int yaffs_TagsMatch(const yaffs_ExtendedTags *tags, int objectId,
+- int chunkInObject);
++static void yaffs_remove_obj_from_dir(yaffs_obj_t *obj);
++static int yaffs_check_structures(void);
++static int yaffs_generic_obj_del(yaffs_obj_t *in);
++
++static int yaffs_check_chunk_erased(struct yaffs_dev_s *dev,
++ int nand_chunk);
+
+-static int yaffs_AllocateChunk(yaffs_Device *dev, int useReserve,
+- yaffs_BlockInfo **blockUsedPtr);
++static int yaffs_unlink_worker(yaffs_obj_t *obj);
+
+-static void yaffs_VerifyFreeChunks(yaffs_Device *dev);
++static int yaffs_tags_match(const yaffs_ext_tags *tags, int obj_id,
++ int chunkInObject);
+
+-static void yaffs_CheckObjectDetailsLoaded(yaffs_Object *in);
++static int yaffs_alloc_chunk(yaffs_dev_t *dev, int useReserve,
++ yaffs_block_info_t **blockUsedPtr);
+
+-static void yaffs_VerifyDirectory(yaffs_Object *directory);
+-#ifdef YAFFS_PARANOID
+-static int yaffs_CheckFileSanity(yaffs_Object *in);
+-#else
+-#define yaffs_CheckFileSanity(in)
+-#endif
++static void yaffs_check_obj_details_loaded(yaffs_obj_t *in);
++
++static void yaffs_invalidate_whole_cache(yaffs_obj_t *in);
++static void yaffs_invalidate_chunk_cache(yaffs_obj_t *object, int chunk_id);
+
+-static void yaffs_InvalidateWholeChunkCache(yaffs_Object *in);
+-static void yaffs_InvalidateChunkCache(yaffs_Object *object, int chunkId);
++static int yaffs_find_chunk_in_file(yaffs_obj_t *in, int inode_chunk,
++ yaffs_ext_tags *tags);
+
+-static void yaffs_InvalidateCheckpoint(yaffs_Device *dev);
++static int yaffs_verify_chunk_written(yaffs_dev_t *dev,
++ int nand_chunk,
++ const __u8 *data,
++ yaffs_ext_tags *tags);
+
+-static int yaffs_FindChunkInFile(yaffs_Object *in, int chunkInInode,
+- yaffs_ExtendedTags *tags);
+
+-static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn,
+- unsigned pos);
+-static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device *dev,
+- yaffs_FileStructure *fStruct,
+- __u32 chunkId);
++static void yaffs_load_name_from_oh(yaffs_dev_t *dev,YCHAR *name, const YCHAR *ohName, int bufferSize);
++static void yaffs_load_oh_from_name(yaffs_dev_t *dev,YCHAR *ohName, const YCHAR *name);
+
+
+ /* Function to calculate chunk and offset */
+
+-static void yaffs_AddrToChunk(yaffs_Device *dev, loff_t addr, int *chunkOut,
++static void yaffs_addr_to_chunk(yaffs_dev_t *dev, loff_t addr, int *chunkOut,
+ __u32 *offsetOut)
+ {
+ int chunk;
+ __u32 offset;
+
+- chunk = (__u32)(addr >> dev->chunkShift);
++ chunk = (__u32)(addr >> dev->chunk_shift);
+
+- if (dev->chunkDiv == 1) {
++ if (dev->chunk_div == 1) {
+ /* easy power of 2 case */
+- offset = (__u32)(addr & dev->chunkMask);
++ offset = (__u32)(addr & dev->chunk_mask);
+ } else {
+ /* Non power-of-2 case */
+
+ loff_t chunkBase;
+
+- chunk /= dev->chunkDiv;
++ chunk /= dev->chunk_div;
+
+- chunkBase = ((loff_t)chunk) * dev->nDataBytesPerChunk;
++ chunkBase = ((loff_t)chunk) * dev->data_bytes_per_chunk;
+ offset = (__u32)(addr - chunkBase);
+ }
+
+@@ -172,7 +160,7 @@ static __u32 ShiftsGE(__u32 x)
+
+ static __u32 Shifts(__u32 x)
+ {
+- int nShifts;
++ __u32 nShifts;
+
+ nShifts = 0;
+
+@@ -193,49 +181,49 @@ static __u32 Shifts(__u32 x)
+ * Temporary buffer manipulations.
+ */
+
+-static int yaffs_InitialiseTempBuffers(yaffs_Device *dev)
++static int yaffs_init_tmp_buffers(yaffs_dev_t *dev)
+ {
+ int i;
+ __u8 *buf = (__u8 *)1;
+
+- memset(dev->tempBuffer, 0, sizeof(dev->tempBuffer));
++ memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
+
+ for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
+- dev->tempBuffer[i].line = 0; /* not in use */
+- dev->tempBuffer[i].buffer = buf =
+- YMALLOC_DMA(dev->totalBytesPerChunk);
++ dev->temp_buffer[i].line = 0; /* not in use */
++ dev->temp_buffer[i].buffer = buf =
++ YMALLOC_DMA(dev->param.total_bytes_per_chunk);
+ }
+
+ return buf ? YAFFS_OK : YAFFS_FAIL;
+ }
+
+-__u8 *yaffs_GetTempBuffer(yaffs_Device *dev, int lineNo)
++__u8 *yaffs_get_temp_buffer(yaffs_dev_t *dev, int line_no)
+ {
+ int i, j;
+
+- dev->tempInUse++;
+- if (dev->tempInUse > dev->maxTemp)
+- dev->maxTemp = dev->tempInUse;
++ dev->temp_in_use++;
++ if (dev->temp_in_use > dev->max_temp)
++ dev->max_temp = dev->temp_in_use;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+- if (dev->tempBuffer[i].line == 0) {
+- dev->tempBuffer[i].line = lineNo;
+- if ((i + 1) > dev->maxTemp) {
+- dev->maxTemp = i + 1;
++ if (dev->temp_buffer[i].line == 0) {
++ dev->temp_buffer[i].line = line_no;
++ if ((i + 1) > dev->max_temp) {
++ dev->max_temp = i + 1;
+ for (j = 0; j <= i; j++)
+- dev->tempBuffer[j].maxLine =
+- dev->tempBuffer[j].line;
++ dev->temp_buffer[j].max_line =
++ dev->temp_buffer[j].line;
+ }
+
+- return dev->tempBuffer[i].buffer;
++ return dev->temp_buffer[i].buffer;
+ }
+ }
+
+ T(YAFFS_TRACE_BUFFERS,
+ (TSTR("Out of temp buffers at line %d, other held by lines:"),
+- lineNo));
++ line_no));
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+- T(YAFFS_TRACE_BUFFERS, (TSTR(" %d "), dev->tempBuffer[i].line));
++ T(YAFFS_TRACE_BUFFERS, (TSTR(" %d "), dev->temp_buffer[i].line));
+
+ T(YAFFS_TRACE_BUFFERS, (TSTR(" " TENDSTR)));
+
+@@ -244,21 +232,21 @@ __u8 *yaffs_GetTempBuffer(yaffs_Device *
+ * This is not good.
+ */
+
+- dev->unmanagedTempAllocations++;
+- return YMALLOC(dev->nDataBytesPerChunk);
++ dev->unmanaged_buffer_allocs++;
++ return YMALLOC(dev->data_bytes_per_chunk);
+
+ }
+
+-void yaffs_ReleaseTempBuffer(yaffs_Device *dev, __u8 *buffer,
+- int lineNo)
++void yaffs_release_temp_buffer(yaffs_dev_t *dev, __u8 *buffer,
++ int line_no)
+ {
+ int i;
+
+- dev->tempInUse--;
++ dev->temp_in_use--;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+- if (dev->tempBuffer[i].buffer == buffer) {
+- dev->tempBuffer[i].line = 0;
++ if (dev->temp_buffer[i].buffer == buffer) {
++ dev->temp_buffer[i].line = 0;
+ return;
+ }
+ }
+@@ -267,9 +255,9 @@ void yaffs_ReleaseTempBuffer(yaffs_Devic
+ /* assume it is an unmanaged one. */
+ T(YAFFS_TRACE_BUFFERS,
+ (TSTR("Releasing unmanaged temp buffer in line %d" TENDSTR),
+- lineNo));
++ line_no));
+ YFREE(buffer);
+- dev->unmanagedTempDeallocations++;
++ dev->unmanaged_buffer_deallocs++;
+ }
+
+ }
+@@ -277,21 +265,21 @@ void yaffs_ReleaseTempBuffer(yaffs_Devic
+ /*
+ * Determine if we have a managed buffer.
+ */
+-int yaffs_IsManagedTempBuffer(yaffs_Device *dev, const __u8 *buffer)
++int yaffs_is_managed_tmp_buffer(yaffs_dev_t *dev, const __u8 *buffer)
+ {
+ int i;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+- if (dev->tempBuffer[i].buffer == buffer)
++ if (dev->temp_buffer[i].buffer == buffer)
+ return 1;
+ }
+
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].data == buffer)
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].data == buffer)
+ return 1;
+ }
+
+- if (buffer == dev->checkpointBuffer)
++ if (buffer == dev->checkpt_buffer)
+ return 1;
+
+ T(YAFFS_TRACE_ALWAYS,
+@@ -299,6397 +287,4205 @@ int yaffs_IsManagedTempBuffer(yaffs_Devi
+ return 0;
+ }
+
+-
+-
+ /*
+- * Chunk bitmap manipulations
++ * Verification code
+ */
+
+-static Y_INLINE __u8 *yaffs_BlockBits(yaffs_Device *dev, int blk)
+-{
+- if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("**>> yaffs: BlockBits block %d is not valid" TENDSTR),
+- blk));
+- YBUG();
+- }
+- return dev->chunkBits +
+- (dev->chunkBitmapStride * (blk - dev->internalStartBlock));
+-}
+
+-static Y_INLINE void yaffs_VerifyChunkBitId(yaffs_Device *dev, int blk, int chunk)
+-{
+- if (blk < dev->internalStartBlock || blk > dev->internalEndBlock ||
+- chunk < 0 || chunk >= dev->nChunksPerBlock) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("**>> yaffs: Chunk Id (%d:%d) invalid"TENDSTR),
+- blk, chunk));
+- YBUG();
+- }
+-}
+
+-static Y_INLINE void yaffs_ClearChunkBits(yaffs_Device *dev, int blk)
+-{
+- __u8 *blkBits = yaffs_BlockBits(dev, blk);
+
+- memset(blkBits, 0, dev->chunkBitmapStride);
+-}
++/*
++ * Simple hash function. Needs to have a reasonable spread
++ */
+
+-static Y_INLINE void yaffs_ClearChunkBit(yaffs_Device *dev, int blk, int chunk)
++static Y_INLINE int yaffs_hash_fn(int n)
+ {
+- __u8 *blkBits = yaffs_BlockBits(dev, blk);
+-
+- yaffs_VerifyChunkBitId(dev, blk, chunk);
+-
+- blkBits[chunk / 8] &= ~(1 << (chunk & 7));
++ n = abs(n);
++ return n % YAFFS_NOBJECT_BUCKETS;
+ }
+
+-static Y_INLINE void yaffs_SetChunkBit(yaffs_Device *dev, int blk, int chunk)
+-{
+- __u8 *blkBits = yaffs_BlockBits(dev, blk);
+-
+- yaffs_VerifyChunkBitId(dev, blk, chunk);
+-
+- blkBits[chunk / 8] |= (1 << (chunk & 7));
+-}
++/*
++ * Access functions to useful fake objects.
++ * Note that root might have a presence in NAND if permissions are set.
++ */
+
+-static Y_INLINE int yaffs_CheckChunkBit(yaffs_Device *dev, int blk, int chunk)
++yaffs_obj_t *yaffs_root(yaffs_dev_t *dev)
+ {
+- __u8 *blkBits = yaffs_BlockBits(dev, blk);
+- yaffs_VerifyChunkBitId(dev, blk, chunk);
+-
+- return (blkBits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
++ return dev->root_dir;
+ }
+
+-static Y_INLINE int yaffs_StillSomeChunkBits(yaffs_Device *dev, int blk)
++yaffs_obj_t *yaffs_lost_n_found(yaffs_dev_t *dev)
+ {
+- __u8 *blkBits = yaffs_BlockBits(dev, blk);
+- int i;
+- for (i = 0; i < dev->chunkBitmapStride; i++) {
+- if (*blkBits)
+- return 1;
+- blkBits++;
+- }
+- return 0;
++ return dev->lost_n_found;
+ }
+
+-static int yaffs_CountChunkBits(yaffs_Device *dev, int blk)
+-{
+- __u8 *blkBits = yaffs_BlockBits(dev, blk);
+- int i;
+- int n = 0;
+- for (i = 0; i < dev->chunkBitmapStride; i++) {
+- __u8 x = *blkBits;
+- while (x) {
+- if (x & 1)
+- n++;
+- x >>= 1;
+- }
+-
+- blkBits++;
+- }
+- return n;
+-}
+
+ /*
+- * Verification code
++ * Erased NAND checking functions
+ */
+
+-static int yaffs_SkipVerification(yaffs_Device *dev)
+-{
+- return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
+-}
+-
+-static int yaffs_SkipFullVerification(yaffs_Device *dev)
+-{
+- return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_FULL));
+-}
+-
+-static int yaffs_SkipNANDVerification(yaffs_Device *dev)
++int yaffs_check_ff(__u8 *buffer, int n_bytes)
+ {
+- return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_NAND));
++ /* Horrible, slow implementation */
++ while (n_bytes--) {
++ if (*buffer != 0xFF)
++ return 0;
++ buffer++;
++ }
++ return 1;
+ }
+
+-static const char *blockStateName[] = {
+-"Unknown",
+-"Needs scanning",
+-"Scanning",
+-"Empty",
+-"Allocating",
+-"Full",
+-"Dirty",
+-"Checkpoint",
+-"Collecting",
+-"Dead"
+-};
+-
+-static void yaffs_VerifyBlock(yaffs_Device *dev, yaffs_BlockInfo *bi, int n)
++static int yaffs_check_chunk_erased(struct yaffs_dev_s *dev,
++ int nand_chunk)
+ {
+- int actuallyUsed;
+- int inUse;
++ int retval = YAFFS_OK;
++ __u8 *data = yaffs_get_temp_buffer(dev, __LINE__);
++ yaffs_ext_tags tags;
++ int result;
+
+- if (yaffs_SkipVerification(dev))
+- return;
++ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
+
+- /* Report illegal runtime states */
+- if (bi->blockState >= YAFFS_NUMBER_OF_BLOCK_STATES)
+- T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has undefined state %d"TENDSTR), n, bi->blockState));
++ if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
++ retval = YAFFS_FAIL;
+
+- switch (bi->blockState) {
+- case YAFFS_BLOCK_STATE_UNKNOWN:
+- case YAFFS_BLOCK_STATE_SCANNING:
+- case YAFFS_BLOCK_STATE_NEEDS_SCANNING:
+- T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has bad run-state %s"TENDSTR),
+- n, blockStateName[bi->blockState]));
++ if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) || tags.chunk_used) {
++ T(YAFFS_TRACE_NANDACCESS,
++ (TSTR("Chunk %d not erased" TENDSTR), nand_chunk));
++ retval = YAFFS_FAIL;
+ }
+
+- /* Check pages in use and soft deletions are legal */
+-
+- actuallyUsed = bi->pagesInUse - bi->softDeletions;
+-
+- if (bi->pagesInUse < 0 || bi->pagesInUse > dev->nChunksPerBlock ||
+- bi->softDeletions < 0 || bi->softDeletions > dev->nChunksPerBlock ||
+- actuallyUsed < 0 || actuallyUsed > dev->nChunksPerBlock)
+- T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has illegal values pagesInUsed %d softDeletions %d"TENDSTR),
+- n, bi->pagesInUse, bi->softDeletions));
+-
++ yaffs_release_temp_buffer(dev, data, __LINE__);
+
+- /* Check chunk bitmap legal */
+- inUse = yaffs_CountChunkBits(dev, n);
+- if (inUse != bi->pagesInUse)
+- T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has inconsistent values pagesInUse %d counted chunk bits %d"TENDSTR),
+- n, bi->pagesInUse, inUse));
++ return retval;
+
+- /* Check that the sequence number is valid.
+- * Ten million is legal, but is very unlikely
+- */
+- if (dev->isYaffs2 &&
+- (bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING || bi->blockState == YAFFS_BLOCK_STATE_FULL) &&
+- (bi->sequenceNumber < YAFFS_LOWEST_SEQUENCE_NUMBER || bi->sequenceNumber > 10000000))
+- T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has suspect sequence number of %d"TENDSTR),
+- n, bi->sequenceNumber));
+ }
+
+-static void yaffs_VerifyCollectedBlock(yaffs_Device *dev, yaffs_BlockInfo *bi,
+- int n)
++
++static int yaffs_verify_chunk_written(yaffs_dev_t *dev,
++ int nand_chunk,
++ const __u8 *data,
++ yaffs_ext_tags *tags)
+ {
+- yaffs_VerifyBlock(dev, bi, n);
++ int retval = YAFFS_OK;
++ yaffs_ext_tags tempTags;
++ __u8 *buffer = yaffs_get_temp_buffer(dev,__LINE__);
++ int result;
++
++ result = yaffs_rd_chunk_tags_nand(dev,nand_chunk,buffer,&tempTags);
++ if(memcmp(buffer,data,dev->data_bytes_per_chunk) ||
++ tempTags.obj_id != tags->obj_id ||
++ tempTags.chunk_id != tags->chunk_id ||
++ tempTags.n_bytes != tags->n_bytes)
++ retval = YAFFS_FAIL;
+
+- /* After collection the block should be in the erased state */
+- /* This will need to change if we do partial gc */
++ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+- if (bi->blockState != YAFFS_BLOCK_STATE_COLLECTING &&
+- bi->blockState != YAFFS_BLOCK_STATE_EMPTY) {
+- T(YAFFS_TRACE_ERROR, (TSTR("Block %d is in state %d after gc, should be erased"TENDSTR),
+- n, bi->blockState));
+- }
++ return retval;
+ }
+
+-static void yaffs_VerifyBlocks(yaffs_Device *dev)
++static int yaffs_write_new_chunk(struct yaffs_dev_s *dev,
++ const __u8 *data,
++ yaffs_ext_tags *tags,
++ int useReserve)
+ {
+- int i;
+- int nBlocksPerState[YAFFS_NUMBER_OF_BLOCK_STATES];
+- int nIllegalBlockStates = 0;
+-
+- if (yaffs_SkipVerification(dev))
+- return;
+-
+- memset(nBlocksPerState, 0, sizeof(nBlocksPerState));
+-
+- for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i);
+- yaffs_VerifyBlock(dev, bi, i);
+-
+- if (bi->blockState < YAFFS_NUMBER_OF_BLOCK_STATES)
+- nBlocksPerState[bi->blockState]++;
+- else
+- nIllegalBlockStates++;
+- }
++ int attempts = 0;
++ int writeOk = 0;
++ int chunk;
+
+- T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR)));
+- T(YAFFS_TRACE_VERIFY, (TSTR("Block summary"TENDSTR)));
++ yaffs2_checkpt_invalidate(dev);
+
+- T(YAFFS_TRACE_VERIFY, (TSTR("%d blocks have illegal states"TENDSTR), nIllegalBlockStates));
+- if (nBlocksPerState[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
+- T(YAFFS_TRACE_VERIFY, (TSTR("Too many allocating blocks"TENDSTR)));
++ do {
++ yaffs_block_info_t *bi = 0;
++ int erasedOk = 0;
+
+- for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("%s %d blocks"TENDSTR),
+- blockStateName[i], nBlocksPerState[i]));
++ chunk = yaffs_alloc_chunk(dev, useReserve, &bi);
++ if (chunk < 0) {
++ /* no space */
++ break;
++ }
+
+- if (dev->blocksInCheckpoint != nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT])
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Checkpoint block count wrong dev %d count %d"TENDSTR),
+- dev->blocksInCheckpoint, nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT]));
++ /* First check this chunk is erased, if it needs
++ * checking. The checking policy (unless forced
++ * always on) is as follows:
++ *
++ * Check the first page we try to write in a block.
++ * If the check passes then we don't need to check any
++ * more. If the check fails, we check again...
++ * If the block has been erased, we don't need to check.
++ *
++ * However, if the block has been prioritised for gc,
++ * then we think there might be something odd about
++ * this block and stop using it.
++ *
++ * Rationale: We should only ever see chunks that have
++ * not been erased if there was a partially written
++ * chunk due to power loss. This checking policy should
++ * catch that case with very few checks and thus save a
++ * lot of checks that are most likely not needed.
++ *
++ * Mods to the above
++ * If an erase check fails or the write fails we skip the
++ * rest of the block.
++ */
+
+- if (dev->nErasedBlocks != nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY])
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Erased block count wrong dev %d count %d"TENDSTR),
+- dev->nErasedBlocks, nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY]));
++ /* let's give it a try */
++ attempts++;
+
+- if (nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING] > 1)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Too many collecting blocks %d (max is 1)"TENDSTR),
+- nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING]));
++ if(dev->param.always_check_erased)
++ bi->skip_erased_check = 0;
+
+- T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR)));
++ if (!bi->skip_erased_check) {
++ erasedOk = yaffs_check_chunk_erased(dev, chunk);
++ if (erasedOk != YAFFS_OK) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("**>> yaffs chunk %d was not erased"
++ TENDSTR), chunk));
+
+-}
++ /* If not erased, delete this one,
++ * skip rest of block and
++ * try another chunk */
++ yaffs_chunk_del(dev,chunk,1,__LINE__);
++ yaffs_skip_rest_of_block(dev);
++ continue;
++ }
++ }
+
+-/*
+- * Verify the object header. oh must be valid, but obj and tags may be NULL in which
+- * case those tests will not be performed.
+- */
+-static void yaffs_VerifyObjectHeader(yaffs_Object *obj, yaffs_ObjectHeader *oh, yaffs_ExtendedTags *tags, int parentCheck)
+-{
+- if (obj && yaffs_SkipVerification(obj->myDev))
+- return;
++ writeOk = yaffs_wr_chunk_tags_nand(dev, chunk,
++ data, tags);
+
+- if (!(tags && obj && oh)) {
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Verifying object header tags %x obj %x oh %x"TENDSTR),
+- (__u32)tags, (__u32)obj, (__u32)oh));
+- return;
+- }
++ if(!bi->skip_erased_check)
++ writeOk = yaffs_verify_chunk_written(dev, chunk, data, tags);
+
+- if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
+- oh->type > YAFFS_OBJECT_TYPE_MAX)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header type is illegal value 0x%x"TENDSTR),
+- tags->objectId, oh->type));
++ if (writeOk != YAFFS_OK) {
++ /* Clean up aborted write, skip to next block and
++ * try another chunk */
++ yaffs_handle_chunk_wr_error(dev, chunk, erasedOk);
++ continue;
++ }
+
+- if (tags->objectId != obj->objectId)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header mismatch objectId %d"TENDSTR),
+- tags->objectId, obj->objectId));
++ bi->skip_erased_check = 1;
+
++ /* Copy the data into the robustification buffer */
++ yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
+
+- /*
+- * Check that the object's parent ids match if parentCheck requested.
+- *
+- * Tests do not apply to the root object.
+- */
++ } while (writeOk != YAFFS_OK &&
++ (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
+
+- if (parentCheck && tags->objectId > 1 && !obj->parent)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header mismatch parentId %d obj->parent is NULL"TENDSTR),
+- tags->objectId, oh->parentObjectId));
++ if (!writeOk)
++ chunk = -1;
+
+- if (parentCheck && obj->parent &&
+- oh->parentObjectId != obj->parent->objectId &&
+- (oh->parentObjectId != YAFFS_OBJECTID_UNLINKED ||
+- obj->parent->objectId != YAFFS_OBJECTID_DELETED))
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header mismatch parentId %d parentObjectId %d"TENDSTR),
+- tags->objectId, oh->parentObjectId, obj->parent->objectId));
++ if (attempts > 1) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("**>> yaffs write required %d attempts" TENDSTR),
++ attempts));
+
+- if (tags->objectId > 1 && oh->name[0] == 0) /* Null name */
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header name is NULL"TENDSTR),
+- obj->objectId));
++ dev->n_retired_writes += (attempts - 1);
++ }
+
+- if (tags->objectId > 1 && ((__u8)(oh->name[0])) == 0xff) /* Trashed name */
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d header name is 0xFF"TENDSTR),
+- obj->objectId));
++ return chunk;
+ }
+
+
++
++/*
++ * Block retiring for handling a broken block.
++ */
+
+-static int yaffs_VerifyTnodeWorker(yaffs_Object *obj, yaffs_Tnode *tn,
+- __u32 level, int chunkOffset)
++static void yaffs_retire_block(yaffs_dev_t *dev, int flash_block)
+ {
+- int i;
+- yaffs_Device *dev = obj->myDev;
+- int ok = 1;
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, flash_block);
+
+- if (tn) {
+- if (level > 0) {
++ yaffs2_checkpt_invalidate(dev);
++
++ yaffs2_clear_oldest_dirty_seq(dev,bi);
+
+- for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+- if (tn->internal[i]) {
+- ok = yaffs_VerifyTnodeWorker(obj,
+- tn->internal[i],
+- level - 1,
+- (chunkOffset<<YAFFS_TNODES_INTERNAL_BITS) + i);
+- }
+- }
+- } else if (level == 0) {
+- yaffs_ExtendedTags tags;
+- __u32 objectId = obj->objectId;
++ if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
++ if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR(
++ "yaffs: Failed to mark bad and erase block %d"
++ TENDSTR), flash_block));
++ } else {
++ yaffs_ext_tags tags;
++ int chunk_id = flash_block * dev->param.chunks_per_block;
+
+- chunkOffset <<= YAFFS_TNODES_LEVEL0_BITS;
++ __u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+- for (i = 0; i < YAFFS_NTNODES_LEVEL0; i++) {
+- __u32 theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
++ memset(buffer, 0xff, dev->data_bytes_per_chunk);
++ yaffs_init_tags(&tags);
++ tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
++ if (dev->param.write_chunk_tags_fn(dev, chunk_id -
++ dev->chunk_offset, buffer, &tags) != YAFFS_OK)
++ T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Failed to "
++ TCONT("write bad block marker to block %d")
++ TENDSTR), flash_block));
+
+- if (theChunk > 0) {
+- /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),tags.objectId,tags.chunkId,theChunk)); */
+- yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL, &tags);
+- if (tags.objectId != objectId || tags.chunkId != chunkOffset) {
+- T(~0, (TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
+- objectId, chunkOffset, theChunk,
+- tags.objectId, tags.chunkId));
+- }
+- }
+- chunkOffset++;
+- }
++ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+ }
+ }
+
+- return ok;
++ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
++ bi->gc_prioritise = 0;
++ bi->needs_retiring = 0;
+
++ dev->n_retired_blocks++;
+ }
+
++/*
++ * Functions for robustisizing TODO
++ *
++ */
+
+-static void yaffs_VerifyFile(yaffs_Object *obj)
++static void yaffs_handle_chunk_wr_ok(yaffs_dev_t *dev, int nand_chunk,
++ const __u8 *data,
++ const yaffs_ext_tags *tags)
+ {
+- int requiredTallness;
+- int actualTallness;
+- __u32 lastChunk;
+- __u32 x;
+- __u32 i;
+- yaffs_Device *dev;
+- yaffs_ExtendedTags tags;
+- yaffs_Tnode *tn;
+- __u32 objectId;
++ dev=dev;
++ nand_chunk=nand_chunk;
++ data=data;
++ tags=tags;
++}
+
+- if (!obj)
+- return;
++static void yaffs_handle_chunk_update(yaffs_dev_t *dev, int nand_chunk,
++ const yaffs_ext_tags *tags)
++{
++ dev=dev;
++ nand_chunk=nand_chunk;
++ tags=tags;
++}
+
+- if (yaffs_SkipVerification(obj->myDev))
+- return;
++void yaffs_handle_chunk_error(yaffs_dev_t *dev, yaffs_block_info_t *bi)
++{
++ if (!bi->gc_prioritise) {
++ bi->gc_prioritise = 1;
++ dev->has_pending_prioritised_gc = 1;
++ bi->chunk_error_strikes++;
+
+- dev = obj->myDev;
+- objectId = obj->objectId;
++ if (bi->chunk_error_strikes > 3) {
++			bi->needs_retiring = 1; /* Too many strikes, so retire this */
++ T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Block struck out" TENDSTR)));
+
+- /* Check file size is consistent with tnode depth */
+- lastChunk = obj->variant.fileVariant.fileSize / dev->nDataBytesPerChunk + 1;
+- x = lastChunk >> YAFFS_TNODES_LEVEL0_BITS;
+- requiredTallness = 0;
+- while (x > 0) {
+- x >>= YAFFS_TNODES_INTERNAL_BITS;
+- requiredTallness++;
++ }
+ }
++}
+
+- actualTallness = obj->variant.fileVariant.topLevel;
++static void yaffs_handle_chunk_wr_error(yaffs_dev_t *dev, int nand_chunk,
++ int erasedOk)
++{
++ int flash_block = nand_chunk / dev->param.chunks_per_block;
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, flash_block);
+
+- if (requiredTallness > actualTallness)
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d had tnode tallness %d, needs to be %d"TENDSTR),
+- obj->objectId, actualTallness, requiredTallness));
++ yaffs_handle_chunk_error(dev, bi);
+
++ if (erasedOk) {
++ /* Was an actual write failure, so mark the block for retirement */
++ bi->needs_retiring = 1;
++ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++ (TSTR("**>> Block %d needs retiring" TENDSTR), flash_block));
++ }
+
+- /* Check that the chunks in the tnode tree are all correct.
+- * We do this by scanning through the tnode tree and
+- * checking the tags for every chunk match.
+- */
++ /* Delete the chunk */
++ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
++ yaffs_skip_rest_of_block(dev);
++}
+
+- if (yaffs_SkipNANDVerification(dev))
+- return;
+
+- for (i = 1; i <= lastChunk; i++) {
+- tn = yaffs_FindLevel0Tnode(dev, &obj->variant.fileVariant, i);
++/*---------------- Name handling functions ------------*/
+
+- if (tn) {
+- __u32 theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
+- if (theChunk > 0) {
+- /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),objectId,i,theChunk)); */
+- yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL, &tags);
+- if (tags.objectId != objectId || tags.chunkId != i) {
+- T(~0, (TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
+- objectId, i, theChunk,
+- tags.objectId, tags.chunkId));
+- }
+- }
++static __u16 yaffs_calc_name_sum(const YCHAR *name)
++{
++ __u16 sum = 0;
++ __u16 i = 1;
++
++ const YUCHAR *bname = (const YUCHAR *) name;
++ if (bname) {
++ while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH/2))) {
++
++#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
++ sum += yaffs_toupper(*bname) * i;
++#else
++ sum += (*bname) * i;
++#endif
++ i++;
++ bname++;
+ }
+ }
++ return sum;
+ }
+
+-
+-static void yaffs_VerifyHardLink(yaffs_Object *obj)
++void yaffs_set_obj_name(yaffs_obj_t *obj, const YCHAR *name)
+ {
+- if (obj && yaffs_SkipVerification(obj->myDev))
+- return;
+-
+- /* Verify sane equivalent object */
++#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
++ memset(obj->short_name, 0, sizeof(YCHAR) * (YAFFS_SHORT_NAME_LENGTH+1));
++ if (name && yaffs_strnlen(name,YAFFS_SHORT_NAME_LENGTH+1) <= YAFFS_SHORT_NAME_LENGTH)
++ yaffs_strcpy(obj->short_name, name);
++ else
++ obj->short_name[0] = _Y('\0');
++#endif
++ obj->sum = yaffs_calc_name_sum(name);
+ }
+
+-static void yaffs_VerifySymlink(yaffs_Object *obj)
++void yaffs_set_obj_name_from_oh(yaffs_obj_t *obj, const yaffs_obj_header *oh)
+ {
+- if (obj && yaffs_SkipVerification(obj->myDev))
+- return;
+-
+- /* Verify symlink string */
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++ YCHAR tmpName[YAFFS_MAX_NAME_LENGTH+1];
++ memset(tmpName,0,sizeof(tmpName));
++ yaffs_load_name_from_oh(obj->my_dev,tmpName,oh->name,YAFFS_MAX_NAME_LENGTH+1);
++ yaffs_set_obj_name(obj,tmpName);
++#else
++ yaffs_set_obj_name(obj,oh->name);
++#endif
+ }
+
+-static void yaffs_VerifySpecial(yaffs_Object *obj)
+-{
+- if (obj && yaffs_SkipVerification(obj->myDev))
+- return;
+-}
+-
+-static void yaffs_VerifyObject(yaffs_Object *obj)
+-{
+- yaffs_Device *dev;
+-
+- __u32 chunkMin;
+- __u32 chunkMax;
+-
+- __u32 chunkIdOk;
+- __u32 chunkInRange;
+- __u32 chunkShouldNotBeDeleted;
+- __u32 chunkValid;
+-
+- if (!obj)
+- return;
+-
+- if (obj->beingCreated)
+- return;
++/*-------------------- TNODES -------------------
+
+- dev = obj->myDev;
++ * List of spare tnodes
++ * The list is hooked together using the first pointer
++ * in the tnode.
++ */
+
+- if (yaffs_SkipVerification(dev))
+- return;
+
+- /* Check sane object header chunk */
++yaffs_tnode_t *yaffs_get_tnode(yaffs_dev_t *dev)
++{
++ yaffs_tnode_t *tn = yaffs_alloc_raw_tnode(dev);
++ if (tn){
++ memset(tn, 0, dev->tnode_size);
++ dev->n_tnodes++;
++ }
+
+- chunkMin = dev->internalStartBlock * dev->nChunksPerBlock;
+- chunkMax = (dev->internalEndBlock+1) * dev->nChunksPerBlock - 1;
++ dev->checkpoint_blocks_required = 0; /* force recalculation*/
+
+- chunkInRange = (((unsigned)(obj->hdrChunk)) >= chunkMin && ((unsigned)(obj->hdrChunk)) <= chunkMax);
+- chunkIdOk = chunkInRange || obj->hdrChunk == 0;
+- chunkValid = chunkInRange &&
+- yaffs_CheckChunkBit(dev,
+- obj->hdrChunk / dev->nChunksPerBlock,
+- obj->hdrChunk % dev->nChunksPerBlock);
+- chunkShouldNotBeDeleted = chunkInRange && !chunkValid;
++ return tn;
++}
+
+- if (!obj->fake &&
+- (!chunkIdOk || chunkShouldNotBeDeleted)) {
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d has chunkId %d %s %s"TENDSTR),
+- obj->objectId, obj->hdrChunk,
+- chunkIdOk ? "" : ",out of range",
+- chunkShouldNotBeDeleted ? ",marked as deleted" : ""));
+- }
++/* FreeTnode frees up a tnode and puts it back on the free list */
++static void yaffs_free_tnode(yaffs_dev_t *dev, yaffs_tnode_t *tn)
++{
++ yaffs_free_raw_tnode(dev,tn);
++ dev->n_tnodes--;
++ dev->checkpoint_blocks_required = 0; /* force recalculation*/
++}
+
+- if (chunkValid && !yaffs_SkipNANDVerification(dev)) {
+- yaffs_ExtendedTags tags;
+- yaffs_ObjectHeader *oh;
+- __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
++static void yaffs_deinit_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ yaffs_deinit_raw_tnodes_and_objs(dev);
++ dev->n_obj = 0;
++ dev->n_tnodes = 0;
++}
+
+- oh = (yaffs_ObjectHeader *)buffer;
+
+- yaffs_ReadChunkWithTagsFromNAND(dev, obj->hdrChunk, buffer,
+- &tags);
++void yaffs_load_tnode_0(yaffs_dev_t *dev, yaffs_tnode_t *tn, unsigned pos,
++ unsigned val)
++{
++ __u32 *map = (__u32 *)tn;
++ __u32 bitInMap;
++ __u32 bitInWord;
++ __u32 wordInMap;
++ __u32 mask;
+
+- yaffs_VerifyObjectHeader(obj, oh, &tags, 1);
++ pos &= YAFFS_TNODES_LEVEL0_MASK;
++ val >>= dev->chunk_grp_bits;
+
+- yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
+- }
++ bitInMap = pos * dev->tnode_width;
++ wordInMap = bitInMap / 32;
++ bitInWord = bitInMap & (32 - 1);
+
+- /* Verify it has a parent */
+- if (obj && !obj->fake &&
+- (!obj->parent || obj->parent->myDev != dev)) {
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d has parent pointer %p which does not look like an object"TENDSTR),
+- obj->objectId, obj->parent));
+- }
++ mask = dev->tnode_mask << bitInWord;
+
+- /* Verify parent is a directory */
+- if (obj->parent && obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d's parent is not a directory (type %d)"TENDSTR),
+- obj->objectId, obj->parent->variantType));
+- }
++ map[wordInMap] &= ~mask;
++ map[wordInMap] |= (mask & (val << bitInWord));
+
+- switch (obj->variantType) {
+- case YAFFS_OBJECT_TYPE_FILE:
+- yaffs_VerifyFile(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- yaffs_VerifySymlink(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- yaffs_VerifyDirectory(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- yaffs_VerifyHardLink(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- yaffs_VerifySpecial(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- default:
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Obj %d has illegaltype %d"TENDSTR),
+- obj->objectId, obj->variantType));
+- break;
++ if (dev->tnode_width > (32 - bitInWord)) {
++ bitInWord = (32 - bitInWord);
++ wordInMap++;;
++ mask = dev->tnode_mask >> (/*dev->tnode_width -*/ bitInWord);
++ map[wordInMap] &= ~mask;
++ map[wordInMap] |= (mask & (val >> bitInWord));
+ }
+ }
+
+-static void yaffs_VerifyObjects(yaffs_Device *dev)
++__u32 yaffs_get_group_base(yaffs_dev_t *dev, yaffs_tnode_t *tn,
++ unsigned pos)
+ {
+- yaffs_Object *obj;
+- int i;
+- struct ylist_head *lh;
++ __u32 *map = (__u32 *)tn;
++ __u32 bitInMap;
++ __u32 bitInWord;
++ __u32 wordInMap;
++ __u32 val;
+
+- if (yaffs_SkipVerification(dev))
+- return;
++ pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+- /* Iterate through the objects in each hash entry */
++ bitInMap = pos * dev->tnode_width;
++ wordInMap = bitInMap / 32;
++ bitInWord = bitInMap & (32 - 1);
+
+- for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+- ylist_for_each(lh, &dev->objectBucket[i].list) {
+- if (lh) {
+- obj = ylist_entry(lh, yaffs_Object, hashLink);
+- yaffs_VerifyObject(obj);
+- }
+- }
+- }
+-}
++ val = map[wordInMap] >> bitInWord;
+
++ if (dev->tnode_width > (32 - bitInWord)) {
++ bitInWord = (32 - bitInWord);
++ wordInMap++;;
++ val |= (map[wordInMap] << bitInWord);
++ }
+
+-/*
+- * Simple hash function. Needs to have a reasonable spread
+- */
++ val &= dev->tnode_mask;
++ val <<= dev->chunk_grp_bits;
+
+-static Y_INLINE int yaffs_HashFunction(int n)
+-{
+- n = abs(n);
+- return n % YAFFS_NOBJECT_BUCKETS;
++ return val;
+ }
+
+-/*
+- * Access functions to useful fake objects.
+- * Note that root might have a presence in NAND if permissions are set.
++/* ------------------- End of individual tnode manipulation -----------------*/
++
++/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
++ * The look up tree is represented by the top tnode and the number of top_level
++ * in the tree. 0 means only the level 0 tnode is in the tree.
+ */
+
+-yaffs_Object *yaffs_Root(yaffs_Device *dev)
++/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
++yaffs_tnode_t *yaffs_find_tnode_0(yaffs_dev_t *dev,
++ yaffs_file_s *file_struct,
++ __u32 chunk_id)
+ {
+- return dev->rootDir;
+-}
++ yaffs_tnode_t *tn = file_struct->top;
++ __u32 i;
++ int requiredTallness;
++ int level = file_struct->top_level;
+
+-yaffs_Object *yaffs_LostNFound(yaffs_Device *dev)
+-{
+- return dev->lostNFoundDir;
+-}
++ dev=dev;
+
++ /* Check sane level and chunk Id */
++ if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
++ return NULL;
+
+-/*
+- * Erased NAND checking functions
+- */
++ if (chunk_id > YAFFS_MAX_CHUNK_ID)
++ return NULL;
+
+-int yaffs_CheckFF(__u8 *buffer, int nBytes)
+-{
+- /* Horrible, slow implementation */
+- while (nBytes--) {
+- if (*buffer != 0xFF)
+- return 0;
+- buffer++;
++ /* First check we're tall enough (ie enough top_level) */
++
++ i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
++ requiredTallness = 0;
++ while (i) {
++ i >>= YAFFS_TNODES_INTERNAL_BITS;
++ requiredTallness++;
+ }
+- return 1;
+-}
+
+-static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND)
+-{
+- int retval = YAFFS_OK;
+- __u8 *data = yaffs_GetTempBuffer(dev, __LINE__);
+- yaffs_ExtendedTags tags;
+- int result;
++ if (requiredTallness > file_struct->top_level)
++ return NULL; /* Not tall enough, so we can't find it */
++
++ /* Traverse down to level 0 */
++ while (level > 0 && tn) {
++ tn = tn->internal[(chunk_id >>
++ (YAFFS_TNODES_LEVEL0_BITS +
++ (level - 1) *
++ YAFFS_TNODES_INTERNAL_BITS)) &
++ YAFFS_TNODES_INTERNAL_MASK];
++ level--;
++ }
+
+- result = yaffs_ReadChunkWithTagsFromNAND(dev, chunkInNAND, data, &tags);
++ return tn;
++}
+
+- if (tags.eccResult > YAFFS_ECC_RESULT_NO_ERROR)
+- retval = YAFFS_FAIL;
++/* AddOrFindLevel0Tnode finds the level 0 tnode if it exists, otherwise first expands the tree.
++ * This happens in two steps:
++ * 1. If the tree isn't tall enough, then make it taller.
++ * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
++ *
++ * Used when modifying the tree.
++ *
++ * If the tn argument is NULL, then a fresh tnode will be added otherwise the specified tn will
++ * be plugged into the tree.
++ */
+
+- if (!yaffs_CheckFF(data, dev->nDataBytesPerChunk) || tags.chunkUsed) {
+- T(YAFFS_TRACE_NANDACCESS,
+- (TSTR("Chunk %d not erased" TENDSTR), chunkInNAND));
+- retval = YAFFS_FAIL;
+- }
++yaffs_tnode_t *yaffs_add_find_tnode_0(yaffs_dev_t *dev,
++ yaffs_file_s *file_struct,
++ __u32 chunk_id,
++ yaffs_tnode_t *passed_tn)
++{
++ int requiredTallness;
++ int i;
++ int l;
++ yaffs_tnode_t *tn;
+
+- yaffs_ReleaseTempBuffer(dev, data, __LINE__);
++ __u32 x;
+
+- return retval;
+
+-}
++ /* Check sane level and page Id */
++ if (file_struct->top_level < 0 || file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
++ return NULL;
+
+-static int yaffs_WriteNewChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev,
+- const __u8 *data,
+- yaffs_ExtendedTags *tags,
+- int useReserve)
+-{
+- int attempts = 0;
+- int writeOk = 0;
+- int chunk;
++ if (chunk_id > YAFFS_MAX_CHUNK_ID)
++ return NULL;
+
+- yaffs_InvalidateCheckpoint(dev);
++ /* First check we're tall enough (ie enough top_level) */
+
+- do {
+- yaffs_BlockInfo *bi = 0;
+- int erasedOk = 0;
++ x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
++ requiredTallness = 0;
++ while (x) {
++ x >>= YAFFS_TNODES_INTERNAL_BITS;
++ requiredTallness++;
++ }
+
+- chunk = yaffs_AllocateChunk(dev, useReserve, &bi);
+- if (chunk < 0) {
+- /* no space */
+- break;
+- }
+
+- /* First check this chunk is erased, if it needs
+- * checking. The checking policy (unless forced
+- * always on) is as follows:
+- *
+- * Check the first page we try to write in a block.
+- * If the check passes then we don't need to check any
+- * more. If the check fails, we check again...
+- * If the block has been erased, we don't need to check.
+- *
+- * However, if the block has been prioritised for gc,
+- * then we think there might be something odd about
+- * this block and stop using it.
+- *
+- * Rationale: We should only ever see chunks that have
+- * not been erased if there was a partially written
+- * chunk due to power loss. This checking policy should
+- * catch that case with very few checks and thus save a
+- * lot of checks that are most likely not needed.
+- */
+- if (bi->gcPrioritise) {
+- yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
+- /* try another chunk */
+- continue;
+- }
++ if (requiredTallness > file_struct->top_level) {
++ /* Not tall enough, gotta make the tree taller */
++ for (i = file_struct->top_level; i < requiredTallness; i++) {
+
+- /* let's give it a try */
+- attempts++;
++ tn = yaffs_get_tnode(dev);
+
+-#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+- bi->skipErasedCheck = 0;
+-#endif
+- if (!bi->skipErasedCheck) {
+- erasedOk = yaffs_CheckChunkErased(dev, chunk);
+- if (erasedOk != YAFFS_OK) {
++ if (tn) {
++ tn->internal[0] = file_struct->top;
++ file_struct->top = tn;
++ file_struct->top_level++;
++ } else {
+ T(YAFFS_TRACE_ERROR,
+- (TSTR("**>> yaffs chunk %d was not erased"
+- TENDSTR), chunk));
+-
+- /* try another chunk */
+- continue;
++ (TSTR("yaffs: no more tnodes" TENDSTR)));
++ return NULL;
+ }
+- bi->skipErasedCheck = 1;
+ }
++ }
+
+- writeOk = yaffs_WriteChunkWithTagsToNAND(dev, chunk,
+- data, tags);
+- if (writeOk != YAFFS_OK) {
+- yaffs_HandleWriteChunkError(dev, chunk, erasedOk);
+- /* try another chunk */
+- continue;
+- }
++ /* Traverse down to level 0, adding anything we need */
+
+- /* Copy the data into the robustification buffer */
+- yaffs_HandleWriteChunkOk(dev, chunk, data, tags);
++ l = file_struct->top_level;
++ tn = file_struct->top;
+
+- } while (writeOk != YAFFS_OK &&
+- (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
++ if (l > 0) {
++ while (l > 0 && tn) {
++ x = (chunk_id >>
++ (YAFFS_TNODES_LEVEL0_BITS +
++ (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
++ YAFFS_TNODES_INTERNAL_MASK;
+
+- if (!writeOk)
+- chunk = -1;
+
+- if (attempts > 1) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("**>> yaffs write required %d attempts" TENDSTR),
+- attempts));
++ if ((l > 1) && !tn->internal[x]) {
++ /* Add missing non-level-zero tnode */
++ tn->internal[x] = yaffs_get_tnode(dev);
++ if(!tn->internal[x])
++ return NULL;
++ } else if (l == 1) {
++ /* Looking from level 1 at level 0 */
++ if (passed_tn) {
++ /* If we already have one, then release it.*/
++ if (tn->internal[x])
++ yaffs_free_tnode(dev, tn->internal[x]);
++ tn->internal[x] = passed_tn;
++
++ } else if (!tn->internal[x]) {
++ /* Don't have one, none passed in */
++ tn->internal[x] = yaffs_get_tnode(dev);
++ if(!tn->internal[x])
++ return NULL;
++ }
++ }
+
+- dev->nRetriedWrites += (attempts - 1);
++ tn = tn->internal[x];
++ l--;
++ }
++ } else {
++ /* top is level 0 */
++ if (passed_tn) {
++ memcpy(tn, passed_tn, (dev->tnode_width * YAFFS_NTNODES_LEVEL0)/8);
++ yaffs_free_tnode(dev, passed_tn);
++ }
+ }
+
+- return chunk;
++ return tn;
+ }
+
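The tnode indexing used by yaffs_add_find_tnode_0() above works as follows: the low YAFFS_TNODES_LEVEL0_BITS of chunk_id select a slot in the level-0 tnode, and every higher level consumes another YAFFS_TNODES_INTERNAL_BITS, which is where both the required-tallness loop and the per-level index expression come from. The standalone sketch below models that arithmetic in plain C; it is illustrative only (not part of the patch) and assumes the stock constants of 4 level-0 bits and 3 internal bits.

#include <stdio.h>
#include <stdint.h>

/* Illustrative constants matching the usual yaffs defaults (assumption):
 * 16 entries per level-0 tnode, 8 pointers per internal tnode. */
#define LEVEL0_BITS   4
#define INTERNAL_BITS 3

/* Height needed above level 0 (0 means a single level-0 tnode is enough). */
static int required_tallness(uint32_t chunk_id)
{
    uint32_t x = chunk_id >> LEVEL0_BITS;
    int tallness = 0;

    while (x) {
        x >>= INTERNAL_BITS;
        tallness++;
    }
    return tallness;
}

/* Index taken at internal level l (l >= 1) while walking down to level 0. */
static unsigned level_index(uint32_t chunk_id, int l)
{
    return (chunk_id >> (LEVEL0_BITS + (l - 1) * INTERNAL_BITS)) &
           ((1u << INTERNAL_BITS) - 1);
}

int main(void)
{
    uint32_t chunk_id = 1000;
    int top = required_tallness(chunk_id);
    int l;

    printf("chunk %u needs top_level %d\n", (unsigned)chunk_id, top);
    for (l = top; l > 0; l--)
        printf("  level %d -> internal[%u]\n", l, level_index(chunk_id, l));
    printf("  level 0 -> slot %u\n", (unsigned)(chunk_id & ((1u << LEVEL0_BITS) - 1)));
    return 0;
}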
+-/*
+- * Block retiring for handling a broken block.
+- */
+-
+-static void yaffs_RetireBlock(yaffs_Device *dev, int blockInNAND)
++static int yaffs_find_chunk_in_group(yaffs_dev_t *dev, int theChunk,
++ yaffs_ext_tags *tags, int obj_id,
++ int inode_chunk)
+ {
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
++ int j;
+
+- yaffs_InvalidateCheckpoint(dev);
++ for (j = 0; theChunk && j < dev->chunk_grp_size; j++) {
++ if (yaffs_check_chunk_bit(dev, theChunk / dev->param.chunks_per_block,
++ theChunk % dev->param.chunks_per_block)) {
++
++ if(dev->chunk_grp_size == 1)
++ return theChunk;
++ else {
++ yaffs_rd_chunk_tags_nand(dev, theChunk, NULL,
++ tags);
++ if (yaffs_tags_match(tags, obj_id, inode_chunk)) {
++ /* found it; */
++ return theChunk;
++ }
++ }
++ }
++ theChunk++;
++ }
++ return -1;
++}
+
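yaffs_find_chunk_in_group() scans the chunk_grp_size consecutive NAND chunks starting at the recorded group base: chunks whose in-use bit is clear are skipped, and only when a group spans more than one chunk do the tags have to be read back and matched against obj_id/inode_chunk. A minimal self-contained model of that scan (the helpers here are stand-ins, not yaffs calls):

#include <stdint.h>

struct model_tags { int obj_id; int inode_chunk; };

/* Stand-ins for the bitmap and tag reads; driven by plain arrays so the
 * sketch is self-contained. */
static int chunk_in_use(const uint8_t *bitmap, int chunk)
{
    return bitmap[chunk];
}

static void read_tags(const struct model_tags *all, int chunk, struct model_tags *t)
{
    *t = all[chunk];
}

/* Scan a group of grp_size chunks for the one carrying (obj_id, inode_chunk);
 * a group base of 0 means "nothing recorded". Returns -1 if not found. */
static int find_chunk_in_group(const uint8_t *bitmap, const struct model_tags *all,
                               int base, int grp_size, int obj_id, int inode_chunk)
{
    int j;

    for (j = 0; base && j < grp_size; j++, base++) {
        if (!chunk_in_use(bitmap, base))
            continue;
        if (grp_size == 1) {        /* group of one: the bit alone identifies it */
            return base;
        } else {
            struct model_tags t;

            read_tags(all, base, &t);
            if (t.obj_id == obj_id && t.inode_chunk == inode_chunk)
                return base;
        }
    }
    return -1;
}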
+- if (yaffs_MarkBlockBad(dev, blockInNAND) != YAFFS_OK) {
+- if (yaffs_EraseBlockInNAND(dev, blockInNAND) != YAFFS_OK) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR(
+- "yaffs: Failed to mark bad and erase block %d"
+- TENDSTR), blockInNAND));
+- } else {
+- yaffs_ExtendedTags tags;
+- int chunkId = blockInNAND * dev->nChunksPerBlock;
++#if 0
++/* Experimental code not being used yet. Might speed up file deletion */
++/* DeleteWorker scans backwards through the tnode tree and deletes all the
++ * chunks and tnodes in the file.
++ * Returns 1 if the tree was deleted.
++ * Returns 0 if it stopped early due to hitting the limit and the delete is incomplete.
++ */
+
+- __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
++static int yaffs_del_worker(yaffs_obj_t *in, yaffs_tnode_t *tn, __u32 level,
++ int chunk_offset, int *limit)
++{
++ int i;
++ int inode_chunk;
++ int theChunk;
++ yaffs_ext_tags tags;
++ int foundChunk;
++ yaffs_dev_t *dev = in->my_dev;
+
+- memset(buffer, 0xff, dev->nDataBytesPerChunk);
+- yaffs_InitialiseTags(&tags);
+- tags.sequenceNumber = YAFFS_SEQUENCE_BAD_BLOCK;
+- if (dev->writeChunkWithTagsToNAND(dev, chunkId -
+- dev->chunkOffset, buffer, &tags) != YAFFS_OK)
+- T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Failed to "
+- TCONT("write bad block marker to block %d")
+- TENDSTR), blockInNAND));
++ int allDone = 1;
+
+- yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
+- }
+- }
++ if (tn) {
++ if (level > 0) {
++ for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
++ i--) {
++ if (tn->internal[i]) {
++ if (limit && (*limit) < 0) {
++ allDone = 0;
++ } else {
++ allDone =
++ yaffs_del_worker(in,
++ tn->
++ internal
++ [i],
++ level -
++ 1,
++ (chunk_offset
++ <<
++ YAFFS_TNODES_INTERNAL_BITS)
++ + i,
++ limit);
++ }
++ if (allDone) {
++ yaffs_free_tnode(dev,
++ tn->
++ internal[i]);
++ tn->internal[i] = NULL;
++ }
++ }
++ }
++ return (allDone) ? 1 : 0;
++ } else if (level == 0) {
++ int hitLimit = 0;
+
+- bi->blockState = YAFFS_BLOCK_STATE_DEAD;
+- bi->gcPrioritise = 0;
+- bi->needsRetiring = 0;
++ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0 && !hitLimit;
++ i--) {
++ theChunk = yaffs_get_group_base(dev, tn, i);
++ if (theChunk) {
+
+- dev->nRetiredBlocks++;
+-}
++ inode_chunk = (chunk_offset <<
++ YAFFS_TNODES_LEVEL0_BITS) + i;
+
+-/*
+- * Functions for robustisizing TODO
+- *
+- */
++ foundChunk =
++ yaffs_find_chunk_in_group(dev,
++ theChunk,
++ &tags,
++ in->obj_id,
++ inode_chunk);
+
+-static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
+- const __u8 *data,
+- const yaffs_ExtendedTags *tags)
+-{
+-}
++ if (foundChunk > 0) {
++ yaffs_chunk_del(dev,
++ foundChunk, 1,
++ __LINE__);
++ in->n_data_chunks--;
++ if (limit) {
++ *limit = *limit - 1;
++ if (*limit <= 0)
++ hitLimit = 1;
++ }
+
+-static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
+- const yaffs_ExtendedTags *tags)
+-{
+-}
++ }
+
+-void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi)
+-{
+- if (!bi->gcPrioritise) {
+- bi->gcPrioritise = 1;
+- dev->hasPendingPrioritisedGCs = 1;
+- bi->chunkErrorStrikes++;
++ yaffs_load_tnode_0(dev, tn, i, 0);
++ }
+
+- if (bi->chunkErrorStrikes > 3) {
+-			bi->needsRetiring = 1; /* Too many strikes, so retire this */
+- T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Block struck out" TENDSTR)));
++ }
++ return (i < 0) ? 1 : 0;
+
+ }
++
+ }
++
++ return 1;
++
+ }
+
+-static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND,
+- int erasedOk)
++#endif
++
++static void yaffs_soft_del_chunk(yaffs_dev_t *dev, int chunk)
+ {
+- int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
++ yaffs_block_info_t *theBlock;
++ unsigned block_no;
+
+- yaffs_HandleChunkError(dev, bi);
++ T(YAFFS_TRACE_DELETION, (TSTR("soft delete chunk %d" TENDSTR), chunk));
+
+- if (erasedOk) {
+- /* Was an actual write failure, so mark the block for retirement */
+- bi->needsRetiring = 1;
+- T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+- (TSTR("**>> Block %d needs retiring" TENDSTR), blockInNAND));
++ block_no = chunk / dev->param.chunks_per_block;
++ theBlock = yaffs_get_block_info(dev, block_no);
++ if (theBlock) {
++ theBlock->soft_del_pages++;
++ dev->n_free_chunks++;
++ yaffs2_update_oldest_dirty_seq(dev, block_no, theBlock);
+ }
+-
+- /* Delete the chunk */
+- yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
+ }
+
++/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all the chunks in the file.
++ * All soft deleting does is increment the block's softdelete count and pulls the chunk out
++ * of the tnode.
++ * Thus, essentially this is the same as DeleteWorker except that the chunks are soft deleted.
++ */
+
+-/*---------------- Name handling functions ------------*/
+-
+-static __u16 yaffs_CalcNameSum(const YCHAR *name)
++static int yaffs_soft_del_worker(yaffs_obj_t *in, yaffs_tnode_t *tn,
++ __u32 level, int chunk_offset)
+ {
+- __u16 sum = 0;
+- __u16 i = 1;
++ int i;
++ int theChunk;
++ int allDone = 1;
++ yaffs_dev_t *dev = in->my_dev;
+
+- const YUCHAR *bname = (const YUCHAR *) name;
+- if (bname) {
+- while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH/2))) {
++ if (tn) {
++ if (level > 0) {
++
++ for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
++ i--) {
++ if (tn->internal[i]) {
++ allDone =
++ yaffs_soft_del_worker(in,
++ tn->
++ internal[i],
++ level - 1,
++ (chunk_offset
++ <<
++ YAFFS_TNODES_INTERNAL_BITS)
++ + i);
++ if (allDone) {
++ yaffs_free_tnode(dev,
++ tn->
++ internal[i]);
++ tn->internal[i] = NULL;
++ } else {
++ /* Hoosterman... how could this happen? */
++ }
++ }
++ }
++ return (allDone) ? 1 : 0;
++ } else if (level == 0) {
++
++ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
++ theChunk = yaffs_get_group_base(dev, tn, i);
++ if (theChunk) {
++ /* Note this does not find the real chunk, only the chunk group.
++ * We make an assumption that a chunk group is not larger than
++ * a block.
++ */
++ yaffs_soft_del_chunk(dev, theChunk);
++ yaffs_load_tnode_0(dev, tn, i, 0);
++ }
++
++ }
++ return 1;
+
+-#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+- sum += yaffs_toupper(*bname) * i;
+-#else
+- sum += (*bname) * i;
+-#endif
+- i++;
+- bname++;
+ }
++
+ }
+- return sum;
++
++ return 1;
++
+ }
+
+-static void yaffs_SetObjectName(yaffs_Object *obj, const YCHAR *name)
++static void yaffs_soft_del_file(yaffs_obj_t *obj)
+ {
+-#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+- memset(obj->shortName, 0, sizeof(YCHAR) * (YAFFS_SHORT_NAME_LENGTH+1));
+- if (name && yaffs_strlen(name) <= YAFFS_SHORT_NAME_LENGTH)
+- yaffs_strcpy(obj->shortName, name);
+- else
+- obj->shortName[0] = _Y('\0');
+-#endif
+- obj->sum = yaffs_CalcNameSum(name);
++ if (obj->deleted &&
++ obj->variant_type == YAFFS_OBJECT_TYPE_FILE && !obj->soft_del) {
++ if (obj->n_data_chunks <= 0) {
++ /* Empty file with no duplicate object headers, just delete it immediately */
++ yaffs_free_tnode(obj->my_dev,
++ obj->variant.file_variant.top);
++ obj->variant.file_variant.top = NULL;
++ T(YAFFS_TRACE_TRACING,
++ (TSTR("yaffs: Deleting empty file %d" TENDSTR),
++ obj->obj_id));
++ yaffs_generic_obj_del(obj);
++ } else {
++ yaffs_soft_del_worker(obj,
++ obj->variant.file_variant.top,
++ obj->variant.file_variant.
++ top_level, 0);
++ obj->soft_del = 1;
++ }
++ }
+ }
+
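The soft-delete path above has two cases: a deleted file with no data chunks is released immediately, otherwise yaffs_soft_del_worker() walks the tree, clears each level-0 entry and bumps soft_del_pages on the owning block, leaving the real space to be reclaimed later by garbage collection. The toy model below shows the level-0 pass over a flat array (assumed layout, not the packed yaffs tnode format):

#include <stdio.h>

#define CHUNKS_PER_BLOCK 64

struct model_block { int soft_del_pages; };

/* Soft-delete one chunk: bump the owning block's counter, count it as free. */
static void soft_del_chunk(struct model_block *blocks, int *n_free, int chunk)
{
    blocks[chunk / CHUNKS_PER_BLOCK].soft_del_pages++;
    (*n_free)++;
}

/* Level-0 pass: pull every recorded chunk out of the "tnode" and soft-delete it. */
static void soft_del_level0(struct model_block *blocks, int *n_free,
                            int *level0, int n_entries)
{
    int i;

    for (i = n_entries - 1; i >= 0; i--) {
        if (level0[i]) {
            soft_del_chunk(blocks, n_free, level0[i]);
            level0[i] = 0;          /* like yaffs_load_tnode_0(dev, tn, i, 0) */
        }
    }
}

int main(void)
{
    struct model_block blocks[4] = { { 0 } };
    int level0[16] = { 5, 70, 0, 130 };   /* chunk numbers; 0 == empty slot */
    int n_free = 0;

    soft_del_level0(blocks, &n_free, level0, 16);
    printf("freed %d chunks, block0 soft_del=%d\n", n_free, blocks[0].soft_del_pages);
    return 0;
}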
+-/*-------------------- TNODES -------------------
+-
+- * List of spare tnodes
+- * The list is hooked together using the first pointer
+- * in the tnode.
+- */
+-
+-/* yaffs_CreateTnodes creates a bunch more tnodes and
+- * adds them to the tnode free list.
+- * Don't use this function directly
++/* Pruning removes any part of the file structure tree that is beyond the
++ * bounds of the file (ie that does not point to chunks).
++ *
++ * A file should only get pruned when its size is reduced.
++ *
++ * Before pruning, the chunks must be pulled from the tree and the
++ * level 0 tnode entries must be zeroed out.
++ * Could also use this for file deletion, but that's probably better handled
++ * by a special case.
++ *
++ * This function is recursive. For levels > 0 the function is called again on
++ * any sub-tree. For level == 0 we just check if the sub-tree has data.
++ * If there is no data in a subtree then it is pruned.
+ */
+
+-static int yaffs_CreateTnodes(yaffs_Device *dev, int nTnodes)
++static yaffs_tnode_t *yaffs_prune_worker(yaffs_dev_t *dev, yaffs_tnode_t *tn,
++ __u32 level, int del0)
+ {
+ int i;
+- int tnodeSize;
+- yaffs_Tnode *newTnodes;
+- __u8 *mem;
+- yaffs_Tnode *curr;
+- yaffs_Tnode *next;
+- yaffs_TnodeList *tnl;
++ int hasData;
+
+- if (nTnodes < 1)
+- return YAFFS_OK;
++ if (tn) {
++ hasData = 0;
+
+- /* Calculate the tnode size in bytes for variable width tnode support.
+- * Must be a multiple of 32-bits */
+- tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++ if(level > 0){
++ for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
++ if (tn->internal[i]) {
++ tn->internal[i] =
++ yaffs_prune_worker(dev, tn->internal[i],
++ level - 1,
++ (i == 0) ? del0 : 1);
++ }
+
+- if (tnodeSize < sizeof(yaffs_Tnode))
+- tnodeSize = sizeof(yaffs_Tnode);
++ if (tn->internal[i])
++ hasData++;
++ }
++ } else {
++ int tnode_size_u32 = dev->tnode_size/sizeof(__u32);
++ __u32 *map = (__u32 *)tn;
+
+- /* make these things */
++ for(i = 0; !hasData && i < tnode_size_u32; i++){
++ if(map[i])
++ hasData++;
++ }
++ }
+
+- newTnodes = YMALLOC(nTnodes * tnodeSize);
+- mem = (__u8 *)newTnodes;
++ if (hasData == 0 && del0) {
++ /* Free and return NULL */
+
+- if (!newTnodes) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("yaffs: Could not allocate Tnodes" TENDSTR)));
+- return YAFFS_FAIL;
+- }
++ yaffs_free_tnode(dev, tn);
++ tn = NULL;
++ }
+
+- /* Hook them into the free list */
+-#if 0
+- for (i = 0; i < nTnodes - 1; i++) {
+- newTnodes[i].internal[0] = &newTnodes[i + 1];
+-#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+- newTnodes[i].internal[YAFFS_NTNODES_INTERNAL] = (void *)1;
+-#endif
+ }
+
+- newTnodes[nTnodes - 1].internal[0] = dev->freeTnodes;
+-#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+- newTnodes[nTnodes - 1].internal[YAFFS_NTNODES_INTERNAL] = (void *)1;
+-#endif
+- dev->freeTnodes = newTnodes;
+-#else
+- /* New hookup for wide tnodes */
+- for (i = 0; i < nTnodes - 1; i++) {
+- curr = (yaffs_Tnode *) &mem[i * tnodeSize];
+- next = (yaffs_Tnode *) &mem[(i+1) * tnodeSize];
+- curr->internal[0] = next;
+- }
++ return tn;
+
+- curr = (yaffs_Tnode *) &mem[(nTnodes - 1) * tnodeSize];
+- curr->internal[0] = dev->freeTnodes;
+- dev->freeTnodes = (yaffs_Tnode *)mem;
++}
+
+-#endif
++static int yaffs_prune_tree(yaffs_dev_t *dev,
++ yaffs_file_s *file_struct)
++{
++ int i;
++ int hasData;
++ int done = 0;
++ yaffs_tnode_t *tn;
+
++ if (file_struct->top_level > 0) {
++ file_struct->top =
++ yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
++
++ /* Now we have a tree with all the non-zero branches NULL but the height
++ * is the same as it was.
++ * Let's see if we can trim internal tnodes to shorten the tree.
++ * We can do this if only the 0th element in the tnode is in use
++ * (ie all the non-zero are NULL)
++ */
+
+- dev->nFreeTnodes += nTnodes;
+- dev->nTnodesCreated += nTnodes;
++ while (file_struct->top_level && !done) {
++ tn = file_struct->top;
+
+- /* Now add this bunch of tnodes to a list for freeing up.
+- * NB If we can't add this to the management list it isn't fatal
+- * but it just means we can't free this bunch of tnodes later.
+- */
++ hasData = 0;
++ for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
++ if (tn->internal[i])
++ hasData++;
++ }
+
+- tnl = YMALLOC(sizeof(yaffs_TnodeList));
+- if (!tnl) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("yaffs: Could not add tnodes to management list" TENDSTR)));
+- return YAFFS_FAIL;
+- } else {
+- tnl->tnodes = newTnodes;
+- tnl->next = dev->allocatedTnodeList;
+- dev->allocatedTnodeList = tnl;
++ if (!hasData) {
++ file_struct->top = tn->internal[0];
++ file_struct->top_level--;
++ yaffs_free_tnode(dev, tn);
++ } else {
++ done = 1;
++ }
++ }
+ }
+
+- T(YAFFS_TRACE_ALLOCATE, (TSTR("yaffs: Tnodes added" TENDSTR)));
+-
+ return YAFFS_OK;
+ }
+
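Pruning is two steps: yaffs_prune_worker() frees empty sub-trees bottom-up (a level-0 tnode counts as empty when every word of its packed map is zero), then yaffs_prune_tree() keeps lowering top_level while only internal[0] of the top tnode is populated. A sketch of that second, height-trimming step on a simplified node type (hypothetical struct, illustration only):

#include <stdio.h>
#include <stdlib.h>

#define N_INTERNAL 8

struct node { struct node *internal[N_INTERNAL]; };

/* Drop top levels whose only populated slot is internal[0]. */
static struct node *trim_top(struct node *top, int *top_level)
{
    while (*top_level > 0) {
        int has_data = 0;
        int i;
        struct node *old;

        for (i = 1; i < N_INTERNAL; i++)
            if (top->internal[i])
                has_data = 1;
        if (has_data)
            break;
        old = top;
        top = top->internal[0];
        (*top_level)--;
        free(old);
    }
    return top;
}

int main(void)
{
    /* Two-level tree where only internal[0] of the top node is used. */
    struct node *lvl0 = calloc(1, sizeof(*lvl0));
    struct node *top = calloc(1, sizeof(*top));
    int top_level = 1;

    top->internal[0] = lvl0;
    top = trim_top(top, &top_level);
    printf("top_level is now %d\n", top_level);   /* prints 0 */
    free(top);
    return 0;
}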
+-/* GetTnode gets us a clean tnode. Tries to make allocate more if we run out */
++/*-------------------- End of File Structure functions.-------------------*/
++
+
+-static yaffs_Tnode *yaffs_GetTnodeRaw(yaffs_Device *dev)
++/* AllocateEmptyObject gets us a clean Object. Tries to make allocate more if we run out */
++static yaffs_obj_t *yaffs_alloc_empty_obj(yaffs_dev_t *dev)
+ {
+- yaffs_Tnode *tn = NULL;
++ yaffs_obj_t *obj = yaffs_alloc_raw_obj(dev);
+
+- /* If there are none left make more */
+- if (!dev->freeTnodes)
+- yaffs_CreateTnodes(dev, YAFFS_ALLOCATION_NTNODES);
+-
+- if (dev->freeTnodes) {
+- tn = dev->freeTnodes;
+-#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+- if (tn->internal[YAFFS_NTNODES_INTERNAL] != (void *)1) {
+- /* Hoosterman, this thing looks like it isn't in the list */
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR("yaffs: Tnode list bug 1" TENDSTR)));
+- }
+-#endif
+- dev->freeTnodes = dev->freeTnodes->internal[0];
+- dev->nFreeTnodes--;
+- }
++ if (obj) {
++ dev->n_obj++;
+
+- dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
++ /* Now sweeten it up... */
+
+- return tn;
+-}
++ memset(obj, 0, sizeof(yaffs_obj_t));
++ obj->being_created = 1;
+
+-static yaffs_Tnode *yaffs_GetTnode(yaffs_Device *dev)
+-{
+- yaffs_Tnode *tn = yaffs_GetTnodeRaw(dev);
+- int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++ obj->my_dev = dev;
++ obj->hdr_chunk = 0;
++ obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
++ YINIT_LIST_HEAD(&(obj->hard_links));
++ YINIT_LIST_HEAD(&(obj->hash_link));
++ YINIT_LIST_HEAD(&obj->siblings);
+
+- if (tnodeSize < sizeof(yaffs_Tnode))
+- tnodeSize = sizeof(yaffs_Tnode);
+
+- if (tn)
+- memset(tn, 0, tnodeSize);
++ /* Now make the directory sane */
++ if (dev->root_dir) {
++ obj->parent = dev->root_dir;
++ ylist_add(&(obj->siblings), &dev->root_dir->variant.dir_variant.children);
++ }
+
+- return tn;
++ /* Add it to the lost and found directory.
++ * NB Can't put root or lostNFound in lostNFound so
++ * check if lostNFound exists first
++ */
++ if (dev->lost_n_found)
++ yaffs_add_obj_to_dir(dev->lost_n_found, obj);
++
++ obj->being_created = 0;
++ }
++
++ dev->checkpoint_blocks_required = 0; /* force recalculation*/
++
++ return obj;
+ }
+
+-/* FreeTnode frees up a tnode and puts it back on the free list */
+-static void yaffs_FreeTnode(yaffs_Device *dev, yaffs_Tnode *tn)
++static yaffs_obj_t *yaffs_create_fake_dir(yaffs_dev_t *dev, int number,
++ __u32 mode)
+ {
+- if (tn) {
+-#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+- if (tn->internal[YAFFS_NTNODES_INTERNAL] != 0) {
+- /* Hoosterman, this thing looks like it is already in the list */
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR("yaffs: Tnode list bug 2" TENDSTR)));
+- }
+- tn->internal[YAFFS_NTNODES_INTERNAL] = (void *)1;
+-#endif
+- tn->internal[0] = dev->freeTnodes;
+- dev->freeTnodes = tn;
+- dev->nFreeTnodes++;
++
++ yaffs_obj_t *obj =
++ yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
++ if (obj) {
++ obj->fake = 1; /* it is fake so it might have no NAND presence... */
++ obj->rename_allowed = 0; /* ... and we're not allowed to rename it... */
++ obj->unlink_allowed = 0; /* ... or unlink it */
++ obj->deleted = 0;
++ obj->unlinked = 0;
++ obj->yst_mode = mode;
++ obj->my_dev = dev;
++ obj->hdr_chunk = 0; /* Not a valid chunk. */
++ }
++
++ return obj;
++
++}
++
++static void yaffs_unhash_obj(yaffs_obj_t *obj)
++{
++ int bucket;
++ yaffs_dev_t *dev = obj->my_dev;
++
++ /* If it is still linked into the bucket list, free from the list */
++ if (!ylist_empty(&obj->hash_link)) {
++ ylist_del_init(&obj->hash_link);
++ bucket = yaffs_hash_fn(obj->obj_id);
++ dev->obj_bucket[bucket].count--;
+ }
+- dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
+ }
+
+-static void yaffs_DeinitialiseTnodes(yaffs_Device *dev)
++/* FreeObject frees up a Object and puts it back on the free list */
++static void yaffs_free_obj(yaffs_obj_t *obj)
+ {
+- /* Free the list of allocated tnodes */
+- yaffs_TnodeList *tmp;
++ yaffs_dev_t *dev = obj->my_dev;
++
++ T(YAFFS_TRACE_OS, (TSTR("FreeObject %p inode %p"TENDSTR), obj, obj->my_inode));
+
+- while (dev->allocatedTnodeList) {
+- tmp = dev->allocatedTnodeList->next;
++ if (!obj)
++ YBUG();
++ if (obj->parent)
++ YBUG();
++ if (!ylist_empty(&obj->siblings))
++ YBUG();
+
+- YFREE(dev->allocatedTnodeList->tnodes);
+- YFREE(dev->allocatedTnodeList);
+- dev->allocatedTnodeList = tmp;
+
++ if (obj->my_inode) {
++ /* We're still hooked up to a cached inode.
++ * Don't delete now, but mark for later deletion
++ */
++ obj->defered_free = 1;
++ return;
+ }
+
+- dev->freeTnodes = NULL;
+- dev->nFreeTnodes = 0;
+-}
++ yaffs_unhash_obj(obj);
+
+-static void yaffs_InitialiseTnodes(yaffs_Device *dev)
+-{
+- dev->allocatedTnodeList = NULL;
+- dev->freeTnodes = NULL;
+- dev->nFreeTnodes = 0;
+- dev->nTnodesCreated = 0;
++ yaffs_free_raw_obj(dev,obj);
++ dev->n_obj--;
++ dev->checkpoint_blocks_required = 0; /* force recalculation*/
+ }
+
+
+-void yaffs_PutLevel0Tnode(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos,
+- unsigned val)
++void yaffs_handle_defered_free(yaffs_obj_t *obj)
+ {
+- __u32 *map = (__u32 *)tn;
+- __u32 bitInMap;
+- __u32 bitInWord;
+- __u32 wordInMap;
+- __u32 mask;
+-
+- pos &= YAFFS_TNODES_LEVEL0_MASK;
+- val >>= dev->chunkGroupBits;
++ if (obj->defered_free)
++ yaffs_free_obj(obj);
++}
+
+- bitInMap = pos * dev->tnodeWidth;
+- wordInMap = bitInMap / 32;
+- bitInWord = bitInMap & (32 - 1);
++static void yaffs_init_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ int i;
+
+- mask = dev->tnodeMask << bitInWord;
++ dev->n_obj = 0;
++ dev->n_tnodes = 0;
+
+- map[wordInMap] &= ~mask;
+- map[wordInMap] |= (mask & (val << bitInWord));
++ yaffs_init_raw_tnodes_and_objs(dev);
+
+- if (dev->tnodeWidth > (32 - bitInWord)) {
+- bitInWord = (32 - bitInWord);
+- wordInMap++;;
+- mask = dev->tnodeMask >> (/*dev->tnodeWidth -*/ bitInWord);
+- map[wordInMap] &= ~mask;
+- map[wordInMap] |= (mask & (val >> bitInWord));
++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++ YINIT_LIST_HEAD(&dev->obj_bucket[i].list);
++ dev->obj_bucket[i].count = 0;
+ }
+ }
+
+-static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn,
+- unsigned pos)
++static int yaffs_find_nice_bucket(yaffs_dev_t *dev)
+ {
+- __u32 *map = (__u32 *)tn;
+- __u32 bitInMap;
+- __u32 bitInWord;
+- __u32 wordInMap;
+- __u32 val;
++ int i;
++ int l = 999;
++ int lowest = 999999;
+
+- pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+- bitInMap = pos * dev->tnodeWidth;
+- wordInMap = bitInMap / 32;
+- bitInWord = bitInMap & (32 - 1);
++ /* Search for the shortest list or one that
++ * isn't too long.
++ */
+
+- val = map[wordInMap] >> bitInWord;
++ for (i = 0; i < 10 && lowest > 4; i++) {
++ dev->bucket_finder++;
++ dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
++ if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
++ lowest = dev->obj_bucket[dev->bucket_finder].count;
++ l = dev->bucket_finder;
++ }
+
+- if (dev->tnodeWidth > (32 - bitInWord)) {
+- bitInWord = (32 - bitInWord);
+- wordInMap++;;
+- val |= (map[wordInMap] << bitInWord);
+ }
+
+- val &= dev->tnodeMask;
+- val <<= dev->chunkGroupBits;
+-
+- return val;
++ return l;
+ }
+
+-/* ------------------- End of individual tnode manipulation -----------------*/
+-
+-/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
+- * The look up tree is represented by the top tnode and the number of topLevel
+- * in the tree. 0 means only the level 0 tnode is in the tree.
+- */
+-
+-/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
+-static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device *dev,
+- yaffs_FileStructure *fStruct,
+- __u32 chunkId)
++static int yaffs_new_obj_id(yaffs_dev_t *dev)
+ {
+- yaffs_Tnode *tn = fStruct->top;
+- __u32 i;
+- int requiredTallness;
+- int level = fStruct->topLevel;
+-
+- /* Check sane level and chunk Id */
+- if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
+- return NULL;
++ int bucket = yaffs_find_nice_bucket(dev);
+
+- if (chunkId > YAFFS_MAX_CHUNK_ID)
+- return NULL;
++ /* Now find an object value that has not already been taken
++ * by scanning the list.
++ */
+
+- /* First check we're tall enough (ie enough topLevel) */
++ int found = 0;
++ struct ylist_head *i;
+
+- i = chunkId >> YAFFS_TNODES_LEVEL0_BITS;
+- requiredTallness = 0;
+- while (i) {
+- i >>= YAFFS_TNODES_INTERNAL_BITS;
+- requiredTallness++;
+- }
++ __u32 n = (__u32) bucket;
+
+- if (requiredTallness > fStruct->topLevel)
+- return NULL; /* Not tall enough, so we can't find it */
++ /* yaffs_check_obj_hash_sane(); */
+
+- /* Traverse down to level 0 */
+- while (level > 0 && tn) {
+- tn = tn->internal[(chunkId >>
+- (YAFFS_TNODES_LEVEL0_BITS +
+- (level - 1) *
+- YAFFS_TNODES_INTERNAL_BITS)) &
+- YAFFS_TNODES_INTERNAL_MASK];
+- level--;
++ while (!found) {
++ found = 1;
++ n += YAFFS_NOBJECT_BUCKETS;
++ if (1 || dev->obj_bucket[bucket].count > 0) {
++ ylist_for_each(i, &dev->obj_bucket[bucket].list) {
++ /* If there is already one in the list */
++ if (i && ylist_entry(i, yaffs_obj_t,
++ hash_link)->obj_id == n) {
++ found = 0;
++ }
++ }
++ }
+ }
+
+- return tn;
++ return n;
+ }
+
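Object ids are spread over YAFFS_NOBJECT_BUCKETS hash buckets, and yaffs_new_obj_id() keeps the candidate id inside the chosen bucket by stepping it in multiples of the bucket count until it finds a value not already present in that bucket's list. A compact model of the id search (plain array instead of the ylist machinery; 256 buckets is an assumption matching the usual yaffs default):

#include <stdio.h>

#define N_BUCKETS 256

/* Is candidate id n already present in its bucket's id list? */
static int id_in_bucket(const unsigned *ids, int count, unsigned n)
{
    int i;

    for (i = 0; i < count; i++)
        if (ids[i] == n)
            return 1;
    return 0;
}

/* Pick a new id that hashes to `bucket` and is not in use yet. */
static unsigned new_obj_id(int bucket, const unsigned *ids, int count)
{
    unsigned n = (unsigned)bucket;

    for (;;) {
        n += N_BUCKETS;             /* stays congruent to bucket mod N_BUCKETS */
        if (!id_in_bucket(ids, count, n))
            return n;
    }
}

int main(void)
{
    unsigned in_use[] = { 3 + N_BUCKETS, 3 + 2 * N_BUCKETS };  /* bucket 3, twice */
    unsigned id = new_obj_id(3, in_use, 2);

    printf("new id %u (bucket %u)\n", id, id % N_BUCKETS);     /* 771, bucket 3 */
    return 0;
}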
+-/* AddOrFindLevel0Tnode finds the level 0 tnode if it exists, otherwise first expands the tree.
+- * This happens in two steps:
+- * 1. If the tree isn't tall enough, then make it taller.
+- * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
+- *
+- * Used when modifying the tree.
+- *
+- * If the tn argument is NULL, then a fresh tnode will be added otherwise the specified tn will
+- * be plugged into the ttree.
+- */
+-
+-static yaffs_Tnode *yaffs_AddOrFindLevel0Tnode(yaffs_Device *dev,
+- yaffs_FileStructure *fStruct,
+- __u32 chunkId,
+- yaffs_Tnode *passedTn)
++static void yaffs_hash_obj(yaffs_obj_t *in)
+ {
+- int requiredTallness;
+- int i;
+- int l;
+- yaffs_Tnode *tn;
+-
+- __u32 x;
++ int bucket = yaffs_hash_fn(in->obj_id);
++ yaffs_dev_t *dev = in->my_dev;
+
++ ylist_add(&in->hash_link, &dev->obj_bucket[bucket].list);
++ dev->obj_bucket[bucket].count++;
++}
+
+- /* Check sane level and page Id */
+- if (fStruct->topLevel < 0 || fStruct->topLevel > YAFFS_TNODES_MAX_LEVEL)
+- return NULL;
++yaffs_obj_t *yaffs_find_by_number(yaffs_dev_t *dev, __u32 number)
++{
++ int bucket = yaffs_hash_fn(number);
++ struct ylist_head *i;
++ yaffs_obj_t *in;
+
+- if (chunkId > YAFFS_MAX_CHUNK_ID)
+- return NULL;
++ ylist_for_each(i, &dev->obj_bucket[bucket].list) {
++ /* Look if it is in the list */
++ if (i) {
++ in = ylist_entry(i, yaffs_obj_t, hash_link);
++ if (in->obj_id == number) {
+
+- /* First check we're tall enough (ie enough topLevel) */
++ /* Don't tell the VFS about this one if it is defered free */
++ if (in->defered_free)
++ return NULL;
+
+- x = chunkId >> YAFFS_TNODES_LEVEL0_BITS;
+- requiredTallness = 0;
+- while (x) {
+- x >>= YAFFS_TNODES_INTERNAL_BITS;
+- requiredTallness++;
++ return in;
++ }
++ }
+ }
+
++ return NULL;
++}
+
+- if (requiredTallness > fStruct->topLevel) {
+- /* Not tall enough, gotta make the tree taller */
+- for (i = fStruct->topLevel; i < requiredTallness; i++) {
++yaffs_obj_t *yaffs_new_obj(yaffs_dev_t *dev, int number,
++ yaffs_obj_type type)
++{
++ yaffs_obj_t *theObject=NULL;
++ yaffs_tnode_t *tn = NULL;
+
+- tn = yaffs_GetTnode(dev);
++ if (number < 0)
++ number = yaffs_new_obj_id(dev);
+
+- if (tn) {
+- tn->internal[0] = fStruct->top;
+- fStruct->top = tn;
+- } else {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("yaffs: no more tnodes" TENDSTR)));
+- }
+- }
++ if (type == YAFFS_OBJECT_TYPE_FILE) {
++ tn = yaffs_get_tnode(dev);
++ if (!tn)
++ return NULL;
++ }
+
+- fStruct->topLevel = requiredTallness;
++ theObject = yaffs_alloc_empty_obj(dev);
++ if (!theObject){
++ if(tn)
++ yaffs_free_tnode(dev,tn);
++ return NULL;
+ }
+
+- /* Traverse down to level 0, adding anything we need */
+
+- l = fStruct->topLevel;
+- tn = fStruct->top;
++ if (theObject) {
++ theObject->fake = 0;
++ theObject->rename_allowed = 1;
++ theObject->unlink_allowed = 1;
++ theObject->obj_id = number;
++ yaffs_hash_obj(theObject);
++ theObject->variant_type = type;
++#ifdef CONFIG_YAFFS_WINCE
++ yfsd_win_file_time_now(theObject->win_atime);
++ theObject->win_ctime[0] = theObject->win_mtime[0] =
++ theObject->win_atime[0];
++ theObject->win_ctime[1] = theObject->win_mtime[1] =
++ theObject->win_atime[1];
++
++#else
+
+- if (l > 0) {
+- while (l > 0 && tn) {
+- x = (chunkId >>
+- (YAFFS_TNODES_LEVEL0_BITS +
+- (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
+- YAFFS_TNODES_INTERNAL_MASK;
++ theObject->yst_atime = theObject->yst_mtime =
++ theObject->yst_ctime = Y_CURRENT_TIME;
++#endif
++ switch (type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ theObject->variant.file_variant.file_size = 0;
++ theObject->variant.file_variant.scanned_size = 0;
++ theObject->variant.file_variant.shrink_size = 0xFFFFFFFF; /* max __u32 */
++ theObject->variant.file_variant.top_level = 0;
++ theObject->variant.file_variant.top = tn;
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ YINIT_LIST_HEAD(&theObject->variant.dir_variant.
++ children);
++ YINIT_LIST_HEAD(&theObject->variant.dir_variant.
++ dirty);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* No action required */
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* todo this should not happen */
++ break;
++ }
++ }
+
++ return theObject;
++}
+
+- if ((l > 1) && !tn->internal[x]) {
+- /* Add missing non-level-zero tnode */
+- tn->internal[x] = yaffs_GetTnode(dev);
++yaffs_obj_t *yaffs_find_or_create_by_number(yaffs_dev_t *dev,
++ int number,
++ yaffs_obj_type type)
++{
++ yaffs_obj_t *theObject = NULL;
+
+- } else if (l == 1) {
+- /* Looking from level 1 at level 0 */
+- if (passedTn) {
+- /* If we already have one, then release it.*/
+- if (tn->internal[x])
+- yaffs_FreeTnode(dev, tn->internal[x]);
+- tn->internal[x] = passedTn;
++ if (number > 0)
++ theObject = yaffs_find_by_number(dev, number);
+
+- } else if (!tn->internal[x]) {
+- /* Don't have one, none passed in */
+- tn->internal[x] = yaffs_GetTnode(dev);
+- }
+- }
++ if (!theObject)
++ theObject = yaffs_new_obj(dev, number, type);
+
+- tn = tn->internal[x];
+- l--;
+- }
+- } else {
+- /* top is level 0 */
+- if (passedTn) {
+- memcpy(tn, passedTn, (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
+- yaffs_FreeTnode(dev, passedTn);
+- }
+- }
++ return theObject;
+
+- return tn;
+ }
+
+-static int yaffs_FindChunkInGroup(yaffs_Device *dev, int theChunk,
+- yaffs_ExtendedTags *tags, int objectId,
+- int chunkInInode)
++
++YCHAR *yaffs_clone_str(const YCHAR *str)
+ {
+- int j;
++ YCHAR *newStr = NULL;
++ int len;
+
+- for (j = 0; theChunk && j < dev->chunkGroupSize; j++) {
+- if (yaffs_CheckChunkBit(dev, theChunk / dev->nChunksPerBlock,
+- theChunk % dev->nChunksPerBlock)) {
+- yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL,
+- tags);
+- if (yaffs_TagsMatch(tags, objectId, chunkInInode)) {
+- /* found it; */
+- return theChunk;
+- }
+- }
+- theChunk++;
++ if (!str)
++ str = _Y("");
++
++ len = yaffs_strnlen(str,YAFFS_MAX_ALIAS_LENGTH);
++ newStr = YMALLOC((len + 1) * sizeof(YCHAR));
++ if (newStr){
++ yaffs_strncpy(newStr, str,len);
++ newStr[len] = 0;
+ }
+- return -1;
+-}
++ return newStr;
+
++}
+
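Unlike the old yaffs_CloneString() removed further down, yaffs_clone_str() always returns a copy (an empty string when passed NULL) and caps the copy at YAFFS_MAX_ALIAS_LENGTH. A generic bounded duplicate in the same spirit, using standard C plus POSIX strnlen() (illustrative name, not the yaffs helper):

#include <stdlib.h>
#include <string.h>

/* Duplicate at most max_len characters of str; NULL maps to "". */
static char *clone_str_bounded(const char *str, size_t max_len)
{
    size_t len;
    char *copy;

    if (!str)
        str = "";

    len = strnlen(str, max_len);    /* strnlen() is POSIX */
    copy = malloc(len + 1);
    if (copy) {
        memcpy(copy, str, len);
        copy[len] = '\0';
    }
    return copy;
}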
+-/* DeleteWorker scans backwards through the tnode tree and deletes all the
+- * chunks and tnodes in the file
+- * Returns 1 if the tree was deleted.
+- * Returns 0 if it stopped early due to hitting the limit and the delete is incomplete.
++/*
++ * Mknod (create) a new object.
++ * equiv_obj only has meaning for a hard link;
++ * aliasString only has meaning for a symlink.
++ * rdev only has meaning for devices (a subset of special objects)
+ */
+
+-static int yaffs_DeleteWorker(yaffs_Object *in, yaffs_Tnode *tn, __u32 level,
+- int chunkOffset, int *limit)
++static yaffs_obj_t *yaffs_create_obj(yaffs_obj_type type,
++ yaffs_obj_t *parent,
++ const YCHAR *name,
++ __u32 mode,
++ __u32 uid,
++ __u32 gid,
++ yaffs_obj_t *equiv_obj,
++ const YCHAR *aliasString, __u32 rdev)
+ {
+- int i;
+- int chunkInInode;
+- int theChunk;
+- yaffs_ExtendedTags tags;
+- int foundChunk;
+- yaffs_Device *dev = in->myDev;
+-
+- int allDone = 1;
+-
+- if (tn) {
+- if (level > 0) {
+- for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
+- i--) {
+- if (tn->internal[i]) {
+- if (limit && (*limit) < 0) {
+- allDone = 0;
+- } else {
+- allDone =
+- yaffs_DeleteWorker(in,
+- tn->
+- internal
+- [i],
+- level -
+- 1,
+- (chunkOffset
+- <<
+- YAFFS_TNODES_INTERNAL_BITS)
+- + i,
+- limit);
+- }
+- if (allDone) {
+- yaffs_FreeTnode(dev,
+- tn->
+- internal[i]);
+- tn->internal[i] = NULL;
+- }
+- }
+- }
+- return (allDone) ? 1 : 0;
+- } else if (level == 0) {
+- int hitLimit = 0;
+-
+- for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0 && !hitLimit;
+- i--) {
+- theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
+- if (theChunk) {
+-
+- chunkInInode = (chunkOffset <<
+- YAFFS_TNODES_LEVEL0_BITS) + i;
+-
+- foundChunk =
+- yaffs_FindChunkInGroup(dev,
+- theChunk,
+- &tags,
+- in->objectId,
+- chunkInInode);
+-
+- if (foundChunk > 0) {
+- yaffs_DeleteChunk(dev,
+- foundChunk, 1,
+- __LINE__);
+- in->nDataChunks--;
+- if (limit) {
+- *limit = *limit - 1;
+- if (*limit <= 0)
+- hitLimit = 1;
+- }
+-
+- }
+-
+- yaffs_PutLevel0Tnode(dev, tn, i, 0);
+- }
+-
+- }
+- return (i < 0) ? 1 : 0;
+-
+- }
+-
+- }
+-
+- return 1;
+-
+-}
+-
+-static void yaffs_SoftDeleteChunk(yaffs_Device *dev, int chunk)
+-{
+- yaffs_BlockInfo *theBlock;
+-
+- T(YAFFS_TRACE_DELETION, (TSTR("soft delete chunk %d" TENDSTR), chunk));
+-
+- theBlock = yaffs_GetBlockInfo(dev, chunk / dev->nChunksPerBlock);
+- if (theBlock) {
+- theBlock->softDeletions++;
+- dev->nFreeChunks++;
+- }
+-}
+-
+-/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all the chunks in the file.
+- * All soft deleting does is increment the block's softdelete count and pulls the chunk out
+- * of the tnode.
+- * Thus, essentially this is the same as DeleteWorker except that the chunks are soft deleted.
+- */
+-
+-static int yaffs_SoftDeleteWorker(yaffs_Object *in, yaffs_Tnode *tn,
+- __u32 level, int chunkOffset)
+-{
+- int i;
+- int theChunk;
+- int allDone = 1;
+- yaffs_Device *dev = in->myDev;
+-
+- if (tn) {
+- if (level > 0) {
+-
+- for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
+- i--) {
+- if (tn->internal[i]) {
+- allDone =
+- yaffs_SoftDeleteWorker(in,
+- tn->
+- internal[i],
+- level - 1,
+- (chunkOffset
+- <<
+- YAFFS_TNODES_INTERNAL_BITS)
+- + i);
+- if (allDone) {
+- yaffs_FreeTnode(dev,
+- tn->
+- internal[i]);
+- tn->internal[i] = NULL;
+- } else {
+- /* Hoosterman... how could this happen? */
+- }
+- }
+- }
+- return (allDone) ? 1 : 0;
+- } else if (level == 0) {
+-
+- for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
+- theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
+- if (theChunk) {
+- /* Note this does not find the real chunk, only the chunk group.
+- * We make an assumption that a chunk group is not larger than
+- * a block.
+- */
+- yaffs_SoftDeleteChunk(dev, theChunk);
+- yaffs_PutLevel0Tnode(dev, tn, i, 0);
+- }
+-
+- }
+- return 1;
+-
+- }
+-
+- }
+-
+- return 1;
+-
+-}
+-
+-static void yaffs_SoftDeleteFile(yaffs_Object *obj)
+-{
+- if (obj->deleted &&
+- obj->variantType == YAFFS_OBJECT_TYPE_FILE && !obj->softDeleted) {
+- if (obj->nDataChunks <= 0) {
+- /* Empty file with no duplicate object headers, just delete it immediately */
+- yaffs_FreeTnode(obj->myDev,
+- obj->variant.fileVariant.top);
+- obj->variant.fileVariant.top = NULL;
+- T(YAFFS_TRACE_TRACING,
+- (TSTR("yaffs: Deleting empty file %d" TENDSTR),
+- obj->objectId));
+- yaffs_DoGenericObjectDeletion(obj);
+- } else {
+- yaffs_SoftDeleteWorker(obj,
+- obj->variant.fileVariant.top,
+- obj->variant.fileVariant.
+- topLevel, 0);
+- obj->softDeleted = 1;
+- }
+- }
+-}
+-
+-/* Pruning removes any part of the file structure tree that is beyond the
+- * bounds of the file (ie that does not point to chunks).
+- *
+- * A file should only get pruned when its size is reduced.
+- *
+- * Before pruning, the chunks must be pulled from the tree and the
+- * level 0 tnode entries must be zeroed out.
+- * Could also use this for file deletion, but that's probably better handled
+- * by a special case.
+- */
+-
+-static yaffs_Tnode *yaffs_PruneWorker(yaffs_Device *dev, yaffs_Tnode *tn,
+- __u32 level, int del0)
+-{
+- int i;
+- int hasData;
+-
+- if (tn) {
+- hasData = 0;
+-
+- for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
+- if (tn->internal[i] && level > 0) {
+- tn->internal[i] =
+- yaffs_PruneWorker(dev, tn->internal[i],
+- level - 1,
+- (i == 0) ? del0 : 1);
+- }
+-
+- if (tn->internal[i])
+- hasData++;
+- }
+-
+- if (hasData == 0 && del0) {
+- /* Free and return NULL */
+-
+- yaffs_FreeTnode(dev, tn);
+- tn = NULL;
+- }
+-
+- }
+-
+- return tn;
+-
+-}
+-
+-static int yaffs_PruneFileStructure(yaffs_Device *dev,
+- yaffs_FileStructure *fStruct)
+-{
+- int i;
+- int hasData;
+- int done = 0;
+- yaffs_Tnode *tn;
+-
+- if (fStruct->topLevel > 0) {
+- fStruct->top =
+- yaffs_PruneWorker(dev, fStruct->top, fStruct->topLevel, 0);
+-
+- /* Now we have a tree with all the non-zero branches NULL but the height
+- * is the same as it was.
+- * Let's see if we can trim internal tnodes to shorten the tree.
+- * We can do this if only the 0th element in the tnode is in use
+- * (ie all the non-zero are NULL)
+- */
+-
+- while (fStruct->topLevel && !done) {
+- tn = fStruct->top;
+-
+- hasData = 0;
+- for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
+- if (tn->internal[i])
+- hasData++;
+- }
+-
+- if (!hasData) {
+- fStruct->top = tn->internal[0];
+- fStruct->topLevel--;
+- yaffs_FreeTnode(dev, tn);
+- } else {
+- done = 1;
+- }
+- }
+- }
+-
+- return YAFFS_OK;
+-}
+-
+-/*-------------------- End of File Structure functions.-------------------*/
+-
+-/* yaffs_CreateFreeObjects creates a bunch more objects and
+- * adds them to the object free list.
+- */
+-static int yaffs_CreateFreeObjects(yaffs_Device *dev, int nObjects)
+-{
+- int i;
+- yaffs_Object *newObjects;
+- yaffs_ObjectList *list;
+-
+- if (nObjects < 1)
+- return YAFFS_OK;
+-
+- /* make these things */
+- newObjects = YMALLOC(nObjects * sizeof(yaffs_Object));
+- list = YMALLOC(sizeof(yaffs_ObjectList));
+-
+- if (!newObjects || !list) {
+- if (newObjects)
+- YFREE(newObjects);
+- if (list)
+- YFREE(list);
+- T(YAFFS_TRACE_ALLOCATE,
+- (TSTR("yaffs: Could not allocate more objects" TENDSTR)));
+- return YAFFS_FAIL;
+- }
+-
+- /* Hook them into the free list */
+- for (i = 0; i < nObjects - 1; i++) {
+- newObjects[i].siblings.next =
+- (struct ylist_head *)(&newObjects[i + 1]);
+- }
+-
+- newObjects[nObjects - 1].siblings.next = (void *)dev->freeObjects;
+- dev->freeObjects = newObjects;
+- dev->nFreeObjects += nObjects;
+- dev->nObjectsCreated += nObjects;
+-
+- /* Now add this bunch of Objects to a list for freeing up. */
+-
+- list->objects = newObjects;
+- list->next = dev->allocatedObjectList;
+- dev->allocatedObjectList = list;
+-
+- return YAFFS_OK;
+-}
+-
+-
+-/* AllocateEmptyObject gets us a clean Object. Tries to make allocate more if we run out */
+-static yaffs_Object *yaffs_AllocateEmptyObject(yaffs_Device *dev)
+-{
+- yaffs_Object *tn = NULL;
+-
+-#ifdef VALGRIND_TEST
+- tn = YMALLOC(sizeof(yaffs_Object));
+-#else
+- /* If there are none left make more */
+- if (!dev->freeObjects)
+- yaffs_CreateFreeObjects(dev, YAFFS_ALLOCATION_NOBJECTS);
+-
+- if (dev->freeObjects) {
+- tn = dev->freeObjects;
+- dev->freeObjects =
+- (yaffs_Object *) (dev->freeObjects->siblings.next);
+- dev->nFreeObjects--;
+- }
+-#endif
+- if (tn) {
+- /* Now sweeten it up... */
+-
+- memset(tn, 0, sizeof(yaffs_Object));
+- tn->beingCreated = 1;
+-
+- tn->myDev = dev;
+- tn->hdrChunk = 0;
+- tn->variantType = YAFFS_OBJECT_TYPE_UNKNOWN;
+- YINIT_LIST_HEAD(&(tn->hardLinks));
+- YINIT_LIST_HEAD(&(tn->hashLink));
+- YINIT_LIST_HEAD(&tn->siblings);
+-
+-
+- /* Now make the directory sane */
+- if (dev->rootDir) {
+- tn->parent = dev->rootDir;
+- ylist_add(&(tn->siblings), &dev->rootDir->variant.directoryVariant.children);
+- }
+-
+- /* Add it to the lost and found directory.
+- * NB Can't put root or lostNFound in lostNFound so
+- * check if lostNFound exists first
+- */
+- if (dev->lostNFoundDir)
+- yaffs_AddObjectToDirectory(dev->lostNFoundDir, tn);
+-
+- tn->beingCreated = 0;
+- }
+-
+- dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
+-
+- return tn;
+-}
+-
+-static yaffs_Object *yaffs_CreateFakeDirectory(yaffs_Device *dev, int number,
+- __u32 mode)
+-{
+-
+- yaffs_Object *obj =
+- yaffs_CreateNewObject(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
+- if (obj) {
+- obj->fake = 1; /* it is fake so it might have no NAND presence... */
+- obj->renameAllowed = 0; /* ... and we're not allowed to rename it... */
+- obj->unlinkAllowed = 0; /* ... or unlink it */
+- obj->deleted = 0;
+- obj->unlinked = 0;
+- obj->yst_mode = mode;
+- obj->myDev = dev;
+- obj->hdrChunk = 0; /* Not a valid chunk. */
+- }
+-
+- return obj;
+-
+-}
+-
+-static void yaffs_UnhashObject(yaffs_Object *tn)
+-{
+- int bucket;
+- yaffs_Device *dev = tn->myDev;
+-
+- /* If it is still linked into the bucket list, free from the list */
+- if (!ylist_empty(&tn->hashLink)) {
+- ylist_del_init(&tn->hashLink);
+- bucket = yaffs_HashFunction(tn->objectId);
+- dev->objectBucket[bucket].count--;
+- }
+-}
+-
+-/* FreeObject frees up a Object and puts it back on the free list */
+-static void yaffs_FreeObject(yaffs_Object *tn)
+-{
+- yaffs_Device *dev = tn->myDev;
+-
+-#ifdef __KERNEL__
+- T(YAFFS_TRACE_OS, (TSTR("FreeObject %p inode %p"TENDSTR), tn, tn->myInode));
+-#endif
+-
+- if (tn->parent)
+- YBUG();
+- if (!ylist_empty(&tn->siblings))
+- YBUG();
+-
+-
+-#ifdef __KERNEL__
+- if (tn->myInode) {
+- /* We're still hooked up to a cached inode.
+- * Don't delete now, but mark for later deletion
+- */
+- tn->deferedFree = 1;
+- return;
+- }
+-#endif
+-
+- yaffs_UnhashObject(tn);
+-
+-#ifdef VALGRIND_TEST
+- YFREE(tn);
+-#else
+- /* Link into the free list. */
+- tn->siblings.next = (struct ylist_head *)(dev->freeObjects);
+- dev->freeObjects = tn;
+- dev->nFreeObjects++;
+-#endif
+- dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
+-}
+-
+-#ifdef __KERNEL__
+-
+-void yaffs_HandleDeferedFree(yaffs_Object *obj)
+-{
+- if (obj->deferedFree)
+- yaffs_FreeObject(obj);
+-}
+-
+-#endif
+-
+-static void yaffs_DeinitialiseObjects(yaffs_Device *dev)
+-{
+- /* Free the list of allocated Objects */
+-
+- yaffs_ObjectList *tmp;
+-
+- while (dev->allocatedObjectList) {
+- tmp = dev->allocatedObjectList->next;
+- YFREE(dev->allocatedObjectList->objects);
+- YFREE(dev->allocatedObjectList);
+-
+- dev->allocatedObjectList = tmp;
+- }
+-
+- dev->freeObjects = NULL;
+- dev->nFreeObjects = 0;
+-}
+-
+-static void yaffs_InitialiseObjects(yaffs_Device *dev)
+-{
+- int i;
+-
+- dev->allocatedObjectList = NULL;
+- dev->freeObjects = NULL;
+- dev->nFreeObjects = 0;
+-
+- for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+- YINIT_LIST_HEAD(&dev->objectBucket[i].list);
+- dev->objectBucket[i].count = 0;
+- }
+-}
+-
+-static int yaffs_FindNiceObjectBucket(yaffs_Device *dev)
+-{
+- static int x;
+- int i;
+- int l = 999;
+- int lowest = 999999;
+-
+- /* First let's see if we can find one that's empty. */
+-
+- for (i = 0; i < 10 && lowest > 0; i++) {
+- x++;
+- x %= YAFFS_NOBJECT_BUCKETS;
+- if (dev->objectBucket[x].count < lowest) {
+- lowest = dev->objectBucket[x].count;
+- l = x;
+- }
+-
+- }
+-
+- /* If we didn't find an empty list, then try
+- * looking a bit further for a short one
+- */
+-
+- for (i = 0; i < 10 && lowest > 3; i++) {
+- x++;
+- x %= YAFFS_NOBJECT_BUCKETS;
+- if (dev->objectBucket[x].count < lowest) {
+- lowest = dev->objectBucket[x].count;
+- l = x;
+- }
+-
+- }
+-
+- return l;
+-}
+-
+-static int yaffs_CreateNewObjectNumber(yaffs_Device *dev)
+-{
+- int bucket = yaffs_FindNiceObjectBucket(dev);
+-
+- /* Now find an object value that has not already been taken
+- * by scanning the list.
+- */
+-
+- int found = 0;
+- struct ylist_head *i;
+-
+- __u32 n = (__u32) bucket;
+-
+- /* yaffs_CheckObjectHashSanity(); */
+-
+- while (!found) {
+- found = 1;
+- n += YAFFS_NOBJECT_BUCKETS;
+- if (1 || dev->objectBucket[bucket].count > 0) {
+- ylist_for_each(i, &dev->objectBucket[bucket].list) {
+- /* If there is already one in the list */
+- if (i && ylist_entry(i, yaffs_Object,
+- hashLink)->objectId == n) {
+- found = 0;
+- }
+- }
+- }
+- }
+-
+- return n;
+-}
+-
+-static void yaffs_HashObject(yaffs_Object *in)
+-{
+- int bucket = yaffs_HashFunction(in->objectId);
+- yaffs_Device *dev = in->myDev;
+-
+- ylist_add(&in->hashLink, &dev->objectBucket[bucket].list);
+- dev->objectBucket[bucket].count++;
+-}
+-
+-yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device *dev, __u32 number)
+-{
+- int bucket = yaffs_HashFunction(number);
+- struct ylist_head *i;
+- yaffs_Object *in;
+-
+- ylist_for_each(i, &dev->objectBucket[bucket].list) {
+- /* Look if it is in the list */
+- if (i) {
+- in = ylist_entry(i, yaffs_Object, hashLink);
+- if (in->objectId == number) {
+-#ifdef __KERNEL__
+- /* Don't tell the VFS about this one if it is defered free */
+- if (in->deferedFree)
+- return NULL;
+-#endif
+-
+- return in;
+- }
+- }
+- }
+-
+- return NULL;
+-}
+-
+-yaffs_Object *yaffs_CreateNewObject(yaffs_Device *dev, int number,
+- yaffs_ObjectType type)
+-{
+- yaffs_Object *theObject;
+- yaffs_Tnode *tn = NULL;
+-
+- if (number < 0)
+- number = yaffs_CreateNewObjectNumber(dev);
+-
+- theObject = yaffs_AllocateEmptyObject(dev);
+- if (!theObject)
+- return NULL;
+-
+- if (type == YAFFS_OBJECT_TYPE_FILE) {
+- tn = yaffs_GetTnode(dev);
+- if (!tn) {
+- yaffs_FreeObject(theObject);
+- return NULL;
+- }
+- }
+-
+- if (theObject) {
+- theObject->fake = 0;
+- theObject->renameAllowed = 1;
+- theObject->unlinkAllowed = 1;
+- theObject->objectId = number;
+- yaffs_HashObject(theObject);
+- theObject->variantType = type;
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_WinFileTimeNow(theObject->win_atime);
+- theObject->win_ctime[0] = theObject->win_mtime[0] =
+- theObject->win_atime[0];
+- theObject->win_ctime[1] = theObject->win_mtime[1] =
+- theObject->win_atime[1];
+-
+-#else
+-
+- theObject->yst_atime = theObject->yst_mtime =
+- theObject->yst_ctime = Y_CURRENT_TIME;
+-#endif
+- switch (type) {
+- case YAFFS_OBJECT_TYPE_FILE:
+- theObject->variant.fileVariant.fileSize = 0;
+- theObject->variant.fileVariant.scannedFileSize = 0;
+- theObject->variant.fileVariant.shrinkSize = 0xFFFFFFFF; /* max __u32 */
+- theObject->variant.fileVariant.topLevel = 0;
+- theObject->variant.fileVariant.top = tn;
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- YINIT_LIST_HEAD(&theObject->variant.directoryVariant.
+- children);
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- /* No action required */
+- break;
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- /* todo this should not happen */
+- break;
+- }
+- }
+-
+- return theObject;
+-}
+-
+-static yaffs_Object *yaffs_FindOrCreateObjectByNumber(yaffs_Device *dev,
+- int number,
+- yaffs_ObjectType type)
+-{
+- yaffs_Object *theObject = NULL;
+-
+- if (number > 0)
+- theObject = yaffs_FindObjectByNumber(dev, number);
+-
+- if (!theObject)
+- theObject = yaffs_CreateNewObject(dev, number, type);
+-
+- return theObject;
+-
+-}
+-
+-
+-static YCHAR *yaffs_CloneString(const YCHAR *str)
+-{
+- YCHAR *newStr = NULL;
+-
+- if (str && *str) {
+- newStr = YMALLOC((yaffs_strlen(str) + 1) * sizeof(YCHAR));
+- if (newStr)
+- yaffs_strcpy(newStr, str);
+- }
+-
+- return newStr;
+-
+-}
+-
+-/*
+- * Mknod (create) a new object.
+- * equivalentObject only has meaning for a hard link;
+- * aliasString only has meaning for a symlink.
+- * rdev only has meaning for devices (a subset of special objects)
+- */
+-
+-static yaffs_Object *yaffs_MknodObject(yaffs_ObjectType type,
+- yaffs_Object *parent,
+- const YCHAR *name,
+- __u32 mode,
+- __u32 uid,
+- __u32 gid,
+- yaffs_Object *equivalentObject,
+- const YCHAR *aliasString, __u32 rdev)
+-{
+- yaffs_Object *in;
+- YCHAR *str = NULL;
+-
+- yaffs_Device *dev = parent->myDev;
+-
+- /* Check if the entry exists. If it does then fail the call since we don't want a dup.*/
+- if (yaffs_FindObjectByName(parent, name))
+- return NULL;
+-
+- in = yaffs_CreateNewObject(dev, -1, type);
+-
+- if (!in)
+- return YAFFS_FAIL;
+-
+- if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
+- str = yaffs_CloneString(aliasString);
+- if (!str) {
+- yaffs_FreeObject(in);
+- return NULL;
+- }
+- }
+-
+-
+-
+- if (in) {
+- in->hdrChunk = 0;
+- in->valid = 1;
+- in->variantType = type;
+-
+- in->yst_mode = mode;
+-
+-#ifdef CONFIG_YAFFS_WINCE
+- yfsd_WinFileTimeNow(in->win_atime);
+- in->win_ctime[0] = in->win_mtime[0] = in->win_atime[0];
+- in->win_ctime[1] = in->win_mtime[1] = in->win_atime[1];
+-
+-#else
+- in->yst_atime = in->yst_mtime = in->yst_ctime = Y_CURRENT_TIME;
+-
+- in->yst_rdev = rdev;
+- in->yst_uid = uid;
+- in->yst_gid = gid;
+-#endif
+- in->nDataChunks = 0;
+-
+- yaffs_SetObjectName(in, name);
+- in->dirty = 1;
+-
+- yaffs_AddObjectToDirectory(parent, in);
+-
+- in->myDev = parent->myDev;
+-
+- switch (type) {
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- in->variant.symLinkVariant.alias = str;
+- break;
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- in->variant.hardLinkVariant.equivalentObject =
+- equivalentObject;
+- in->variant.hardLinkVariant.equivalentObjectId =
+- equivalentObject->objectId;
+- ylist_add(&in->hardLinks, &equivalentObject->hardLinks);
+- break;
+- case YAFFS_OBJECT_TYPE_FILE:
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- /* do nothing */
+- break;
+- }
+-
+- if (yaffs_UpdateObjectHeader(in, name, 0, 0, 0) < 0) {
+- /* Could not create the object header, fail the creation */
+- yaffs_DeleteObject(in);
+- in = NULL;
+- }
+-
+- }
+-
+- return in;
+-}
+-
+-yaffs_Object *yaffs_MknodFile(yaffs_Object *parent, const YCHAR *name,
+- __u32 mode, __u32 uid, __u32 gid)
+-{
+- return yaffs_MknodObject(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
+- uid, gid, NULL, NULL, 0);
+-}
+-
+-yaffs_Object *yaffs_MknodDirectory(yaffs_Object *parent, const YCHAR *name,
+- __u32 mode, __u32 uid, __u32 gid)
+-{
+- return yaffs_MknodObject(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
+- mode, uid, gid, NULL, NULL, 0);
+-}
+-
+-yaffs_Object *yaffs_MknodSpecial(yaffs_Object *parent, const YCHAR *name,
+- __u32 mode, __u32 uid, __u32 gid, __u32 rdev)
+-{
+- return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
+- uid, gid, NULL, NULL, rdev);
+-}
+-
+-yaffs_Object *yaffs_MknodSymLink(yaffs_Object *parent, const YCHAR *name,
+- __u32 mode, __u32 uid, __u32 gid,
+- const YCHAR *alias)
+-{
+- return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
+- uid, gid, NULL, alias, 0);
+-}
+-
+-/* yaffs_Link returns the object id of the equivalent object.*/
+-yaffs_Object *yaffs_Link(yaffs_Object *parent, const YCHAR *name,
+- yaffs_Object *equivalentObject)
+-{
+- /* Get the real object in case we were fed a hard link as an equivalent object */
+- equivalentObject = yaffs_GetEquivalentObject(equivalentObject);
+-
+- if (yaffs_MknodObject
+- (YAFFS_OBJECT_TYPE_HARDLINK, parent, name, 0, 0, 0,
+- equivalentObject, NULL, 0)) {
+- return equivalentObject;
+- } else {
+- return NULL;
+- }
+-
+-}
+-
+-static int yaffs_ChangeObjectName(yaffs_Object *obj, yaffs_Object *newDir,
+- const YCHAR *newName, int force, int shadows)
+-{
+- int unlinkOp;
+- int deleteOp;
+-
+- yaffs_Object *existingTarget;
+-
+- if (newDir == NULL)
+- newDir = obj->parent; /* use the old directory */
+-
+- if (newDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR
+- ("tragedy: yaffs_ChangeObjectName: newDir is not a directory"
+- TENDSTR)));
+- YBUG();
+- }
+-
+- /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
+- if (obj->myDev->isYaffs2)
+- unlinkOp = (newDir == obj->myDev->unlinkedDir);
+- else
+- unlinkOp = (newDir == obj->myDev->unlinkedDir
+- && obj->variantType == YAFFS_OBJECT_TYPE_FILE);
+-
+- deleteOp = (newDir == obj->myDev->deletedDir);
+-
+- existingTarget = yaffs_FindObjectByName(newDir, newName);
+-
+- /* If the object is a file going into the unlinked directory,
+- * then it is OK to just stuff it in since duplicate names are allowed.
+- * else only proceed if the new name does not exist and if we're putting
+- * it into a directory.
+- */
+- if ((unlinkOp ||
+- deleteOp ||
+- force ||
+- (shadows > 0) ||
+- !existingTarget) &&
+- newDir->variantType == YAFFS_OBJECT_TYPE_DIRECTORY) {
+- yaffs_SetObjectName(obj, newName);
+- obj->dirty = 1;
+-
+- yaffs_AddObjectToDirectory(newDir, obj);
+-
+- if (unlinkOp)
+- obj->unlinked = 1;
+-
+- /* If it is a deletion then we mark it as a shrink for gc purposes. */
+- if (yaffs_UpdateObjectHeader(obj, newName, 0, deleteOp, shadows) >= 0)
+- return YAFFS_OK;
+- }
+-
+- return YAFFS_FAIL;
+-}
+-
+-int yaffs_RenameObject(yaffs_Object *oldDir, const YCHAR *oldName,
+- yaffs_Object *newDir, const YCHAR *newName)
+-{
+- yaffs_Object *obj = NULL;
+- yaffs_Object *existingTarget = NULL;
+- int force = 0;
+-
+-
+- if (!oldDir || oldDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY)
+- YBUG();
+- if (!newDir || newDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY)
+- YBUG();
+-
+-#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+-	/* Special case for case insensitive systems (eg. WinCE).
+- * While look-up is case insensitive, the name isn't.
+- * Therefore we might want to change x.txt to X.txt
+- */
+- if (oldDir == newDir && yaffs_strcmp(oldName, newName) == 0)
+- force = 1;
+-#endif
+-
+- else if (yaffs_strlen(newName) > YAFFS_MAX_NAME_LENGTH)
+- /* ENAMETOOLONG */
+- return YAFFS_FAIL;
+-
+- obj = yaffs_FindObjectByName(oldDir, oldName);
+-
+- if (obj && obj->renameAllowed) {
+-
+- /* Now do the handling for an existing target, if there is one */
+-
+- existingTarget = yaffs_FindObjectByName(newDir, newName);
+- if (existingTarget &&
+- existingTarget->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
+- !ylist_empty(&existingTarget->variant.directoryVariant.children)) {
+- /* There is a target that is a non-empty directory, so we fail */
+- return YAFFS_FAIL; /* EEXIST or ENOTEMPTY */
+- } else if (existingTarget && existingTarget != obj) {
+- /* Nuke the target first, using shadowing,
+- * but only if it isn't the same object
+- */
+- yaffs_ChangeObjectName(obj, newDir, newName, force,
+- existingTarget->objectId);
+- yaffs_UnlinkObject(existingTarget);
+- }
+-
+- return yaffs_ChangeObjectName(obj, newDir, newName, 1, 0);
+- }
+- return YAFFS_FAIL;
+-}
+-
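The rename path being removed here first refuses to overwrite a non-empty target directory, then shadows and unlinks any other existing target before rewriting the object header under the new name. The accept/reject decision condenses to roughly the following (simplified model, ignoring the unlinked/deleted-directory special cases):

#include <stdio.h>

struct ren_obj {
    int is_dir;
    int has_children;
    int rename_allowed;
};

/* Returns 1 if renaming obj onto `target` may proceed (target may be NULL). */
static int rename_target_ok(const struct ren_obj *obj, const struct ren_obj *target)
{
    if (!obj->rename_allowed)
        return 0;
    if (target && target != obj && target->is_dir && target->has_children)
        return 0;   /* EEXIST / ENOTEMPTY: non-empty directory in the way */
    return 1;
}

int main(void)
{
    struct ren_obj file = { 0, 0, 1 };
    struct ren_obj busy_dir = { 1, 1, 1 };

    printf("onto empty slot: %d\n", rename_target_ok(&file, NULL));      /* 1 */
    printf("onto busy dir:   %d\n", rename_target_ok(&file, &busy_dir)); /* 0 */
    return 0;
}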
+-/*------------------------- Block Management and Page Allocation ----------------*/
+-
+-static int yaffs_InitialiseBlocks(yaffs_Device *dev)
+-{
+- int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
+-
+- dev->blockInfo = NULL;
+- dev->chunkBits = NULL;
+-
+- dev->allocationBlock = -1; /* force it to get a new one */
+-
+-	/* If the first allocation strategy fails, try the alternate one */
+- dev->blockInfo = YMALLOC(nBlocks * sizeof(yaffs_BlockInfo));
+- if (!dev->blockInfo) {
+- dev->blockInfo = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockInfo));
+- dev->blockInfoAlt = 1;
+- } else
+- dev->blockInfoAlt = 0;
+-
+- if (dev->blockInfo) {
+- /* Set up dynamic blockinfo stuff. */
+- dev->chunkBitmapStride = (dev->nChunksPerBlock + 7) / 8; /* round up bytes */
+- dev->chunkBits = YMALLOC(dev->chunkBitmapStride * nBlocks);
+- if (!dev->chunkBits) {
+- dev->chunkBits = YMALLOC_ALT(dev->chunkBitmapStride * nBlocks);
+- dev->chunkBitsAlt = 1;
+- } else
+- dev->chunkBitsAlt = 0;
+- }
+-
+- if (dev->blockInfo && dev->chunkBits) {
+- memset(dev->blockInfo, 0, nBlocks * sizeof(yaffs_BlockInfo));
+- memset(dev->chunkBits, 0, dev->chunkBitmapStride * nBlocks);
+- return YAFFS_OK;
+- }
+-
+- return YAFFS_FAIL;
+-}
+-
+-static void yaffs_DeinitialiseBlocks(yaffs_Device *dev)
+-{
+- if (dev->blockInfoAlt && dev->blockInfo)
+- YFREE_ALT(dev->blockInfo);
+- else if (dev->blockInfo)
+- YFREE(dev->blockInfo);
+-
+- dev->blockInfoAlt = 0;
+-
+- dev->blockInfo = NULL;
+-
+- if (dev->chunkBitsAlt && dev->chunkBits)
+- YFREE_ALT(dev->chunkBits);
+- else if (dev->chunkBits)
+- YFREE(dev->chunkBits);
+- dev->chunkBitsAlt = 0;
+- dev->chunkBits = NULL;
+-}
+-
+-static int yaffs_BlockNotDisqualifiedFromGC(yaffs_Device *dev,
+- yaffs_BlockInfo *bi)
+-{
+- int i;
+- __u32 seq;
+- yaffs_BlockInfo *b;
+-
+- if (!dev->isYaffs2)
+- return 1; /* disqualification only applies to yaffs2. */
+-
+- if (!bi->hasShrinkHeader)
+- return 1; /* can gc */
+-
+- /* Find the oldest dirty sequence number if we don't know it and save it
+- * so we don't have to keep recomputing it.
+- */
+- if (!dev->oldestDirtySequence) {
+- seq = dev->sequenceNumber;
+-
+- for (i = dev->internalStartBlock; i <= dev->internalEndBlock;
+- i++) {
+- b = yaffs_GetBlockInfo(dev, i);
+- if (b->blockState == YAFFS_BLOCK_STATE_FULL &&
+- (b->pagesInUse - b->softDeletions) <
+- dev->nChunksPerBlock && b->sequenceNumber < seq) {
+- seq = b->sequenceNumber;
+- }
+- }
+- dev->oldestDirtySequence = seq;
+- }
+-
+- /* Can't do gc of this block if there are any blocks older than this one that have
+- * discarded pages.
+- */
+- return (bi->sequenceNumber <= dev->oldestDirtySequence);
+-}
+-
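The removed yaffs_BlockNotDisqualifiedFromGC() encodes a yaffs2-only rule: a block carrying a shrink header may be garbage collected only when no older block still holds discarded pages, so the oldest dirty sequence number is computed lazily and cached in the device. A self-contained model of that check (field and function names are stand-ins, not the yaffs ones):

#include <stdint.h>

struct model_block {
    int full;                  /* block is fully written */
    int pages_in_use;
    int soft_deletions;
    int has_shrink_header;
    uint32_t seq;              /* sequence number when the block was allocated */
};

/* Oldest sequence number among full blocks that still contain dirty pages. */
static uint32_t oldest_dirty_seq(const struct model_block *b, int n,
                                 int chunks_per_block, uint32_t current_seq)
{
    uint32_t seq = current_seq;
    int i;

    for (i = 0; i < n; i++)
        if (b[i].full &&
            (b[i].pages_in_use - b[i].soft_deletions) < chunks_per_block &&
            b[i].seq < seq)
            seq = b[i].seq;
    return seq;
}

/* A shrink-header block may be collected only if nothing older is still dirty. */
static int can_gc(const struct model_block *bi, uint32_t oldest_dirty)
{
    if (!bi->has_shrink_header)
        return 1;
    return bi->seq <= oldest_dirty;
}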
+-/* FindDirtiestBlock is used to select the dirtiest block (or close enough)
+- * for garbage collection.
+- */
+-
+-static int yaffs_FindBlockForGarbageCollection(yaffs_Device *dev,
+- int aggressive)
+-{
+- int b = dev->currentDirtyChecker;
+-
+- int i;
+- int iterations;
+- int dirtiest = -1;
+- int pagesInUse = 0;
+- int prioritised = 0;
+- yaffs_BlockInfo *bi;
+- int pendingPrioritisedExist = 0;
+-
+- /* First let's see if we need to grab a prioritised block */
+- if (dev->hasPendingPrioritisedGCs) {
+- for (i = dev->internalStartBlock; i < dev->internalEndBlock && !prioritised; i++) {
+-
+- bi = yaffs_GetBlockInfo(dev, i);
+- /* yaffs_VerifyBlock(dev,bi,i); */
+-
+- if (bi->gcPrioritise) {
+- pendingPrioritisedExist = 1;
+- if (bi->blockState == YAFFS_BLOCK_STATE_FULL &&
+- yaffs_BlockNotDisqualifiedFromGC(dev, bi)) {
+- pagesInUse = (bi->pagesInUse - bi->softDeletions);
+- dirtiest = i;
+- prioritised = 1;
+-					aggressive = 1; /* Fool the non-aggressive skip logic below */
+- }
+- }
+- }
+-
+- if (!pendingPrioritisedExist) /* None found, so we can clear this */
+- dev->hasPendingPrioritisedGCs = 0;
+- }
+-
+- /* If we're doing aggressive GC then we are happy to take a less-dirty block, and
+- * search harder.
+- * else (we're doing a leisurely gc), then we only bother to do this if the
+- * block has only a few pages in use.
+- */
+-
+- dev->nonAggressiveSkip--;
+-
+- if (!aggressive && (dev->nonAggressiveSkip > 0))
+- return -1;
+-
+- if (!prioritised)
+- pagesInUse =
+- (aggressive) ? dev->nChunksPerBlock : YAFFS_PASSIVE_GC_CHUNKS + 1;
+-
+- if (aggressive)
+- iterations =
+- dev->internalEndBlock - dev->internalStartBlock + 1;
+- else {
+- iterations =
+- dev->internalEndBlock - dev->internalStartBlock + 1;
+- iterations = iterations / 16;
+- if (iterations > 200)
+- iterations = 200;
+- }
+-
+- for (i = 0; i <= iterations && pagesInUse > 0 && !prioritised; i++) {
+- b++;
+- if (b < dev->internalStartBlock || b > dev->internalEndBlock)
+- b = dev->internalStartBlock;
+-
+- if (b < dev->internalStartBlock || b > dev->internalEndBlock) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("**>> Block %d is not valid" TENDSTR), b));
+- YBUG();
+- }
+-
+- bi = yaffs_GetBlockInfo(dev, b);
+-
+- if (bi->blockState == YAFFS_BLOCK_STATE_FULL &&
+- (bi->pagesInUse - bi->softDeletions) < pagesInUse &&
+- yaffs_BlockNotDisqualifiedFromGC(dev, bi)) {
+- dirtiest = b;
+- pagesInUse = (bi->pagesInUse - bi->softDeletions);
+- }
+- }
+-
+- dev->currentDirtyChecker = b;
+-
+- if (dirtiest > 0) {
+- T(YAFFS_TRACE_GC,
+- (TSTR("GC Selected block %d with %d free, prioritised:%d" TENDSTR), dirtiest,
+- dev->nChunksPerBlock - pagesInUse, prioritised));
+- }
+-
+- dev->oldestDirtySequence = 0;
+-
+- if (dirtiest > 0)
+- dev->nonAggressiveSkip = 4;
+-
+- return dirtiest;
+-}
+-
+-static void yaffs_BlockBecameDirty(yaffs_Device *dev, int blockNo)
+-{
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockNo);
+-
+- int erasedOk = 0;
+-
+- /* If the block is still healthy erase it and mark as clean.
+- * If the block has had a data failure, then retire it.
+- */
+-
+- T(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
+- (TSTR("yaffs_BlockBecameDirty block %d state %d %s"TENDSTR),
+- blockNo, bi->blockState, (bi->needsRetiring) ? "needs retiring" : ""));
+-
+- bi->blockState = YAFFS_BLOCK_STATE_DIRTY;
+-
+- if (!bi->needsRetiring) {
+- yaffs_InvalidateCheckpoint(dev);
+- erasedOk = yaffs_EraseBlockInNAND(dev, blockNo);
+- if (!erasedOk) {
+- dev->nErasureFailures++;
+- T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+- (TSTR("**>> Erasure failed %d" TENDSTR), blockNo));
+- }
+- }
+-
+- if (erasedOk &&
+- ((yaffs_traceMask & YAFFS_TRACE_ERASE) || !yaffs_SkipVerification(dev))) {
+- int i;
+- for (i = 0; i < dev->nChunksPerBlock; i++) {
+- if (!yaffs_CheckChunkErased
+- (dev, blockNo * dev->nChunksPerBlock + i)) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- (">>Block %d erasure supposedly OK, but chunk %d not erased"
+- TENDSTR), blockNo, i));
+- }
+- }
+- }
+-
+- if (erasedOk) {
+- /* Clean it up... */
+- bi->blockState = YAFFS_BLOCK_STATE_EMPTY;
+- dev->nErasedBlocks++;
+- bi->pagesInUse = 0;
+- bi->softDeletions = 0;
+- bi->hasShrinkHeader = 0;
+- bi->skipErasedCheck = 1; /* This is clean, so no need to check */
+- bi->gcPrioritise = 0;
+- yaffs_ClearChunkBits(dev, blockNo);
+-
+- T(YAFFS_TRACE_ERASE,
+- (TSTR("Erased block %d" TENDSTR), blockNo));
+- } else {
+- dev->nFreeChunks -= dev->nChunksPerBlock; /* We lost a block of free space */
+-
+- yaffs_RetireBlock(dev, blockNo);
+- T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+- (TSTR("**>> Block %d retired" TENDSTR), blockNo));
+- }
+-}
+-
+-static int yaffs_FindBlockForAllocation(yaffs_Device *dev)
+-{
+- int i;
+-
+- yaffs_BlockInfo *bi;
+-
+- if (dev->nErasedBlocks < 1) {
+- /* Hoosterman we've got a problem.
+- * Can't get space to gc
+- */
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("yaffs tragedy: no more erased blocks" TENDSTR)));
+-
+- return -1;
+- }
+-
+- /* Find an empty block. */
+-
+- for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
+- dev->allocationBlockFinder++;
+- if (dev->allocationBlockFinder < dev->internalStartBlock
+- || dev->allocationBlockFinder > dev->internalEndBlock) {
+- dev->allocationBlockFinder = dev->internalStartBlock;
+- }
+-
+- bi = yaffs_GetBlockInfo(dev, dev->allocationBlockFinder);
+-
+- if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY) {
+- bi->blockState = YAFFS_BLOCK_STATE_ALLOCATING;
+- dev->sequenceNumber++;
+- bi->sequenceNumber = dev->sequenceNumber;
+- dev->nErasedBlocks--;
+- T(YAFFS_TRACE_ALLOCATE,
+- (TSTR("Allocated block %d, seq %d, %d left" TENDSTR),
+- dev->allocationBlockFinder, dev->sequenceNumber,
+- dev->nErasedBlocks));
+- return dev->allocationBlockFinder;
+- }
+- }
+-
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR
+- ("yaffs tragedy: no more erased blocks, but there should have been %d"
+- TENDSTR), dev->nErasedBlocks));
+-
+- return -1;
+-}
+-
+-
+-
+-static int yaffs_CalcCheckpointBlocksRequired(yaffs_Device *dev)
+-{
+- if (!dev->nCheckpointBlocksRequired &&
+- dev->isYaffs2) {
+- /* Not a valid value so recalculate */
+- int nBytes = 0;
+- int nBlocks;
+- int devBlocks = (dev->endBlock - dev->startBlock + 1);
+- int tnodeSize;
+-
+- tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+-
+- if (tnodeSize < sizeof(yaffs_Tnode))
+- tnodeSize = sizeof(yaffs_Tnode);
+-
+- nBytes += sizeof(yaffs_CheckpointValidity);
+- nBytes += sizeof(yaffs_CheckpointDevice);
+- nBytes += devBlocks * sizeof(yaffs_BlockInfo);
+- nBytes += devBlocks * dev->chunkBitmapStride;
+- nBytes += (sizeof(yaffs_CheckpointObject) + sizeof(__u32)) * (dev->nObjectsCreated - dev->nFreeObjects);
+- nBytes += (tnodeSize + sizeof(__u32)) * (dev->nTnodesCreated - dev->nFreeTnodes);
+- nBytes += sizeof(yaffs_CheckpointValidity);
+- nBytes += sizeof(__u32); /* checksum*/
+-
+- /* Round up and add 2 blocks to allow for some bad blocks, so add 3 */
+-
+- nBlocks = (nBytes/(dev->nDataBytesPerChunk * dev->nChunksPerBlock)) + 3;
+-
+- dev->nCheckpointBlocksRequired = nBlocks;
+- }
+-
+- return dev->nCheckpointBlocksRequired;
+-}
+-
+-/*
+- * Check if there's space to allocate...
+- * Thinks.... do we need to make this the same as yaffs_GetFreeChunks()?
+- */
+-static int yaffs_CheckSpaceForAllocation(yaffs_Device *dev)
+-{
+- int reservedChunks;
+- int reservedBlocks = dev->nReservedBlocks;
+- int checkpointBlocks;
+-
+- if (dev->isYaffs2) {
+- checkpointBlocks = yaffs_CalcCheckpointBlocksRequired(dev) -
+- dev->blocksInCheckpoint;
+- if (checkpointBlocks < 0)
+- checkpointBlocks = 0;
+- } else {
+- checkpointBlocks = 0;
+- }
+-
+- reservedChunks = ((reservedBlocks + checkpointBlocks) * dev->nChunksPerBlock);
+-
+- return (dev->nFreeChunks > reservedChunks);
+-}
+-
+-static int yaffs_AllocateChunk(yaffs_Device *dev, int useReserve,
+- yaffs_BlockInfo **blockUsedPtr)
+-{
+- int retVal;
+- yaffs_BlockInfo *bi;
+-
+- if (dev->allocationBlock < 0) {
+- /* Get next block to allocate off */
+- dev->allocationBlock = yaffs_FindBlockForAllocation(dev);
+- dev->allocationPage = 0;
+- }
+-
+- if (!useReserve && !yaffs_CheckSpaceForAllocation(dev)) {
+- /* Not enough space to allocate unless we're allowed to use the reserve. */
+- return -1;
+- }
+-
+- if (dev->nErasedBlocks < dev->nReservedBlocks
+- && dev->allocationPage == 0) {
+- T(YAFFS_TRACE_ALLOCATE, (TSTR("Allocating reserve" TENDSTR)));
+- }
+-
+- /* Next page please.... */
+- if (dev->allocationBlock >= 0) {
+- bi = yaffs_GetBlockInfo(dev, dev->allocationBlock);
+-
+- retVal = (dev->allocationBlock * dev->nChunksPerBlock) +
+- dev->allocationPage;
+- bi->pagesInUse++;
+- yaffs_SetChunkBit(dev, dev->allocationBlock,
+- dev->allocationPage);
+-
+- dev->allocationPage++;
+-
+- dev->nFreeChunks--;
+-
+- /* If the block is full set the state to full */
+- if (dev->allocationPage >= dev->nChunksPerBlock) {
+- bi->blockState = YAFFS_BLOCK_STATE_FULL;
+- dev->allocationBlock = -1;
+- }
+-
+- if (blockUsedPtr)
+- *blockUsedPtr = bi;
+-
+- return retVal;
+- }
+-
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" TENDSTR)));
+-
+- return -1;
+-}
+-
+-static int yaffs_GetErasedChunks(yaffs_Device *dev)
+-{
+- int n;
+-
+- n = dev->nErasedBlocks * dev->nChunksPerBlock;
+-
+- if (dev->allocationBlock > 0)
+- n += (dev->nChunksPerBlock - dev->allocationPage);
+-
+- return n;
+-
+-}
+-
+-static int yaffs_GarbageCollectBlock(yaffs_Device *dev, int block,
+- int wholeBlock)
+-{
+- int oldChunk;
+- int newChunk;
+- int markNAND;
+- int retVal = YAFFS_OK;
+- int cleanups = 0;
+- int i;
+- int isCheckpointBlock;
+- int matchingChunk;
+- int maxCopies;
+-
+- int chunksBefore = yaffs_GetErasedChunks(dev);
+- int chunksAfter;
+-
+- yaffs_ExtendedTags tags;
+-
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, block);
+-
+- yaffs_Object *object;
+-
+- isCheckpointBlock = (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT);
+-
+- bi->blockState = YAFFS_BLOCK_STATE_COLLECTING;
+-
+- T(YAFFS_TRACE_TRACING,
+- (TSTR("Collecting block %d, in use %d, shrink %d, wholeBlock %d" TENDSTR),
+- block,
+- bi->pagesInUse,
+- bi->hasShrinkHeader,
+- wholeBlock));
+-
+- /*yaffs_VerifyFreeChunks(dev); */
+-
+- bi->hasShrinkHeader = 0; /* clear the flag so that the block can erase */
+-
+- /* Take off the number of soft deleted entries because
+- * they're going to get really deleted during GC.
+- */
+- dev->nFreeChunks -= bi->softDeletions;
+-
+- dev->isDoingGC = 1;
+-
+- if (isCheckpointBlock ||
+- !yaffs_StillSomeChunkBits(dev, block)) {
+- T(YAFFS_TRACE_TRACING,
+- (TSTR
+- ("Collecting block %d that has no chunks in use" TENDSTR),
+- block));
+- yaffs_BlockBecameDirty(dev, block);
+- } else {
+-
+- __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
+-
+- yaffs_VerifyBlock(dev, bi, block);
+-
+- maxCopies = (wholeBlock) ? dev->nChunksPerBlock : 10;
+- oldChunk = block * dev->nChunksPerBlock + dev->gcChunk;
+-
+- for (/* init already done */;
+- retVal == YAFFS_OK &&
+- dev->gcChunk < dev->nChunksPerBlock &&
+- (bi->blockState == YAFFS_BLOCK_STATE_COLLECTING) &&
+- maxCopies > 0;
+- dev->gcChunk++, oldChunk++) {
+- if (yaffs_CheckChunkBit(dev, block, dev->gcChunk)) {
+-
+- /* This page is in use and might need to be copied off */
+-
+- maxCopies--;
+-
+- markNAND = 1;
+-
+- yaffs_InitialiseTags(&tags);
+-
+- yaffs_ReadChunkWithTagsFromNAND(dev, oldChunk,
+- buffer, &tags);
+-
+- object =
+- yaffs_FindObjectByNumber(dev,
+- tags.objectId);
+-
+- T(YAFFS_TRACE_GC_DETAIL,
+- (TSTR
+- ("Collecting chunk in block %d, %d %d %d " TENDSTR),
+- dev->gcChunk, tags.objectId, tags.chunkId,
+- tags.byteCount));
+-
+- if (object && !yaffs_SkipVerification(dev)) {
+- if (tags.chunkId == 0)
+- matchingChunk = object->hdrChunk;
+- else if (object->softDeleted)
+- matchingChunk = oldChunk; /* Defeat the test */
+- else
+- matchingChunk = yaffs_FindChunkInFile(object, tags.chunkId, NULL);
+-
+- if (oldChunk != matchingChunk)
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("gc: page in gc mismatch: %d %d %d %d"TENDSTR),
+- oldChunk, matchingChunk, tags.objectId, tags.chunkId));
+-
+- }
+-
+- if (!object) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("page %d in gc has no object: %d %d %d "
+- TENDSTR), oldChunk,
+- tags.objectId, tags.chunkId, tags.byteCount));
+- }
+-
+- if (object &&
+- object->deleted &&
+- object->softDeleted &&
+- tags.chunkId != 0) {
+-					/* Data chunk in a soft-deleted file: throw it away.
+-					 * No need to copy this, just forget about it and
+-					 * fix up the object.
+-					 */
+-
+-
+- object->nDataChunks--;
+-
+- if (object->nDataChunks <= 0) {
+-						/* remember to clean up the object */
+- dev->gcCleanupList[cleanups] =
+- tags.objectId;
+- cleanups++;
+- }
+- markNAND = 0;
+- } else if (0) {
+- /* Todo object && object->deleted && object->nDataChunks == 0 */
+- /* Deleted object header with no data chunks.
+- * Can be discarded and the file deleted.
+- */
+- object->hdrChunk = 0;
+- yaffs_FreeTnode(object->myDev,
+- object->variant.
+- fileVariant.top);
+- object->variant.fileVariant.top = NULL;
+- yaffs_DoGenericObjectDeletion(object);
+-
+- } else if (object) {
+- /* It's either a data chunk in a live file or
+- * an ObjectHeader, so we're interested in it.
+- * NB Need to keep the ObjectHeaders of deleted files
+- * until the whole file has been deleted off
+- */
+- tags.serialNumber++;
+-
+- dev->nGCCopies++;
+-
+- if (tags.chunkId == 0) {
+-						/* It is an object header.
+-						 * We need to clear the shrink-header flag first:
+-						 * its work is done, and if it is left in place
+-						 * it will mess up scanning.
+- */
+-
+- yaffs_ObjectHeader *oh;
+- oh = (yaffs_ObjectHeader *)buffer;
+- oh->isShrink = 0;
+- tags.extraIsShrinkHeader = 0;
+-
+- yaffs_VerifyObjectHeader(object, oh, &tags, 1);
+- }
+-
+- newChunk =
+- yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &tags, 1);
+-
+- if (newChunk < 0) {
+- retVal = YAFFS_FAIL;
+- } else {
+-
+- /* Ok, now fix up the Tnodes etc. */
+-
+- if (tags.chunkId == 0) {
+- /* It's a header */
+- object->hdrChunk = newChunk;
+- object->serial = tags.serialNumber;
+- } else {
+- /* It's a data chunk */
+- yaffs_PutChunkIntoFile
+- (object,
+- tags.chunkId,
+- newChunk, 0);
+- }
+- }
+- }
+-
+- if (retVal == YAFFS_OK)
+- yaffs_DeleteChunk(dev, oldChunk, markNAND, __LINE__);
+-
+- }
+- }
+-
+- yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
+-
+-
+- /* Do any required cleanups */
+- for (i = 0; i < cleanups; i++) {
+- /* Time to delete the file too */
+- object =
+- yaffs_FindObjectByNumber(dev,
+- dev->gcCleanupList[i]);
+- if (object) {
+- yaffs_FreeTnode(dev,
+- object->variant.fileVariant.
+- top);
+- object->variant.fileVariant.top = NULL;
+- T(YAFFS_TRACE_GC,
+- (TSTR
+- ("yaffs: About to finally delete object %d"
+- TENDSTR), object->objectId));
+- yaffs_DoGenericObjectDeletion(object);
+- object->myDev->nDeletedFiles--;
+- }
+-
+- }
+-
+- }
+-
+- yaffs_VerifyCollectedBlock(dev, bi, block);
+-
+- chunksAfter = yaffs_GetErasedChunks(dev);
+- if (chunksBefore >= chunksAfter) {
+- T(YAFFS_TRACE_GC,
+- (TSTR
+- ("gc did not increase free chunks before %d after %d"
+- TENDSTR), chunksBefore, chunksAfter));
+- }
+-
+- /* If the gc completed then clear the current gcBlock so that we find another. */
+- if (bi->blockState != YAFFS_BLOCK_STATE_COLLECTING) {
+- dev->gcBlock = -1;
+- dev->gcChunk = 0;
+- }
+-
+- dev->isDoingGC = 0;
+-
+- return retVal;
+-}
+-
+-/* New garbage collector
+- * If we're very low on erased blocks then we do aggressive garbage collection,
+- * otherwise we do "leisurely" garbage collection.
+- * Aggressive gc looks further (the whole array) and will accept less-dirty blocks.
+- * Passive gc only inspects smaller areas and will only accept dirtier blocks.
+- *
+- * The idea is to help clear out space in a more spread-out manner.
+- * Dunno if it really does anything useful.
+- */
+-static int yaffs_CheckGarbageCollection(yaffs_Device *dev)
+-{
+- int block;
+- int aggressive;
+- int gcOk = YAFFS_OK;
+- int maxTries = 0;
+-
+- int checkpointBlockAdjust;
+-
+- if (dev->isDoingGC) {
+- /* Bail out so we don't get recursive gc */
+- return YAFFS_OK;
+- }
+-
+- /* This loop should pass the first time.
+- * We'll only see looping here if the erase of the collected block fails.
+- */
+-
+- do {
+- maxTries++;
+-
+- checkpointBlockAdjust = yaffs_CalcCheckpointBlocksRequired(dev) - dev->blocksInCheckpoint;
+- if (checkpointBlockAdjust < 0)
+- checkpointBlockAdjust = 0;
+-
+- if (dev->nErasedBlocks < (dev->nReservedBlocks + checkpointBlockAdjust + 2)) {
+- /* We need a block soon...*/
+- aggressive = 1;
+- } else {
+- /* We're in no hurry */
+- aggressive = 0;
+- }
+-
+- if (dev->gcBlock <= 0) {
+- dev->gcBlock = yaffs_FindBlockForGarbageCollection(dev, aggressive);
+- dev->gcChunk = 0;
+- }
+-
+- block = dev->gcBlock;
+-
+- if (block > 0) {
+- dev->garbageCollections++;
+- if (!aggressive)
+- dev->passiveGarbageCollections++;
+-
+- T(YAFFS_TRACE_GC,
+- (TSTR
+- ("yaffs: GC erasedBlocks %d aggressive %d" TENDSTR),
+- dev->nErasedBlocks, aggressive));
+-
+- gcOk = yaffs_GarbageCollectBlock(dev, block, aggressive);
+- }
+-
+- if (dev->nErasedBlocks < (dev->nReservedBlocks) && block > 0) {
+- T(YAFFS_TRACE_GC,
+- (TSTR
+- ("yaffs: GC !!!no reclaim!!! erasedBlocks %d after try %d block %d"
+- TENDSTR), dev->nErasedBlocks, maxTries, block));
+- }
+- } while ((dev->nErasedBlocks < dev->nReservedBlocks) &&
+- (block > 0) &&
+- (maxTries < 2));
+-
+- return aggressive ? gcOk : YAFFS_OK;
+-}
+-
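A minimal sketch of the aggressive/leisurely decision used above, with the device fields written out as plain parameters (the helper name and parameters are illustrative, not part of yaffs):

    /* Returns 1 when the erased-block headroom is low enough to force aggressive gc. */
    static int gc_should_be_aggressive(int erased_blocks, int reserved_blocks,
                                       int checkpt_blocks_required, int blocks_in_checkpt)
    {
            int adjust = checkpt_blocks_required - blocks_in_checkpt;
            if (adjust < 0)
                    adjust = 0;
            return erased_blocks < (reserved_blocks + adjust + 2);
    }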
+-/*------------------------- TAGS --------------------------------*/
+-
+-static int yaffs_TagsMatch(const yaffs_ExtendedTags *tags, int objectId,
+- int chunkInObject)
+-{
+- return (tags->chunkId == chunkInObject &&
+- tags->objectId == objectId && !tags->chunkDeleted) ? 1 : 0;
+-
+-}
+-
+-
+-/*-------------------- Data file manipulation -----------------*/
+-
+-static int yaffs_FindChunkInFile(yaffs_Object *in, int chunkInInode,
+- yaffs_ExtendedTags *tags)
+-{
+-	/* Get the Tnode, then get the level 0 chunk offset */
+- yaffs_Tnode *tn;
+- int theChunk = -1;
+- yaffs_ExtendedTags localTags;
+- int retVal = -1;
+-
+- yaffs_Device *dev = in->myDev;
+-
+- if (!tags) {
+- /* Passed a NULL, so use our own tags space */
+- tags = &localTags;
+- }
+-
+- tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode);
+-
+- if (tn) {
+- theChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
+-
+- retVal =
+- yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId,
+- chunkInInode);
+- }
+- return retVal;
+-}
+-
+-static int yaffs_FindAndDeleteChunkInFile(yaffs_Object *in, int chunkInInode,
+- yaffs_ExtendedTags *tags)
+-{
+-	/* Get the Tnode, then get the level 0 chunk offset */
+- yaffs_Tnode *tn;
+- int theChunk = -1;
+- yaffs_ExtendedTags localTags;
+-
+- yaffs_Device *dev = in->myDev;
+- int retVal = -1;
+-
+- if (!tags) {
+- /* Passed a NULL, so use our own tags space */
+- tags = &localTags;
+- }
+-
+- tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode);
+-
+- if (tn) {
+-
+- theChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
+-
+- retVal =
+- yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId,
+- chunkInInode);
+-
+- /* Delete the entry in the filestructure (if found) */
+- if (retVal != -1)
+- yaffs_PutLevel0Tnode(dev, tn, chunkInInode, 0);
+- }
+-
+- return retVal;
+-}
+-
+-#ifdef YAFFS_PARANOID
+-
+-static int yaffs_CheckFileSanity(yaffs_Object *in)
+-{
+- int chunk;
+- int nChunks;
+- int fSize;
+- int failed = 0;
+- int objId;
+- yaffs_Tnode *tn;
+- yaffs_Tags localTags;
+- yaffs_Tags *tags = &localTags;
+- int theChunk;
+- int chunkDeleted;
+-
+- if (in->variantType != YAFFS_OBJECT_TYPE_FILE)
+- return YAFFS_FAIL;
+-
+- objId = in->objectId;
+- fSize = in->variant.fileVariant.fileSize;
+- nChunks =
+- (fSize + in->myDev->nDataBytesPerChunk - 1) / in->myDev->nDataBytesPerChunk;
+-
+- for (chunk = 1; chunk <= nChunks; chunk++) {
+- tn = yaffs_FindLevel0Tnode(in->myDev, &in->variant.fileVariant,
+- chunk);
+-
+- if (tn) {
+-
+- theChunk = yaffs_GetChunkGroupBase(dev, tn, chunk);
+-
+- if (yaffs_CheckChunkBits
+- (dev, theChunk / dev->nChunksPerBlock,
+- theChunk % dev->nChunksPerBlock)) {
+-
+- yaffs_ReadChunkTagsFromNAND(in->myDev, theChunk,
+- tags,
+- &chunkDeleted);
+- if (yaffs_TagsMatch
+- (tags, in->objectId, chunk, chunkDeleted)) {
+- /* found it; */
+-
+- }
+- } else {
+-
+- failed = 1;
+- }
+-
+- } else {
+- /* T(("No level 0 found for %d\n", chunk)); */
+- }
+- }
+-
+- return failed ? YAFFS_FAIL : YAFFS_OK;
+-}
+-
+-#endif
+-
+-static int yaffs_PutChunkIntoFile(yaffs_Object *in, int chunkInInode,
+- int chunkInNAND, int inScan)
+-{
+- /* NB inScan is zero unless scanning.
+- * For forward scanning, inScan is > 0;
+- * for backward scanning inScan is < 0
+- */
+-
+- yaffs_Tnode *tn;
+- yaffs_Device *dev = in->myDev;
+- int existingChunk;
+- yaffs_ExtendedTags existingTags;
+- yaffs_ExtendedTags newTags;
+- unsigned existingSerial, newSerial;
+-
+- if (in->variantType != YAFFS_OBJECT_TYPE_FILE) {
+-		/* Just ignore an attempt at putting a chunk into a non-file during scanning.
+-		 * If it is not during scanning then something went wrong!
+- */
+- if (!inScan) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("yaffs tragedy:attempt to put data chunk into a non-file"
+- TENDSTR)));
+- YBUG();
+- }
+-
+- yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
+- return YAFFS_OK;
+- }
+-
+- tn = yaffs_AddOrFindLevel0Tnode(dev,
+- &in->variant.fileVariant,
+- chunkInInode,
+- NULL);
+- if (!tn)
+- return YAFFS_FAIL;
+-
+- existingChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
+-
+- if (inScan != 0) {
+- /* If we're scanning then we need to test for duplicates
+- * NB This does not need to be efficient since it should only ever
+- * happen when the power fails during a write, then only one
+- * chunk should ever be affected.
+- *
+- * Correction for YAFFS2: This could happen quite a lot and we need to think about efficiency! TODO
+- * Update: For backward scanning we don't need to re-read tags so this is quite cheap.
+- */
+-
+- if (existingChunk > 0) {
+-			/* NB Right now the existing chunk will not be the real chunkId if the device >= 32MB
+- * thus we have to do a FindChunkInFile to get the real chunk id.
+- *
+- * We have a duplicate now we need to decide which one to use:
+- *
+- * Backwards scanning YAFFS2: The old one is what we use, dump the new one.
+- * Forward scanning YAFFS2: The new one is what we use, dump the old one.
+- * YAFFS1: Get both sets of tags and compare serial numbers.
+- */
+-
+- if (inScan > 0) {
+- /* Only do this for forward scanning */
+- yaffs_ReadChunkWithTagsFromNAND(dev,
+- chunkInNAND,
+- NULL, &newTags);
+-
+- /* Do a proper find */
+- existingChunk =
+- yaffs_FindChunkInFile(in, chunkInInode,
+- &existingTags);
+- }
+-
+- if (existingChunk <= 0) {
+- /*Hoosterman - how did this happen? */
+-
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("yaffs tragedy: existing chunk < 0 in scan"
+- TENDSTR)));
+-
+- }
+-
+- /* NB The deleted flags should be false, otherwise the chunks will
+- * not be loaded during a scan
+- */
+-
+- if (inScan > 0) {
+- newSerial = newTags.serialNumber;
+- existingSerial = existingTags.serialNumber;
+- }
+-
+- if ((inScan > 0) &&
+- (in->myDev->isYaffs2 ||
+- existingChunk <= 0 ||
+- ((existingSerial + 1) & 3) == newSerial)) {
+- /* Forward scanning.
+- * Use new
+- * Delete the old one and drop through to update the tnode
+- */
+- yaffs_DeleteChunk(dev, existingChunk, 1,
+- __LINE__);
+- } else {
+- /* Backward scanning or we want to use the existing one
+- * Use existing.
+- * Delete the new one and return early so that the tnode isn't changed
+- */
+- yaffs_DeleteChunk(dev, chunkInNAND, 1,
+- __LINE__);
+- return YAFFS_OK;
+- }
+- }
+-
+- }
+-
+- if (existingChunk == 0)
+- in->nDataChunks++;
+-
+- yaffs_PutLevel0Tnode(dev, tn, chunkInInode, chunkInNAND);
+-
+- return YAFFS_OK;
+-}
+-
+-static int yaffs_ReadChunkDataFromObject(yaffs_Object *in, int chunkInInode,
+- __u8 *buffer)
+-{
+- int chunkInNAND = yaffs_FindChunkInFile(in, chunkInInode, NULL);
+-
+- if (chunkInNAND >= 0)
+- return yaffs_ReadChunkWithTagsFromNAND(in->myDev, chunkInNAND,
+- buffer, NULL);
+- else {
+- T(YAFFS_TRACE_NANDACCESS,
+- (TSTR("Chunk %d not found zero instead" TENDSTR),
+- chunkInNAND));
+- /* get sane (zero) data if you read a hole */
+- memset(buffer, 0, in->myDev->nDataBytesPerChunk);
+- return 0;
+- }
+-
+-}
+-
+-void yaffs_DeleteChunk(yaffs_Device *dev, int chunkId, int markNAND, int lyn)
+-{
+- int block;
+- int page;
+- yaffs_ExtendedTags tags;
+- yaffs_BlockInfo *bi;
+-
+- if (chunkId <= 0)
+- return;
+-
+- dev->nDeletions++;
+- block = chunkId / dev->nChunksPerBlock;
+- page = chunkId % dev->nChunksPerBlock;
+-
+-
+- if (!yaffs_CheckChunkBit(dev, block, page))
+- T(YAFFS_TRACE_VERIFY,
+- (TSTR("Deleting invalid chunk %d"TENDSTR),
+- chunkId));
+-
+- bi = yaffs_GetBlockInfo(dev, block);
+-
+- T(YAFFS_TRACE_DELETION,
+- (TSTR("line %d delete of chunk %d" TENDSTR), lyn, chunkId));
+-
+- if (markNAND &&
+- bi->blockState != YAFFS_BLOCK_STATE_COLLECTING && !dev->isYaffs2) {
+-
+- yaffs_InitialiseTags(&tags);
+-
+- tags.chunkDeleted = 1;
+-
+- yaffs_WriteChunkWithTagsToNAND(dev, chunkId, NULL, &tags);
+- yaffs_HandleUpdateChunk(dev, chunkId, &tags);
+- } else {
+- dev->nUnmarkedDeletions++;
+- }
+-
+- /* Pull out of the management area.
+- * If the whole block became dirty, this will kick off an erasure.
+- */
+- if (bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING ||
+- bi->blockState == YAFFS_BLOCK_STATE_FULL ||
+- bi->blockState == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+- bi->blockState == YAFFS_BLOCK_STATE_COLLECTING) {
+- dev->nFreeChunks++;
+-
+- yaffs_ClearChunkBit(dev, block, page);
+-
+- bi->pagesInUse--;
+-
+- if (bi->pagesInUse == 0 &&
+- !bi->hasShrinkHeader &&
+- bi->blockState != YAFFS_BLOCK_STATE_ALLOCATING &&
+- bi->blockState != YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+- yaffs_BlockBecameDirty(dev, block);
+- }
+-
+- }
+-
+-}
+-
+-static int yaffs_WriteChunkDataToObject(yaffs_Object *in, int chunkInInode,
+- const __u8 *buffer, int nBytes,
+- int useReserve)
+-{
+-	/* Find the old chunk. Need to do this to get the serial number.
+-	 * Write the new one and patch it into the tree.
+- * Invalidate old tags.
+- */
+-
+- int prevChunkId;
+- yaffs_ExtendedTags prevTags;
+-
+- int newChunkId;
+- yaffs_ExtendedTags newTags;
+-
+- yaffs_Device *dev = in->myDev;
+-
+- yaffs_CheckGarbageCollection(dev);
+-
+- /* Get the previous chunk at this location in the file if it exists */
+- prevChunkId = yaffs_FindChunkInFile(in, chunkInInode, &prevTags);
+-
+- /* Set up new tags */
+- yaffs_InitialiseTags(&newTags);
+-
+- newTags.chunkId = chunkInInode;
+- newTags.objectId = in->objectId;
+- newTags.serialNumber =
+- (prevChunkId >= 0) ? prevTags.serialNumber + 1 : 1;
+- newTags.byteCount = nBytes;
+-
+- if (nBytes < 1 || nBytes > dev->totalBytesPerChunk) {
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("Writing %d bytes to chunk!!!!!!!!!" TENDSTR), nBytes));
+- YBUG();
+- }
+-
+- newChunkId =
+- yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &newTags,
+- useReserve);
+-
+- if (newChunkId >= 0) {
+- yaffs_PutChunkIntoFile(in, chunkInInode, newChunkId, 0);
+-
+- if (prevChunkId >= 0)
+- yaffs_DeleteChunk(dev, prevChunkId, 1, __LINE__);
+-
+- yaffs_CheckFileSanity(in);
+- }
+- return newChunkId;
+-
+-}
+-
+-/* UpdateObjectHeader updates the header on NAND for an object.
+- * If name is not NULL, then that new name is used.
+- */
+-int yaffs_UpdateObjectHeader(yaffs_Object *in, const YCHAR *name, int force,
+- int isShrink, int shadows)
+-{
+-
+- yaffs_BlockInfo *bi;
+-
+- yaffs_Device *dev = in->myDev;
+-
+- int prevChunkId;
+- int retVal = 0;
+- int result = 0;
+-
+- int newChunkId;
+- yaffs_ExtendedTags newTags;
+- yaffs_ExtendedTags oldTags;
+-
+- __u8 *buffer = NULL;
+- YCHAR oldName[YAFFS_MAX_NAME_LENGTH + 1];
+-
+- yaffs_ObjectHeader *oh = NULL;
++ yaffs_obj_t *in;
++ YCHAR *str = NULL;
+
+- yaffs_strcpy(oldName, _Y("silly old name"));
++ yaffs_dev_t *dev = parent->my_dev;
+
++ /* Check if the entry exists. If it does then fail the call since we don't want a dup.*/
++ if (yaffs_find_by_name(parent, name))
++ return NULL;
+
+- if (!in->fake ||
+- in == dev->rootDir || /* The rootDir should also be saved */
+- force) {
++ if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
++ str = yaffs_clone_str(aliasString);
++ if (!str)
++ return NULL;
++ }
+
+- yaffs_CheckGarbageCollection(dev);
+- yaffs_CheckObjectDetailsLoaded(in);
++ in = yaffs_new_obj(dev, -1, type);
+
+- buffer = yaffs_GetTempBuffer(in->myDev, __LINE__);
+- oh = (yaffs_ObjectHeader *) buffer;
++ if (!in){
++ if(str)
++ YFREE(str);
++ return NULL;
++ }
+
+- prevChunkId = in->hdrChunk;
+
+- if (prevChunkId > 0) {
+- result = yaffs_ReadChunkWithTagsFromNAND(dev, prevChunkId,
+- buffer, &oldTags);
+
+- yaffs_VerifyObjectHeader(in, oh, &oldTags, 0);
+
+- memcpy(oldName, oh->name, sizeof(oh->name));
+- }
+
+- memset(buffer, 0xFF, dev->nDataBytesPerChunk);
++ if (in) {
++ in->hdr_chunk = 0;
++ in->valid = 1;
++ in->variant_type = type;
+
+- oh->type = in->variantType;
+- oh->yst_mode = in->yst_mode;
+- oh->shadowsObject = oh->inbandShadowsObject = shadows;
++ in->yst_mode = mode;
+
+ #ifdef CONFIG_YAFFS_WINCE
+- oh->win_atime[0] = in->win_atime[0];
+- oh->win_ctime[0] = in->win_ctime[0];
+- oh->win_mtime[0] = in->win_mtime[0];
+- oh->win_atime[1] = in->win_atime[1];
+- oh->win_ctime[1] = in->win_ctime[1];
+- oh->win_mtime[1] = in->win_mtime[1];
++ yfsd_win_file_time_now(in->win_atime);
++ in->win_ctime[0] = in->win_mtime[0] = in->win_atime[0];
++ in->win_ctime[1] = in->win_mtime[1] = in->win_atime[1];
++
+ #else
+- oh->yst_uid = in->yst_uid;
+- oh->yst_gid = in->yst_gid;
+- oh->yst_atime = in->yst_atime;
+- oh->yst_mtime = in->yst_mtime;
+- oh->yst_ctime = in->yst_ctime;
+- oh->yst_rdev = in->yst_rdev;
++ in->yst_atime = in->yst_mtime = in->yst_ctime = Y_CURRENT_TIME;
++
++ in->yst_rdev = rdev;
++ in->yst_uid = uid;
++ in->yst_gid = gid;
+ #endif
+- if (in->parent)
+- oh->parentObjectId = in->parent->objectId;
+- else
+- oh->parentObjectId = 0;
++ in->n_data_chunks = 0;
+
+- if (name && *name) {
+- memset(oh->name, 0, sizeof(oh->name));
+- yaffs_strncpy(oh->name, name, YAFFS_MAX_NAME_LENGTH);
+- } else if (prevChunkId >= 0)
+- memcpy(oh->name, oldName, sizeof(oh->name));
+- else
+- memset(oh->name, 0, sizeof(oh->name));
++ yaffs_set_obj_name(in, name);
++ in->dirty = 1;
+
+- oh->isShrink = isShrink;
++ yaffs_add_obj_to_dir(parent, in);
+
+- switch (in->variantType) {
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- /* Should not happen */
+- break;
+- case YAFFS_OBJECT_TYPE_FILE:
+- oh->fileSize =
+- (oh->parentObjectId == YAFFS_OBJECTID_DELETED
+- || oh->parentObjectId ==
+- YAFFS_OBJECTID_UNLINKED) ? 0 : in->variant.
+- fileVariant.fileSize;
++ in->my_dev = parent->my_dev;
++
++ switch (type) {
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ in->variant.symlink_variant.alias = str;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+- oh->equivalentObjectId =
+- in->variant.hardLinkVariant.equivalentObjectId;
+- break;
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- /* Do nothing */
++ in->variant.hardlink_variant.equiv_obj =
++ equiv_obj;
++ in->variant.hardlink_variant.equiv_id =
++ equiv_obj->obj_id;
++ ylist_add(&in->hard_links, &equiv_obj->hard_links);
+ break;
++ case YAFFS_OBJECT_TYPE_FILE:
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+- /* Do nothing */
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- yaffs_strncpy(oh->alias,
+- in->variant.symLinkVariant.alias,
+- YAFFS_MAX_ALIAS_LENGTH);
+- oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* do nothing */
+ break;
+ }
+
+- /* Tags */
+- yaffs_InitialiseTags(&newTags);
+- in->serial++;
+- newTags.chunkId = 0;
+- newTags.objectId = in->objectId;
+- newTags.serialNumber = in->serial;
+-
+- /* Add extra info for file header */
+-
+- newTags.extraHeaderInfoAvailable = 1;
+- newTags.extraParentObjectId = oh->parentObjectId;
+- newTags.extraFileLength = oh->fileSize;
+- newTags.extraIsShrinkHeader = oh->isShrink;
+- newTags.extraEquivalentObjectId = oh->equivalentObjectId;
+- newTags.extraShadows = (oh->shadowsObject > 0) ? 1 : 0;
+- newTags.extraObjectType = in->variantType;
+-
+- yaffs_VerifyObjectHeader(in, oh, &newTags, 1);
+-
+- /* Create new chunk in NAND */
+- newChunkId =
+- yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &newTags,
+- (prevChunkId >= 0) ? 1 : 0);
+-
+- if (newChunkId >= 0) {
+-
+- in->hdrChunk = newChunkId;
+-
+- if (prevChunkId >= 0) {
+- yaffs_DeleteChunk(dev, prevChunkId, 1,
+- __LINE__);
+- }
+-
+- if (!yaffs_ObjectHasCachedWriteData(in))
+- in->dirty = 0;
+-
+- /* If this was a shrink, then mark the block that the chunk lives on */
+- if (isShrink) {
+- bi = yaffs_GetBlockInfo(in->myDev,
+- newChunkId / in->myDev->nChunksPerBlock);
+- bi->hasShrinkHeader = 1;
+- }
+-
++ if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
++ /* Could not create the object header, fail the creation */
++ yaffs_del_obj(in);
++ in = NULL;
+ }
+
+- retVal = newChunkId;
+-
++ yaffs_update_parent(parent);
+ }
+
+- if (buffer)
+- yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
+-
+- return retVal;
++ return in;
+ }
+
+-/*------------------------ Short Operations Cache ----------------------------------------
+- * In many situations where there is no high level buffering (eg WinCE) a lot of
+- * reads might be short sequential reads, and a lot of writes may be short
+- * sequential writes. eg. scanning/writing a jpeg file.
+- * In these cases, a short read/write cache can provide a huge performance benefit
+- * with dumb-as-a-rock code.
+- * In Linux, the page cache provides read buffering and the short op cache provides write
+- * buffering.
+- *
+- * There are a limited number (~10) of cache chunks per device so that we don't
+- * need a very intelligent search.
+- */
+-
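A minimal sketch of the linear lookup the comment above describes, over a simplified stand-in cache array (the struct and names are illustrative, not the yaffs types):

    struct chunk_cache { void *object; int chunk_id; int dirty; };

    /* With only ~10 slots per device, a plain linear scan is all the "search" needed. */
    static struct chunk_cache *cache_find(struct chunk_cache *cache, int n_caches,
                                          void *object, int chunk_id)
    {
            int i;
            for (i = 0; i < n_caches; i++) {
                    if (cache[i].object == object && cache[i].chunk_id == chunk_id)
                            return &cache[i];
            }
            return NULL;
    }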
+-static int yaffs_ObjectHasCachedWriteData(yaffs_Object *obj)
++yaffs_obj_t *yaffs_create_file(yaffs_obj_t *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid)
+ {
+- yaffs_Device *dev = obj->myDev;
+- int i;
+- yaffs_ChunkCache *cache;
+- int nCaches = obj->myDev->nShortOpCaches;
+-
+- for (i = 0; i < nCaches; i++) {
+- cache = &dev->srCache[i];
+- if (cache->object == obj &&
+- cache->dirty)
+- return 1;
+- }
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
++ uid, gid, NULL, NULL, 0);
++}
+
+- return 0;
++yaffs_obj_t *yaffs_create_dir(yaffs_obj_t *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid)
++{
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
++ mode, uid, gid, NULL, NULL, 0);
+ }
+
++yaffs_obj_t *yaffs_create_special(yaffs_obj_t *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid, __u32 rdev)
++{
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
++ uid, gid, NULL, NULL, rdev);
++}
+
+-static void yaffs_FlushFilesChunkCache(yaffs_Object *obj)
++yaffs_obj_t *yaffs_create_symlink(yaffs_obj_t *parent, const YCHAR *name,
++ __u32 mode, __u32 uid, __u32 gid,
++ const YCHAR *alias)
+ {
+- yaffs_Device *dev = obj->myDev;
+- int lowest = -99; /* Stop compiler whining. */
+- int i;
+- yaffs_ChunkCache *cache;
+- int chunkWritten = 0;
+- int nCaches = obj->myDev->nShortOpCaches;
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
++ uid, gid, NULL, alias, 0);
++}
+
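A hypothetical usage sketch for the creation helpers above (the parent directory, names and modes are placeholders, not taken from the patch):

    static yaffs_obj_t *make_log_file(yaffs_obj_t *parent)
    {
            yaffs_obj_t *dir = yaffs_create_dir(parent, _Y("logs"), 0755, 0, 0);
            if (!dir)
                    return NULL;            /* duplicate name or out of space */
            return yaffs_create_file(dir, _Y("boot.log"), 0644, 0, 0);
    }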
+- if (nCaches > 0) {
+- do {
+- cache = NULL;
++/* yaffs_link_obj returns the equivalent object on success, or NULL on failure. */
++yaffs_obj_t *yaffs_link_obj(yaffs_obj_t *parent, const YCHAR *name,
++ yaffs_obj_t *equiv_obj)
++{
++ /* Get the real object in case we were fed a hard link as an equivalent object */
++ equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
+
+- /* Find the dirty cache for this object with the lowest chunk id. */
+- for (i = 0; i < nCaches; i++) {
+- if (dev->srCache[i].object == obj &&
+- dev->srCache[i].dirty) {
+- if (!cache
+- || dev->srCache[i].chunkId <
+- lowest) {
+- cache = &dev->srCache[i];
+- lowest = cache->chunkId;
+- }
+- }
+- }
++ if (yaffs_create_obj
++ (YAFFS_OBJECT_TYPE_HARDLINK, parent, name, 0, 0, 0,
++ equiv_obj, NULL, 0)) {
++ return equiv_obj;
++ } else {
++ return NULL;
++ }
+
+- if (cache && !cache->locked) {
+- /* Write it out and free it up */
++}
+
+- chunkWritten =
+- yaffs_WriteChunkDataToObject(cache->object,
+- cache->chunkId,
+- cache->data,
+- cache->nBytes,
+- 1);
+- cache->dirty = 0;
+- cache->object = NULL;
+- }
++static int yaffs_change_obj_name(yaffs_obj_t *obj, yaffs_obj_t *new_dir,
++ const YCHAR *new_name, int force, int shadows)
++{
++ int unlinkOp;
++ int deleteOp;
+
+- } while (cache && chunkWritten > 0);
++ yaffs_obj_t *existingTarget;
+
+- if (cache) {
+- /* Hoosterman, disk full while writing cache out. */
+- T(YAFFS_TRACE_ERROR,
+- (TSTR("yaffs tragedy: no space during cache write" TENDSTR)));
++ if (new_dir == NULL)
++ new_dir = obj->parent; /* use the old directory */
+
+- }
++ if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR
++ ("tragedy: yaffs_change_obj_name: new_dir is not a directory"
++ TENDSTR)));
++ YBUG();
+ }
+
+-}
++ /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
++ if (obj->my_dev->param.is_yaffs2)
++ unlinkOp = (new_dir == obj->my_dev->unlinked_dir);
++ else
++ unlinkOp = (new_dir == obj->my_dev->unlinked_dir
++ && obj->variant_type == YAFFS_OBJECT_TYPE_FILE);
+
+-/*yaffs_FlushEntireDeviceCache(dev)
+- *
+- *
+- */
++ deleteOp = (new_dir == obj->my_dev->del_dir);
+
+-void yaffs_FlushEntireDeviceCache(yaffs_Device *dev)
+-{
+- yaffs_Object *obj;
+- int nCaches = dev->nShortOpCaches;
+- int i;
++ existingTarget = yaffs_find_by_name(new_dir, new_name);
+
+- /* Find a dirty object in the cache and flush it...
+- * until there are no further dirty objects.
++ /* If the object is a file going into the unlinked directory,
++ * then it is OK to just stuff it in since duplicate names are allowed.
++	 * Otherwise only proceed if the new name does not exist and if we're putting
++ * it into a directory.
+ */
+- do {
+- obj = NULL;
+- for (i = 0; i < nCaches && !obj; i++) {
+- if (dev->srCache[i].object &&
+- dev->srCache[i].dirty)
+- obj = dev->srCache[i].object;
+-
+- }
+- if (obj)
+- yaffs_FlushFilesChunkCache(obj);
+-
+- } while (obj);
+-
+-}
++ if ((unlinkOp ||
++ deleteOp ||
++ force ||
++ (shadows > 0) ||
++ !existingTarget) &&
++ new_dir->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_set_obj_name(obj, new_name);
++ obj->dirty = 1;
+
++ yaffs_add_obj_to_dir(new_dir, obj);
+
+-/* Grab us a cache chunk for use.
+- * First look for an empty one.
+- * Then look for the least recently used non-dirty one.
+- * Then look for the least recently used dirty one, flush it and look again.
+- */
+-static yaffs_ChunkCache *yaffs_GrabChunkCacheWorker(yaffs_Device *dev)
+-{
+- int i;
++ if (unlinkOp)
++ obj->unlinked = 1;
+
+- if (dev->nShortOpCaches > 0) {
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (!dev->srCache[i].object)
+- return &dev->srCache[i];
+- }
++ /* If it is a deletion then we mark it as a shrink for gc purposes. */
++ if (yaffs_update_oh(obj, new_name, 0, deleteOp, shadows, NULL) >= 0)
++ return YAFFS_OK;
+ }
+
+- return NULL;
++ return YAFFS_FAIL;
+ }
+
+-static yaffs_ChunkCache *yaffs_GrabChunkCache(yaffs_Device *dev)
++int yaffs_rename_obj(yaffs_obj_t *old_dir, const YCHAR *old_name,
++ yaffs_obj_t *new_dir, const YCHAR *new_name)
+ {
+- yaffs_ChunkCache *cache;
+- yaffs_Object *theObj;
+- int usage;
+- int i;
+- int pushout;
+-
+- if (dev->nShortOpCaches > 0) {
+- /* Try find a non-dirty one... */
+-
+- cache = yaffs_GrabChunkCacheWorker(dev);
++ yaffs_obj_t *obj = NULL;
++ yaffs_obj_t *existingTarget = NULL;
++ int force = 0;
++ int result;
++ yaffs_dev_t *dev;
+
+- if (!cache) {
+-			/* They were all dirty, find the least recently used object and flush
+-			 * its cache, then find again.
+-			 * NB what's here is not very accurate: we actually flush the object
+-			 * that owns the least recently used page.
+- */
+
+- /* With locking we can't assume we can use entry zero */
++ if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
++ YBUG();
++ if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
++ YBUG();
+
+- theObj = NULL;
+- usage = -1;
+- cache = NULL;
+- pushout = -1;
++ dev = old_dir->my_dev;
+
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].object &&
+- !dev->srCache[i].locked &&
+- (dev->srCache[i].lastUse < usage || !cache)) {
+- usage = dev->srCache[i].lastUse;
+- theObj = dev->srCache[i].object;
+- cache = &dev->srCache[i];
+- pushout = i;
+- }
+- }
++#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
++	/* Special case for case insensitive systems (eg. WinCE).
++ * While look-up is case insensitive, the name isn't.
++ * Therefore we might want to change x.txt to X.txt
++ */
++ if (old_dir == new_dir && yaffs_strcmp(old_name, new_name) == 0)
++ force = 1;
++#endif
+
+- if (!cache || cache->dirty) {
+- /* Flush and try again */
+- yaffs_FlushFilesChunkCache(theObj);
+- cache = yaffs_GrabChunkCacheWorker(dev);
+- }
++ if(yaffs_strnlen(new_name,YAFFS_MAX_NAME_LENGTH+1) > YAFFS_MAX_NAME_LENGTH)
++ /* ENAMETOOLONG */
++ return YAFFS_FAIL;
+
+- }
+- return cache;
+- } else
+- return NULL;
++ obj = yaffs_find_by_name(old_dir, old_name);
+
+-}
++ if (obj && obj->rename_allowed) {
+
+-/* Find a cached chunk */
+-static yaffs_ChunkCache *yaffs_FindChunkCache(const yaffs_Object *obj,
+- int chunkId)
+-{
+- yaffs_Device *dev = obj->myDev;
+- int i;
+- if (dev->nShortOpCaches > 0) {
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].object == obj &&
+- dev->srCache[i].chunkId == chunkId) {
+- dev->cacheHits++;
++ /* Now do the handling for an existing target, if there is one */
+
+- return &dev->srCache[i];
+- }
++ existingTarget = yaffs_find_by_name(new_dir, new_name);
++ if (existingTarget &&
++ existingTarget->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
++ !ylist_empty(&existingTarget->variant.dir_variant.children)) {
++ /* There is a target that is a non-empty directory, so we fail */
++ return YAFFS_FAIL; /* EEXIST or ENOTEMPTY */
++ } else if (existingTarget && existingTarget != obj) {
++ /* Nuke the target first, using shadowing,
++ * but only if it isn't the same object.
++ *
++ * Note we must disable gc otherwise it can mess up the shadowing.
++ *
++ */
++ dev->gc_disable=1;
++ yaffs_change_obj_name(obj, new_dir, new_name, force,
++ existingTarget->obj_id);
++ existingTarget->is_shadowed = 1;
++ yaffs_unlink_obj(existingTarget);
++ dev->gc_disable=0;
+ }
++
++ result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
++
++ yaffs_update_parent(old_dir);
++ if(new_dir != old_dir)
++ yaffs_update_parent(new_dir);
++
++ return result;
+ }
+- return NULL;
++ return YAFFS_FAIL;
+ }
+
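A hypothetical caller of yaffs_rename_obj(), showing the failure cases the comments above mention (the directories and names here are placeholders):

    static int rename_example(yaffs_obj_t *old_dir, yaffs_obj_t *new_dir)
    {
            /* Fails on a non-empty existing target directory or an over-long name;
             * the caller maps YAFFS_FAIL to EEXIST / ENOTEMPTY / ENAMETOOLONG. */
            return yaffs_rename_obj(old_dir, _Y("x.txt"), new_dir, _Y("y.txt"));
    }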
+-/* Mark the chunk for the least recently used algorithm */
+-static void yaffs_UseChunkCache(yaffs_Device *dev, yaffs_ChunkCache *cache,
+- int isAWrite)
++/*------------------------- Block Management and Page Allocation ----------------*/
++
++static int yaffs_init_blocks(yaffs_dev_t *dev)
+ {
++ int nBlocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+- if (dev->nShortOpCaches > 0) {
+- if (dev->srLastUse < 0 || dev->srLastUse > 100000000) {
+- /* Reset the cache usages */
+- int i;
+- for (i = 1; i < dev->nShortOpCaches; i++)
+- dev->srCache[i].lastUse = 0;
++ dev->block_info = NULL;
++ dev->chunk_bits = NULL;
+
+- dev->srLastUse = 0;
+- }
++ dev->alloc_block = -1; /* force it to get a new one */
+
+- dev->srLastUse++;
++	/* If the first allocation strategy fails, try the alternate one */
++ dev->block_info = YMALLOC(nBlocks * sizeof(yaffs_block_info_t));
++ if (!dev->block_info) {
++ dev->block_info = YMALLOC_ALT(nBlocks * sizeof(yaffs_block_info_t));
++ dev->block_info_alt = 1;
++ } else
++ dev->block_info_alt = 0;
+
+- cache->lastUse = dev->srLastUse;
++ if (dev->block_info) {
++ /* Set up dynamic blockinfo stuff. */
++ dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8; /* round up bytes */
++ dev->chunk_bits = YMALLOC(dev->chunk_bit_stride * nBlocks);
++ if (!dev->chunk_bits) {
++ dev->chunk_bits = YMALLOC_ALT(dev->chunk_bit_stride * nBlocks);
++ dev->chunk_bits_alt = 1;
++ } else
++ dev->chunk_bits_alt = 0;
++ }
+
+- if (isAWrite)
+- cache->dirty = 1;
++ if (dev->block_info && dev->chunk_bits) {
++ memset(dev->block_info, 0, nBlocks * sizeof(yaffs_block_info_t));
++ memset(dev->chunk_bits, 0, dev->chunk_bit_stride * nBlocks);
++ return YAFFS_OK;
+ }
++
++ return YAFFS_FAIL;
+ }
+
+-/* Invalidate a single cache page.
+- * Do this when a whole page gets written,
+- * ie the short cache for this page is no longer valid.
+- */
+-static void yaffs_InvalidateChunkCache(yaffs_Object *object, int chunkId)
++static void yaffs_deinit_blocks(yaffs_dev_t *dev)
+ {
+- if (object->myDev->nShortOpCaches > 0) {
+- yaffs_ChunkCache *cache = yaffs_FindChunkCache(object, chunkId);
++ if (dev->block_info_alt && dev->block_info)
++ YFREE_ALT(dev->block_info);
++ else if (dev->block_info)
++ YFREE(dev->block_info);
+
+- if (cache)
+- cache->object = NULL;
+- }
+-}
++ dev->block_info_alt = 0;
+
+-/* Invalidate all the cache pages associated with this object
+- * Do this whenever the file is deleted or resized.
+- */
+-static void yaffs_InvalidateWholeChunkCache(yaffs_Object *in)
+-{
+- int i;
+- yaffs_Device *dev = in->myDev;
++ dev->block_info = NULL;
+
+- if (dev->nShortOpCaches > 0) {
+- /* Invalidate it. */
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].object == in)
+- dev->srCache[i].object = NULL;
+- }
+- }
++ if (dev->chunk_bits_alt && dev->chunk_bits)
++ YFREE_ALT(dev->chunk_bits);
++ else if (dev->chunk_bits)
++ YFREE(dev->chunk_bits);
++ dev->chunk_bits_alt = 0;
++ dev->chunk_bits = NULL;
+ }
+
+-/*--------------------- Checkpointing --------------------*/
++void yaffs_block_became_dirty(yaffs_dev_t *dev, int block_no)
++{
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, block_no);
+
++ int erasedOk = 0;
+
+-static int yaffs_WriteCheckpointValidityMarker(yaffs_Device *dev, int head)
+-{
+- yaffs_CheckpointValidity cp;
++ /* If the block is still healthy erase it and mark as clean.
++ * If the block has had a data failure, then retire it.
++ */
+
+- memset(&cp, 0, sizeof(cp));
++ T(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
++ (TSTR("yaffs_block_became_dirty block %d state %d %s"TENDSTR),
++ block_no, bi->block_state, (bi->needs_retiring) ? "needs retiring" : ""));
+
+- cp.structType = sizeof(cp);
+- cp.magic = YAFFS_MAGIC;
+- cp.version = YAFFS_CHECKPOINT_VERSION;
+- cp.head = (head) ? 1 : 0;
++ yaffs2_clear_oldest_dirty_seq(dev,bi);
+
+- return (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp)) ?
+- 1 : 0;
+-}
++ bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
+
+-static int yaffs_ReadCheckpointValidityMarker(yaffs_Device *dev, int head)
+-{
+- yaffs_CheckpointValidity cp;
+- int ok;
++ /* If this is the block being garbage collected then stop gc'ing this block */
++ if(block_no == dev->gc_block)
++ dev->gc_block = 0;
+
+- ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
++	/* If this block is currently the best candidate for gc then drop it as a candidate */
++ if(block_no == dev->gc_dirtiest){
++ dev->gc_dirtiest = 0;
++ dev->gc_pages_in_use = 0;
++ }
+
+- if (ok)
+- ok = (cp.structType == sizeof(cp)) &&
+- (cp.magic == YAFFS_MAGIC) &&
+- (cp.version == YAFFS_CHECKPOINT_VERSION) &&
+- (cp.head == ((head) ? 1 : 0));
+- return ok ? 1 : 0;
+-}
++ if (!bi->needs_retiring) {
++ yaffs2_checkpt_invalidate(dev);
++ erasedOk = yaffs_erase_block(dev, block_no);
++ if (!erasedOk) {
++ dev->n_erase_failures++;
++ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++ (TSTR("**>> Erasure failed %d" TENDSTR), block_no));
++ }
++ }
+
+-static void yaffs_DeviceToCheckpointDevice(yaffs_CheckpointDevice *cp,
+- yaffs_Device *dev)
+-{
+- cp->nErasedBlocks = dev->nErasedBlocks;
+- cp->allocationBlock = dev->allocationBlock;
+- cp->allocationPage = dev->allocationPage;
+- cp->nFreeChunks = dev->nFreeChunks;
++ if (erasedOk &&
++ ((yaffs_trace_mask & YAFFS_TRACE_ERASE) || !yaffs_skip_verification(dev))) {
++ int i;
++ for (i = 0; i < dev->param.chunks_per_block; i++) {
++ if (!yaffs_check_chunk_erased
++ (dev, block_no * dev->param.chunks_per_block + i)) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ (">>Block %d erasure supposedly OK, but chunk %d not erased"
++ TENDSTR), block_no, i));
++ }
++ }
++ }
++
++ if (erasedOk) {
++ /* Clean it up... */
++ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
++ bi->seq_number = 0;
++ dev->n_erased_blocks++;
++ bi->pages_in_use = 0;
++ bi->soft_del_pages = 0;
++ bi->has_shrink_hdr = 0;
++ bi->skip_erased_check = 1; /* This is clean, so no need to check */
++ bi->gc_prioritise = 0;
++ yaffs_clear_chunk_bits(dev, block_no);
+
+- cp->nDeletedFiles = dev->nDeletedFiles;
+- cp->nUnlinkedFiles = dev->nUnlinkedFiles;
+- cp->nBackgroundDeletions = dev->nBackgroundDeletions;
+- cp->sequenceNumber = dev->sequenceNumber;
+- cp->oldestDirtySequence = dev->oldestDirtySequence;
++ T(YAFFS_TRACE_ERASE,
++ (TSTR("Erased block %d" TENDSTR), block_no));
++ } else {
++ dev->n_free_chunks -= dev->param.chunks_per_block; /* We lost a block of free space */
+
++ yaffs_retire_block(dev, block_no);
++ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++ (TSTR("**>> Block %d retired" TENDSTR), block_no));
++ }
+ }
+
+-static void yaffs_CheckpointDeviceToDevice(yaffs_Device *dev,
+- yaffs_CheckpointDevice *cp)
++static int yaffs_find_alloc_block(yaffs_dev_t *dev)
+ {
+- dev->nErasedBlocks = cp->nErasedBlocks;
+- dev->allocationBlock = cp->allocationBlock;
+- dev->allocationPage = cp->allocationPage;
+- dev->nFreeChunks = cp->nFreeChunks;
+-
+- dev->nDeletedFiles = cp->nDeletedFiles;
+- dev->nUnlinkedFiles = cp->nUnlinkedFiles;
+- dev->nBackgroundDeletions = cp->nBackgroundDeletions;
+- dev->sequenceNumber = cp->sequenceNumber;
+- dev->oldestDirtySequence = cp->oldestDirtySequence;
+-}
++ int i;
+
++ yaffs_block_info_t *bi;
+
+-static int yaffs_WriteCheckpointDevice(yaffs_Device *dev)
+-{
+- yaffs_CheckpointDevice cp;
+- __u32 nBytes;
+- __u32 nBlocks = (dev->internalEndBlock - dev->internalStartBlock + 1);
++ if (dev->n_erased_blocks < 1) {
++ /* Hoosterman we've got a problem.
++ * Can't get space to gc
++ */
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("yaffs tragedy: no more erased blocks" TENDSTR)));
+
+- int ok;
++ return -1;
++ }
+
+- /* Write device runtime values*/
+- yaffs_DeviceToCheckpointDevice(&cp, dev);
+- cp.structType = sizeof(cp);
++ /* Find an empty block. */
+
+- ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ dev->alloc_block_finder++;
++ if (dev->alloc_block_finder < dev->internal_start_block
++ || dev->alloc_block_finder > dev->internal_end_block) {
++ dev->alloc_block_finder = dev->internal_start_block;
++ }
+
+- /* Write block info */
+- if (ok) {
+- nBytes = nBlocks * sizeof(yaffs_BlockInfo);
+- ok = (yaffs_CheckpointWrite(dev, dev->blockInfo, nBytes) == nBytes);
++ bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
++ bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
++ dev->seq_number++;
++ bi->seq_number = dev->seq_number;
++ dev->n_erased_blocks--;
++ T(YAFFS_TRACE_ALLOCATE,
++ (TSTR("Allocated block %d, seq %d, %d left" TENDSTR),
++ dev->alloc_block_finder, dev->seq_number,
++ dev->n_erased_blocks));
++ return dev->alloc_block_finder;
++ }
+ }
+
+- /* Write chunk bits */
+- if (ok) {
+- nBytes = nBlocks * dev->chunkBitmapStride;
+- ok = (yaffs_CheckpointWrite(dev, dev->chunkBits, nBytes) == nBytes);
+- }
+- return ok ? 1 : 0;
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR
++ ("yaffs tragedy: no more erased blocks, but there should have been %d"
++ TENDSTR), dev->n_erased_blocks));
+
++ return -1;
+ }
+
+-static int yaffs_ReadCheckpointDevice(yaffs_Device *dev)
++
++/*
++ * Check if there's space to allocate...
++ * Thinks.... do we need to make this the same as yaffs_get_free_chunks()?
++ */
++int yaffs_check_alloc_available(yaffs_dev_t *dev, int n_chunks)
+ {
+- yaffs_CheckpointDevice cp;
+- __u32 nBytes;
+- __u32 nBlocks = (dev->internalEndBlock - dev->internalStartBlock + 1);
++ int reservedChunks;
++ int reservedBlocks = dev->param.n_reserved_blocks;
++ int checkpointBlocks;
+
+- int ok;
++ checkpointBlocks = yaffs_calc_checkpt_blocks_required(dev);
+
+- ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
+- if (!ok)
+- return 0;
++ reservedChunks = ((reservedBlocks + checkpointBlocks) * dev->param.chunks_per_block);
+
+- if (cp.structType != sizeof(cp))
+- return 0;
++ return (dev->n_free_chunks > (reservedChunks + n_chunks));
++}
++
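A hypothetical caller of yaffs_check_alloc_available(), sketched only to show how the reserve check above is meant to be used (the wrapper and its policy are invented for illustration):

    static int space_ok_for_write(yaffs_dev_t *dev, int n_data_chunks)
    {
            /* Ask for one extra chunk on the assumption that an object header
             * will be rewritten after the data; adjust to taste. */
            return yaffs_check_alloc_available(dev, n_data_chunks + 1);
    }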
++static int yaffs_alloc_chunk(yaffs_dev_t *dev, int useReserve,
++ yaffs_block_info_t **blockUsedPtr)
++{
++ int retVal;
++ yaffs_block_info_t *bi;
++
++ if (dev->alloc_block < 0) {
++ /* Get next block to allocate off */
++ dev->alloc_block = yaffs_find_alloc_block(dev);
++ dev->alloc_page = 0;
++ }
+
++ if (!useReserve && !yaffs_check_alloc_available(dev, 1)) {
++ /* Not enough space to allocate unless we're allowed to use the reserve. */
++ return -1;
++ }
+
+- yaffs_CheckpointDeviceToDevice(dev, &cp);
++ if (dev->n_erased_blocks < dev->param.n_reserved_blocks
++ && dev->alloc_page == 0) {
++ T(YAFFS_TRACE_ALLOCATE, (TSTR("Allocating reserve" TENDSTR)));
++ }
+
+- nBytes = nBlocks * sizeof(yaffs_BlockInfo);
++ /* Next page please.... */
++ if (dev->alloc_block >= 0) {
++ bi = yaffs_get_block_info(dev, dev->alloc_block);
+
+- ok = (yaffs_CheckpointRead(dev, dev->blockInfo, nBytes) == nBytes);
++ retVal = (dev->alloc_block * dev->param.chunks_per_block) +
++ dev->alloc_page;
++ bi->pages_in_use++;
++ yaffs_set_chunk_bit(dev, dev->alloc_block,
++ dev->alloc_page);
+
+- if (!ok)
+- return 0;
+- nBytes = nBlocks * dev->chunkBitmapStride;
++ dev->alloc_page++;
+
+- ok = (yaffs_CheckpointRead(dev, dev->chunkBits, nBytes) == nBytes);
++ dev->n_free_chunks--;
+
+- return ok ? 1 : 0;
+-}
++ /* If the block is full set the state to full */
++ if (dev->alloc_page >= dev->param.chunks_per_block) {
++ bi->block_state = YAFFS_BLOCK_STATE_FULL;
++ dev->alloc_block = -1;
++ }
+
+-static void yaffs_ObjectToCheckpointObject(yaffs_CheckpointObject *cp,
+- yaffs_Object *obj)
+-{
++ if (blockUsedPtr)
++ *blockUsedPtr = bi;
++
++ return retVal;
++ }
+
+- cp->objectId = obj->objectId;
+- cp->parentId = (obj->parent) ? obj->parent->objectId : 0;
+- cp->hdrChunk = obj->hdrChunk;
+- cp->variantType = obj->variantType;
+- cp->deleted = obj->deleted;
+- cp->softDeleted = obj->softDeleted;
+- cp->unlinked = obj->unlinked;
+- cp->fake = obj->fake;
+- cp->renameAllowed = obj->renameAllowed;
+- cp->unlinkAllowed = obj->unlinkAllowed;
+- cp->serial = obj->serial;
+- cp->nDataChunks = obj->nDataChunks;
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" TENDSTR)));
+
+- if (obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+- cp->fileSizeOrEquivalentObjectId = obj->variant.fileVariant.fileSize;
+- else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
+- cp->fileSizeOrEquivalentObjectId = obj->variant.hardLinkVariant.equivalentObjectId;
++ return -1;
+ }
+
+-static int yaffs_CheckpointObjectToObject(yaffs_Object *obj, yaffs_CheckpointObject *cp)
++static int yaffs_get_erased_chunks(yaffs_dev_t *dev)
+ {
++ int n;
+
+- yaffs_Object *parent;
++ n = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+- if (obj->variantType != cp->variantType) {
+- T(YAFFS_TRACE_ERROR, (TSTR("Checkpoint read object %d type %d "
+- TCONT("chunk %d does not match existing object type %d")
+- TENDSTR), cp->objectId, cp->variantType, cp->hdrChunk,
+- obj->variantType));
+- return 0;
+- }
++ if (dev->alloc_block > 0)
++ n += (dev->param.chunks_per_block - dev->alloc_page);
+
+- obj->objectId = cp->objectId;
++ return n;
+
+- if (cp->parentId)
+- parent = yaffs_FindOrCreateObjectByNumber(
+- obj->myDev,
+- cp->parentId,
+- YAFFS_OBJECT_TYPE_DIRECTORY);
+- else
+- parent = NULL;
++}
+
+- if (parent) {
+- if (parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR("Checkpoint read object %d parent %d type %d"
+- TCONT(" chunk %d Parent type, %d, not directory")
+- TENDSTR),
+- cp->objectId, cp->parentId, cp->variantType,
+- cp->hdrChunk, parent->variantType));
+- return 0;
++/*
++ * yaffs_skip_rest_of_block() skips over the rest of the allocation block
++ * if we don't want to write to it.
++ */
++void yaffs_skip_rest_of_block(yaffs_dev_t *dev)
++{
++ if(dev->alloc_block > 0){
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, dev->alloc_block);
++ if(bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING){
++ bi->block_state = YAFFS_BLOCK_STATE_FULL;
++ dev->alloc_block = -1;
+ }
+- yaffs_AddObjectToDirectory(parent, obj);
+ }
+-
+- obj->hdrChunk = cp->hdrChunk;
+- obj->variantType = cp->variantType;
+- obj->deleted = cp->deleted;
+- obj->softDeleted = cp->softDeleted;
+- obj->unlinked = cp->unlinked;
+- obj->fake = cp->fake;
+- obj->renameAllowed = cp->renameAllowed;
+- obj->unlinkAllowed = cp->unlinkAllowed;
+- obj->serial = cp->serial;
+- obj->nDataChunks = cp->nDataChunks;
+-
+- if (obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+- obj->variant.fileVariant.fileSize = cp->fileSizeOrEquivalentObjectId;
+- else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
+- obj->variant.hardLinkVariant.equivalentObjectId = cp->fileSizeOrEquivalentObjectId;
+-
+- if (obj->hdrChunk > 0)
+- obj->lazyLoaded = 1;
+- return 1;
+ }
+
+
+-
+-static int yaffs_CheckpointTnodeWorker(yaffs_Object *in, yaffs_Tnode *tn,
+- __u32 level, int chunkOffset)
++static int yaffs_gc_block(yaffs_dev_t *dev, int block,
++ int wholeBlock)
+ {
++ int oldChunk;
++ int newChunk;
++ int mark_flash;
++ int retVal = YAFFS_OK;
+ int i;
+- yaffs_Device *dev = in->myDev;
+- int ok = 1;
+- int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
+-
+- if (tnodeSize < sizeof(yaffs_Tnode))
+- tnodeSize = sizeof(yaffs_Tnode);
++ int isCheckpointBlock;
++ int matchingChunk;
++ int maxCopies;
+
++ int chunksBefore = yaffs_get_erased_chunks(dev);
++ int chunksAfter;
+
+- if (tn) {
+- if (level > 0) {
++ yaffs_ext_tags tags;
+
+- for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+- if (tn->internal[i]) {
+- ok = yaffs_CheckpointTnodeWorker(in,
+- tn->internal[i],
+- level - 1,
+- (chunkOffset<<YAFFS_TNODES_INTERNAL_BITS) + i);
+- }
+- }
+- } else if (level == 0) {
+- __u32 baseOffset = chunkOffset << YAFFS_TNODES_LEVEL0_BITS;
+- ok = (yaffs_CheckpointWrite(dev, &baseOffset, sizeof(baseOffset)) == sizeof(baseOffset));
+- if (ok)
+- ok = (yaffs_CheckpointWrite(dev, tn, tnodeSize) == tnodeSize);
+- }
+- }
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, block);
+
+- return ok;
++ yaffs_obj_t *object;
+
+-}
++ isCheckpointBlock = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
+
+-static int yaffs_WriteCheckpointTnodes(yaffs_Object *obj)
+-{
+- __u32 endMarker = ~0;
+- int ok = 1;
+
+- if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
+- ok = yaffs_CheckpointTnodeWorker(obj,
+- obj->variant.fileVariant.top,
+- obj->variant.fileVariant.topLevel,
+- 0);
+- if (ok)
+- ok = (yaffs_CheckpointWrite(obj->myDev, &endMarker, sizeof(endMarker)) ==
+- sizeof(endMarker));
+- }
++ T(YAFFS_TRACE_TRACING,
++ (TSTR("Collecting block %d, in use %d, shrink %d, wholeBlock %d" TENDSTR),
++ block,
++ bi->pages_in_use,
++ bi->has_shrink_hdr,
++ wholeBlock));
+
+- return ok ? 1 : 0;
+-}
++ /*yaffs_verify_free_chunks(dev); */
+
+-static int yaffs_ReadCheckpointTnodes(yaffs_Object *obj)
+-{
+- __u32 baseChunk;
+- int ok = 1;
+- yaffs_Device *dev = obj->myDev;
+- yaffs_FileStructure *fileStructPtr = &obj->variant.fileVariant;
+- yaffs_Tnode *tn;
+- int nread = 0;
+- int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++ if(bi->block_state == YAFFS_BLOCK_STATE_FULL)
++ bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
++
++ bi->has_shrink_hdr = 0; /* clear the flag so that the block can erase */
+
+- if (tnodeSize < sizeof(yaffs_Tnode))
+- tnodeSize = sizeof(yaffs_Tnode);
++ dev->gc_disable = 1;
+
+- ok = (yaffs_CheckpointRead(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk));
++ if (isCheckpointBlock ||
++ !yaffs_still_some_chunks(dev, block)) {
++ T(YAFFS_TRACE_TRACING,
++ (TSTR
++ ("Collecting block %d that has no chunks in use" TENDSTR),
++ block));
++ yaffs_block_became_dirty(dev, block);
++ } else {
+
+- while (ok && (~baseChunk)) {
+- nread++;
+- /* Read level 0 tnode */
++ __u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
++ yaffs_verify_blk(dev, bi, block);
+
+- tn = yaffs_GetTnodeRaw(dev);
+- if (tn)
+- ok = (yaffs_CheckpointRead(dev, tn, tnodeSize) == tnodeSize);
+- else
+- ok = 0;
++ maxCopies = (wholeBlock) ? dev->param.chunks_per_block : 5;
++ oldChunk = block * dev->param.chunks_per_block + dev->gc_chunk;
+
+- if (tn && ok)
+- ok = yaffs_AddOrFindLevel0Tnode(dev,
+- fileStructPtr,
+- baseChunk,
+- tn) ? 1 : 0;
++ for (/* init already done */;
++ retVal == YAFFS_OK &&
++ dev->gc_chunk < dev->param.chunks_per_block &&
++ (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
++ maxCopies > 0;
++ dev->gc_chunk++, oldChunk++) {
++ if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
+
+- if (ok)
+- ok = (yaffs_CheckpointRead(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk));
++ /* This page is in use and might need to be copied off */
+
+- }
++ maxCopies--;
+
+- T(YAFFS_TRACE_CHECKPOINT, (
+- TSTR("Checkpoint read tnodes %d records, last %d. ok %d" TENDSTR),
+- nread, baseChunk, ok));
++ mark_flash = 1;
+
+- return ok ? 1 : 0;
+-}
++ yaffs_init_tags(&tags);
+
++ yaffs_rd_chunk_tags_nand(dev, oldChunk,
++ buffer, &tags);
+
+-static int yaffs_WriteCheckpointObjects(yaffs_Device *dev)
+-{
+- yaffs_Object *obj;
+- yaffs_CheckpointObject cp;
+- int i;
+- int ok = 1;
+- struct ylist_head *lh;
++ object =
++ yaffs_find_by_number(dev,
++ tags.obj_id);
+
++ T(YAFFS_TRACE_GC_DETAIL,
++ (TSTR
++ ("Collecting chunk in block %d, %d %d %d " TENDSTR),
++ dev->gc_chunk, tags.obj_id, tags.chunk_id,
++ tags.n_bytes));
+
+- /* Iterate through the objects in each hash entry,
+- * dumping them to the checkpointing stream.
+- */
++ if (object && !yaffs_skip_verification(dev)) {
++ if (tags.chunk_id == 0)
++ matchingChunk = object->hdr_chunk;
++ else if (object->soft_del)
++ matchingChunk = oldChunk; /* Defeat the test */
++ else
++ matchingChunk = yaffs_find_chunk_in_file(object, tags.chunk_id, NULL);
+
+- for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
+- ylist_for_each(lh, &dev->objectBucket[i].list) {
+- if (lh) {
+- obj = ylist_entry(lh, yaffs_Object, hashLink);
+- if (!obj->deferedFree) {
+- yaffs_ObjectToCheckpointObject(&cp, obj);
+- cp.structType = sizeof(cp);
+-
+- T(YAFFS_TRACE_CHECKPOINT, (
+- TSTR("Checkpoint write object %d parent %d type %d chunk %d obj addr %x" TENDSTR),
+- cp.objectId, cp.parentId, cp.variantType, cp.hdrChunk, (unsigned) obj));
++ if (oldChunk != matchingChunk)
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("gc: page in gc mismatch: %d %d %d %d"TENDSTR),
++ oldChunk, matchingChunk, tags.obj_id, tags.chunk_id));
+
+- ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
++ }
+
+- if (ok && obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+- ok = yaffs_WriteCheckpointTnodes(obj);
++ if (!object) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ ("page %d in gc has no object: %d %d %d "
++ TENDSTR), oldChunk,
++ tags.obj_id, tags.chunk_id, tags.n_bytes));
+ }
+- }
+- }
+- }
+
+- /* Dump end of list */
+- memset(&cp, 0xFF, sizeof(yaffs_CheckpointObject));
+- cp.structType = sizeof(cp);
++ if (object &&
++ object->deleted &&
++ object->soft_del &&
++ tags.chunk_id != 0) {
++ /* Data chunk in a soft deleted file, throw it away
++ * It's a soft deleted data chunk,
++ * No need to copy this, just forget about it and
++ * fix up the object.
++ */
++
++ /* Free chunks already includes softdeleted chunks.
++ * However, this chunk is soon going to be really deleted,
++ * which will increment free chunks.
++ * We have to decrement free chunks so this works out properly.
++ */
++ dev->n_free_chunks--;
++ bi->soft_del_pages--;
++
++ object->n_data_chunks--;
+
+- if (ok)
+- ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (object->n_data_chunks <= 0) {
++ /* remember to clean up the object */
++ dev->gc_cleanup_list[dev->n_clean_ups] =
++ tags.obj_id;
++ dev->n_clean_ups++;
++ }
++ mark_flash = 0;
++ } else if (0) {
++ /* Todo object && object->deleted && object->n_data_chunks == 0 */
++ /* Deleted object header with no data chunks.
++ * Can be discarded and the file deleted.
++ */
++ object->hdr_chunk = 0;
++ yaffs_free_tnode(object->my_dev,
++ object->variant.
++ file_variant.top);
++ object->variant.file_variant.top = NULL;
++ yaffs_generic_obj_del(object);
+
+- return ok ? 1 : 0;
+-}
++ } else if (object) {
++ /* It's either a data chunk in a live file or
++ * an ObjectHeader, so we're interested in it.
++ * NB Need to keep the ObjectHeaders of deleted files
++ * until the whole file has been deleted off
++ */
++ tags.serial_number++;
+
+-static int yaffs_ReadCheckpointObjects(yaffs_Device *dev)
+-{
+- yaffs_Object *obj;
+- yaffs_CheckpointObject cp;
+- int ok = 1;
+- int done = 0;
+- yaffs_Object *hardList = NULL;
++ dev->n_gc_copies++;
+
+- while (ok && !done) {
+- ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
+- if (cp.structType != sizeof(cp)) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("struct size %d instead of %d ok %d"TENDSTR),
+- cp.structType, sizeof(cp), ok));
+- ok = 0;
+- }
+-
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("Checkpoint read object %d parent %d type %d chunk %d " TENDSTR),
+- cp.objectId, cp.parentId, cp.variantType, cp.hdrChunk));
+-
+- if (ok && cp.objectId == ~0)
+- done = 1;
+- else if (ok) {
+- obj = yaffs_FindOrCreateObjectByNumber(dev, cp.objectId, cp.variantType);
+- if (obj) {
+- ok = yaffs_CheckpointObjectToObject(obj, &cp);
+- if (!ok)
+- break;
+- if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
+- ok = yaffs_ReadCheckpointTnodes(obj);
+- } else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
+- obj->hardLinks.next =
+- (struct ylist_head *) hardList;
+- hardList = obj;
+- }
+- } else
+- ok = 0;
+- }
+- }
++ if (tags.chunk_id == 0) {
++ /* It is an object Id,
++ * We need to nuke the shrinkheader flags first
++ * Also need to clean up shadowing.
++ * We no longer want the shrinkHeader flag since its work is done
++ * and if it is left in place it will mess up scanning.
++ */
+
+- if (ok)
+- yaffs_HardlinkFixup(dev, hardList);
++ yaffs_obj_header *oh;
++ oh = (yaffs_obj_header *)buffer;
+
+- return ok ? 1 : 0;
+-}
++ oh->is_shrink = 0;
++ tags.extra_is_shrink = 0;
+
+-static int yaffs_WriteCheckpointSum(yaffs_Device *dev)
+-{
+- __u32 checkpointSum;
+- int ok;
++ oh->shadows_obj = 0;
++ oh->inband_shadowed_obj_id = 0;
++ tags.extra_shadows = 0;
++
++ /* Update file size */
++ if(object->variant_type == YAFFS_OBJECT_TYPE_FILE){
++ oh->file_size = object->variant.file_variant.file_size;
++ tags.extra_length = oh->file_size;
++ }
++
++ yaffs_verify_oh(object, oh, &tags, 1);
++ newChunk =
++ yaffs_write_new_chunk(dev,(__u8 *) oh, &tags, 1);
++ } else
++ newChunk =
++ yaffs_write_new_chunk(dev, buffer, &tags, 1);
++
++ if (newChunk < 0) {
++ retVal = YAFFS_FAIL;
++ } else {
+
+- yaffs_GetCheckpointSum(dev, &checkpointSum);
++ /* Ok, now fix up the Tnodes etc. */
+
+- ok = (yaffs_CheckpointWrite(dev, &checkpointSum, sizeof(checkpointSum)) == sizeof(checkpointSum));
++ if (tags.chunk_id == 0) {
++ /* It's a header */
++ object->hdr_chunk = newChunk;
++ object->serial = tags.serial_number;
++ } else {
++ /* It's a data chunk */
++ int ok;
++ ok = yaffs_put_chunk_in_file
++ (object,
++ tags.chunk_id,
++ newChunk, 0);
++ }
++ }
++ }
+
+- if (!ok)
+- return 0;
++ if (retVal == YAFFS_OK)
++ yaffs_chunk_del(dev, oldChunk, mark_flash, __LINE__);
+
+- return 1;
+-}
++ }
++ }
+
+-static int yaffs_ReadCheckpointSum(yaffs_Device *dev)
+-{
+- __u32 checkpointSum0;
+- __u32 checkpointSum1;
+- int ok;
++ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+- yaffs_GetCheckpointSum(dev, &checkpointSum0);
+
+- ok = (yaffs_CheckpointRead(dev, &checkpointSum1, sizeof(checkpointSum1)) == sizeof(checkpointSum1));
+
+- if (!ok)
+- return 0;
++ }
+
+- if (checkpointSum0 != checkpointSum1)
+- return 0;
++ yaffs_verify_collected_blk(dev, bi, block);
+
+- return 1;
+-}
+
+
+-static int yaffs_WriteCheckpointData(yaffs_Device *dev)
+-{
+- int ok = 1;
++ if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
++ /*
++ * The gc did not complete. Set block state back to FULL
++ * because checkpointing does not restore gc.
++ */
++ bi->block_state = YAFFS_BLOCK_STATE_FULL;
++ } else {
++ /* The gc completed. */
++ /* Do any required cleanups */
++ for (i = 0; i < dev->n_clean_ups; i++) {
++ /* Time to delete the file too */
++ object =
++ yaffs_find_by_number(dev,
++ dev->gc_cleanup_list[i]);
++ if (object) {
++ yaffs_free_tnode(dev,
++ object->variant.file_variant.
++ top);
++ object->variant.file_variant.top = NULL;
++ T(YAFFS_TRACE_GC,
++ (TSTR
++ ("yaffs: About to finally delete object %d"
++ TENDSTR), object->obj_id));
++ yaffs_generic_obj_del(object);
++ object->my_dev->n_deleted_files--;
++ }
+
+- if (dev->skipCheckpointWrite || !dev->isYaffs2) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint write" TENDSTR)));
+- ok = 0;
+- }
++ }
+
+- if (ok)
+- ok = yaffs_CheckpointOpen(dev, 1);
+
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR)));
+- ok = yaffs_WriteCheckpointValidityMarker(dev, 1);
+- }
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint device" TENDSTR)));
+- ok = yaffs_WriteCheckpointDevice(dev);
+- }
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint objects" TENDSTR)));
+- ok = yaffs_WriteCheckpointObjects(dev);
+- }
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR)));
+- ok = yaffs_WriteCheckpointValidityMarker(dev, 0);
++ chunksAfter = yaffs_get_erased_chunks(dev);
++ if (chunksBefore >= chunksAfter) {
++ T(YAFFS_TRACE_GC,
++ (TSTR
++ ("gc did not increase free chunks before %d after %d"
++ TENDSTR), chunksBefore, chunksAfter));
++ }
++ dev->gc_block = 0;
++ dev->gc_chunk = 0;
++ dev->n_clean_ups = 0;
+ }
+
+- if (ok)
+- ok = yaffs_WriteCheckpointSum(dev);
+-
+- if (!yaffs_CheckpointClose(dev))
+- ok = 0;
+-
+- if (ok)
+- dev->isCheckpointed = 1;
+- else
+- dev->isCheckpointed = 0;
++ dev->gc_disable = 0;
+
+- return dev->isCheckpointed;
++ return retVal;
+ }
+
+-static int yaffs_ReadCheckpointData(yaffs_Device *dev)
++/*
++ * yaffs_find_gc_block() is used to select the dirtiest block (or close enough)
++ * for garbage collection.
++ */
++
++static unsigned yaffs_find_gc_block(yaffs_dev_t *dev,
++ int aggressive,
++ int background)
+ {
+- int ok = 1;
++ int i;
++ int iterations;
++ unsigned selected = 0;
++ int prioritised = 0;
++ int prioritisedExists = 0;
++ yaffs_block_info_t *bi;
++ int threshold;
+
+- if (dev->skipCheckpointRead || !dev->isYaffs2) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint read" TENDSTR)));
+- ok = 0;
+- }
++ /* First let's see if we need to grab a prioritised block */
++ if (dev->has_pending_prioritised_gc && !aggressive) {
++ dev->gc_dirtiest = 0;
++ bi = dev->block_info;
++ for (i = dev->internal_start_block;
++ i <= dev->internal_end_block && !selected;
++ i++) {
++
++ if (bi->gc_prioritise) {
++ prioritisedExists = 1;
++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
++ yaffs_block_ok_for_gc(dev, bi)) {
++ selected = i;
++ prioritised = 1;
++ }
++ }
++ bi++;
++ }
+
+- if (ok)
+- ok = yaffs_CheckpointOpen(dev, 0); /* open for read */
++ /*
++ * If there is a prioritised block and none was selected then
++ * this happened because there is at least one old dirty block gumming
++ * up the works. Let's gc the oldest dirty block.
++ */
+
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR)));
+- ok = yaffs_ReadCheckpointValidityMarker(dev, 1);
+- }
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint device" TENDSTR)));
+- ok = yaffs_ReadCheckpointDevice(dev);
+- }
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint objects" TENDSTR)));
+- ok = yaffs_ReadCheckpointObjects(dev);
+- }
+- if (ok) {
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR)));
+- ok = yaffs_ReadCheckpointValidityMarker(dev, 0);
+- }
++ if(prioritisedExists &&
++ !selected &&
++ dev->oldest_dirty_block > 0)
++ selected = dev->oldest_dirty_block;
+
+- if (ok) {
+- ok = yaffs_ReadCheckpointSum(dev);
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint checksum %d" TENDSTR), ok));
++ if (!prioritisedExists) /* None found, so we can clear this */
++ dev->has_pending_prioritised_gc = 0;
+ }
+
+- if (!yaffs_CheckpointClose(dev))
+- ok = 0;
+-
+- if (ok)
+- dev->isCheckpointed = 1;
+- else
+- dev->isCheckpointed = 0;
+-
+- return ok ? 1 : 0;
++ /* If we're doing aggressive GC then we are happy to take a less-dirty block, and
++ * search harder.
++ * Otherwise (we're doing a leisurely gc) we only bother to do this if the
++ * block has only a few pages in use.
++ */
+
+-}
++ if (!selected){
++ int pagesUsed;
++ int nBlocks = dev->internal_end_block - dev->internal_start_block + 1;
++ if (aggressive){
++ threshold = dev->param.chunks_per_block;
++ iterations = nBlocks;
++ } else {
++ int maxThreshold;
+
+-static void yaffs_InvalidateCheckpoint(yaffs_Device *dev)
+-{
+- if (dev->isCheckpointed ||
+- dev->blocksInCheckpoint > 0) {
+- dev->isCheckpointed = 0;
+- yaffs_CheckpointInvalidateStream(dev);
+- if (dev->superBlock && dev->markSuperBlockDirty)
+- dev->markSuperBlockDirty(dev->superBlock);
+- }
+-}
++ if(background)
++ maxThreshold = dev->param.chunks_per_block/2;
++ else
++ maxThreshold = dev->param.chunks_per_block/8;
+
++ if(maxThreshold < YAFFS_GC_PASSIVE_THRESHOLD)
++ maxThreshold = YAFFS_GC_PASSIVE_THRESHOLD;
+
+-int yaffs_CheckpointSave(yaffs_Device *dev)
+-{
++ threshold = background ?
++ (dev->gc_not_done + 2) * 2 : 0;
++ if(threshold <YAFFS_GC_PASSIVE_THRESHOLD)
++ threshold = YAFFS_GC_PASSIVE_THRESHOLD;
++ if(threshold > maxThreshold)
++ threshold = maxThreshold;
+
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("save entry: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
++ iterations = nBlocks / 16 + 1;
++ if (iterations > 100)
++ iterations = 100;
++ }
+
+- yaffs_VerifyObjects(dev);
+- yaffs_VerifyBlocks(dev);
+- yaffs_VerifyFreeChunks(dev);
++ for (i = 0;
++ i < iterations &&
++ (dev->gc_dirtiest < 1 ||
++ dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
++ i++) {
++ dev->gc_block_finder++;
++ if (dev->gc_block_finder < dev->internal_start_block ||
++ dev->gc_block_finder > dev->internal_end_block)
++ dev->gc_block_finder = dev->internal_start_block;
+
+- if (!dev->isCheckpointed) {
+- yaffs_InvalidateCheckpoint(dev);
+- yaffs_WriteCheckpointData(dev);
+- }
++ bi = yaffs_get_block_info(dev, dev->gc_block_finder);
+
+- T(YAFFS_TRACE_ALWAYS, (TSTR("save exit: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
++ pagesUsed = bi->pages_in_use - bi->soft_del_pages;
+
+- return dev->isCheckpointed;
+-}
++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
++ pagesUsed < dev->param.chunks_per_block &&
++ (dev->gc_dirtiest < 1 || pagesUsed < dev->gc_pages_in_use) &&
++ yaffs_block_ok_for_gc(dev, bi)) {
++ dev->gc_dirtiest = dev->gc_block_finder;
++ dev->gc_pages_in_use = pagesUsed;
++ }
++ }
+
+-int yaffs_CheckpointRestore(yaffs_Device *dev)
+-{
+- int retval;
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore entry: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
++ if(dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
++ selected = dev->gc_dirtiest;
++ }
+
+- retval = yaffs_ReadCheckpointData(dev);
++ /*
++ * If nothing has been selected for a while, try selecting the oldest dirty block
++ * because that's gumming up the works.
++ */
+
+- if (dev->isCheckpointed) {
+- yaffs_VerifyObjects(dev);
+- yaffs_VerifyBlocks(dev);
+- yaffs_VerifyFreeChunks(dev);
++ if(!selected && dev->param.is_yaffs2 &&
++ dev->gc_not_done >= ( background ? 10 : 20)){
++ yaffs2_find_oldest_dirty_seq(dev);
++ if(dev->oldest_dirty_block > 0) {
++ selected = dev->oldest_dirty_block;
++ dev->gc_dirtiest = selected;
++ dev->oldest_dirty_gc_count++;
++ bi = yaffs_get_block_info(dev, selected);
++ dev->gc_pages_in_use = bi->pages_in_use - bi->soft_del_pages;
++ } else
++ dev->gc_not_done = 0;
+ }
+
+- T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore exit: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
++ if(selected){
++ T(YAFFS_TRACE_GC,
++ (TSTR("GC Selected block %d with %d free, prioritised:%d" TENDSTR),
++ selected,
++ dev->param.chunks_per_block - dev->gc_pages_in_use,
++ prioritised));
++
++ dev->n_gc_blocks++;
++ if(background)
++ dev->bg_gcs++;
++
++ dev->gc_dirtiest = 0;
++ dev->gc_pages_in_use = 0;
++ dev->gc_not_done = 0;
++ if(dev->refresh_skip > 0)
++ dev->refresh_skip--;
++ } else{
++ dev->gc_not_done++;
++ T(YAFFS_TRACE_GC,
++ (TSTR("GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s" TENDSTR),
++ dev->gc_block_finder, dev->gc_not_done,
++ threshold,
++ dev->gc_dirtiest, dev->gc_pages_in_use,
++ dev->oldest_dirty_block,
++ background ? " bg" : ""));
++ }
+
+- return retval;
++ return selected;
+ }
+
+-/*--------------------- File read/write ------------------------
+- * Read and write have very similar structures.
+- * In general the read/write has three parts to it
+- * An incomplete chunk to start with (if the read/write is not chunk-aligned)
+- * Some complete chunks
+- * An incomplete chunk to end off with
++/* New garbage collector
++ * If we're very low on erased blocks then we do aggressive garbage collection,
++ * otherwise we do "leisurely" garbage collection.
++ * Aggressive gc looks further (the whole array) and will accept less-dirty blocks.
++ * Passive gc only inspects smaller areas and will only accept dirtier blocks.
+ *
+- * Curve-balls: the first chunk might also be the last chunk.
++ * The idea is to help clear out space in a more spread-out manner.
++ * Dunno if it really does anything useful.
+ */
+-
+-int yaffs_ReadDataFromFile(yaffs_Object *in, __u8 *buffer, loff_t offset,
+- int nBytes)
++static int yaffs_check_gc(yaffs_dev_t *dev, int background)
+ {
++ int aggressive = 0;
++ int gcOk = YAFFS_OK;
++ int maxTries = 0;
++ int minErased;
++ int erasedChunks;
++ int checkpointBlockAdjust;
+
+- int chunk;
+- __u32 start;
+- int nToCopy;
+- int n = nBytes;
+- int nDone = 0;
+- yaffs_ChunkCache *cache;
+-
+- yaffs_Device *dev;
+-
+- dev = in->myDev;
+-
+- while (n > 0) {
+- /* chunk = offset / dev->nDataBytesPerChunk + 1; */
+- /* start = offset % dev->nDataBytesPerChunk; */
+- yaffs_AddrToChunk(dev, offset, &chunk, &start);
+- chunk++;
+-
+- /* OK now check for the curveball where the start and end are in
+- * the same chunk.
+- */
+- if ((start + n) < dev->nDataBytesPerChunk)
+- nToCopy = n;
+- else
+- nToCopy = dev->nDataBytesPerChunk - start;
+-
+- cache = yaffs_FindChunkCache(in, chunk);
+-
+- /* If the chunk is already in the cache or it is less than a whole chunk
+- * or we're using inband tags then use the cache (if there is caching)
+- * else bypass the cache.
+- */
+- if (cache || nToCopy != dev->nDataBytesPerChunk || dev->inbandTags) {
+- if (dev->nShortOpCaches > 0) {
+-
+- /* If we can't find the data in the cache, then load it up. */
++ if(dev->param.gc_control &&
++ (dev->param.gc_control(dev) & 1) == 0)
++ return YAFFS_OK;
+
+- if (!cache) {
+- cache = yaffs_GrabChunkCache(in->myDev);
+- cache->object = in;
+- cache->chunkId = chunk;
+- cache->dirty = 0;
+- cache->locked = 0;
+- yaffs_ReadChunkDataFromObject(in, chunk,
+- cache->
+- data);
+- cache->nBytes = 0;
+- }
++ if (dev->gc_disable) {
++ /* Bail out so we don't get recursive gc */
++ return YAFFS_OK;
++ }
+
+- yaffs_UseChunkCache(dev, cache, 0);
++ /* This loop should pass the first time.
++ * We'll only see looping here if the collection does not increase space.
++ */
+
+- cache->locked = 1;
++ do {
++ maxTries++;
+
++ checkpointBlockAdjust = yaffs_calc_checkpt_blocks_required(dev);
+
+- memcpy(buffer, &cache->data[start], nToCopy);
++ minErased = dev->param.n_reserved_blocks + checkpointBlockAdjust + 1;
++ erasedChunks = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+- cache->locked = 0;
+- } else {
+- /* Read into the local buffer then copy..*/
++ /* If we need a block soon then do aggressive gc.*/
++ if (dev->n_erased_blocks < minErased)
++ aggressive = 1;
++ else {
++ if(!background && erasedChunks > (dev->n_free_chunks / 4))
++ break;
+
+- __u8 *localBuffer =
+- yaffs_GetTempBuffer(dev, __LINE__);
+- yaffs_ReadChunkDataFromObject(in, chunk,
+- localBuffer);
++ if(dev->gc_skip > 20)
++ dev->gc_skip = 20;
++ if(erasedChunks < dev->n_free_chunks/2 ||
++ dev->gc_skip < 1 ||
++ background)
++ aggressive = 0;
++ else {
++ dev->gc_skip--;
++ break;
++ }
++ }
+
+- memcpy(buffer, &localBuffer[start], nToCopy);
++ dev->gc_skip = 5;
+
++ /* If we don't already have a block being gc'd then see if we should start another */
+
+- yaffs_ReleaseTempBuffer(dev, localBuffer,
+- __LINE__);
+- }
++ if (dev->gc_block < 1 && !aggressive) {
++ dev->gc_block = yaffs2_find_refresh_block(dev);
++ dev->gc_chunk = 0;
++ dev->n_clean_ups=0;
++ }
++ if (dev->gc_block < 1) {
++ dev->gc_block = yaffs_find_gc_block(dev, aggressive, background);
++ dev->gc_chunk = 0;
++ dev->n_clean_ups=0;
++ }
+
+- } else {
++ if (dev->gc_block > 0) {
++ dev->all_gcs++;
++ if (!aggressive)
++ dev->passive_gc_count++;
+
+- /* A full chunk. Read directly into the supplied buffer. */
+- yaffs_ReadChunkDataFromObject(in, chunk, buffer);
++ T(YAFFS_TRACE_GC,
++ (TSTR
++ ("yaffs: GC erasedBlocks %d aggressive %d" TENDSTR),
++ dev->n_erased_blocks, aggressive));
+
++ gcOk = yaffs_gc_block(dev, dev->gc_block, aggressive);
+ }
+
+- n -= nToCopy;
+- offset += nToCopy;
+- buffer += nToCopy;
+- nDone += nToCopy;
+-
+- }
++ if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) && dev->gc_block > 0) {
++ T(YAFFS_TRACE_GC,
++ (TSTR
++ ("yaffs: GC !!!no reclaim!!! erasedBlocks %d after try %d block %d"
++ TENDSTR), dev->n_erased_blocks, maxTries, dev->gc_block));
++ }
++ } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
++ (dev->gc_block > 0) &&
++ (maxTries < 2));
+
+- return nDone;
++ return aggressive ? gcOk : YAFFS_OK;
+ }
+
+-int yaffs_WriteDataToFile(yaffs_Object *in, const __u8 *buffer, loff_t offset,
+- int nBytes, int writeThrough)
++/*
++ * yaffs_bg_gc()
++ * Garbage collects. Intended to be called from a background thread.
++ * Returns non-zero if at least half the free chunks are erased.
++ */
++int yaffs_bg_gc(yaffs_dev_t *dev, unsigned urgency)
+ {
++ int erasedChunks = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+- int chunk;
+- __u32 start;
+- int nToCopy;
+- int n = nBytes;
+- int nDone = 0;
+- int nToWriteBack;
+- int startOfWrite = offset;
+- int chunkWritten = 0;
+- __u32 nBytesRead;
+- __u32 chunkStart;
++ T(YAFFS_TRACE_BACKGROUND, (TSTR("Background gc %u" TENDSTR),urgency));
+
+- yaffs_Device *dev;
++ yaffs_check_gc(dev, 1);
++ return erasedChunks > dev->n_free_chunks/2;
++}
+
+- dev = in->myDev;
++/*------------------------- TAGS --------------------------------*/
+
+- while (n > 0 && chunkWritten >= 0) {
+- /* chunk = offset / dev->nDataBytesPerChunk + 1; */
+- /* start = offset % dev->nDataBytesPerChunk; */
+- yaffs_AddrToChunk(dev, offset, &chunk, &start);
++static int yaffs_tags_match(const yaffs_ext_tags *tags, int obj_id,
++ int chunkInObject)
++{
++ return (tags->chunk_id == chunkInObject &&
++ tags->obj_id == obj_id && !tags->is_deleted) ? 1 : 0;
+
+- if (chunk * dev->nDataBytesPerChunk + start != offset ||
+- start >= dev->nDataBytesPerChunk) {
+- T(YAFFS_TRACE_ERROR, (
+- TSTR("AddrToChunk of offset %d gives chunk %d start %d"
+- TENDSTR),
+- (int)offset, chunk, start));
+- }
+- chunk++;
++}
+
+- /* OK now check for the curveball where the start and end are in
+- * the same chunk.
+- */
+
+- if ((start + n) < dev->nDataBytesPerChunk) {
+- nToCopy = n;
++/*-------------------- Data file manipulation -----------------*/
+
+- /* Now folks, to calculate how many bytes to write back....
+- * If we're overwriting and not writing to then end of file then
+- * we need to write back as much as was there before.
+- */
++static int yaffs_find_chunk_in_file(yaffs_obj_t *in, int inode_chunk,
++ yaffs_ext_tags *tags)
++{
++ /* Get the Tnode, then get the level 0 chunk offset */
++ yaffs_tnode_t *tn;
++ int theChunk = -1;
++ yaffs_ext_tags localTags;
++ int retVal = -1;
+
+- chunkStart = ((chunk - 1) * dev->nDataBytesPerChunk);
++ yaffs_dev_t *dev = in->my_dev;
+
+- if (chunkStart > in->variant.fileVariant.fileSize)
+- nBytesRead = 0; /* Past end of file */
+- else
+- nBytesRead = in->variant.fileVariant.fileSize - chunkStart;
++ if (!tags) {
++ /* Passed a NULL, so use our own tags space */
++ tags = &localTags;
++ }
+
+- if (nBytesRead > dev->nDataBytesPerChunk)
+- nBytesRead = dev->nDataBytesPerChunk;
++ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+- nToWriteBack =
+- (nBytesRead >
+- (start + n)) ? nBytesRead : (start + n);
++ if (tn) {
++ theChunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+- if (nToWriteBack < 0 || nToWriteBack > dev->nDataBytesPerChunk)
+- YBUG();
++ retVal =
++ yaffs_find_chunk_in_group(dev, theChunk, tags, in->obj_id,
++ inode_chunk);
++ }
++ return retVal;
++}
+
+- } else {
+- nToCopy = dev->nDataBytesPerChunk - start;
+- nToWriteBack = dev->nDataBytesPerChunk;
+- }
++static int yaffs_find_del_file_chunk(yaffs_obj_t *in, int inode_chunk,
++ yaffs_ext_tags *tags)
++{
++ /* Get the Tnode, then get the level 0 chunk offset */
++ yaffs_tnode_t *tn;
++ int theChunk = -1;
++ yaffs_ext_tags localTags;
+
+- if (nToCopy != dev->nDataBytesPerChunk || dev->inbandTags) {
+- /* An incomplete start or end chunk (or maybe both start and end chunk),
+- * or we're using inband tags, so we want to use the cache buffers.
+- */
+- if (dev->nShortOpCaches > 0) {
+- yaffs_ChunkCache *cache;
+- /* If we can't find the data in the cache, then load the cache */
+- cache = yaffs_FindChunkCache(in, chunk);
++ yaffs_dev_t *dev = in->my_dev;
++ int retVal = -1;
+
+- if (!cache
+- && yaffs_CheckSpaceForAllocation(in->
+- myDev)) {
+- cache = yaffs_GrabChunkCache(in->myDev);
+- cache->object = in;
+- cache->chunkId = chunk;
+- cache->dirty = 0;
+- cache->locked = 0;
+- yaffs_ReadChunkDataFromObject(in, chunk,
+- cache->
+- data);
+- } else if (cache &&
+- !cache->dirty &&
+- !yaffs_CheckSpaceForAllocation(in->myDev)) {
+- /* Drop the cache if it was a read cache item and
+- * no space check has been made for it.
+- */
+- cache = NULL;
+- }
++ if (!tags) {
++ /* Passed a NULL, so use our own tags space */
++ tags = &localTags;
++ }
+
+- if (cache) {
+- yaffs_UseChunkCache(dev, cache, 1);
+- cache->locked = 1;
++ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
++ if (tn) {
+
+- memcpy(&cache->data[start], buffer,
+- nToCopy);
++ theChunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
++ retVal =
++ yaffs_find_chunk_in_group(dev, theChunk, tags, in->obj_id,
++ inode_chunk);
+
+- cache->locked = 0;
+- cache->nBytes = nToWriteBack;
++ /* Delete the entry in the filestructure (if found) */
++ if (retVal != -1)
++ yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
++ }
+
+- if (writeThrough) {
+- chunkWritten =
+- yaffs_WriteChunkDataToObject
+- (cache->object,
+- cache->chunkId,
+- cache->data, cache->nBytes,
+- 1);
+- cache->dirty = 0;
+- }
++ return retVal;
++}
+
+- } else {
+- chunkWritten = -1; /* fail the write */
+- }
+- } else {
+- /* An incomplete start or end chunk (or maybe both start and end chunk)
+- * Read into the local buffer then copy, then copy over and write back.
+- */
+
+- __u8 *localBuffer =
+- yaffs_GetTempBuffer(dev, __LINE__);
++int yaffs_put_chunk_in_file(yaffs_obj_t *in, int inode_chunk,
++ int nand_chunk, int in_scan)
++{
++ /* NB in_scan is zero unless scanning.
++ * For forward scanning, in_scan is > 0;
++ * for backward scanning in_scan is < 0
++ *
++ * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
++ */
+
+- yaffs_ReadChunkDataFromObject(in, chunk,
+- localBuffer);
++ yaffs_tnode_t *tn;
++ yaffs_dev_t *dev = in->my_dev;
++ int existingChunk;
++ yaffs_ext_tags existingTags;
++ yaffs_ext_tags newTags;
++ unsigned existingSerial, newSerial;
+
++ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
++ /* Just ignore an attempt at putting a chunk into a non-file during scanning
++ * If it is not during Scanning then something went wrong!
++ */
++ if (!in_scan) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ ("yaffs tragedy:attempt to put data chunk into a non-file"
++ TENDSTR)));
++ YBUG();
++ }
+
++ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
++ return YAFFS_OK;
++ }
+
+- memcpy(&localBuffer[start], buffer, nToCopy);
++ tn = yaffs_add_find_tnode_0(dev,
++ &in->variant.file_variant,
++ inode_chunk,
++ NULL);
++ if (!tn)
++ return YAFFS_FAIL;
++
++ if(!nand_chunk)
++ /* Dummy insert, bail now */
++ return YAFFS_OK;
+
+- chunkWritten =
+- yaffs_WriteChunkDataToObject(in, chunk,
+- localBuffer,
+- nToWriteBack,
+- 0);
++ existingChunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+- yaffs_ReleaseTempBuffer(dev, localBuffer,
+- __LINE__);
++ if (in_scan != 0) {
++ /* If we're scanning then we need to test for duplicates
++ * NB This does not need to be efficient since it should only ever
++ * happen when the power fails during a write, then only one
++ * chunk should ever be affected.
++ *
++ * Correction for YAFFS2: This could happen quite a lot and we need to think about efficiency! TODO
++ * Update: For backward scanning we don't need to re-read tags so this is quite cheap.
++ */
++
++ if (existingChunk > 0) {
++ /* NB Right now existing chunk will not be real chunk_id if the chunk group size > 1
++ * thus we have to do a FindChunkInFile to get the real chunk id.
++ *
++ * We have a duplicate; now we need to decide which one to use:
++ *
++ * Backwards scanning YAFFS2: The old one is what we use, dump the new one.
++ * Forward scanning YAFFS2: The new one is what we use, dump the old one.
++ * YAFFS1: Get both sets of tags and compare serial numbers.
++ */
++
++ if (in_scan > 0) {
++ /* Only do this for forward scanning */
++ yaffs_rd_chunk_tags_nand(dev,
++ nand_chunk,
++ NULL, &newTags);
+
++ /* Do a proper find */
++ existingChunk =
++ yaffs_find_chunk_in_file(in, inode_chunk,
++ &existingTags);
+ }
+
+- } else {
+- /* A full chunk. Write directly from the supplied buffer. */
++ if (existingChunk <= 0) {
++ /*Hoosterman - how did this happen? */
+
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ ("yaffs tragedy: existing chunk < 0 in scan"
++ TENDSTR)));
+
++ }
+
+- chunkWritten =
+- yaffs_WriteChunkDataToObject(in, chunk, buffer,
+- dev->nDataBytesPerChunk,
+- 0);
++ /* NB The deleted flags should be false, otherwise the chunks will
++ * not be loaded during a scan
++ */
+
+- /* Since we've overwritten the cached data, we better invalidate it. */
+- yaffs_InvalidateChunkCache(in, chunk);
+- }
++ if (in_scan > 0) {
++ newSerial = newTags.serial_number;
++ existingSerial = existingTags.serial_number;
++ }
+
+- if (chunkWritten >= 0) {
+- n -= nToCopy;
+- offset += nToCopy;
+- buffer += nToCopy;
+- nDone += nToCopy;
++ if ((in_scan > 0) &&
++ (existingChunk <= 0 ||
++ ((existingSerial + 1) & 3) == newSerial)) {
++ /* Forward scanning.
++ * Use new
++ * Delete the old one and drop through to update the tnode
++ */
++ yaffs_chunk_del(dev, existingChunk, 1,
++ __LINE__);
++ } else {
++ /* Backward scanning or we want to use the existing one
++ * Use existing.
++ * Delete the new one and return early so that the tnode isn't changed
++ */
++ yaffs_chunk_del(dev, nand_chunk, 1,
++ __LINE__);
++ return YAFFS_OK;
++ }
+ }
+
+ }
+
+- /* Update file object */
+-
+- if ((startOfWrite + nDone) > in->variant.fileVariant.fileSize)
+- in->variant.fileVariant.fileSize = (startOfWrite + nDone);
++ if (existingChunk == 0)
++ in->n_data_chunks++;
+
+- in->dirty = 1;
++ yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
+
+- return nDone;
++ return YAFFS_OK;
+ }
+
++static int yaffs_rd_data_obj(yaffs_obj_t *in, int inode_chunk,
++ __u8 *buffer)
++{
++ int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
+
+-/* ---------------------- File resizing stuff ------------------ */
++ if (nand_chunk >= 0)
++ return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
++ buffer, NULL);
++ else {
++ T(YAFFS_TRACE_NANDACCESS,
++ (TSTR("Chunk %d not found zero instead" TENDSTR),
++ nand_chunk));
++ /* get sane (zero) data if you read a hole */
++ memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
++ return 0;
++ }
++
++}
+
+-static void yaffs_PruneResizedChunks(yaffs_Object *in, int newSize)
++void yaffs_chunk_del(yaffs_dev_t *dev, int chunk_id, int mark_flash, int lyn)
+ {
++ int block;
++ int page;
++ yaffs_ext_tags tags;
++ yaffs_block_info_t *bi;
+
+- yaffs_Device *dev = in->myDev;
+- int oldFileSize = in->variant.fileVariant.fileSize;
++ if (chunk_id <= 0)
++ return;
+
+- int lastDel = 1 + (oldFileSize - 1) / dev->nDataBytesPerChunk;
++ dev->n_deletions++;
++ block = chunk_id / dev->param.chunks_per_block;
++ page = chunk_id % dev->param.chunks_per_block;
+
+- int startDel = 1 + (newSize + dev->nDataBytesPerChunk - 1) /
+- dev->nDataBytesPerChunk;
+- int i;
+- int chunkId;
+
+- /* Delete backwards so that we don't end up with holes if
+- * power is lost part-way through the operation.
++ if (!yaffs_check_chunk_bit(dev, block, page))
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Deleting invalid chunk %d"TENDSTR),
++ chunk_id));
++
++ bi = yaffs_get_block_info(dev, block);
++
++ yaffs2_update_oldest_dirty_seq(dev, block, bi);
++
++ T(YAFFS_TRACE_DELETION,
++ (TSTR("line %d delete of chunk %d" TENDSTR), lyn, chunk_id));
++
++ if (!dev->param.is_yaffs2 && mark_flash &&
++ bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
++
++ yaffs_init_tags(&tags);
++
++ tags.is_deleted = 1;
++
++ yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
++ yaffs_handle_chunk_update(dev, chunk_id, &tags);
++ } else {
++ dev->n_unmarked_deletions++;
++ }
++
++ /* Pull out of the management area.
++ * If the whole block became dirty, this will kick off an erasure.
+ */
+- for (i = lastDel; i >= startDel; i--) {
+- /* NB this could be optimised somewhat,
+- * eg. could retrieve the tags and write them without
+- * using yaffs_DeleteChunk
+- */
++ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
++ bi->block_state == YAFFS_BLOCK_STATE_FULL ||
++ bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
++ bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
++ dev->n_free_chunks++;
+
+- chunkId = yaffs_FindAndDeleteChunkInFile(in, i, NULL);
+- if (chunkId > 0) {
+- if (chunkId <
+- (dev->internalStartBlock * dev->nChunksPerBlock)
+- || chunkId >=
+- ((dev->internalEndBlock +
+- 1) * dev->nChunksPerBlock)) {
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR("Found daft chunkId %d for %d" TENDSTR),
+- chunkId, i));
+- } else {
+- in->nDataChunks--;
+- yaffs_DeleteChunk(dev, chunkId, 1, __LINE__);
+- }
++ yaffs_clear_chunk_bit(dev, block, page);
++
++ bi->pages_in_use--;
++
++ if (bi->pages_in_use == 0 &&
++ !bi->has_shrink_hdr &&
++ bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
++ bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++ yaffs_block_became_dirty(dev, block);
+ }
++
+ }
+
+ }
+
+-int yaffs_ResizeFile(yaffs_Object *in, loff_t newSize)
++static int yaffs_wr_data_obj(yaffs_obj_t *in, int inode_chunk,
++ const __u8 *buffer, int n_bytes,
++ int useReserve)
+ {
++ /* Find the old chunk. We need to do this to get the serial number.
++ * Write new one and patch into tree.
++ * Invalidate old tags.
++ */
+
+- int oldFileSize = in->variant.fileVariant.fileSize;
+- __u32 newSizeOfPartialChunk;
+- int newFullChunks;
++ int prevChunkId;
++ yaffs_ext_tags prevTags;
+
+- yaffs_Device *dev = in->myDev;
++ int newChunkId;
++ yaffs_ext_tags newTags;
+
+- yaffs_AddrToChunk(dev, newSize, &newFullChunks, &newSizeOfPartialChunk);
++ yaffs_dev_t *dev = in->my_dev;
+
+- yaffs_FlushFilesChunkCache(in);
+- yaffs_InvalidateWholeChunkCache(in);
++ yaffs_check_gc(dev,0);
+
+- yaffs_CheckGarbageCollection(dev);
++ /* Get the previous chunk at this location in the file if it exists.
++ * If it does not exist then put a zero into the tree. This creates
++ * the tnode now, rather than later when it is harder to clean up.
++ */
++ prevChunkId = yaffs_find_chunk_in_file(in, inode_chunk, &prevTags);
++ if(prevChunkId < 1 &&
++ !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
++ return 0;
+
+- if (in->variantType != YAFFS_OBJECT_TYPE_FILE)
+- return YAFFS_FAIL;
++ /* Set up new tags */
++ yaffs_init_tags(&newTags);
+
+- if (newSize == oldFileSize)
+- return YAFFS_OK;
++ newTags.chunk_id = inode_chunk;
++ newTags.obj_id = in->obj_id;
++ newTags.serial_number =
++ (prevChunkId > 0) ? prevTags.serial_number + 1 : 1;
++ newTags.n_bytes = n_bytes;
+
+- if (newSize < oldFileSize) {
++ if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("Writing %d bytes to chunk!!!!!!!!!" TENDSTR), n_bytes));
++ YBUG();
++ }
++
++
++ newChunkId =
++ yaffs_write_new_chunk(dev, buffer, &newTags,
++ useReserve);
+
+- yaffs_PruneResizedChunks(in, newSize);
++ if (newChunkId > 0) {
++ yaffs_put_chunk_in_file(in, inode_chunk, newChunkId, 0);
+
+- if (newSizeOfPartialChunk != 0) {
+- int lastChunk = 1 + newFullChunks;
++ if (prevChunkId > 0)
++ yaffs_chunk_del(dev, prevChunkId, 1, __LINE__);
+
+- __u8 *localBuffer = yaffs_GetTempBuffer(dev, __LINE__);
++ yaffs_verify_file_sane(in);
++ }
++ return newChunkId;
+
+- /* Got to read and rewrite the last chunk with its new size and zero pad */
+- yaffs_ReadChunkDataFromObject(in, lastChunk,
+- localBuffer);
++}
+
+- memset(localBuffer + newSizeOfPartialChunk, 0,
+- dev->nDataBytesPerChunk - newSizeOfPartialChunk);
++/* yaffs_update_oh() updates the object header on NAND for an object.
++ * If name is not NULL, then that new name is used.
++ */
++int yaffs_update_oh(yaffs_obj_t *in, const YCHAR *name, int force,
++ int is_shrink, int shadows, yaffs_xattr_mod *xmod)
++{
+
+- yaffs_WriteChunkDataToObject(in, lastChunk, localBuffer,
+- newSizeOfPartialChunk, 1);
++ yaffs_block_info_t *bi;
+
+- yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__);
+- }
++ yaffs_dev_t *dev = in->my_dev;
+
+- in->variant.fileVariant.fileSize = newSize;
++ int prevChunkId;
++ int retVal = 0;
++ int result = 0;
+
+- yaffs_PruneFileStructure(dev, &in->variant.fileVariant);
+- } else {
+- /* newsSize > oldFileSize */
+- in->variant.fileVariant.fileSize = newSize;
+- }
++ int newChunkId;
++ yaffs_ext_tags newTags;
++ yaffs_ext_tags oldTags;
++ const YCHAR *alias = NULL;
+
++ __u8 *buffer = NULL;
++ YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
+
+- /* Write a new object header.
+- * show we've shrunk the file, if need be
+- * Do this only if the file is not in the deleted directories.
+- */
+- if (in->parent &&
+- in->parent->objectId != YAFFS_OBJECTID_UNLINKED &&
+- in->parent->objectId != YAFFS_OBJECTID_DELETED)
+- yaffs_UpdateObjectHeader(in, NULL, 0,
+- (newSize < oldFileSize) ? 1 : 0, 0);
++ yaffs_obj_header *oh = NULL;
+
+- return YAFFS_OK;
+-}
++ yaffs_strcpy(old_name, _Y("silly old name"));
+
+-loff_t yaffs_GetFileSize(yaffs_Object *obj)
+-{
+- obj = yaffs_GetEquivalentObject(obj);
+
+- switch (obj->variantType) {
+- case YAFFS_OBJECT_TYPE_FILE:
+- return obj->variant.fileVariant.fileSize;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- return yaffs_strlen(obj->variant.symLinkVariant.alias);
+- default:
+- return 0;
+- }
+-}
++ if (!in->fake ||
++ in == dev->root_dir || /* The root_dir should also be saved */
++ force || xmod) {
+
++ yaffs_check_gc(dev,0);
++ yaffs_check_obj_details_loaded(in);
+
++ buffer = yaffs_get_temp_buffer(in->my_dev, __LINE__);
++ oh = (yaffs_obj_header *) buffer;
++
++ prevChunkId = in->hdr_chunk;
++
++ if (prevChunkId > 0) {
++ result = yaffs_rd_chunk_tags_nand(dev, prevChunkId,
++ buffer, &oldTags);
++
++ yaffs_verify_oh(in, oh, &oldTags, 0);
++
++ memcpy(old_name, oh->name, sizeof(oh->name));
++ memset(buffer, 0xFF, sizeof(yaffs_obj_header));
++ } else
++ memset(buffer, 0xFF, dev->data_bytes_per_chunk);
++
++ oh->type = in->variant_type;
++ oh->yst_mode = in->yst_mode;
++ oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
+
+-int yaffs_FlushFile(yaffs_Object *in, int updateTime)
+-{
+- int retVal;
+- if (in->dirty) {
+- yaffs_FlushFilesChunkCache(in);
+- if (updateTime) {
+ #ifdef CONFIG_YAFFS_WINCE
+- yfsd_WinFileTimeNow(in->win_mtime);
++ oh->win_atime[0] = in->win_atime[0];
++ oh->win_ctime[0] = in->win_ctime[0];
++ oh->win_mtime[0] = in->win_mtime[0];
++ oh->win_atime[1] = in->win_atime[1];
++ oh->win_ctime[1] = in->win_ctime[1];
++ oh->win_mtime[1] = in->win_mtime[1];
+ #else
++ oh->yst_uid = in->yst_uid;
++ oh->yst_gid = in->yst_gid;
++ oh->yst_atime = in->yst_atime;
++ oh->yst_mtime = in->yst_mtime;
++ oh->yst_ctime = in->yst_ctime;
++ oh->yst_rdev = in->yst_rdev;
++#endif
++ if (in->parent)
++ oh->parent_obj_id = in->parent->obj_id;
++ else
++ oh->parent_obj_id = 0;
++
++ if (name && *name) {
++ memset(oh->name, 0, sizeof(oh->name));
++ yaffs_load_oh_from_name(dev,oh->name,name);
++ } else if (prevChunkId > 0)
++ memcpy(oh->name, old_name, sizeof(oh->name));
++ else
++ memset(oh->name, 0, sizeof(oh->name));
+
+- in->yst_mtime = Y_CURRENT_TIME;
++ oh->is_shrink = is_shrink;
+
+-#endif
++ switch (in->variant_type) {
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* Should not happen */
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++ oh->file_size =
++ (oh->parent_obj_id == YAFFS_OBJECTID_DELETED
++ || oh->parent_obj_id ==
++ YAFFS_OBJECTID_UNLINKED) ? 0 : in->variant.
++ file_variant.file_size;
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ oh->equiv_id =
++ in->variant.hardlink_variant.equiv_id;
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ alias = in->variant.symlink_variant.alias;
++ if(!alias)
++ alias = _Y("no alias");
++ yaffs_strncpy(oh->alias,
++ alias,
++ YAFFS_MAX_ALIAS_LENGTH);
++ oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
++ break;
+ }
+
+- retVal = (yaffs_UpdateObjectHeader(in, NULL, 0, 0, 0) >=
+- 0) ? YAFFS_OK : YAFFS_FAIL;
+- } else {
+- retVal = YAFFS_OK;
+- }
++ /* process any xattrib modifications */
++ if(xmod)
++ yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
+
+- return retVal;
+
+-}
++ /* Tags */
++ yaffs_init_tags(&newTags);
++ in->serial++;
++ newTags.chunk_id = 0;
++ newTags.obj_id = in->obj_id;
++ newTags.serial_number = in->serial;
+
+-static int yaffs_DoGenericObjectDeletion(yaffs_Object *in)
+-{
++ /* Add extra info for file header */
+
+- /* First off, invalidate the file's data in the cache, without flushing. */
+- yaffs_InvalidateWholeChunkCache(in);
++ newTags.extra_available = 1;
++ newTags.extra_parent_id = oh->parent_obj_id;
++ newTags.extra_length = oh->file_size;
++ newTags.extra_is_shrink = oh->is_shrink;
++ newTags.extra_equiv_id = oh->equiv_id;
++ newTags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
++ newTags.extra_obj_type = in->variant_type;
+
+- if (in->myDev->isYaffs2 && (in->parent != in->myDev->deletedDir)) {
+- /* Move to the unlinked directory so we have a record that it was deleted. */
+- yaffs_ChangeObjectName(in, in->myDev->deletedDir, _Y("deleted"), 0, 0);
++ yaffs_verify_oh(in, oh, &newTags, 1);
+
+- }
++ /* Create new chunk in NAND */
++ newChunkId =
++ yaffs_write_new_chunk(dev, buffer, &newTags,
++ (prevChunkId > 0) ? 1 : 0);
+
+- yaffs_RemoveObjectFromDirectory(in);
+- yaffs_DeleteChunk(in->myDev, in->hdrChunk, 1, __LINE__);
+- in->hdrChunk = 0;
++ if (newChunkId >= 0) {
+
+- yaffs_FreeObject(in);
+- return YAFFS_OK;
++ in->hdr_chunk = newChunkId;
+
+-}
++ if (prevChunkId > 0) {
++ yaffs_chunk_del(dev, prevChunkId, 1,
++ __LINE__);
++ }
+
+-/* yaffs_DeleteFile deletes the whole file data
+- * and the inode associated with the file.
+- * It does not delete the links associated with the file.
+- */
+-static int yaffs_UnlinkFileIfNeeded(yaffs_Object *in)
+-{
++ if (!yaffs_obj_cache_dirty(in))
++ in->dirty = 0;
+
+- int retVal;
+- int immediateDeletion = 0;
++ /* If this was a shrink, then mark the block that the chunk lives on */
++ if (is_shrink) {
++ bi = yaffs_get_block_info(in->my_dev,
++ newChunkId / in->my_dev->param.chunks_per_block);
++ bi->has_shrink_hdr = 1;
++ }
+
+-#ifdef __KERNEL__
+- if (!in->myInode)
+- immediateDeletion = 1;
+-#else
+- if (in->inUse <= 0)
+- immediateDeletion = 1;
+-#endif
++ }
++
++ retVal = newChunkId;
+
+- if (immediateDeletion) {
+- retVal =
+- yaffs_ChangeObjectName(in, in->myDev->deletedDir,
+- _Y("deleted"), 0, 0);
+- T(YAFFS_TRACE_TRACING,
+- (TSTR("yaffs: immediate deletion of file %d" TENDSTR),
+- in->objectId));
+- in->deleted = 1;
+- in->myDev->nDeletedFiles++;
+- if (1 || in->myDev->isYaffs2)
+- yaffs_ResizeFile(in, 0);
+- yaffs_SoftDeleteFile(in);
+- } else {
+- retVal =
+- yaffs_ChangeObjectName(in, in->myDev->unlinkedDir,
+- _Y("unlinked"), 0, 0);
+ }
+
++ if (buffer)
++ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+ return retVal;
+ }
+
+-int yaffs_DeleteFile(yaffs_Object *in)
+-{
+- int retVal = YAFFS_OK;
+- int deleted = in->deleted;
+-
+- yaffs_ResizeFile(in, 0);
+-
+- if (in->nDataChunks > 0) {
+- /* Use soft deletion if there is data in the file.
+- * That won't be the case if it has been resized to zero.
+- */
+- if (!in->unlinked)
+- retVal = yaffs_UnlinkFileIfNeeded(in);
+-
+- if (retVal == YAFFS_OK && in->unlinked && !in->deleted) {
+- in->deleted = 1;
+- deleted = 1;
+- in->myDev->nDeletedFiles++;
+- yaffs_SoftDeleteFile(in);
+- }
+- return deleted ? YAFFS_OK : YAFFS_FAIL;
+- } else {
+- /* The file has no data chunks so we toss it immediately */
+- yaffs_FreeTnode(in->myDev, in->variant.fileVariant.top);
+- in->variant.fileVariant.top = NULL;
+- yaffs_DoGenericObjectDeletion(in);
+-
+- return YAFFS_OK;
+- }
+-}
++/*------------------------ Short Operations Cache ----------------------------------------
++ * In many situations where there is no high level buffering (eg WinCE) a lot of
++ * reads might be short sequential reads, and a lot of writes may be short
++ * sequential writes. eg. scanning/writing a jpeg file.
++ * In these cases, a short read/write cache can provide a huge performance benefit
++ * with dumb-as-a-rock code.
++ * In Linux, the page cache provides read buffering and the short op cache provides write
++ * buffering.
++ *
++ * There are a limited number (~10) of cache chunks per device so that we don't
++ * need a very intelligent search.
++ */
+
+-static int yaffs_DeleteDirectory(yaffs_Object *in)
++static int yaffs_obj_cache_dirty(yaffs_obj_t *obj)
+ {
+- /* First check that the directory is empty. */
+- if (ylist_empty(&in->variant.directoryVariant.children))
+- return yaffs_DoGenericObjectDeletion(in);
++ yaffs_dev_t *dev = obj->my_dev;
++ int i;
++ yaffs_cache_t *cache;
++ int nCaches = obj->my_dev->param.n_caches;
+
+- return YAFFS_FAIL;
++ for (i = 0; i < nCaches; i++) {
++ cache = &dev->cache[i];
++ if (cache->object == obj &&
++ cache->dirty)
++ return 1;
++ }
+
++ return 0;
+ }
+
+-static int yaffs_DeleteSymLink(yaffs_Object *in)
+-{
+- YFREE(in->variant.symLinkVariant.alias);
+-
+- return yaffs_DoGenericObjectDeletion(in);
+-}
+
+-static int yaffs_DeleteHardLink(yaffs_Object *in)
++static void yaffs_flush_file_cache(yaffs_obj_t *obj)
+ {
+- /* remove this hardlink from the list assocaited with the equivalent
+- * object
+- */
+- ylist_del_init(&in->hardLinks);
+- return yaffs_DoGenericObjectDeletion(in);
+-}
++ yaffs_dev_t *dev = obj->my_dev;
++ int lowest = -99; /* Stop compiler whining. */
++ int i;
++ yaffs_cache_t *cache;
++ int chunkWritten = 0;
++ int nCaches = obj->my_dev->param.n_caches;
+
+-int yaffs_DeleteObject(yaffs_Object *obj)
+-{
+-int retVal = -1;
+- switch (obj->variantType) {
+- case YAFFS_OBJECT_TYPE_FILE:
+- retVal = yaffs_DeleteFile(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- return yaffs_DeleteDirectory(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- retVal = yaffs_DeleteSymLink(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- retVal = yaffs_DeleteHardLink(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- retVal = yaffs_DoGenericObjectDeletion(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- retVal = 0;
+- break; /* should not happen. */
+- }
++ if (nCaches > 0) {
++ do {
++ cache = NULL;
+
+- return retVal;
+-}
++ /* Find the dirty cache for this object with the lowest chunk id. */
++ for (i = 0; i < nCaches; i++) {
++ if (dev->cache[i].object == obj &&
++ dev->cache[i].dirty) {
++ if (!cache
++ || dev->cache[i].chunk_id <
++ lowest) {
++ cache = &dev->cache[i];
++ lowest = cache->chunk_id;
++ }
++ }
++ }
+
+-static int yaffs_UnlinkWorker(yaffs_Object *obj)
+-{
++ if (cache && !cache->locked) {
++ /* Write it out and free it up */
+
+- int immediateDeletion = 0;
++ chunkWritten =
++ yaffs_wr_data_obj(cache->object,
++ cache->chunk_id,
++ cache->data,
++ cache->n_bytes,
++ 1);
++ cache->dirty = 0;
++ cache->object = NULL;
++ }
+
+-#ifdef __KERNEL__
+- if (!obj->myInode)
+- immediateDeletion = 1;
+-#else
+- if (obj->inUse <= 0)
+- immediateDeletion = 1;
+-#endif
++ } while (cache && chunkWritten > 0);
+
+- if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
+- return yaffs_DeleteHardLink(obj);
+- } else if (!ylist_empty(&obj->hardLinks)) {
+- /* Curve ball: We're unlinking an object that has a hardlink.
+- *
+- * This problem arises because we are not strictly following
+- * The Linux link/inode model.
+- *
+- * We can't really delete the object.
+- * Instead, we do the following:
+- * - Select a hardlink.
+- * - Unhook it from the hard links
+- * - Unhook it from its parent directory (so that the rename can work)
+- * - Rename the object to the hardlink's name.
+- * - Delete the hardlink
+- */
++ if (cache) {
++ /* Hoosterman, disk full while writing cache out. */
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("yaffs tragedy: no space during cache write" TENDSTR)));
+
+- yaffs_Object *hl;
+- int retVal;
+- YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
++ }
++ }
+
+- hl = ylist_entry(obj->hardLinks.next, yaffs_Object, hardLinks);
++}
+
+- ylist_del_init(&hl->hardLinks);
+- ylist_del_init(&hl->siblings);
++/* yaffs_flush_whole_cache(dev)
++ *
++ * Flushes every dirty entry in the device's short-op cache.
++ */
+
+- yaffs_GetObjectName(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
++void yaffs_flush_whole_cache(yaffs_dev_t *dev)
++{
++ yaffs_obj_t *obj;
++ int nCaches = dev->param.n_caches;
++ int i;
+
+- retVal = yaffs_ChangeObjectName(obj, hl->parent, name, 0, 0);
++ /* Find a dirty object in the cache and flush it...
++ * until there are no further dirty objects.
++ */
++ do {
++ obj = NULL;
++ for (i = 0; i < nCaches && !obj; i++) {
++ if (dev->cache[i].object &&
++ dev->cache[i].dirty)
++ obj = dev->cache[i].object;
+
+- if (retVal == YAFFS_OK)
+- retVal = yaffs_DoGenericObjectDeletion(hl);
++ }
++ if (obj)
++ yaffs_flush_file_cache(obj);
+
+- return retVal;
++ } while (obj);
+
+- } else if (immediateDeletion) {
+- switch (obj->variantType) {
+- case YAFFS_OBJECT_TYPE_FILE:
+- return yaffs_DeleteFile(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- return yaffs_DeleteDirectory(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- return yaffs_DeleteSymLink(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- return yaffs_DoGenericObjectDeletion(obj);
+- break;
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- default:
+- return YAFFS_FAIL;
+- }
+- } else
+- return yaffs_ChangeObjectName(obj, obj->myDev->unlinkedDir,
+- _Y("unlinked"), 0, 0);
+ }
+
+
+-static int yaffs_UnlinkObject(yaffs_Object *obj)
++/* Grab us a cache chunk for use.
++ * First look for an empty one.
++ * Then look for the least recently used non-dirty one.
++ * Then look for the least recently used dirty one, flush it and look again.
++ */
++static yaffs_cache_t *yaffs_grab_chunk_worker(yaffs_dev_t *dev)
+ {
++ int i;
+
+- if (obj && obj->unlinkAllowed)
+- return yaffs_UnlinkWorker(obj);
+-
+- return YAFFS_FAIL;
+-
+-}
+-int yaffs_Unlink(yaffs_Object *dir, const YCHAR *name)
+-{
+- yaffs_Object *obj;
++ if (dev->param.n_caches > 0) {
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (!dev->cache[i].object)
++ return &dev->cache[i];
++ }
++ }
+
+- obj = yaffs_FindObjectByName(dir, name);
+- return yaffs_UnlinkObject(obj);
++ return NULL;
+ }
+
+-/*----------------------- Initialisation Scanning ---------------------- */
+-
+-static void yaffs_HandleShadowedObject(yaffs_Device *dev, int objId,
+- int backwardScanning)
++static yaffs_cache_t *yaffs_grab_chunk_cache(yaffs_dev_t *dev)
+ {
+- yaffs_Object *obj;
++ yaffs_cache_t *cache;
++ yaffs_obj_t *theObj;
++ int usage;
++ int i;
++ int pushout;
+
+- if (!backwardScanning) {
+- /* Handle YAFFS1 forward scanning case
+- * For YAFFS1 we always do the deletion
+- */
++ if (dev->param.n_caches > 0) {
++ /* Try to find a non-dirty one... */
+
+- } else {
+- /* Handle YAFFS2 case (backward scanning)
+- * If the shadowed object exists then ignore.
+- */
+- if (yaffs_FindObjectByNumber(dev, objId))
+- return;
+- }
++ cache = yaffs_grab_chunk_worker(dev);
+
+- /* Let's create it (if it does not exist) assuming it is a file so that it can do shrinking etc.
+- * We put it in unlinked dir to be cleaned up after the scanning
+- */
+- obj =
+- yaffs_FindOrCreateObjectByNumber(dev, objId,
+- YAFFS_OBJECT_TYPE_FILE);
+- if (!obj)
+- return;
+- yaffs_AddObjectToDirectory(dev->unlinkedDir, obj);
+- obj->variant.fileVariant.shrinkSize = 0;
+- obj->valid = 1; /* So that we don't read any other info for this file */
++ if (!cache) {
++ /* They were all dirty, find the least recently used object and flush
++ * its cache, then find again.
++ * NB what's here is not very accurate: we actually flush the object
++ * that owns the least recently used page.
++ */
+
+-}
++ /* With locking we can't assume we can use entry zero */
+
+-typedef struct {
+- int seq;
+- int block;
+-} yaffs_BlockIndex;
++ theObj = NULL;
++ usage = -1;
++ cache = NULL;
++ pushout = -1;
+
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].object &&
++ !dev->cache[i].locked &&
++ (dev->cache[i].last_use < usage || !cache)) {
++ usage = dev->cache[i].last_use;
++ theObj = dev->cache[i].object;
++ cache = &dev->cache[i];
++ pushout = i;
++ }
++ }
+
+-static void yaffs_HardlinkFixup(yaffs_Device *dev, yaffs_Object *hardList)
+-{
+- yaffs_Object *hl;
+- yaffs_Object *in;
++ if (!cache || cache->dirty) {
++ /* Flush and try again */
++ yaffs_flush_file_cache(theObj);
++ cache = yaffs_grab_chunk_worker(dev);
++ }
+
+- while (hardList) {
+- hl = hardList;
+- hardList = (yaffs_Object *) (hardList->hardLinks.next);
++ }
++ return cache;
++ } else
++ return NULL;
+
+- in = yaffs_FindObjectByNumber(dev,
+- hl->variant.hardLinkVariant.
+- equivalentObjectId);
++}
+
+- if (in) {
+- /* Add the hardlink pointers */
+- hl->variant.hardLinkVariant.equivalentObject = in;
+- ylist_add(&hl->hardLinks, &in->hardLinks);
+- } else {
+- /* Todo Need to report/handle this better.
+- * Got a problem... hardlink to a non-existant object
+- */
+- hl->variant.hardLinkVariant.equivalentObject = NULL;
+- YINIT_LIST_HEAD(&hl->hardLinks);
++/* Find a cached chunk */
++static yaffs_cache_t *yaffs_find_chunk_cache(const yaffs_obj_t *obj,
++ int chunk_id)
++{
++ yaffs_dev_t *dev = obj->my_dev;
++ int i;
++ if (dev->param.n_caches > 0) {
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].object == obj &&
++ dev->cache[i].chunk_id == chunk_id) {
++ dev->cache_hits++;
+
++ return &dev->cache[i];
++ }
+ }
+ }
++ return NULL;
+ }
+
++/* Mark the chunk for the least recently used algorithm */
++static void yaffs_use_cache(yaffs_dev_t *dev, yaffs_cache_t *cache,
++ int isAWrite)
++{
++
++ if (dev->param.n_caches > 0) {
++ if (dev->cache_last_use < 0 || dev->cache_last_use > 100000000) {
++ /* Reset the cache usages */
++ int i;
++ for (i = 1; i < dev->param.n_caches; i++)
++ dev->cache[i].last_use = 0;
+
++ dev->cache_last_use = 0;
++ }
+
++ dev->cache_last_use++;
+
++ cache->last_use = dev->cache_last_use;
+
+-static int ybicmp(const void *a, const void *b)
+-{
+- register int aseq = ((yaffs_BlockIndex *)a)->seq;
+- register int bseq = ((yaffs_BlockIndex *)b)->seq;
+- register int ablock = ((yaffs_BlockIndex *)a)->block;
+- register int bblock = ((yaffs_BlockIndex *)b)->block;
+- if (aseq == bseq)
+- return ablock - bblock;
+- else
+- return aseq - bseq;
++ if (isAWrite)
++ cache->dirty = 1;
++ }
+ }
+
++/* Invalidate a single cache page.
++ * Do this when a whole page gets written,
++ * ie the short cache for this page is no longer valid.
++ */
++static void yaffs_invalidate_chunk_cache(yaffs_obj_t *object, int chunk_id)
++{
++ if (object->my_dev->param.n_caches > 0) {
++ yaffs_cache_t *cache = yaffs_find_chunk_cache(object, chunk_id);
+
+-struct yaffs_ShadowFixerStruct {
+- int objectId;
+- int shadowedId;
+- struct yaffs_ShadowFixerStruct *next;
+-};
+-
++ if (cache)
++ cache->object = NULL;
++ }
++}
+
+-static void yaffs_StripDeletedObjects(yaffs_Device *dev)
++/* Invalidate all the cache pages associated with this object
++ * Do this whenever the file is deleted or resized.
++ */
++static void yaffs_invalidate_whole_cache(yaffs_obj_t *in)
+ {
+- /*
+- * Sort out state of unlinked and deleted objects after scanning.
+- */
+- struct ylist_head *i;
+- struct ylist_head *n;
+- yaffs_Object *l;
++ int i;
++ yaffs_dev_t *dev = in->my_dev;
+
+- /* Soft delete all the unlinked files */
+- ylist_for_each_safe(i, n,
+- &dev->unlinkedDir->variant.directoryVariant.children) {
+- if (i) {
+- l = ylist_entry(i, yaffs_Object, siblings);
+- yaffs_DeleteObject(l);
++ if (dev->param.n_caches > 0) {
++ /* Invalidate it. */
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].object == in)
++ dev->cache[i].object = NULL;
+ }
+ }
++}
+
+- ylist_for_each_safe(i, n,
+- &dev->deletedDir->variant.directoryVariant.children) {
+- if (i) {
+- l = ylist_entry(i, yaffs_Object, siblings);
+- yaffs_DeleteObject(l);
+- }
+- }
+
+-}
++/*--------------------- File read/write ------------------------
++ * Read and write have very similar structures.
++ * In general the read/write has three parts to it
++ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
++ * Some complete chunks
++ * An incomplete chunk to end off with
++ *
++ * Curve-balls: the first chunk might also be the last chunk.
++ */
+
+-static int yaffs_Scan(yaffs_Device *dev)
++int yaffs_file_rd(yaffs_obj_t *in, __u8 *buffer, loff_t offset,
++ int n_bytes)
+ {
+- yaffs_ExtendedTags tags;
+- int blk;
+- int blockIterator;
+- int startIterator;
+- int endIterator;
+- int result;
+
+ int chunk;
+- int c;
+- int deleted;
+- yaffs_BlockState state;
+- yaffs_Object *hardList = NULL;
+- yaffs_BlockInfo *bi;
+- __u32 sequenceNumber;
+- yaffs_ObjectHeader *oh;
+- yaffs_Object *in;
+- yaffs_Object *parent;
++ __u32 start;
++ int nToCopy;
++ int n = n_bytes;
++ int nDone = 0;
++ yaffs_cache_t *cache;
+
+- int alloc_failed = 0;
++ yaffs_dev_t *dev;
+
+- struct yaffs_ShadowFixerStruct *shadowFixerList = NULL;
++ dev = in->my_dev;
+
++ while (n > 0) {
++ /* chunk = offset / dev->data_bytes_per_chunk + 1; */
++ /* start = offset % dev->data_bytes_per_chunk; */
++ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
++ chunk++;
+
+- __u8 *chunkData;
++ /* OK now check for the curveball where the start and end are in
++ * the same chunk.
++ */
++ if ((start + n) < dev->data_bytes_per_chunk)
++ nToCopy = n;
++ else
++ nToCopy = dev->data_bytes_per_chunk - start;
+
++ cache = yaffs_find_chunk_cache(in, chunk);
+
++ /* If the chunk is already in the cache or it is less than a whole chunk
++ * or we're using inband tags then use the cache (if there is caching)
++ * else bypass the cache.
++ */
++ if (cache || nToCopy != dev->data_bytes_per_chunk || dev->param.inband_tags) {
++ if (dev->param.n_caches > 0) {
+
+- T(YAFFS_TRACE_SCAN,
+- (TSTR("yaffs_Scan starts intstartblk %d intendblk %d..." TENDSTR),
+- dev->internalStartBlock, dev->internalEndBlock));
++ /* If we can't find the data in the cache, then load it up. */
+
+- chunkData = yaffs_GetTempBuffer(dev, __LINE__);
++ if (!cache) {
++ cache = yaffs_grab_chunk_cache(in->my_dev);
++ cache->object = in;
++ cache->chunk_id = chunk;
++ cache->dirty = 0;
++ cache->locked = 0;
++ yaffs_rd_data_obj(in, chunk,
++ cache->
++ data);
++ cache->n_bytes = 0;
++ }
+
+- dev->sequenceNumber = YAFFS_LOWEST_SEQUENCE_NUMBER;
++ yaffs_use_cache(dev, cache, 0);
+
+- /* Scan all the blocks to determine their state */
+- for (blk = dev->internalStartBlock; blk <= dev->internalEndBlock; blk++) {
+- bi = yaffs_GetBlockInfo(dev, blk);
+- yaffs_ClearChunkBits(dev, blk);
+- bi->pagesInUse = 0;
+- bi->softDeletions = 0;
++ cache->locked = 1;
+
+- yaffs_QueryInitialBlockState(dev, blk, &state, &sequenceNumber);
+
+- bi->blockState = state;
+- bi->sequenceNumber = sequenceNumber;
++ memcpy(buffer, &cache->data[start], nToCopy);
+
+- if (bi->sequenceNumber == YAFFS_SEQUENCE_BAD_BLOCK)
+- bi->blockState = state = YAFFS_BLOCK_STATE_DEAD;
++ cache->locked = 0;
++ } else {
++ /* Read into the local buffer then copy..*/
+
+- T(YAFFS_TRACE_SCAN_DEBUG,
+- (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
+- state, sequenceNumber));
++ __u8 *localBuffer =
++ yaffs_get_temp_buffer(dev, __LINE__);
++ yaffs_rd_data_obj(in, chunk,
++ localBuffer);
+
+- if (state == YAFFS_BLOCK_STATE_DEAD) {
+- T(YAFFS_TRACE_BAD_BLOCKS,
+- (TSTR("block %d is bad" TENDSTR), blk));
+- } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+- T(YAFFS_TRACE_SCAN_DEBUG,
+- (TSTR("Block empty " TENDSTR)));
+- dev->nErasedBlocks++;
+- dev->nFreeChunks += dev->nChunksPerBlock;
+- }
+- }
++ memcpy(buffer, &localBuffer[start], nToCopy);
+
+- startIterator = dev->internalStartBlock;
+- endIterator = dev->internalEndBlock;
+
+- /* For each block.... */
+- for (blockIterator = startIterator; !alloc_failed && blockIterator <= endIterator;
+- blockIterator++) {
++ yaffs_release_temp_buffer(dev, localBuffer,
++ __LINE__);
++ }
+
+- YYIELD();
++ } else {
+
+- YYIELD();
++ /* A full chunk. Read directly into the supplied buffer. */
++ yaffs_rd_data_obj(in, chunk, buffer);
+
+- blk = blockIterator;
++ }
+
+- bi = yaffs_GetBlockInfo(dev, blk);
+- state = bi->blockState;
++ n -= nToCopy;
++ offset += nToCopy;
++ buffer += nToCopy;
++ nDone += nToCopy;
+
+- deleted = 0;
++ }
+
+- /* For each chunk in each block that needs scanning....*/
+- for (c = 0; !alloc_failed && c < dev->nChunksPerBlock &&
+- state == YAFFS_BLOCK_STATE_NEEDS_SCANNING; c++) {
+- /* Read the tags and decide what to do */
+- chunk = blk * dev->nChunksPerBlock + c;
++ return nDone;
++}
+
+- result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
+- &tags);
++int yaffs_do_file_wr(yaffs_obj_t *in, const __u8 *buffer, loff_t offset,
++ int n_bytes, int write_trhrough)
++{
+
+- /* Let's have a good look at this chunk... */
++ int chunk;
++ __u32 start;
++ int nToCopy;
++ int n = n_bytes;
++ int nDone = 0;
++ int nToWriteBack;
++ int startOfWrite = offset;
++ int chunkWritten = 0;
++ __u32 n_bytesRead;
++ __u32 chunkStart;
+
+- if (tags.eccResult == YAFFS_ECC_RESULT_UNFIXED || tags.chunkDeleted) {
+- /* YAFFS1 only...
+- * A deleted chunk
+- */
+- deleted++;
+- dev->nFreeChunks++;
+- /*T((" %d %d deleted\n",blk,c)); */
+- } else if (!tags.chunkUsed) {
+- /* An unassigned chunk in the block
+- * This means that either the block is empty or
+- * this is the one being allocated from
+- */
++ yaffs_dev_t *dev;
+
+- if (c == 0) {
+- /* We're looking at the first chunk in the block so the block is unused */
+- state = YAFFS_BLOCK_STATE_EMPTY;
+- dev->nErasedBlocks++;
+- } else {
+- /* this is the block being allocated from */
+- T(YAFFS_TRACE_SCAN,
+- (TSTR
+- (" Allocating from %d %d" TENDSTR),
+- blk, c));
+- state = YAFFS_BLOCK_STATE_ALLOCATING;
+- dev->allocationBlock = blk;
+- dev->allocationPage = c;
+- dev->allocationBlockFinder = blk;
+- /* Set it to here to encourage the allocator to go forth from here. */
++ dev = in->my_dev;
+
+- }
++ while (n > 0 && chunkWritten >= 0) {
++ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+
+- dev->nFreeChunks += (dev->nChunksPerBlock - c);
+- } else if (tags.chunkId > 0) {
+- /* chunkId > 0 so it is a data chunk... */
+- unsigned int endpos;
+-
+- yaffs_SetChunkBit(dev, blk, c);
+- bi->pagesInUse++;
+-
+- in = yaffs_FindOrCreateObjectByNumber(dev,
+- tags.
+- objectId,
+- YAFFS_OBJECT_TYPE_FILE);
+- /* PutChunkIntoFile checks for a clash (two data chunks with
+- * the same chunkId).
+- */
++ if (chunk * dev->data_bytes_per_chunk + start != offset ||
++ start >= dev->data_bytes_per_chunk) {
++ T(YAFFS_TRACE_ERROR, (
++ TSTR("AddrToChunk of offset %d gives chunk %d start %d"
++ TENDSTR),
++ (int)offset, chunk, start));
++ }
++ chunk++; /* File pos to chunk in file offset */
+
+- if (!in)
+- alloc_failed = 1;
++ /* OK now check for the curveball where the start and end are in
++ * the same chunk.
++ */
+
+- if (in) {
+- if (!yaffs_PutChunkIntoFile(in, tags.chunkId, chunk, 1))
+- alloc_failed = 1;
+- }
++ if ((start + n) < dev->data_bytes_per_chunk) {
++ nToCopy = n;
+
+- endpos =
+- (tags.chunkId - 1) * dev->nDataBytesPerChunk +
+- tags.byteCount;
+- if (in &&
+- in->variantType == YAFFS_OBJECT_TYPE_FILE
+- && in->variant.fileVariant.scannedFileSize <
+- endpos) {
+- in->variant.fileVariant.
+- scannedFileSize = endpos;
+- if (!dev->useHeaderFileSize) {
+- in->variant.fileVariant.
+- fileSize =
+- in->variant.fileVariant.
+- scannedFileSize;
+- }
++ /* Now folks, to calculate how many bytes to write back....
++ * If we're overwriting and not writing to the end of the file then
++ * we need to write back as much as was there before.
++ */
+
+- }
+- /* T((" %d %d data %d %d\n",blk,c,tags.objectId,tags.chunkId)); */
+- } else {
+- /* chunkId == 0, so it is an ObjectHeader.
+- * Thus, we read in the object header and make the object
+- */
+- yaffs_SetChunkBit(dev, blk, c);
+- bi->pagesInUse++;
++ chunkStart = ((chunk - 1) * dev->data_bytes_per_chunk);
+
+- result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk,
+- chunkData,
+- NULL);
+-
+- oh = (yaffs_ObjectHeader *) chunkData;
+-
+- in = yaffs_FindObjectByNumber(dev,
+- tags.objectId);
+- if (in && in->variantType != oh->type) {
+- /* This should not happen, but somehow
+- * Wev'e ended up with an objectId that has been reused but not yet
+- * deleted, and worse still it has changed type. Delete the old object.
+- */
++ if (chunkStart > in->variant.file_variant.file_size)
++ n_bytesRead = 0; /* Past end of file */
++ else
++ n_bytesRead = in->variant.file_variant.file_size - chunkStart;
+
+- yaffs_DeleteObject(in);
++ if (n_bytesRead > dev->data_bytes_per_chunk)
++ n_bytesRead = dev->data_bytes_per_chunk;
+
+- in = 0;
+- }
++ nToWriteBack =
++ (n_bytesRead >
++ (start + n)) ? n_bytesRead : (start + n);
+
+- in = yaffs_FindOrCreateObjectByNumber(dev,
+- tags.
+- objectId,
+- oh->type);
+-
+- if (!in)
+- alloc_failed = 1;
+-
+- if (in && oh->shadowsObject > 0) {
+-
+- struct yaffs_ShadowFixerStruct *fixer;
+- fixer = YMALLOC(sizeof(struct yaffs_ShadowFixerStruct));
+- if (fixer) {
+- fixer->next = shadowFixerList;
+- shadowFixerList = fixer;
+- fixer->objectId = tags.objectId;
+- fixer->shadowedId = oh->shadowsObject;
+- }
++ if (nToWriteBack < 0 || nToWriteBack > dev->data_bytes_per_chunk)
++ YBUG();
++
++ } else {
++ nToCopy = dev->data_bytes_per_chunk - start;
++ nToWriteBack = dev->data_bytes_per_chunk;
++ }
++
++ if (nToCopy != dev->data_bytes_per_chunk || dev->param.inband_tags) {
++ /* An incomplete start or end chunk (or maybe both start and end chunk),
++ * or we're using inband tags, so we want to use the cache buffers.
++ */
++ if (dev->param.n_caches > 0) {
++ yaffs_cache_t *cache;
++ /* If we can't find the data in the cache, then load the cache */
++ cache = yaffs_find_chunk_cache(in, chunk);
+
++ if (!cache
++ && yaffs_check_alloc_available(dev, 1)) {
++ cache = yaffs_grab_chunk_cache(dev);
++ cache->object = in;
++ cache->chunk_id = chunk;
++ cache->dirty = 0;
++ cache->locked = 0;
++ yaffs_rd_data_obj(in, chunk,
++ cache->data);
++ } else if (cache &&
++ !cache->dirty &&
++ !yaffs_check_alloc_available(dev, 1)) {
++ /* Drop the cache if it was a read cache item and
++ * no space check has been made for it.
++ */
++ cache = NULL;
+ }
+
+- if (in && in->valid) {
+- /* We have already filled this one. We have a duplicate and need to resolve it. */
++ if (cache) {
++ yaffs_use_cache(dev, cache, 1);
++ cache->locked = 1;
+
+- unsigned existingSerial = in->serial;
+- unsigned newSerial = tags.serialNumber;
+
+- if (((existingSerial + 1) & 3) == newSerial) {
+- /* Use new one - destroy the exisiting one */
+- yaffs_DeleteChunk(dev,
+- in->hdrChunk,
+- 1, __LINE__);
+- in->valid = 0;
+- } else {
+- /* Use existing - destroy this one. */
+- yaffs_DeleteChunk(dev, chunk, 1,
+- __LINE__);
++ memcpy(&cache->data[start], buffer,
++ nToCopy);
++
++
++ cache->locked = 0;
++ cache->n_bytes = nToWriteBack;
++
++ if (write_trhrough) {
++ chunkWritten =
++ yaffs_wr_data_obj
++ (cache->object,
++ cache->chunk_id,
++ cache->data, cache->n_bytes,
++ 1);
++ cache->dirty = 0;
+ }
++
++ } else {
++ chunkWritten = -1; /* fail the write */
+ }
++ } else {
++ /* An incomplete start or end chunk (or maybe both start and end chunk)
++ * Read into the local buffer then copy, then copy over and write back.
++ */
+
+- if (in && !in->valid &&
+- (tags.objectId == YAFFS_OBJECTID_ROOT ||
+- tags.objectId == YAFFS_OBJECTID_LOSTNFOUND)) {
+- /* We only load some info, don't fiddle with directory structure */
+- in->valid = 1;
+- in->variantType = oh->type;
++ __u8 *localBuffer =
++ yaffs_get_temp_buffer(dev, __LINE__);
+
+- in->yst_mode = oh->yst_mode;
+-#ifdef CONFIG_YAFFS_WINCE
+- in->win_atime[0] = oh->win_atime[0];
+- in->win_ctime[0] = oh->win_ctime[0];
+- in->win_mtime[0] = oh->win_mtime[0];
+- in->win_atime[1] = oh->win_atime[1];
+- in->win_ctime[1] = oh->win_ctime[1];
+- in->win_mtime[1] = oh->win_mtime[1];
+-#else
+- in->yst_uid = oh->yst_uid;
+- in->yst_gid = oh->yst_gid;
+- in->yst_atime = oh->yst_atime;
+- in->yst_mtime = oh->yst_mtime;
+- in->yst_ctime = oh->yst_ctime;
+- in->yst_rdev = oh->yst_rdev;
+-#endif
+- in->hdrChunk = chunk;
+- in->serial = tags.serialNumber;
++ yaffs_rd_data_obj(in, chunk,
++ localBuffer);
+
+- } else if (in && !in->valid) {
+- /* we need to load this info */
+
+- in->valid = 1;
+- in->variantType = oh->type;
+
+- in->yst_mode = oh->yst_mode;
+-#ifdef CONFIG_YAFFS_WINCE
+- in->win_atime[0] = oh->win_atime[0];
+- in->win_ctime[0] = oh->win_ctime[0];
+- in->win_mtime[0] = oh->win_mtime[0];
+- in->win_atime[1] = oh->win_atime[1];
+- in->win_ctime[1] = oh->win_ctime[1];
+- in->win_mtime[1] = oh->win_mtime[1];
+-#else
+- in->yst_uid = oh->yst_uid;
+- in->yst_gid = oh->yst_gid;
+- in->yst_atime = oh->yst_atime;
+- in->yst_mtime = oh->yst_mtime;
+- in->yst_ctime = oh->yst_ctime;
+- in->yst_rdev = oh->yst_rdev;
+-#endif
+- in->hdrChunk = chunk;
+- in->serial = tags.serialNumber;
++ memcpy(&localBuffer[start], buffer, nToCopy);
+
+- yaffs_SetObjectName(in, oh->name);
+- in->dirty = 0;
++ chunkWritten =
++ yaffs_wr_data_obj(in, chunk,
++ localBuffer,
++ nToWriteBack,
++ 0);
+
+- /* directory stuff...
+- * hook up to parent
+- */
++ yaffs_release_temp_buffer(dev, localBuffer,
++ __LINE__);
+
+- parent =
+- yaffs_FindOrCreateObjectByNumber
+- (dev, oh->parentObjectId,
+- YAFFS_OBJECT_TYPE_DIRECTORY);
+- if (!parent)
+- alloc_failed = 1;
+- if (parent && parent->variantType ==
+- YAFFS_OBJECT_TYPE_UNKNOWN) {
+- /* Set up as a directory */
+- parent->variantType =
+- YAFFS_OBJECT_TYPE_DIRECTORY;
+- YINIT_LIST_HEAD(&parent->variant.
+- directoryVariant.
+- children);
+- } else if (!parent || parent->variantType !=
+- YAFFS_OBJECT_TYPE_DIRECTORY) {
+- /* Hoosterman, another problem....
+- * We're trying to use a non-directory as a directory
+- */
++ }
+
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+- TENDSTR)));
+- parent = dev->lostNFoundDir;
+- }
++ } else {
++ /* A full chunk. Write directly from the supplied buffer. */
+
+- yaffs_AddObjectToDirectory(parent, in);
+
+- if (0 && (parent == dev->deletedDir ||
+- parent == dev->unlinkedDir)) {
+- in->deleted = 1; /* If it is unlinked at start up then it wants deleting */
+- dev->nDeletedFiles++;
+- }
+- /* Note re hardlinks.
+- * Since we might scan a hardlink before its equivalent object is scanned
+- * we put them all in a list.
+- * After scanning is complete, we should have all the objects, so we run through this
+- * list and fix up all the chains.
+- */
+
+- switch (in->variantType) {
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- /* Todo got a problem */
+- break;
+- case YAFFS_OBJECT_TYPE_FILE:
+- if (dev->useHeaderFileSize)
+-
+- in->variant.fileVariant.
+- fileSize =
+- oh->fileSize;
+-
+- break;
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- in->variant.hardLinkVariant.
+- equivalentObjectId =
+- oh->equivalentObjectId;
+- in->hardLinks.next =
+- (struct ylist_head *)
+- hardList;
+- hardList = in;
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- /* Do nothing */
+- break;
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- /* Do nothing */
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- in->variant.symLinkVariant.alias =
+- yaffs_CloneString(oh->alias);
+- if (!in->variant.symLinkVariant.alias)
+- alloc_failed = 1;
+- break;
+- }
++ chunkWritten =
++ yaffs_wr_data_obj(in, chunk, buffer,
++ dev->data_bytes_per_chunk,
++ 0);
+
+-/*
+- if (parent == dev->deletedDir) {
+- yaffs_DestroyObject(in);
+- bi->hasShrinkHeader = 1;
+- }
+-*/
+- }
+- }
++ /* Since we've overwritten the cached data, we better invalidate it. */
++ yaffs_invalidate_chunk_cache(in, chunk);
+ }
+
+- if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+- /* If we got this far while scanning, then the block is fully allocated.*/
+- state = YAFFS_BLOCK_STATE_FULL;
++ if (chunkWritten >= 0) {
++ n -= nToCopy;
++ offset += nToCopy;
++ buffer += nToCopy;
++ nDone += nToCopy;
+ }
+
+- bi->blockState = state;
++ }
+
+- /* Now let's see if it was dirty */
+- if (bi->pagesInUse == 0 &&
+- !bi->hasShrinkHeader &&
+- bi->blockState == YAFFS_BLOCK_STATE_FULL) {
+- yaffs_BlockBecameDirty(dev, blk);
+- }
++ /* Update file object */
+
+- }
++ if ((startOfWrite + nDone) > in->variant.file_variant.file_size)
++ in->variant.file_variant.file_size = (startOfWrite + nDone);
+
++ in->dirty = 1;
+
+- /* Ok, we've done all the scanning.
+- * Fix up the hard link chains.
+- * We should now have scanned all the objects, now it's time to add these
+- * hardlinks.
+- */
++ return nDone;
++}
+
+- yaffs_HardlinkFixup(dev, hardList);
++int yaffs_wr_file(yaffs_obj_t *in, const __u8 *buffer, loff_t offset,
++ int n_bytes, int write_trhrough)
++{
++ yaffs2_handle_hole(in,offset);
++ return yaffs_do_file_wr(in,buffer,offset,n_bytes,write_trhrough);
++}
+
+- /* Fix up any shadowed objects */
+- {
+- struct yaffs_ShadowFixerStruct *fixer;
+- yaffs_Object *obj;
+-
+- while (shadowFixerList) {
+- fixer = shadowFixerList;
+- shadowFixerList = fixer->next;
+- /* Complete the rename transaction by deleting the shadowed object
+- * then setting the object header to unshadowed.
+- */
+- obj = yaffs_FindObjectByNumber(dev, fixer->shadowedId);
+- if (obj)
+- yaffs_DeleteObject(obj);
+
+- obj = yaffs_FindObjectByNumber(dev, fixer->objectId);
+
+- if (obj)
+- yaffs_UpdateObjectHeader(obj, NULL, 1, 0, 0);
++/* ---------------------- File resizing stuff ------------------ */
+
+- YFREE(fixer);
+- }
+- }
++static void yaffs_prune_chunks(yaffs_obj_t *in, int new_size)
++{
+
+- yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
++ yaffs_dev_t *dev = in->my_dev;
++ int oldFileSize = in->variant.file_variant.file_size;
+
+- if (alloc_failed)
+- return YAFFS_FAIL;
++ int lastDel = 1 + (oldFileSize - 1) / dev->data_bytes_per_chunk;
++
++ int startDel = 1 + (new_size + dev->data_bytes_per_chunk - 1) /
++ dev->data_bytes_per_chunk;
++ int i;
++ int chunk_id;
+
+- T(YAFFS_TRACE_SCAN, (TSTR("yaffs_Scan ends" TENDSTR)));
++ /* Delete backwards so that we don't end up with holes if
++ * power is lost part-way through the operation.
++ */
++ for (i = lastDel; i >= startDel; i--) {
++ /* NB this could be optimised somewhat,
++ * eg. could retrieve the tags and write them without
++ * using yaffs_chunk_del
++ */
+
++ chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
++ if (chunk_id > 0) {
++ if (chunk_id <
++ (dev->internal_start_block * dev->param.chunks_per_block)
++ || chunk_id >=
++ ((dev->internal_end_block +
++ 1) * dev->param.chunks_per_block)) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("Found daft chunk_id %d for %d" TENDSTR),
++ chunk_id, i));
++ } else {
++ in->n_data_chunks--;
++ yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
++ }
++ }
++ }
+
+- return YAFFS_OK;
+ }
+
+-static void yaffs_CheckObjectDetailsLoaded(yaffs_Object *in)
+-{
+- __u8 *chunkData;
+- yaffs_ObjectHeader *oh;
+- yaffs_Device *dev;
+- yaffs_ExtendedTags tags;
+- int result;
+- int alloc_failed = 0;
+
+- if (!in)
+- return;
++void yaffs_resize_file_down( yaffs_obj_t *obj, loff_t new_size)
++{
++ int newFullChunks;
++ __u32 new_sizeOfPartialChunk;
++ yaffs_dev_t *dev = obj->my_dev;
+
+- dev = in->myDev;
++ yaffs_addr_to_chunk(dev, new_size, &newFullChunks, &new_sizeOfPartialChunk);
+
+-#if 0
+- T(YAFFS_TRACE_SCAN, (TSTR("details for object %d %s loaded" TENDSTR),
+- in->objectId,
+- in->lazyLoaded ? "not yet" : "already"));
+-#endif
++ yaffs_prune_chunks(obj, new_size);
+
+- if (in->lazyLoaded && in->hdrChunk > 0) {
+- in->lazyLoaded = 0;
+- chunkData = yaffs_GetTempBuffer(dev, __LINE__);
++ if (new_sizeOfPartialChunk != 0) {
++ int lastChunk = 1 + newFullChunks;
++ __u8 *localBuffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+- result = yaffs_ReadChunkWithTagsFromNAND(dev, in->hdrChunk, chunkData, &tags);
+- oh = (yaffs_ObjectHeader *) chunkData;
++ /* Got to read and rewrite the last chunk with its new size and zero pad */
++ yaffs_rd_data_obj(obj, lastChunk, localBuffer);
++ memset(localBuffer + new_sizeOfPartialChunk, 0,
++ dev->data_bytes_per_chunk - new_sizeOfPartialChunk);
+
+- in->yst_mode = oh->yst_mode;
+-#ifdef CONFIG_YAFFS_WINCE
+- in->win_atime[0] = oh->win_atime[0];
+- in->win_ctime[0] = oh->win_ctime[0];
+- in->win_mtime[0] = oh->win_mtime[0];
+- in->win_atime[1] = oh->win_atime[1];
+- in->win_ctime[1] = oh->win_ctime[1];
+- in->win_mtime[1] = oh->win_mtime[1];
+-#else
+- in->yst_uid = oh->yst_uid;
+- in->yst_gid = oh->yst_gid;
+- in->yst_atime = oh->yst_atime;
+- in->yst_mtime = oh->yst_mtime;
+- in->yst_ctime = oh->yst_ctime;
+- in->yst_rdev = oh->yst_rdev;
++ yaffs_wr_data_obj(obj, lastChunk, localBuffer,
++ new_sizeOfPartialChunk, 1);
+
+-#endif
+- yaffs_SetObjectName(in, oh->name);
++ yaffs_release_temp_buffer(dev, localBuffer, __LINE__);
++ }
+
+- if (in->variantType == YAFFS_OBJECT_TYPE_SYMLINK) {
+- in->variant.symLinkVariant.alias =
+- yaffs_CloneString(oh->alias);
+- if (!in->variant.symLinkVariant.alias)
+- alloc_failed = 1; /* Not returned to caller */
+- }
++ obj->variant.file_variant.file_size = new_size;
+
+- yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
+- }
++ yaffs_prune_tree(dev, &obj->variant.file_variant);
+ }
+
+-static int yaffs_ScanBackwards(yaffs_Device *dev)
+-{
+- yaffs_ExtendedTags tags;
+- int blk;
+- int blockIterator;
+- int startIterator;
+- int endIterator;
+- int nBlocksToScan = 0;
+-
+- int chunk;
+- int result;
+- int c;
+- int deleted;
+- yaffs_BlockState state;
+- yaffs_Object *hardList = NULL;
+- yaffs_BlockInfo *bi;
+- __u32 sequenceNumber;
+- yaffs_ObjectHeader *oh;
+- yaffs_Object *in;
+- yaffs_Object *parent;
+- int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
+- int itsUnlinked;
+- __u8 *chunkData;
+
+- int fileSize;
+- int isShrink;
+- int foundChunksInBlock;
+- int equivalentObjectId;
+- int alloc_failed = 0;
++int yaffs_resize_file(yaffs_obj_t *in, loff_t new_size)
++{
++ yaffs_dev_t *dev = in->my_dev;
++ int oldFileSize = in->variant.file_variant.file_size;
+
++ yaffs_flush_file_cache(in);
++ yaffs_invalidate_whole_cache(in);
+
+- yaffs_BlockIndex *blockIndex = NULL;
+- int altBlockIndex = 0;
++ yaffs_check_gc(dev,0);
+
+- if (!dev->isYaffs2) {
+- T(YAFFS_TRACE_SCAN,
+- (TSTR("yaffs_ScanBackwards is only for YAFFS2!" TENDSTR)));
++ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+- }
+
+- T(YAFFS_TRACE_SCAN,
+- (TSTR
+- ("yaffs_ScanBackwards starts intstartblk %d intendblk %d..."
+- TENDSTR), dev->internalStartBlock, dev->internalEndBlock));
++ if (new_size == oldFileSize)
++ return YAFFS_OK;
++
++ if(new_size > oldFileSize){
++ yaffs2_handle_hole(in,new_size);
++ in->variant.file_variant.file_size = new_size;
++ } else {
++ /* new_size < oldFileSize */
++ yaffs_resize_file_down(in, new_size);
++ }
+
++ /* Write a new object header to reflect the resize.
++ * Show that we've shrunk the file, if need be.
++ * Do this only if the file is not in the deleted directories
++ * and is not shadowed.
++ */
++ if (in->parent &&
++ !in->is_shadowed &&
++ in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
++ in->parent->obj_id != YAFFS_OBJECTID_DELETED)
++ yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
+
+- dev->sequenceNumber = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+- blockIndex = YMALLOC(nBlocks * sizeof(yaffs_BlockIndex));
++ return YAFFS_OK;
++}
+
+- if (!blockIndex) {
+- blockIndex = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockIndex));
+- altBlockIndex = 1;
+- }
++loff_t yaffs_get_file_size(yaffs_obj_t *obj)
++{
++ YCHAR *alias = NULL;
++ obj = yaffs_get_equivalent_obj(obj);
+
+- if (!blockIndex) {
+- T(YAFFS_TRACE_SCAN,
+- (TSTR("yaffs_Scan() could not allocate block index!" TENDSTR)));
+- return YAFFS_FAIL;
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ return obj->variant.file_variant.file_size;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ alias = obj->variant.symlink_variant.alias;
++ if(!alias)
++ return 0;
++ return yaffs_strnlen(alias,YAFFS_MAX_ALIAS_LENGTH);
++ default:
++ return 0;
+ }
++}
+
+- dev->blocksInCheckpoint = 0;
+-
+- chunkData = yaffs_GetTempBuffer(dev, __LINE__);
+-
+- /* Scan all the blocks to determine their state */
+- for (blk = dev->internalStartBlock; blk <= dev->internalEndBlock; blk++) {
+- bi = yaffs_GetBlockInfo(dev, blk);
+- yaffs_ClearChunkBits(dev, blk);
+- bi->pagesInUse = 0;
+- bi->softDeletions = 0;
+-
+- yaffs_QueryInitialBlockState(dev, blk, &state, &sequenceNumber);
+
+- bi->blockState = state;
+- bi->sequenceNumber = sequenceNumber;
+
+- if (bi->sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA)
+- bi->blockState = state = YAFFS_BLOCK_STATE_CHECKPOINT;
+- if (bi->sequenceNumber == YAFFS_SEQUENCE_BAD_BLOCK)
+- bi->blockState = state = YAFFS_BLOCK_STATE_DEAD;
++int yaffs_flush_file(yaffs_obj_t *in, int update_time, int data_sync)
++{
++ int retVal;
++ if (in->dirty) {
++ yaffs_flush_file_cache(in);
++ if(data_sync) /* Only sync data */
++ retVal=YAFFS_OK;
++ else {
++ if (update_time) {
++#ifdef CONFIG_YAFFS_WINCE
++ yfsd_win_file_time_now(in->win_mtime);
++#else
+
+- T(YAFFS_TRACE_SCAN_DEBUG,
+- (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
+- state, sequenceNumber));
++ in->yst_mtime = Y_CURRENT_TIME;
+
++#endif
++ }
+
+- if (state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+- dev->blocksInCheckpoint++;
++ retVal = (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >=
++ 0) ? YAFFS_OK : YAFFS_FAIL;
++ }
++ } else {
++ retVal = YAFFS_OK;
++ }
+
+- } else if (state == YAFFS_BLOCK_STATE_DEAD) {
+- T(YAFFS_TRACE_BAD_BLOCKS,
+- (TSTR("block %d is bad" TENDSTR), blk));
+- } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+- T(YAFFS_TRACE_SCAN_DEBUG,
+- (TSTR("Block empty " TENDSTR)));
+- dev->nErasedBlocks++;
+- dev->nFreeChunks += dev->nChunksPerBlock;
+- } else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++ return retVal;
+
+- /* Determine the highest sequence number */
+- if (sequenceNumber >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+- sequenceNumber < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
++}
+
+- blockIndex[nBlocksToScan].seq = sequenceNumber;
+- blockIndex[nBlocksToScan].block = blk;
++static int yaffs_generic_obj_del(yaffs_obj_t *in)
++{
+
+- nBlocksToScan++;
++ /* First off, invalidate the file's data in the cache, without flushing. */
++ yaffs_invalidate_whole_cache(in);
+
+- if (sequenceNumber >= dev->sequenceNumber)
+- dev->sequenceNumber = sequenceNumber;
+- } else {
+- /* TODO: Nasty sequence number! */
+- T(YAFFS_TRACE_SCAN,
+- (TSTR
+- ("Block scanning block %d has bad sequence number %d"
+- TENDSTR), blk, sequenceNumber));
++ if (in->my_dev->param.is_yaffs2 && (in->parent != in->my_dev->del_dir)) {
++ /* Move to the unlinked directory so we have a record that it was deleted. */
++ yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0, 0);
+
+- }
+- }
+ }
+
+- T(YAFFS_TRACE_SCAN,
+- (TSTR("%d blocks to be sorted..." TENDSTR), nBlocksToScan));
++ yaffs_remove_obj_from_dir(in);
++ yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
++ in->hdr_chunk = 0;
+
++ yaffs_free_obj(in);
++ return YAFFS_OK;
+
++}
+
+- YYIELD();
++/* yaffs_del_file deletes the whole file data
++ * and the inode associated with the file.
++ * It does not delete the links associated with the file.
++ */
++static int yaffs_unlink_file_if_needed(yaffs_obj_t *in)
++{
+
+- /* Sort the blocks */
+-#ifndef CONFIG_YAFFS_USE_OWN_SORT
+- {
+- /* Use qsort now. */
+- yaffs_qsort(blockIndex, nBlocksToScan, sizeof(yaffs_BlockIndex), ybicmp);
+- }
+-#else
+- {
+- /* Dungy old bubble sort... */
++ int retVal;
++ int immediateDeletion = 0;
++ yaffs_dev_t *dev = in->my_dev;
+
+- yaffs_BlockIndex temp;
+- int i;
+- int j;
++ if (!in->my_inode)
++ immediateDeletion = 1;
+
+- for (i = 0; i < nBlocksToScan; i++)
+- for (j = i + 1; j < nBlocksToScan; j++)
+- if (blockIndex[i].seq > blockIndex[j].seq) {
+- temp = blockIndex[j];
+- blockIndex[j] = blockIndex[i];
+- blockIndex[i] = temp;
+- }
++ if (immediateDeletion) {
++ retVal =
++ yaffs_change_obj_name(in, in->my_dev->del_dir,
++ _Y("deleted"), 0, 0);
++ T(YAFFS_TRACE_TRACING,
++ (TSTR("yaffs: immediate deletion of file %d" TENDSTR),
++ in->obj_id));
++ in->deleted = 1;
++ in->my_dev->n_deleted_files++;
++ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
++ yaffs_resize_file(in, 0);
++ yaffs_soft_del_file(in);
++ } else {
++ retVal =
++ yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
++ _Y("unlinked"), 0, 0);
+ }
+-#endif
+
+- YYIELD();
+
+- T(YAFFS_TRACE_SCAN, (TSTR("...done" TENDSTR)));
++ return retVal;
++}
+
+- /* Now scan the blocks looking at the data. */
+- startIterator = 0;
+- endIterator = nBlocksToScan - 1;
+- T(YAFFS_TRACE_SCAN_DEBUG,
+- (TSTR("%d blocks to be scanned" TENDSTR), nBlocksToScan));
++int yaffs_del_file(yaffs_obj_t *in)
++{
++ int retVal = YAFFS_OK;
++ int deleted; /* Need to cache value on stack if in is freed */
++ yaffs_dev_t *dev = in->my_dev;
+
+- /* For each block.... backwards */
+- for (blockIterator = endIterator; !alloc_failed && blockIterator >= startIterator;
+- blockIterator--) {
+- /* Cooperative multitasking! This loop can run for so
+- long that watchdog timers expire. */
+- YYIELD();
++ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
++ yaffs_resize_file(in, 0);
+
+- /* get the block to scan in the correct order */
+- blk = blockIndex[blockIterator].block;
++ if (in->n_data_chunks > 0) {
++ /* Use soft deletion if there is data in the file.
++ * That won't be the case if it has been resized to zero.
++ */
++ if (!in->unlinked)
++ retVal = yaffs_unlink_file_if_needed(in);
+
+- bi = yaffs_GetBlockInfo(dev, blk);
++ deleted = in->deleted;
+
++ if (retVal == YAFFS_OK && in->unlinked && !in->deleted) {
++ in->deleted = 1;
++ deleted = 1;
++ in->my_dev->n_deleted_files++;
++ yaffs_soft_del_file(in);
++ }
++ return deleted ? YAFFS_OK : YAFFS_FAIL;
++ } else {
++ /* The file has no data chunks so we toss it immediately */
++ yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
++ in->variant.file_variant.top = NULL;
++ yaffs_generic_obj_del(in);
+
+- state = bi->blockState;
++ return YAFFS_OK;
++ }
++}
+
+- deleted = 0;
++static int yaffs_is_non_empty_dir(yaffs_obj_t *obj)
++{
++ return (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
++ !(ylist_empty(&obj->variant.dir_variant.children));
++}
+
+- /* For each chunk in each block that needs scanning.... */
+- foundChunksInBlock = 0;
+- for (c = dev->nChunksPerBlock - 1;
+- !alloc_failed && c >= 0 &&
+- (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+- state == YAFFS_BLOCK_STATE_ALLOCATING); c--) {
+- /* Scan backwards...
+- * Read the tags and decide what to do
+- */
++static int yaffs_del_dir(yaffs_obj_t *obj)
++{
++ /* First check that the directory is empty. */
++ if (yaffs_is_non_empty_dir(obj))
++ return YAFFS_FAIL;
+
+- chunk = blk * dev->nChunksPerBlock + c;
++ return yaffs_generic_obj_del(obj);
++}
+
+- result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
+- &tags);
++static int yaffs_del_symlink(yaffs_obj_t *in)
++{
++ if(in->variant.symlink_variant.alias)
++ YFREE(in->variant.symlink_variant.alias);
++ in->variant.symlink_variant.alias=NULL;
+
+- /* Let's have a good look at this chunk... */
++ return yaffs_generic_obj_del(in);
++}
+
+- if (!tags.chunkUsed) {
+- /* An unassigned chunk in the block.
+- * If there are used chunks after this one, then
+- * it is a chunk that was skipped due to failing the erased
+- * check. Just skip it so that it can be deleted.
+- * But, more typically, We get here when this is an unallocated
+- * chunk and his means that either the block is empty or
+- * this is the one being allocated from
+- */
++static int yaffs_del_link(yaffs_obj_t *in)
++{
++ /* remove this hardlink from the list associated with the equivalent
++ * object
++ */
++ ylist_del_init(&in->hard_links);
++ return yaffs_generic_obj_del(in);
++}
+
+- if (foundChunksInBlock) {
+- /* This is a chunk that was skipped due to failing the erased check */
+- } else if (c == 0) {
+- /* We're looking at the first chunk in the block so the block is unused */
+- state = YAFFS_BLOCK_STATE_EMPTY;
+- dev->nErasedBlocks++;
+- } else {
+- if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+- state == YAFFS_BLOCK_STATE_ALLOCATING) {
+- if (dev->sequenceNumber == bi->sequenceNumber) {
+- /* this is the block being allocated from */
+-
+- T(YAFFS_TRACE_SCAN,
+- (TSTR
+- (" Allocating from %d %d"
+- TENDSTR), blk, c));
+-
+- state = YAFFS_BLOCK_STATE_ALLOCATING;
+- dev->allocationBlock = blk;
+- dev->allocationPage = c;
+- dev->allocationBlockFinder = blk;
+- } else {
+- /* This is a partially written block that is not
+- * the current allocation block. This block must have
+- * had a write failure, so set up for retirement.
+- */
+-
+- /* bi->needsRetiring = 1; ??? TODO */
+- bi->gcPrioritise = 1;
+-
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR("Partially written block %d detected" TENDSTR),
+- blk));
+- }
+- }
+- }
++int yaffs_del_obj(yaffs_obj_t *obj)
++{
++int retVal = -1;
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ retVal = yaffs_del_file(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ if(!ylist_empty(&obj->variant.dir_variant.dirty)){
++ T(YAFFS_TRACE_BACKGROUND, (TSTR("Remove object %d from dirty directories" TENDSTR),obj->obj_id));
++ ylist_del_init(&obj->variant.dir_variant.dirty);
++ }
++ return yaffs_del_dir(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ retVal = yaffs_del_symlink(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ retVal = yaffs_del_link(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ retVal = yaffs_generic_obj_del(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ retVal = 0;
++ break; /* should not happen. */
++ }
+
+- dev->nFreeChunks++;
++ return retVal;
++}
+
+- } else if (tags.eccResult == YAFFS_ECC_RESULT_UNFIXED) {
+- T(YAFFS_TRACE_SCAN,
+- (TSTR(" Unfixed ECC in chunk(%d:%d), chunk ignored"TENDSTR),
+- blk, c));
+-
+- dev->nFreeChunks++;
+-
+- } else if (tags.chunkId > 0) {
+- /* chunkId > 0 so it is a data chunk... */
+- unsigned int endpos;
+- __u32 chunkBase =
+- (tags.chunkId - 1) * dev->nDataBytesPerChunk;
+-
+- foundChunksInBlock = 1;
+-
+-
+- yaffs_SetChunkBit(dev, blk, c);
+- bi->pagesInUse++;
+-
+- in = yaffs_FindOrCreateObjectByNumber(dev,
+- tags.
+- objectId,
+- YAFFS_OBJECT_TYPE_FILE);
+- if (!in) {
+- /* Out of memory */
+- alloc_failed = 1;
+- }
++static int yaffs_unlink_worker(yaffs_obj_t *obj)
++{
+
+- if (in &&
+- in->variantType == YAFFS_OBJECT_TYPE_FILE
+- && chunkBase <
+- in->variant.fileVariant.shrinkSize) {
+- /* This has not been invalidated by a resize */
+- if (!yaffs_PutChunkIntoFile(in, tags.chunkId,
+- chunk, -1)) {
+- alloc_failed = 1;
+- }
++ int immediateDeletion = 0;
+
+- /* File size is calculated by looking at the data chunks if we have not
+- * seen an object header yet. Stop this practice once we find an object header.
+- */
+- endpos =
+- (tags.chunkId -
+- 1) * dev->nDataBytesPerChunk +
+- tags.byteCount;
+-
+- if (!in->valid && /* have not got an object header yet */
+- in->variant.fileVariant.
+- scannedFileSize < endpos) {
+- in->variant.fileVariant.
+- scannedFileSize = endpos;
+- in->variant.fileVariant.
+- fileSize =
+- in->variant.fileVariant.
+- scannedFileSize;
+- }
++ if (!obj->my_inode)
++ immediateDeletion = 1;
+
+- } else if (in) {
+- /* This chunk has been invalidated by a resize, so delete */
+- yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
++ if(obj)
++ yaffs_update_parent(obj->parent);
+
+- }
+- } else {
+- /* chunkId == 0, so it is an ObjectHeader.
+- * Thus, we read in the object header and make the object
+- */
+- foundChunksInBlock = 1;
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
++ return yaffs_del_link(obj);
++ } else if (!ylist_empty(&obj->hard_links)) {
++ /* Curve ball: We're unlinking an object that has a hardlink.
++ *
++ * This problem arises because we are not strictly following
++ * The Linux link/inode model.
++ *
++ * We can't really delete the object.
++ * Instead, we do the following:
++ * - Select a hardlink.
++ * - Unhook it from the hard links
++ * - Move it from its parent directory (so that the rename can work)
++ * - Rename the object to the hardlink's name.
++ * - Delete the hardlink
++ */
+
+- yaffs_SetChunkBit(dev, blk, c);
+- bi->pagesInUse++;
++ yaffs_obj_t *hl;
++ yaffs_obj_t *parent;
++ int retVal;
++ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+- oh = NULL;
+- in = NULL;
++ hl = ylist_entry(obj->hard_links.next, yaffs_obj_t, hard_links);
+
+- if (tags.extraHeaderInfoAvailable) {
+- in = yaffs_FindOrCreateObjectByNumber
+- (dev, tags.objectId,
+- tags.extraObjectType);
+- if (!in)
+- alloc_failed = 1;
+- }
++ yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
++ parent = hl->parent;
+
+- if (!in ||
+-#ifdef CONFIG_YAFFS_DISABLE_LAZY_LOAD
+- !in->valid ||
+-#endif
+- tags.extraShadows ||
+- (!in->valid &&
+- (tags.objectId == YAFFS_OBJECTID_ROOT ||
+- tags.objectId == YAFFS_OBJECTID_LOSTNFOUND))) {
+-
+- /* If we don't have valid info then we need to read the chunk
+- * TODO In future we can probably defer reading the chunk and
+- * living with invalid data until needed.
+- */
++ ylist_del_init(&hl->hard_links);
+
+- result = yaffs_ReadChunkWithTagsFromNAND(dev,
+- chunk,
+- chunkData,
+- NULL);
+-
+- oh = (yaffs_ObjectHeader *) chunkData;
+-
+- if (dev->inbandTags) {
+- /* Fix up the header if they got corrupted by inband tags */
+- oh->shadowsObject = oh->inbandShadowsObject;
+- oh->isShrink = oh->inbandIsShrink;
+- }
++ yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
+
+- if (!in) {
+- in = yaffs_FindOrCreateObjectByNumber(dev, tags.objectId, oh->type);
+- if (!in)
+- alloc_failed = 1;
+- }
++ retVal = yaffs_change_obj_name(obj,parent, name, 0, 0);
+
+- }
++ if (retVal == YAFFS_OK)
++ retVal = yaffs_generic_obj_del(hl);
+
+- if (!in) {
+- /* TODO Hoosterman we have a problem! */
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("yaffs tragedy: Could not make object for object %d at chunk %d during scan"
+- TENDSTR), tags.objectId, chunk));
+- continue;
+- }
++ return retVal;
+
+- if (in->valid) {
+- /* We have already filled this one.
+- * We have a duplicate that will be discarded, but
+- * we first have to suck out resize info if it is a file.
+- */
++ } else if (immediateDeletion) {
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ return yaffs_del_file(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ ylist_del_init(&obj->variant.dir_variant.dirty);
++ return yaffs_del_dir(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ return yaffs_del_symlink(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ return yaffs_generic_obj_del(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ default:
++ return YAFFS_FAIL;
++ }
++ } else if(yaffs_is_non_empty_dir(obj))
++ return YAFFS_FAIL;
++ else
++ return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
++ _Y("unlinked"), 0, 0);
++}
+
+- if ((in->variantType == YAFFS_OBJECT_TYPE_FILE) &&
+- ((oh &&
+- oh->type == YAFFS_OBJECT_TYPE_FILE) ||
+- (tags.extraHeaderInfoAvailable &&
+- tags.extraObjectType == YAFFS_OBJECT_TYPE_FILE))) {
+- __u32 thisSize =
+- (oh) ? oh->fileSize : tags.
+- extraFileLength;
+- __u32 parentObjectId =
+- (oh) ? oh->
+- parentObjectId : tags.
+- extraParentObjectId;
+-
+-
+- isShrink =
+- (oh) ? oh->isShrink : tags.
+- extraIsShrinkHeader;
+
+- /* If it is deleted (unlinked at start also means deleted)
+- * we treat the file size as being zeroed at this point.
+- */
+- if (parentObjectId ==
+- YAFFS_OBJECTID_DELETED
+- || parentObjectId ==
+- YAFFS_OBJECTID_UNLINKED) {
+- thisSize = 0;
+- isShrink = 1;
+- }
++static int yaffs_unlink_obj(yaffs_obj_t *obj)
++{
+
+- if (isShrink &&
+- in->variant.fileVariant.
+- shrinkSize > thisSize) {
+- in->variant.fileVariant.
+- shrinkSize =
+- thisSize;
+- }
++ if (obj && obj->unlink_allowed)
++ return yaffs_unlink_worker(obj);
+
+- if (isShrink)
+- bi->hasShrinkHeader = 1;
++ return YAFFS_FAIL;
+
+- }
+- /* Use existing - destroy this one. */
+- yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
++}
++int yaffs_unlinker(yaffs_obj_t *dir, const YCHAR *name)
++{
++ yaffs_obj_t *obj;
+
+- }
++ obj = yaffs_find_by_name(dir, name);
++ return yaffs_unlink_obj(obj);
++}
+
+- if (!in->valid && in->variantType !=
+- (oh ? oh->type : tags.extraObjectType))
+- T(YAFFS_TRACE_ERROR, (
+- TSTR("yaffs tragedy: Bad object type, "
+- TCONT("%d != %d, for object %d at chunk ")
+- TCONT("%d during scan")
+- TENDSTR), oh ?
+- oh->type : tags.extraObjectType,
+- in->variantType, tags.objectId,
+- chunk));
+-
+- if (!in->valid &&
+- (tags.objectId == YAFFS_OBJECTID_ROOT ||
+- tags.objectId ==
+- YAFFS_OBJECTID_LOSTNFOUND)) {
+- /* We only load some info, don't fiddle with directory structure */
+- in->valid = 1;
++/*----------------------- Initialisation Scanning ---------------------- */
+
+- if (oh) {
+- in->variantType = oh->type;
++void yaffs_handle_shadowed_obj(yaffs_dev_t *dev, int obj_id,
++ int backward_scanning)
++{
++ yaffs_obj_t *obj;
+
+- in->yst_mode = oh->yst_mode;
+-#ifdef CONFIG_YAFFS_WINCE
+- in->win_atime[0] = oh->win_atime[0];
+- in->win_ctime[0] = oh->win_ctime[0];
+- in->win_mtime[0] = oh->win_mtime[0];
+- in->win_atime[1] = oh->win_atime[1];
+- in->win_ctime[1] = oh->win_ctime[1];
+- in->win_mtime[1] = oh->win_mtime[1];
+-#else
+- in->yst_uid = oh->yst_uid;
+- in->yst_gid = oh->yst_gid;
+- in->yst_atime = oh->yst_atime;
+- in->yst_mtime = oh->yst_mtime;
+- in->yst_ctime = oh->yst_ctime;
+- in->yst_rdev = oh->yst_rdev;
++ if (!backward_scanning) {
++ /* Handle YAFFS1 forward scanning case
++ * For YAFFS1 we always do the deletion
++ */
+
+-#endif
+- } else {
+- in->variantType = tags.extraObjectType;
+- in->lazyLoaded = 1;
+- }
++ } else {
++ /* Handle YAFFS2 case (backward scanning)
++ * If the shadowed object exists then ignore.
++ */
++ obj = yaffs_find_by_number(dev, obj_id);
++ if(obj)
++ return;
++ }
+
+- in->hdrChunk = chunk;
++ /* Let's create it (if it does not exist) assuming it is a file so that it can do shrinking etc.
++ * We put it in unlinked dir to be cleaned up after the scanning
++ */
++ obj =
++ yaffs_find_or_create_by_number(dev, obj_id,
++ YAFFS_OBJECT_TYPE_FILE);
++ if (!obj)
++ return;
++ obj->is_shadowed = 1;
++ yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
++ obj->variant.file_variant.shrink_size = 0;
++ obj->valid = 1; /* So that we don't read any other info for this file */
+
+- } else if (!in->valid) {
+- /* we need to load this info */
++}
+
+- in->valid = 1;
+- in->hdrChunk = chunk;
+
+- if (oh) {
+- in->variantType = oh->type;
++void yaffs_link_fixup(yaffs_dev_t *dev, yaffs_obj_t *hard_list)
++{
++ yaffs_obj_t *hl;
++ yaffs_obj_t *in;
+
+- in->yst_mode = oh->yst_mode;
+-#ifdef CONFIG_YAFFS_WINCE
+- in->win_atime[0] = oh->win_atime[0];
+- in->win_ctime[0] = oh->win_ctime[0];
+- in->win_mtime[0] = oh->win_mtime[0];
+- in->win_atime[1] = oh->win_atime[1];
+- in->win_ctime[1] = oh->win_ctime[1];
+- in->win_mtime[1] = oh->win_mtime[1];
+-#else
+- in->yst_uid = oh->yst_uid;
+- in->yst_gid = oh->yst_gid;
+- in->yst_atime = oh->yst_atime;
+- in->yst_mtime = oh->yst_mtime;
+- in->yst_ctime = oh->yst_ctime;
+- in->yst_rdev = oh->yst_rdev;
+-#endif
++ while (hard_list) {
++ hl = hard_list;
++ hard_list = (yaffs_obj_t *) (hard_list->hard_links.next);
++
++ in = yaffs_find_by_number(dev,
++ hl->variant.hardlink_variant.
++ equiv_id);
+
+- if (oh->shadowsObject > 0)
+- yaffs_HandleShadowedObject(dev,
+- oh->
+- shadowsObject,
+- 1);
+-
+-
+- yaffs_SetObjectName(in, oh->name);
+- parent =
+- yaffs_FindOrCreateObjectByNumber
+- (dev, oh->parentObjectId,
+- YAFFS_OBJECT_TYPE_DIRECTORY);
+-
+- fileSize = oh->fileSize;
+- isShrink = oh->isShrink;
+- equivalentObjectId = oh->equivalentObjectId;
++ if (in) {
++ /* Add the hardlink pointers */
++ hl->variant.hardlink_variant.equiv_obj = in;
++ ylist_add(&hl->hard_links, &in->hard_links);
++ } else {
++ /* Todo Need to report/handle this better.
++ * Got a problem... hardlink to a non-existent object
++ */
++ hl->variant.hardlink_variant.equiv_obj = NULL;
++ YINIT_LIST_HEAD(&hl->hard_links);
+
+- } else {
+- in->variantType = tags.extraObjectType;
+- parent =
+- yaffs_FindOrCreateObjectByNumber
+- (dev, tags.extraParentObjectId,
+- YAFFS_OBJECT_TYPE_DIRECTORY);
+- fileSize = tags.extraFileLength;
+- isShrink = tags.extraIsShrinkHeader;
+- equivalentObjectId = tags.extraEquivalentObjectId;
+- in->lazyLoaded = 1;
++ }
++ }
++}
+
+- }
+- in->dirty = 0;
+
+- if (!parent)
+- alloc_failed = 1;
++static void yaffs_strip_deleted_objs(yaffs_dev_t *dev)
++{
++ /*
++ * Sort out state of unlinked and deleted objects after scanning.
++ */
++ struct ylist_head *i;
++ struct ylist_head *n;
++ yaffs_obj_t *l;
+
+- /* directory stuff...
+- * hook up to parent
+- */
++ if (dev->read_only)
++ return;
+
+- if (parent && parent->variantType ==
+- YAFFS_OBJECT_TYPE_UNKNOWN) {
+- /* Set up as a directory */
+- parent->variantType =
+- YAFFS_OBJECT_TYPE_DIRECTORY;
+- YINIT_LIST_HEAD(&parent->variant.
+- directoryVariant.
+- children);
+- } else if (!parent || parent->variantType !=
+- YAFFS_OBJECT_TYPE_DIRECTORY) {
+- /* Hoosterman, another problem....
+- * We're trying to use a non-directory as a directory
+- */
++ /* Soft delete all the unlinked files */
++ ylist_for_each_safe(i, n,
++ &dev->unlinked_dir->variant.dir_variant.children) {
++ if (i) {
++ l = ylist_entry(i, yaffs_obj_t, siblings);
++ yaffs_del_obj(l);
++ }
++ }
+
+- T(YAFFS_TRACE_ERROR,
+- (TSTR
+- ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+- TENDSTR)));
+- parent = dev->lostNFoundDir;
+- }
++ ylist_for_each_safe(i, n,
++ &dev->del_dir->variant.dir_variant.children) {
++ if (i) {
++ l = ylist_entry(i, yaffs_obj_t, siblings);
++ yaffs_del_obj(l);
++ }
++ }
+
+- yaffs_AddObjectToDirectory(parent, in);
++}
+
+- itsUnlinked = (parent == dev->deletedDir) ||
+- (parent == dev->unlinkedDir);
++/*
++ * This code iterates through all the objects making sure that they are rooted.
++ * Any unrooted objects are re-rooted in lost+found.
++ * An object needs to be in one of:
++ * - Directly under deleted, unlinked
++ * - Directly or indirectly under root.
++ *
++ * Note:
++ * This code assumes that we don't ever change the current relationships between
++ * directories:
++ * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
++ * lostNfound->parent == root_dir
++ *
++ * This fixes the problem where directories might have inadvertently been deleted
++ * leaving the object "hanging" without being rooted in the directory tree.
++ */
++
++static int yaffs_has_null_parent(yaffs_dev_t *dev, yaffs_obj_t *obj)
++{
++ return (obj == dev->del_dir ||
++ obj == dev->unlinked_dir||
++ obj == dev->root_dir);
++}
+
+- if (isShrink) {
+- /* Mark the block as having a shrinkHeader */
+- bi->hasShrinkHeader = 1;
+- }
++static void yaffs_fix_hanging_objs(yaffs_dev_t *dev)
++{
++ yaffs_obj_t *obj;
++ yaffs_obj_t *parent;
++ int i;
++ struct ylist_head *lh;
++ struct ylist_head *n;
++ int depthLimit;
++ int hanging;
+
+- /* Note re hardlinks.
+- * Since we might scan a hardlink before its equivalent object is scanned
+- * we put them all in a list.
+- * After scanning is complete, we should have all the objects, so we run
+- * through this list and fix up all the chains.
+- */
++ if (dev->read_only)
++ return;
+
+- switch (in->variantType) {
+- case YAFFS_OBJECT_TYPE_UNKNOWN:
+- /* Todo got a problem */
+- break;
+- case YAFFS_OBJECT_TYPE_FILE:
+-
+- if (in->variant.fileVariant.
+- scannedFileSize < fileSize) {
+- /* This covers the case where the file size is greater
+- * than where the data is
+- * This will happen if the file is resized to be larger
+- * than its current data extents.
+- */
+- in->variant.fileVariant.fileSize = fileSize;
+- in->variant.fileVariant.scannedFileSize =
+- in->variant.fileVariant.fileSize;
+- }
++ /* Iterate through the objects in each hash entry,
++ * looking at each object.
++ * Make sure it is rooted.
++ */
+
+- if (isShrink &&
+- in->variant.fileVariant.shrinkSize > fileSize) {
+- in->variant.fileVariant.shrinkSize = fileSize;
+- }
++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++ ylist_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
++ if (lh) {
++ obj = ylist_entry(lh, yaffs_obj_t, hash_link);
++ parent= obj->parent;
++
++ if(yaffs_has_null_parent(dev,obj)){
++ /* These directories are not hanging */
++ hanging = 0;
++ }
++ else if(!parent || parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
++ hanging = 1;
++ else if(yaffs_has_null_parent(dev,parent))
++ hanging = 0;
++ else {
++ /*
++ * Need to follow the parent chain to see if it is hanging.
++ */
++ hanging = 0;
++ depthLimit=100;
+
+- break;
+- case YAFFS_OBJECT_TYPE_HARDLINK:
+- if (!itsUnlinked) {
+- in->variant.hardLinkVariant.equivalentObjectId =
+- equivalentObjectId;
+- in->hardLinks.next =
+- (struct ylist_head *) hardList;
+- hardList = in;
+- }
+- break;
+- case YAFFS_OBJECT_TYPE_DIRECTORY:
+- /* Do nothing */
+- break;
+- case YAFFS_OBJECT_TYPE_SPECIAL:
+- /* Do nothing */
+- break;
+- case YAFFS_OBJECT_TYPE_SYMLINK:
+- if (oh) {
+- in->variant.symLinkVariant.alias =
+- yaffs_CloneString(oh->alias);
+- if (!in->variant.symLinkVariant.alias)
+- alloc_failed = 1;
+- }
+- break;
++ while(parent != dev->root_dir &&
++ parent->parent &&
++ parent->parent->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
++ depthLimit > 0){
++ parent = parent->parent;
++ depthLimit--;
+ }
+-
++ if(parent != dev->root_dir)
++ hanging = 1;
++ }
++ if(hanging){
++ T(YAFFS_TRACE_SCAN,
++ (TSTR("Hanging object %d moved to lost and found" TENDSTR),
++ obj->obj_id));
++ yaffs_add_obj_to_dir(dev->lost_n_found,obj);
+ }
+-
+ }
++ }
++ }
++}
+
+- } /* End of scanning for each chunk */
+
+- if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+- /* If we got this far while scanning, then the block is fully allocated. */
+- state = YAFFS_BLOCK_STATE_FULL;
+- }
++/*
++ * Delete directory contents for cleaning up lost and found.
++ */
++static void yaffs_del_dir_contents(yaffs_obj_t *dir)
++{
++ yaffs_obj_t *obj;
++ struct ylist_head *lh;
++ struct ylist_head *n;
+
+- bi->blockState = state;
++ if(dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
++ YBUG();
++
++ ylist_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
++ if (lh) {
++ obj = ylist_entry(lh, yaffs_obj_t, siblings);
++ if(obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
++ yaffs_del_dir_contents(obj);
++
++ T(YAFFS_TRACE_SCAN,
++ (TSTR("Deleting lost_found object %d" TENDSTR),
++ obj->obj_id));
+
+- /* Now let's see if it was dirty */
+- if (bi->pagesInUse == 0 &&
+- !bi->hasShrinkHeader &&
+- bi->blockState == YAFFS_BLOCK_STATE_FULL) {
+- yaffs_BlockBecameDirty(dev, blk);
++ /* Need to use UnlinkObject since Delete would not handle
++ * hardlinked objects correctly.
++ */
++ yaffs_unlink_obj(obj);
+ }
+-
+ }
++
++}
+
+- if (altBlockIndex)
+- YFREE_ALT(blockIndex);
+- else
+- YFREE(blockIndex);
++static void yaffs_empty_l_n_f(yaffs_dev_t *dev)
++{
++ yaffs_del_dir_contents(dev->lost_n_found);
++}
+
+- /* Ok, we've done all the scanning.
+- * Fix up the hard link chains.
+- * We should now have scanned all the objects, now it's time to add these
+- * hardlinks.
+- */
+- yaffs_HardlinkFixup(dev, hardList);
++static void yaffs_check_obj_details_loaded(yaffs_obj_t *in)
++{
++ __u8 *chunkData;
++ yaffs_obj_header *oh;
++ yaffs_dev_t *dev;
++ yaffs_ext_tags tags;
++ int result;
++ int alloc_failed = 0;
+
++ if (!in)
++ return;
+
+- yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
++ dev = in->my_dev;
+
+- if (alloc_failed)
+- return YAFFS_FAIL;
++#if 0
++ T(YAFFS_TRACE_SCAN, (TSTR("details for object %d %s loaded" TENDSTR),
++ in->obj_id,
++ in->lazy_loaded ? "not yet" : "already"));
++#endif
+
+- T(YAFFS_TRACE_SCAN, (TSTR("yaffs_ScanBackwards ends" TENDSTR)));
++ if (in->lazy_loaded && in->hdr_chunk > 0) {
++ in->lazy_loaded = 0;
++ chunkData = yaffs_get_temp_buffer(dev, __LINE__);
+
+- return YAFFS_OK;
+-}
++ result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, chunkData, &tags);
++ oh = (yaffs_obj_header *) chunkData;
+
+-/*------------------------------ Directory Functions ----------------------------- */
++ in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++ in->win_atime[0] = oh->win_atime[0];
++ in->win_ctime[0] = oh->win_ctime[0];
++ in->win_mtime[0] = oh->win_mtime[0];
++ in->win_atime[1] = oh->win_atime[1];
++ in->win_ctime[1] = oh->win_ctime[1];
++ in->win_mtime[1] = oh->win_mtime[1];
++#else
++ in->yst_uid = oh->yst_uid;
++ in->yst_gid = oh->yst_gid;
++ in->yst_atime = oh->yst_atime;
++ in->yst_mtime = oh->yst_mtime;
++ in->yst_ctime = oh->yst_ctime;
++ in->yst_rdev = oh->yst_rdev;
+
+-static void yaffs_VerifyObjectInDirectory(yaffs_Object *obj)
+-{
+- struct ylist_head *lh;
+- yaffs_Object *listObj;
++#endif
++ yaffs_set_obj_name_from_oh(in, oh);
+
+- int count = 0;
++ if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
++ in->variant.symlink_variant.alias =
++ yaffs_clone_str(oh->alias);
++ if (!in->variant.symlink_variant.alias)
++ alloc_failed = 1; /* Not returned to caller */
++ }
+
+- if (!obj) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR("No object to verify" TENDSTR)));
+- YBUG();
+- return;
++ yaffs_release_temp_buffer(dev, chunkData, __LINE__);
+ }
++}
+
+- if (yaffs_SkipVerification(obj->myDev))
+- return;
++/*------------------------------ Directory Functions ----------------------------- */
+
+- if (!obj->parent) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR("Object does not have parent" TENDSTR)));
+- YBUG();
++/*
++ * yaffs_update_parent() handles fixing a directory's mtime and ctime when a new
++ * link (ie. name) is created or deleted in the directory.
++ *
++ * ie.
++ * create dir/a : update dir's mtime/ctime
++ * rm dir/a: update dir's mtime/ctime
++ * modify dir/a: don't update dir's mtime/ctime
++ *
++ * This can be handled immediately or deferred. Deferring helps reduce the number
++ * of updates when many files in a directory are changed within a brief period.
++ *
++ * If the directory updating is deferred then yaffs_update_dirty_dirs must be
++ * called periodically.
++ */
++
++static void yaffs_update_parent(yaffs_obj_t *obj)
++{
++ yaffs_dev_t *dev;
++ if(!obj)
+ return;
+- }
+-
+- if (obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR("Parent is not directory" TENDSTR)));
+- YBUG();
+- }
+-
+- /* Iterate through the objects in each hash entry */
++#ifndef CONFIG_YAFFS_WINCE
+
+- ylist_for_each(lh, &obj->parent->variant.directoryVariant.children) {
+- if (lh) {
+- listObj = ylist_entry(lh, yaffs_Object, siblings);
+- yaffs_VerifyObject(listObj);
+- if (obj == listObj)
+- count++;
++ dev = obj->my_dev;
++ obj->dirty = 1;
++ obj->yst_mtime = obj->yst_ctime = Y_CURRENT_TIME;
++ if(dev->param.defered_dir_update){
++ struct ylist_head *link = &obj->variant.dir_variant.dirty;
++
++ if(ylist_empty(link)){
++ ylist_add(link,&dev->dirty_dirs);
++ T(YAFFS_TRACE_BACKGROUND, (TSTR("Added object %d to dirty directories" TENDSTR),obj->obj_id));
+ }
+- }
+
+- if (count != 1) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory %d times" TENDSTR), count));
+- YBUG();
+- }
++ } else
++ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
++#endif
+ }
+
+-static void yaffs_VerifyDirectory(yaffs_Object *directory)
++void yaffs_update_dirty_dirs(yaffs_dev_t *dev)
+ {
+- struct ylist_head *lh;
+- yaffs_Object *listObj;
+-
+- if (!directory) {
+- YBUG();
+- return;
+- }
++ struct ylist_head *link;
++ yaffs_obj_t *obj;
++ yaffs_dir_s *dS;
++ yaffs_obj_variant *oV;
+
+- if (yaffs_SkipFullVerification(directory->myDev))
+- return;
++ T(YAFFS_TRACE_BACKGROUND, (TSTR("Update dirty directories" TENDSTR)));
+
+- if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR("Directory has wrong type: %d" TENDSTR), directory->variantType));
+- YBUG();
+- }
++ while(!ylist_empty(&dev->dirty_dirs)){
++ link = dev->dirty_dirs.next;
++ ylist_del_init(link);
++
++ dS=ylist_entry(link,yaffs_dir_s,dirty);
++ oV = ylist_entry(dS,yaffs_obj_variant,dir_variant);
++ obj = ylist_entry(oV,yaffs_obj_t,variant);
+
+- /* Iterate through the objects in each hash entry */
++ T(YAFFS_TRACE_BACKGROUND, (TSTR("Update directory %d" TENDSTR), obj->obj_id));
+
+- ylist_for_each(lh, &directory->variant.directoryVariant.children) {
+- if (lh) {
+- listObj = ylist_entry(lh, yaffs_Object, siblings);
+- if (listObj->parent != directory) {
+- T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory list has wrong parent %p" TENDSTR), listObj->parent));
+- YBUG();
+- }
+- yaffs_VerifyObjectInDirectory(listObj);
+- }
++ if(obj->dirty)
++ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+ }
+ }
+
+-
+-static void yaffs_RemoveObjectFromDirectory(yaffs_Object *obj)
++static void yaffs_remove_obj_from_dir(yaffs_obj_t *obj)
+ {
+- yaffs_Device *dev = obj->myDev;
+- yaffs_Object *parent;
++ yaffs_dev_t *dev = obj->my_dev;
++ yaffs_obj_t *parent;
+
+- yaffs_VerifyObjectInDirectory(obj);
++ yaffs_verify_obj_in_dir(obj);
+ parent = obj->parent;
+
+- yaffs_VerifyDirectory(parent);
++ yaffs_verify_dir(parent);
+
+- if (dev && dev->removeObjectCallback)
+- dev->removeObjectCallback(obj);
++ if (dev && dev->param.remove_obj_fn)
++ dev->param.remove_obj_fn(obj);
+
+
+ ylist_del_init(&obj->siblings);
+ obj->parent = NULL;
+-
+- yaffs_VerifyDirectory(parent);
++
++ yaffs_verify_dir(parent);
+ }
+
+-
+-static void yaffs_AddObjectToDirectory(yaffs_Object *directory,
+- yaffs_Object *obj)
++void yaffs_add_obj_to_dir(yaffs_obj_t *directory,
++ yaffs_obj_t *obj)
+ {
+ if (!directory) {
+ T(YAFFS_TRACE_ALWAYS,
+@@ -6699,7 +4495,7 @@ static void yaffs_AddObjectToDirectory(y
+ YBUG();
+ return;
+ }
+- if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("tragedy: Trying to add an object to a non-directory"
+@@ -6713,27 +4509,27 @@ static void yaffs_AddObjectToDirectory(y
+ }
+
+
+- yaffs_VerifyDirectory(directory);
++ yaffs_verify_dir(directory);
+
+- yaffs_RemoveObjectFromDirectory(obj);
++ yaffs_remove_obj_from_dir(obj);
+
+
+ /* Now add it */
+- ylist_add(&obj->siblings, &directory->variant.directoryVariant.children);
++ ylist_add(&obj->siblings, &directory->variant.dir_variant.children);
+ obj->parent = directory;
+
+- if (directory == obj->myDev->unlinkedDir
+- || directory == obj->myDev->deletedDir) {
++ if (directory == obj->my_dev->unlinked_dir
++ || directory == obj->my_dev->del_dir) {
+ obj->unlinked = 1;
+- obj->myDev->nUnlinkedFiles++;
+- obj->renameAllowed = 0;
++ obj->my_dev->n_unlinked_files++;
++ obj->rename_allowed = 0;
+ }
+
+- yaffs_VerifyDirectory(directory);
+- yaffs_VerifyObjectInDirectory(obj);
++ yaffs_verify_dir(directory);
++ yaffs_verify_obj_in_dir(obj);
+ }
+
+-yaffs_Object *yaffs_FindObjectByName(yaffs_Object *directory,
++yaffs_obj_t *yaffs_find_by_name(yaffs_obj_t *directory,
+ const YCHAR *name)
+ {
+ int sum;
+@@ -6741,7 +4537,7 @@ yaffs_Object *yaffs_FindObjectByName(yaf
+ struct ylist_head *i;
+ YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
+
+- yaffs_Object *l;
++ yaffs_obj_t *l;
+
+ if (!name)
+ return NULL;
+@@ -6749,39 +4545,39 @@ yaffs_Object *yaffs_FindObjectByName(yaf
+ if (!directory) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("tragedy: yaffs_FindObjectByName: null pointer directory"
++ ("tragedy: yaffs_find_by_name: null pointer directory"
+ TENDSTR)));
+ YBUG();
+ return NULL;
+ }
+- if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("tragedy: yaffs_FindObjectByName: non-directory" TENDSTR)));
++ ("tragedy: yaffs_find_by_name: non-directory" TENDSTR)));
+ YBUG();
+ }
+
+- sum = yaffs_CalcNameSum(name);
++ sum = yaffs_calc_name_sum(name);
+
+- ylist_for_each(i, &directory->variant.directoryVariant.children) {
++ ylist_for_each(i, &directory->variant.dir_variant.children) {
+ if (i) {
+- l = ylist_entry(i, yaffs_Object, siblings);
++ l = ylist_entry(i, yaffs_obj_t, siblings);
+
+ if (l->parent != directory)
+ YBUG();
+
+- yaffs_CheckObjectDetailsLoaded(l);
++ yaffs_check_obj_details_loaded(l);
+
+ /* Special case for lost-n-found */
+- if (l->objectId == YAFFS_OBJECTID_LOSTNFOUND) {
++ if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+ if (yaffs_strcmp(name, YAFFS_LOSTNFOUND_NAME) == 0)
+ return l;
+- } else if (yaffs_SumCompare(l->sum, sum) || l->hdrChunk <= 0) {
++ } else if (yaffs_sum_cmp(l->sum, sum) || l->hdr_chunk <= 0) {
+ /* LostnFound chunk called Objxxx
+ * Do a real check
+ */
+- yaffs_GetObjectName(l, buffer,
+- YAFFS_MAX_NAME_LENGTH);
++ yaffs_get_obj_name(l, buffer,
++ YAFFS_MAX_NAME_LENGTH + 1);
+ if (yaffs_strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH) == 0)
+ return l;
+ }
+@@ -6793,31 +4589,31 @@ yaffs_Object *yaffs_FindObjectByName(yaf
+
+
+ #if 0
+-int yaffs_ApplyToDirectoryChildren(yaffs_Object *theDir,
+- int (*fn) (yaffs_Object *))
++int yaffs_ApplyToDirectoryChildren(yaffs_obj_t *the_dir,
++ int (*fn) (yaffs_obj_t *))
+ {
+ struct ylist_head *i;
+- yaffs_Object *l;
++ yaffs_obj_t *l;
+
+- if (!theDir) {
++ if (!the_dir) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("tragedy: yaffs_FindObjectByName: null pointer directory"
++ ("tragedy: yaffs_find_by_name: null pointer directory"
+ TENDSTR)));
+ YBUG();
+ return YAFFS_FAIL;
+ }
+- if (theDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ if (the_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("tragedy: yaffs_FindObjectByName: non-directory" TENDSTR)));
++ ("tragedy: yaffs_find_by_name: non-directory" TENDSTR)));
+ YBUG();
+ return YAFFS_FAIL;
+ }
+
+- ylist_for_each(i, &theDir->variant.directoryVariant.children) {
++ ylist_for_each(i, &the_dir->variant.dir_variant.children) {
+ if (i) {
+- l = ylist_entry(i, yaffs_Object, siblings);
++ l = ylist_entry(i, yaffs_obj_t, siblings);
+ if (l && !fn(l))
+ return YAFFS_FAIL;
+ }
+@@ -6832,82 +4628,175 @@ int yaffs_ApplyToDirectoryChildren(yaffs
+ * actual object.
+ */
+
+-yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object *obj)
++yaffs_obj_t *yaffs_get_equivalent_obj(yaffs_obj_t *obj)
+ {
+- if (obj && obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
++ if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+ /* We want the object id of the equivalent object, not this one */
+- obj = obj->variant.hardLinkVariant.equivalentObject;
+- yaffs_CheckObjectDetailsLoaded(obj);
++ obj = obj->variant.hardlink_variant.equiv_obj;
++ yaffs_check_obj_details_loaded(obj);
+ }
+ return obj;
+ }
+
+-int yaffs_GetObjectName(yaffs_Object *obj, YCHAR *name, int buffSize)
+-{
+- memset(name, 0, buffSize * sizeof(YCHAR));
+-
+- yaffs_CheckObjectDetailsLoaded(obj);
++/*
++ * A note or two on object names.
++ * * If the object name is missing, we then make one up in the form objnnn
++ *
++ * * ASCII names are stored in the object header's name field from byte zero
++ * * Unicode names are historically stored starting from byte zero.
++ *
++ * Then there are automatic Unicode names...
++ * The purpose of these is to save names in a way that can be read as
++ * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
++ * system to share files.
++ *
++ * These automatic unicode names are stored slightly differently...
++ * - If the name can fit in the ASCII character space then they are saved as
++ * ascii names as per above.
++ * - If the name needs Unicode then the name is saved in Unicode
++ * starting at oh->name[1].
+
+- if (obj->objectId == YAFFS_OBJECTID_LOSTNFOUND) {
+- yaffs_strncpy(name, YAFFS_LOSTNFOUND_NAME, buffSize - 1);
+- } else if (obj->hdrChunk <= 0) {
++ */
++static void yaffs_fix_null_name(yaffs_obj_t * obj,YCHAR * name, int buffer_size)
++{
++ /* Create an object name if we could not find one. */
++ if(yaffs_strnlen(name,YAFFS_MAX_NAME_LENGTH) == 0){
+ YCHAR locName[20];
+ YCHAR numString[20];
+ YCHAR *x = &numString[19];
+- unsigned v = obj->objectId;
++ unsigned v = obj->obj_id;
+ numString[19] = 0;
+- while (v > 0) {
++ while(v>0){
+ x--;
+ *x = '0' + (v % 10);
+ v /= 10;
+ }
+ /* make up a name */
+ yaffs_strcpy(locName, YAFFS_LOSTNFOUND_PREFIX);
+- yaffs_strcat(locName, x);
+- yaffs_strncpy(name, locName, buffSize - 1);
++ yaffs_strcat(locName,x);
++ yaffs_strncpy(name, locName, buffer_size - 1);
++ }
++}
++
++static void yaffs_load_name_from_oh(yaffs_dev_t *dev,YCHAR *name, const YCHAR *ohName, int bufferSize)
++{
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++ if(dev->param.auto_unicode){
++ if(*ohName){
++ /* It is an ASCII name, so do an ASCII to unicode conversion */
++ const char *asciiOhName = (const char *)ohName;
++ int n = bufferSize - 1;
++ while(n > 0 && *asciiOhName){
++ *name = *asciiOhName;
++ name++;
++ asciiOhName++;
++ n--;
++ }
++ } else
++ yaffs_strncpy(name,ohName+1, bufferSize -1);
++ } else
++#endif
++ yaffs_strncpy(name, ohName, bufferSize - 1);
++}
++
++
++static void yaffs_load_oh_from_name(yaffs_dev_t *dev, YCHAR *ohName, const YCHAR *name)
++{
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++
++ int isAscii;
++ YCHAR *w;
++
++ if(dev->param.auto_unicode){
++
++ isAscii = 1;
++ w = name;
++
++		/* Figure out if the name will fit in the ascii character set */
++ while(isAscii && *w){
++ if((*w) & 0xff00)
++ isAscii = 0;
++ w++;
++ }
+
++ if(isAscii){
++ /* It is an ASCII name, so do a unicode to ascii conversion */
++ char *asciiOhName = (char *)ohName;
++ int n = YAFFS_MAX_NAME_LENGTH - 1;
++ while(n > 0 && *name){
++ *asciiOhName= *name;
++ name++;
++ asciiOhName++;
++ n--;
++ }
++ } else{
++ /* It is a unicode name, so save starting at the second YCHAR */
++ *ohName = 0;
++ yaffs_strncpy(ohName+1,name, YAFFS_MAX_NAME_LENGTH -2);
++ }
+ }
++ else
++#endif
++ yaffs_strncpy(ohName,name, YAFFS_MAX_NAME_LENGTH - 1);
++
++}
++
++int yaffs_get_obj_name(yaffs_obj_t * obj, YCHAR * name, int buffer_size)
++{
++ memset(name, 0, buffer_size * sizeof(YCHAR));
++
++ yaffs_check_obj_details_loaded(obj);
++
++ if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
++ yaffs_strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
++ }
+ #ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+- else if (obj->shortName[0])
+- yaffs_strcpy(name, obj->shortName);
++ else if (obj->short_name[0]) {
++ yaffs_strcpy(name, obj->short_name);
++ }
+ #endif
+- else {
++ else if(obj->hdr_chunk > 0) {
+ int result;
+- __u8 *buffer = yaffs_GetTempBuffer(obj->myDev, __LINE__);
++ __u8 *buffer = yaffs_get_temp_buffer(obj->my_dev, __LINE__);
+
+- yaffs_ObjectHeader *oh = (yaffs_ObjectHeader *) buffer;
++ yaffs_obj_header *oh = (yaffs_obj_header *) buffer;
+
+- memset(buffer, 0, obj->myDev->nDataBytesPerChunk);
++ memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
+
+- if (obj->hdrChunk > 0) {
+- result = yaffs_ReadChunkWithTagsFromNAND(obj->myDev,
+- obj->hdrChunk, buffer,
++ if (obj->hdr_chunk > 0) {
++ result = yaffs_rd_chunk_tags_nand(obj->my_dev,
++ obj->hdr_chunk, buffer,
+ NULL);
+ }
+- yaffs_strncpy(name, oh->name, buffSize - 1);
++ yaffs_load_name_from_oh(obj->my_dev,name,oh->name,buffer_size);
+
+- yaffs_ReleaseTempBuffer(obj->myDev, buffer, __LINE__);
++ yaffs_release_temp_buffer(obj->my_dev, buffer, __LINE__);
+ }
+
+- return yaffs_strlen(name);
++ yaffs_fix_null_name(obj,name,buffer_size);
++
++ return yaffs_strnlen(name,YAFFS_MAX_NAME_LENGTH);
+ }
+
+-int yaffs_GetObjectFileLength(yaffs_Object *obj)
++
++int yaffs_get_obj_length(yaffs_obj_t *obj)
+ {
+ /* Dereference any hard linking */
+- obj = yaffs_GetEquivalentObject(obj);
++ obj = yaffs_get_equivalent_obj(obj);
+
+- if (obj->variantType == YAFFS_OBJECT_TYPE_FILE)
+- return obj->variant.fileVariant.fileSize;
+- if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK)
+- return yaffs_strlen(obj->variant.symLinkVariant.alias);
+- else {
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++ return obj->variant.file_variant.file_size;
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK){
++ if(!obj->variant.symlink_variant.alias)
++ return 0;
++ return yaffs_strnlen(obj->variant.symlink_variant.alias,YAFFS_MAX_ALIAS_LENGTH);
++ } else {
+ /* Only a directory should drop through to here */
+- return obj->myDev->nDataBytesPerChunk;
++ return obj->my_dev->data_bytes_per_chunk;
+ }
+ }
+
+-int yaffs_GetObjectLinkCount(yaffs_Object *obj)
++int yaffs_get_obj_link_count(yaffs_obj_t *obj)
+ {
+ int count = 0;
+ struct ylist_head *i;
+@@ -6915,24 +4804,24 @@ int yaffs_GetObjectLinkCount(yaffs_Objec
+ if (!obj->unlinked)
+ count++; /* the object itself */
+
+- ylist_for_each(i, &obj->hardLinks)
++ ylist_for_each(i, &obj->hard_links)
+ count++; /* add the hard links; */
+
+ return count;
+ }
+
+-int yaffs_GetObjectInode(yaffs_Object *obj)
++int yaffs_get_obj_inode(yaffs_obj_t *obj)
+ {
+- obj = yaffs_GetEquivalentObject(obj);
++ obj = yaffs_get_equivalent_obj(obj);
+
+- return obj->objectId;
++ return obj->obj_id;
+ }
+
+-unsigned yaffs_GetObjectType(yaffs_Object *obj)
++unsigned yaffs_get_obj_type(yaffs_obj_t *obj)
+ {
+- obj = yaffs_GetEquivalentObject(obj);
++ obj = yaffs_get_equivalent_obj(obj);
+
+- switch (obj->variantType) {
++ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return DT_REG;
+ break;
+@@ -6960,18 +4849,18 @@ unsigned yaffs_GetObjectType(yaffs_Objec
+ }
+ }
+
+-YCHAR *yaffs_GetSymlinkAlias(yaffs_Object *obj)
++YCHAR *yaffs_get_symlink_alias(yaffs_obj_t *obj)
+ {
+- obj = yaffs_GetEquivalentObject(obj);
+- if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK)
+- return yaffs_CloneString(obj->variant.symLinkVariant.alias);
++ obj = yaffs_get_equivalent_obj(obj);
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
++ return yaffs_clone_str(obj->variant.symlink_variant.alias);
+ else
+- return yaffs_CloneString(_Y(""));
++ return yaffs_clone_str(_Y(""));
+ }
+
+ #ifndef CONFIG_YAFFS_WINCE
+
+-int yaffs_SetAttributes(yaffs_Object *obj, struct iattr *attr)
++int yaffs_set_attribs(yaffs_obj_t *obj, struct iattr *attr)
+ {
+ unsigned int valid = attr->ia_valid;
+
+@@ -6990,14 +4879,14 @@ int yaffs_SetAttributes(yaffs_Object *ob
+ obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
+
+ if (valid & ATTR_SIZE)
+- yaffs_ResizeFile(obj, attr->ia_size);
++ yaffs_resize_file(obj, attr->ia_size);
+
+- yaffs_UpdateObjectHeader(obj, NULL, 1, 0, 0);
++ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+ return YAFFS_OK;
+
+ }
+-int yaffs_GetAttributes(yaffs_Object *obj, struct iattr *attr)
++int yaffs_get_attribs(yaffs_obj_t *obj, struct iattr *attr)
+ {
+ unsigned int valid = 0;
+
+@@ -7015,7 +4904,7 @@ int yaffs_GetAttributes(yaffs_Object *ob
+ Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
+ valid |= ATTR_MTIME;
+
+- attr->ia_size = yaffs_GetFileSize(obj);
++ attr->ia_size = yaffs_get_file_size(obj);
+ valid |= ATTR_SIZE;
+
+ attr->ia_valid = valid;
+@@ -7025,20 +4914,137 @@ int yaffs_GetAttributes(yaffs_Object *ob
+
+ #endif
+
++
++static int yaffs_do_xattrib_mod(yaffs_obj_t *obj, int set, const YCHAR *name, const void *value, int size, int flags)
++{
++ yaffs_xattr_mod xmod;
++
++ int result;
++
++ xmod.set = set;
++ xmod.name = name;
++ xmod.data = value;
++ xmod.size = size;
++ xmod.flags = flags;
++ xmod.result = -ENOSPC;
++
++ result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
++
++ if(result > 0)
++ return xmod.result;
++ else
++ return -ENOSPC;
++}
++
++static int yaffs_apply_xattrib_mod(yaffs_obj_t *obj, char *buffer, yaffs_xattr_mod *xmod)
++{
++ int retval = 0;
++ int x_offs = sizeof(yaffs_obj_header);
++ yaffs_dev_t *dev = obj->my_dev;
++ int x_size = dev->data_bytes_per_chunk - sizeof(yaffs_obj_header);
++
++ char * x_buffer = buffer + x_offs;
++
++ if(xmod->set)
++ retval = nval_set(x_buffer, x_size, xmod->name, xmod->data, xmod->size, xmod->flags);
++ else
++ retval = nval_del(x_buffer, x_size, xmod->name);
++
++ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
++ obj->xattr_known = 1;
++
++ xmod->result = retval;
++
++ return retval;
++}
++
++static int yaffs_do_xattrib_fetch(yaffs_obj_t *obj, const YCHAR *name, void *value, int size)
++{
++ char *buffer = NULL;
++ int result;
++ yaffs_ext_tags tags;
++ yaffs_dev_t *dev = obj->my_dev;
++ int x_offs = sizeof(yaffs_obj_header);
++ int x_size = dev->data_bytes_per_chunk - sizeof(yaffs_obj_header);
++
++ char * x_buffer;
++
++ int retval = 0;
++
++ if(obj->hdr_chunk < 1)
++ return -ENODATA;
++
++ /* If we know that the object has no xattribs then don't do all the
++ * reading and parsing.
++ */
++ if(obj->xattr_known && !obj->has_xattr){
++ if(name)
++ return -ENODATA;
++ else
++ return 0;
++ }
++
++ buffer = (char *) yaffs_get_temp_buffer(dev, __LINE__);
++ if(!buffer)
++ return -ENOMEM;
++
++ result = yaffs_rd_chunk_tags_nand(dev,obj->hdr_chunk, (__u8 *)buffer, &tags);
++
++ if(result != YAFFS_OK)
++ retval = -ENOENT;
++ else{
++ x_buffer = buffer + x_offs;
++
++ if (!obj->xattr_known){
++ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
++ obj->xattr_known = 1;
++ }
++
++ if(name)
++ retval = nval_get(x_buffer, x_size, name, value, size);
++ else
++ retval = nval_list(x_buffer, x_size, value,size);
++ }
++ yaffs_release_temp_buffer(dev,(__u8 *)buffer,__LINE__);
++ return retval;
++}
++
++int yaffs_set_xattrib(yaffs_obj_t *obj, const YCHAR *name, const void * value, int size, int flags)
++{
++ return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
++}
++
++int yaffs_remove_xattrib(yaffs_obj_t *obj, const YCHAR *name)
++{
++ return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
++}
++
++int yaffs_get_xattrib(yaffs_obj_t *obj, const YCHAR *name, void *value, int size)
++{
++ return yaffs_do_xattrib_fetch(obj, name, value, size);
++}
++
++int yaffs_list_xattrib(yaffs_obj_t *obj, char *buffer, int size)
++{
++ return yaffs_do_xattrib_fetch(obj, NULL, buffer,size);
++}
++
++
++
+ #if 0
+-int yaffs_DumpObject(yaffs_Object *obj)
++int yaffs_dump_obj(yaffs_obj_t *obj)
+ {
+ YCHAR name[257];
+
+- yaffs_GetObjectName(obj, name, 256);
++ yaffs_get_obj_name(obj, name, YAFFS_MAX_NAME_LENGTH + 1);
+
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+ ("Object %d, inode %d \"%s\"\n dirty %d valid %d serial %d sum %d"
+ " chunk %d type %d size %d\n"
+- TENDSTR), obj->objectId, yaffs_GetObjectInode(obj), name,
+- obj->dirty, obj->valid, obj->serial, obj->sum, obj->hdrChunk,
+- yaffs_GetObjectType(obj), yaffs_GetObjectFileLength(obj)));
++ TENDSTR), obj->obj_id, yaffs_get_obj_inode(obj), name,
++ obj->dirty, obj->valid, obj->serial, obj->sum, obj->hdr_chunk,
++ yaffs_get_obj_type(obj), yaffs_get_obj_length(obj)));
+
+ return YAFFS_OK;
+ }
+@@ -7046,72 +5052,74 @@ int yaffs_DumpObject(yaffs_Object *obj)
+
+ /*---------------------------- Initialisation code -------------------------------------- */
+
+-static int yaffs_CheckDevFunctions(const yaffs_Device *dev)
++static int yaffs_cehck_dev_fns(const yaffs_dev_t *dev)
+ {
+
+ /* Common functions, gotta have */
+- if (!dev->eraseBlockInNAND || !dev->initialiseNAND)
++ if (!dev->param.erase_fn || !dev->param.initialise_flash_fn)
+ return 0;
+
+ #ifdef CONFIG_YAFFS_YAFFS2
+
+ /* Can use the "with tags" style interface for yaffs1 or yaffs2 */
+- if (dev->writeChunkWithTagsToNAND &&
+- dev->readChunkWithTagsFromNAND &&
+- !dev->writeChunkToNAND &&
+- !dev->readChunkFromNAND &&
+- dev->markNANDBlockBad && dev->queryNANDBlock)
++ if (dev->param.write_chunk_tags_fn &&
++ dev->param.read_chunk_tags_fn &&
++ !dev->param.write_chunk_fn &&
++ !dev->param.read_chunk_fn &&
++ dev->param.bad_block_fn &&
++ dev->param.query_block_fn)
+ return 1;
+ #endif
+
+ /* Can use the "spare" style interface for yaffs1 */
+- if (!dev->isYaffs2 &&
+- !dev->writeChunkWithTagsToNAND &&
+- !dev->readChunkWithTagsFromNAND &&
+- dev->writeChunkToNAND &&
+- dev->readChunkFromNAND &&
+- !dev->markNANDBlockBad && !dev->queryNANDBlock)
++ if (!dev->param.is_yaffs2 &&
++ !dev->param.write_chunk_tags_fn &&
++ !dev->param.read_chunk_tags_fn &&
++ dev->param.write_chunk_fn &&
++ dev->param.read_chunk_fn &&
++ !dev->param.bad_block_fn &&
++ !dev->param.query_block_fn)
+ return 1;
+
+- return 0; /* bad */
++ return 0; /* bad */
+ }
+
+
+-static int yaffs_CreateInitialDirectories(yaffs_Device *dev)
++static int yaffs_create_initial_dir(yaffs_dev_t *dev)
+ {
+ /* Initialise the unlinked, deleted, root and lost and found directories */
+
+- dev->lostNFoundDir = dev->rootDir = NULL;
+- dev->unlinkedDir = dev->deletedDir = NULL;
++ dev->lost_n_found = dev->root_dir = NULL;
++ dev->unlinked_dir = dev->del_dir = NULL;
+
+- dev->unlinkedDir =
+- yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
++ dev->unlinked_dir =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
+
+- dev->deletedDir =
+- yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
++ dev->del_dir =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
+
+- dev->rootDir =
+- yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_ROOT,
++ dev->root_dir =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
+ YAFFS_ROOT_MODE | S_IFDIR);
+- dev->lostNFoundDir =
+- yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_LOSTNFOUND,
++ dev->lost_n_found =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
+ YAFFS_LOSTNFOUND_MODE | S_IFDIR);
+
+- if (dev->lostNFoundDir && dev->rootDir && dev->unlinkedDir && dev->deletedDir) {
+- yaffs_AddObjectToDirectory(dev->rootDir, dev->lostNFoundDir);
++ if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir && dev->del_dir) {
++ yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+ }
+
+-int yaffs_GutsInitialise(yaffs_Device *dev)
++int yaffs_guts_initialise(yaffs_dev_t *dev)
+ {
+ int init_failed = 0;
+ unsigned x;
+ int bits;
+
+- T(YAFFS_TRACE_TRACING, (TSTR("yaffs: yaffs_GutsInitialise()" TENDSTR)));
++ T(YAFFS_TRACE_TRACING, (TSTR("yaffs: yaffs_guts_initialise()" TENDSTR)));
+
+ /* Check stuff that must be set */
+
+@@ -7120,52 +5128,52 @@ int yaffs_GutsInitialise(yaffs_Device *d
+ return YAFFS_FAIL;
+ }
+
+- dev->internalStartBlock = dev->startBlock;
+- dev->internalEndBlock = dev->endBlock;
+- dev->blockOffset = 0;
+- dev->chunkOffset = 0;
+- dev->nFreeChunks = 0;
+-
+- dev->gcBlock = -1;
+-
+- if (dev->startBlock == 0) {
+- dev->internalStartBlock = dev->startBlock + 1;
+- dev->internalEndBlock = dev->endBlock + 1;
+- dev->blockOffset = 1;
+- dev->chunkOffset = dev->nChunksPerBlock;
++ dev->internal_start_block = dev->param.start_block;
++ dev->internal_end_block = dev->param.end_block;
++ dev->block_offset = 0;
++ dev->chunk_offset = 0;
++ dev->n_free_chunks = 0;
++
++ dev->gc_block = 0;
++
++ if (dev->param.start_block == 0) {
++ dev->internal_start_block = dev->param.start_block + 1;
++ dev->internal_end_block = dev->param.end_block + 1;
++ dev->block_offset = 1;
++ dev->chunk_offset = dev->param.chunks_per_block;
+ }
+
+ /* Check geometry parameters. */
+
+- if ((!dev->inbandTags && dev->isYaffs2 && dev->totalBytesPerChunk < 1024) ||
+- (!dev->isYaffs2 && dev->totalBytesPerChunk < 512) ||
+- (dev->inbandTags && !dev->isYaffs2) ||
+- dev->nChunksPerBlock < 2 ||
+- dev->nReservedBlocks < 2 ||
+- dev->internalStartBlock <= 0 ||
+- dev->internalEndBlock <= 0 ||
+- dev->internalEndBlock <= (dev->internalStartBlock + dev->nReservedBlocks + 2)) { /* otherwise it is too small */
++ if ((!dev->param.inband_tags && dev->param.is_yaffs2 && dev->param.total_bytes_per_chunk < 1024) ||
++ (!dev->param.is_yaffs2 && dev->param.total_bytes_per_chunk < 512) ||
++ (dev->param.inband_tags && !dev->param.is_yaffs2) ||
++ dev->param.chunks_per_block < 2 ||
++ dev->param.n_reserved_blocks < 2 ||
++ dev->internal_start_block <= 0 ||
++ dev->internal_end_block <= 0 ||
++ dev->internal_end_block <= (dev->internal_start_block + dev->param.n_reserved_blocks + 2)) { /* otherwise it is too small */
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+- ("yaffs: NAND geometry problems: chunk size %d, type is yaffs%s, inbandTags %d "
+- TENDSTR), dev->totalBytesPerChunk, dev->isYaffs2 ? "2" : "", dev->inbandTags));
++ ("yaffs: NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d "
++ TENDSTR), dev->param.total_bytes_per_chunk, dev->param.is_yaffs2 ? "2" : "", dev->param.inband_tags));
+ return YAFFS_FAIL;
+ }
+
+- if (yaffs_InitialiseNAND(dev) != YAFFS_OK) {
++ if (yaffs_init_nand(dev) != YAFFS_OK) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs: InitialiseNAND failed" TENDSTR)));
+ return YAFFS_FAIL;
+ }
+
+ /* Sort out space for inband tags, if required */
+- if (dev->inbandTags)
+- dev->nDataBytesPerChunk = dev->totalBytesPerChunk - sizeof(yaffs_PackedTags2TagsPart);
++ if (dev->param.inband_tags)
++ dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk - sizeof(yaffs_PackedTags2TagsPart);
+ else
+- dev->nDataBytesPerChunk = dev->totalBytesPerChunk;
++ dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
+
+ /* Got the right mix of functions? */
+- if (!yaffs_CheckDevFunctions(dev)) {
++ if (!yaffs_cehck_dev_fns(dev)) {
+ /* Function missing */
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR
+@@ -7175,13 +5183,13 @@ int yaffs_GutsInitialise(yaffs_Device *d
+ }
+
+ /* This is really a compilation check. */
+- if (!yaffs_CheckStructures()) {
++ if (!yaffs_check_structures()) {
+ T(YAFFS_TRACE_ALWAYS,
+- (TSTR("yaffs_CheckStructures failed\n" TENDSTR)));
++ (TSTR("yaffs_check_structures failed\n" TENDSTR)));
+ return YAFFS_FAIL;
+ }
+
+- if (dev->isMounted) {
++ if (dev->is_mounted) {
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs: device already mounted\n" TENDSTR)));
+ return YAFFS_FAIL;
+@@ -7189,59 +5197,62 @@ int yaffs_GutsInitialise(yaffs_Device *d
+
+ /* Finished with most checks. One or two more checks happen later on too. */
+
+- dev->isMounted = 1;
++ dev->is_mounted = 1;
+
+ /* OK now calculate a few things for the device */
+
+ /*
+ * Calculate all the chunk size manipulation numbers:
+ */
+- x = dev->nDataBytesPerChunk;
+- /* We always use dev->chunkShift and dev->chunkDiv */
+- dev->chunkShift = Shifts(x);
+- x >>= dev->chunkShift;
+- dev->chunkDiv = x;
+- /* We only use chunk mask if chunkDiv is 1 */
+- dev->chunkMask = (1<<dev->chunkShift) - 1;
++ x = dev->data_bytes_per_chunk;
++ /* We always use dev->chunk_shift and dev->chunk_div */
++ dev->chunk_shift = Shifts(x);
++ x >>= dev->chunk_shift;
++ dev->chunk_div = x;
++ /* We only use chunk mask if chunk_div is 1 */
++ dev->chunk_mask = (1<<dev->chunk_shift) - 1;
+
+ /*
+- * Calculate chunkGroupBits.
+- * We need to find the next power of 2 > than internalEndBlock
++ * Calculate chunk_grp_bits.
++ * We need to find the next power of 2 > than internal_end_block
+ */
+
+- x = dev->nChunksPerBlock * (dev->internalEndBlock + 1);
++ x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
+
+ bits = ShiftsGE(x);
+
+ /* Set up tnode width if wide tnodes are enabled. */
+- if (!dev->wideTnodesDisabled) {
++ if (!dev->param.wide_tnodes_disabled) {
+ /* bits must be even so that we end up with 32-bit words */
+ if (bits & 1)
+ bits++;
+ if (bits < 16)
+- dev->tnodeWidth = 16;
++ dev->tnode_width = 16;
+ else
+- dev->tnodeWidth = bits;
++ dev->tnode_width = bits;
+ } else
+- dev->tnodeWidth = 16;
++ dev->tnode_width = 16;
+
+- dev->tnodeMask = (1<<dev->tnodeWidth)-1;
++ dev->tnode_mask = (1<<dev->tnode_width)-1;
+
+ /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
+ * so if the bitwidth of the
+ * chunk range we're using is greater than 16 we need
+- * to figure out chunk shift and chunkGroupSize
++ * to figure out chunk shift and chunk_grp_size
+ */
+
+- if (bits <= dev->tnodeWidth)
+- dev->chunkGroupBits = 0;
++ if (bits <= dev->tnode_width)
++ dev->chunk_grp_bits = 0;
+ else
+- dev->chunkGroupBits = bits - dev->tnodeWidth;
++ dev->chunk_grp_bits = bits - dev->tnode_width;
+
++ dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0)/8;
++ if(dev->tnode_size < sizeof(yaffs_tnode_t))
++ dev->tnode_size = sizeof(yaffs_tnode_t);
+
+- dev->chunkGroupSize = 1 << dev->chunkGroupBits;
++ dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
+
+- if (dev->nChunksPerBlock < dev->chunkGroupSize) {
++ if (dev->param.chunks_per_block < dev->chunk_grp_size) {
+ /* We have a problem because the soft delete won't work if
+ * the chunk group size > chunks per block.
+ * This can be remedied by using larger "virtual blocks".
+@@ -7255,85 +5266,89 @@ int yaffs_GutsInitialise(yaffs_Device *d
+ /* OK, we've finished verifying the device, lets continue with initialisation */
+
+ /* More device initialisation */
+- dev->garbageCollections = 0;
+- dev->passiveGarbageCollections = 0;
+- dev->currentDirtyChecker = 0;
+- dev->bufferedBlock = -1;
+- dev->doingBufferedBlockRewrite = 0;
+- dev->nDeletedFiles = 0;
+- dev->nBackgroundDeletions = 0;
+- dev->nUnlinkedFiles = 0;
+- dev->eccFixed = 0;
+- dev->eccUnfixed = 0;
+- dev->tagsEccFixed = 0;
+- dev->tagsEccUnfixed = 0;
+- dev->nErasureFailures = 0;
+- dev->nErasedBlocks = 0;
+- dev->isDoingGC = 0;
+- dev->hasPendingPrioritisedGCs = 1; /* Assume the worst for now, will get fixed on first GC */
++ dev->all_gcs = 0;
++ dev->passive_gc_count = 0;
++ dev->oldest_dirty_gc_count = 0;
++ dev->bg_gcs = 0;
++ dev->gc_block_finder = 0;
++ dev->buffered_block = -1;
++ dev->doing_buffered_block_rewrite = 0;
++ dev->n_deleted_files = 0;
++ dev->n_bg_deletions = 0;
++ dev->n_unlinked_files = 0;
++ dev->n_ecc_fixed = 0;
++ dev->n_ecc_unfixed = 0;
++ dev->n_tags_ecc_fixed = 0;
++ dev->n_tags_ecc_unfixed = 0;
++ dev->n_erase_failures = 0;
++ dev->n_erased_blocks = 0;
++ dev->gc_disable= 0;
++ dev->has_pending_prioritised_gc = 1; /* Assume the worst for now, will get fixed on first GC */
++ YINIT_LIST_HEAD(&dev->dirty_dirs);
++ dev->oldest_dirty_seq = 0;
++ dev->oldest_dirty_block = 0;
+
+ /* Initialise temporary buffers and caches. */
+- if (!yaffs_InitialiseTempBuffers(dev))
++ if (!yaffs_init_tmp_buffers(dev))
+ init_failed = 1;
+
+- dev->srCache = NULL;
+- dev->gcCleanupList = NULL;
++ dev->cache = NULL;
++ dev->gc_cleanup_list = NULL;
+
+
+ if (!init_failed &&
+- dev->nShortOpCaches > 0) {
++ dev->param.n_caches > 0) {
+ int i;
+ void *buf;
+- int srCacheBytes = dev->nShortOpCaches * sizeof(yaffs_ChunkCache);
++ int cacheBytes = dev->param.n_caches * sizeof(yaffs_cache_t);
+
+- if (dev->nShortOpCaches > YAFFS_MAX_SHORT_OP_CACHES)
+- dev->nShortOpCaches = YAFFS_MAX_SHORT_OP_CACHES;
++ if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
++ dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
+
+- dev->srCache = YMALLOC(srCacheBytes);
++ dev->cache = YMALLOC(cacheBytes);
+
+- buf = (__u8 *) dev->srCache;
++ buf = (__u8 *) dev->cache;
+
+- if (dev->srCache)
+- memset(dev->srCache, 0, srCacheBytes);
++ if (dev->cache)
++ memset(dev->cache, 0, cacheBytes);
+
+- for (i = 0; i < dev->nShortOpCaches && buf; i++) {
+- dev->srCache[i].object = NULL;
+- dev->srCache[i].lastUse = 0;
+- dev->srCache[i].dirty = 0;
+- dev->srCache[i].data = buf = YMALLOC_DMA(dev->totalBytesPerChunk);
++ for (i = 0; i < dev->param.n_caches && buf; i++) {
++ dev->cache[i].object = NULL;
++ dev->cache[i].last_use = 0;
++ dev->cache[i].dirty = 0;
++ dev->cache[i].data = buf = YMALLOC_DMA(dev->param.total_bytes_per_chunk);
+ }
+ if (!buf)
+ init_failed = 1;
+
+- dev->srLastUse = 0;
++ dev->cache_last_use = 0;
+ }
+
+- dev->cacheHits = 0;
++ dev->cache_hits = 0;
+
+ if (!init_failed) {
+- dev->gcCleanupList = YMALLOC(dev->nChunksPerBlock * sizeof(__u32));
+- if (!dev->gcCleanupList)
++ dev->gc_cleanup_list = YMALLOC(dev->param.chunks_per_block * sizeof(__u32));
++ if (!dev->gc_cleanup_list)
+ init_failed = 1;
+ }
+
+- if (dev->isYaffs2)
+- dev->useHeaderFileSize = 1;
++ if (dev->param.is_yaffs2)
++ dev->param.use_header_file_size = 1;
+
+- if (!init_failed && !yaffs_InitialiseBlocks(dev))
++ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+- yaffs_InitialiseTnodes(dev);
+- yaffs_InitialiseObjects(dev);
++ yaffs_init_tnodes_and_objs(dev);
+
+- if (!init_failed && !yaffs_CreateInitialDirectories(dev))
++ if (!init_failed && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+
+ if (!init_failed) {
+ /* Now scan the flash. */
+- if (dev->isYaffs2) {
+- if (yaffs_CheckpointRestore(dev)) {
+- yaffs_CheckObjectDetailsLoaded(dev->rootDir);
++ if (dev->param.is_yaffs2) {
++ if (yaffs2_checkpt_restore(dev)) {
++ yaffs_check_obj_details_loaded(dev->root_dir);
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("yaffs: restored from checkpoint" TENDSTR)));
+ } else {
+@@ -7341,128 +5356,129 @@ int yaffs_GutsInitialise(yaffs_Device *d
+ /* Clean up the mess caused by an aborted checkpoint load
+ * and scan backwards.
+ */
+- yaffs_DeinitialiseBlocks(dev);
+- yaffs_DeinitialiseTnodes(dev);
+- yaffs_DeinitialiseObjects(dev);
++ yaffs_deinit_blocks(dev);
+
++ yaffs_deinit_tnodes_and_objs(dev);
+
+- dev->nErasedBlocks = 0;
+- dev->nFreeChunks = 0;
+- dev->allocationBlock = -1;
+- dev->allocationPage = -1;
+- dev->nDeletedFiles = 0;
+- dev->nUnlinkedFiles = 0;
+- dev->nBackgroundDeletions = 0;
+- dev->oldestDirtySequence = 0;
++ dev->n_erased_blocks = 0;
++ dev->n_free_chunks = 0;
++ dev->alloc_block = -1;
++ dev->alloc_page = -1;
++ dev->n_deleted_files = 0;
++ dev->n_unlinked_files = 0;
++ dev->n_bg_deletions = 0;
+
+- if (!init_failed && !yaffs_InitialiseBlocks(dev))
++ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+- yaffs_InitialiseTnodes(dev);
+- yaffs_InitialiseObjects(dev);
++ yaffs_init_tnodes_and_objs(dev);
+
+- if (!init_failed && !yaffs_CreateInitialDirectories(dev))
++ if (!init_failed && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+- if (!init_failed && !yaffs_ScanBackwards(dev))
++ if (!init_failed && !yaffs2_scan_backwards(dev))
+ init_failed = 1;
+ }
+- } else if (!yaffs_Scan(dev))
++ } else if (!yaffs1_scan(dev))
+ init_failed = 1;
+
+- yaffs_StripDeletedObjects(dev);
++ yaffs_strip_deleted_objs(dev);
++ yaffs_fix_hanging_objs(dev);
++ if(dev->param.empty_lost_n_found)
++ yaffs_empty_l_n_f(dev);
+ }
+
+ if (init_failed) {
+ /* Clean up the mess */
+ T(YAFFS_TRACE_TRACING,
+- (TSTR("yaffs: yaffs_GutsInitialise() aborted.\n" TENDSTR)));
++ (TSTR("yaffs: yaffs_guts_initialise() aborted.\n" TENDSTR)));
+
+- yaffs_Deinitialise(dev);
++ yaffs_deinitialise(dev);
+ return YAFFS_FAIL;
+ }
+
+ /* Zero out stats */
+- dev->nPageReads = 0;
+- dev->nPageWrites = 0;
+- dev->nBlockErasures = 0;
+- dev->nGCCopies = 0;
+- dev->nRetriedWrites = 0;
+-
+- dev->nRetiredBlocks = 0;
+-
+- yaffs_VerifyFreeChunks(dev);
+- yaffs_VerifyBlocks(dev);
+-
++ dev->n_page_reads = 0;
++ dev->n_page_writes = 0;
++ dev->n_erasures = 0;
++ dev->n_gc_copies = 0;
++ dev->n_retired_writes = 0;
++
++ dev->n_retired_blocks = 0;
++
++ yaffs_verify_free_chunks(dev);
++ yaffs_verify_blocks(dev);
++
++ /* Clean up any aborted checkpoint data */
++ if(!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
++ yaffs2_checkpt_invalidate(dev);
+
+ T(YAFFS_TRACE_TRACING,
+- (TSTR("yaffs: yaffs_GutsInitialise() done.\n" TENDSTR)));
++ (TSTR("yaffs: yaffs_guts_initialise() done.\n" TENDSTR)));
+ return YAFFS_OK;
+
+ }
+
+-void yaffs_Deinitialise(yaffs_Device *dev)
++void yaffs_deinitialise(yaffs_dev_t *dev)
+ {
+- if (dev->isMounted) {
++ if (dev->is_mounted) {
+ int i;
+
+- yaffs_DeinitialiseBlocks(dev);
+- yaffs_DeinitialiseTnodes(dev);
+- yaffs_DeinitialiseObjects(dev);
+- if (dev->nShortOpCaches > 0 &&
+- dev->srCache) {
++ yaffs_deinit_blocks(dev);
++ yaffs_deinit_tnodes_and_objs(dev);
++ if (dev->param.n_caches > 0 &&
++ dev->cache) {
+
+- for (i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].data)
+- YFREE(dev->srCache[i].data);
+- dev->srCache[i].data = NULL;
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].data)
++ YFREE(dev->cache[i].data);
++ dev->cache[i].data = NULL;
+ }
+
+- YFREE(dev->srCache);
+- dev->srCache = NULL;
++ YFREE(dev->cache);
++ dev->cache = NULL;
+ }
+
+- YFREE(dev->gcCleanupList);
++ YFREE(dev->gc_cleanup_list);
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+- YFREE(dev->tempBuffer[i].buffer);
++ YFREE(dev->temp_buffer[i].buffer);
+
+- dev->isMounted = 0;
++ dev->is_mounted = 0;
+
+- if (dev->deinitialiseNAND)
+- dev->deinitialiseNAND(dev);
++ if (dev->param.deinitialise_flash_fn)
++ dev->param.deinitialise_flash_fn(dev);
+ }
+ }
+
+-static int yaffs_CountFreeChunks(yaffs_Device *dev)
++int yaffs_count_free_chunks(yaffs_dev_t *dev)
+ {
+- int nFree;
++ int nFree=0;
+ int b;
+
+- yaffs_BlockInfo *blk;
+-
+- for (nFree = 0, b = dev->internalStartBlock; b <= dev->internalEndBlock;
+- b++) {
+- blk = yaffs_GetBlockInfo(dev, b);
++ yaffs_block_info_t *blk;
+
+- switch (blk->blockState) {
++ blk = dev->block_info;
++ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
++ switch (blk->block_state) {
+ case YAFFS_BLOCK_STATE_EMPTY:
+ case YAFFS_BLOCK_STATE_ALLOCATING:
+ case YAFFS_BLOCK_STATE_COLLECTING:
+ case YAFFS_BLOCK_STATE_FULL:
+ nFree +=
+- (dev->nChunksPerBlock - blk->pagesInUse +
+- blk->softDeletions);
++ (dev->param.chunks_per_block - blk->pages_in_use +
++ blk->soft_del_pages);
+ break;
+ default:
+ break;
+ }
++ blk++;
+ }
+
+ return nFree;
+ }
+
+-int yaffs_GetNumberOfFreeChunks(yaffs_Device *dev)
++int yaffs_get_n_free_chunks(yaffs_dev_t *dev)
+ {
+ /* This is what we report to the outside world */
+
+@@ -7472,30 +5488,28 @@ int yaffs_GetNumberOfFreeChunks(yaffs_De
+ int i;
+
+ #if 1
+- nFree = dev->nFreeChunks;
++ nFree = dev->n_free_chunks;
+ #else
+- nFree = yaffs_CountFreeChunks(dev);
++ nFree = yaffs_count_free_chunks(dev);
+ #endif
+
+- nFree += dev->nDeletedFiles;
++ nFree += dev->n_deleted_files;
+
+ /* Now count the number of dirty chunks in the cache and subtract those */
+
+- for (nDirtyCacheChunks = 0, i = 0; i < dev->nShortOpCaches; i++) {
+- if (dev->srCache[i].dirty)
++ for (nDirtyCacheChunks = 0, i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].dirty)
+ nDirtyCacheChunks++;
+ }
+
+ nFree -= nDirtyCacheChunks;
+
+- nFree -= ((dev->nReservedBlocks + 1) * dev->nChunksPerBlock);
++ nFree -= ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
+
+ /* Now we figure out how much to reserve for the checkpoint and report that... */
+- blocksForCheckpoint = yaffs_CalcCheckpointBlocksRequired(dev) - dev->blocksInCheckpoint;
+- if (blocksForCheckpoint < 0)
+- blocksForCheckpoint = 0;
++ blocksForCheckpoint = yaffs_calc_checkpt_blocks_required(dev);
+
+- nFree -= (blocksForCheckpoint * dev->nChunksPerBlock);
++ nFree -= (blocksForCheckpoint * dev->param.chunks_per_block);
+
+ if (nFree < 0)
+ nFree = 0;
+@@ -7504,49 +5518,27 @@ int yaffs_GetNumberOfFreeChunks(yaffs_De
+
+ }
+
+-static int yaffs_freeVerificationFailures;
+-
+-static void yaffs_VerifyFreeChunks(yaffs_Device *dev)
+-{
+- int counted;
+- int difference;
+-
+- if (yaffs_SkipVerification(dev))
+- return;
+-
+- counted = yaffs_CountFreeChunks(dev);
+-
+- difference = dev->nFreeChunks - counted;
+-
+- if (difference) {
+- T(YAFFS_TRACE_ALWAYS,
+- (TSTR("Freechunks verification failure %d %d %d" TENDSTR),
+- dev->nFreeChunks, counted, difference));
+- yaffs_freeVerificationFailures++;
+- }
+-}
+
+ /*---------------------------------------- YAFFS test code ----------------------*/
+
+-#define yaffs_CheckStruct(structure, syze, name) \
++#define yaffs_check_struct(structure, syze, name) \
+ do { \
+ if (sizeof(structure) != syze) { \
+ T(YAFFS_TRACE_ALWAYS, (TSTR("%s should be %d but is %d\n" TENDSTR),\
+- name, syze, sizeof(structure))); \
++ name, syze, (int) sizeof(structure))); \
+ return YAFFS_FAIL; \
+ } \
+ } while (0)
+
+-static int yaffs_CheckStructures(void)
++static int yaffs_check_structures(void)
+ {
+-/* yaffs_CheckStruct(yaffs_Tags,8,"yaffs_Tags"); */
+-/* yaffs_CheckStruct(yaffs_TagsUnion,8,"yaffs_TagsUnion"); */
+-/* yaffs_CheckStruct(yaffs_Spare,16,"yaffs_Spare"); */
+-#ifndef CONFIG_YAFFS_TNODE_LIST_DEBUG
+- yaffs_CheckStruct(yaffs_Tnode, 2 * YAFFS_NTNODES_LEVEL0, "yaffs_Tnode");
+-#endif
++/* yaffs_check_struct(yaffs_tags_t,8,"yaffs_tags_t"); */
++/* yaffs_check_struct(yaffs_tags_union_t,8,"yaffs_tags_union_t"); */
++/* yaffs_check_struct(yaffs_spare,16,"yaffs_spare"); */
++/* yaffs_check_struct(yaffs_tnode_t, 2 * YAFFS_NTNODES_LEVEL0, "yaffs_tnode_t"); */
++
+ #ifndef CONFIG_YAFFS_WINCE
+- yaffs_CheckStruct(yaffs_ObjectHeader, 512, "yaffs_ObjectHeader");
++ yaffs_check_struct(yaffs_obj_header, 512, "yaffs_obj_header");
+ #endif
+ return YAFFS_OK;
+ }
+--- a/fs/yaffs2/yaffs_guts.h
++++ b/fs/yaffs2/yaffs_guts.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -16,8 +16,9 @@
+ #ifndef __YAFFS_GUTS_H__
+ #define __YAFFS_GUTS_H__
+
+-#include "devextras.h"
+ #include "yportenv.h"
++#include "devextras.h"
++#include "yaffs_list.h"
+
+ #define YAFFS_OK 1
+ #define YAFFS_FAIL 0
+@@ -52,7 +53,6 @@
+
+ #define YAFFS_MAX_CHUNK_ID 0x000FFFFF
+
+-#define YAFFS_UNUSED_OBJECT_ID 0x0003FFFF
+
+ #define YAFFS_ALLOCATION_NOBJECTS 100
+ #define YAFFS_ALLOCATION_NTNODES 100
+@@ -62,8 +62,9 @@
+
+
+ #define YAFFS_OBJECT_SPACE 0x40000
++#define YAFFS_MAX_OBJECT_ID (YAFFS_OBJECT_SPACE -1)
+
+-#define YAFFS_CHECKPOINT_VERSION 3
++#define YAFFS_CHECKPOINT_VERSION 4
+
+ #ifdef CONFIG_YAFFS_UNICODE
+ #define YAFFS_MAX_NAME_LENGTH 127
+@@ -81,12 +82,11 @@
+ #define YAFFS_OBJECTID_UNLINKED 3
+ #define YAFFS_OBJECTID_DELETED 4
+
+-/* Sseudo object ids for checkpointing */
++/* Pseudo object ids for checkpointing */
+ #define YAFFS_OBJECTID_SB_HEADER 0x10
+ #define YAFFS_OBJECTID_CHECKPOINT_DATA 0x20
+ #define YAFFS_SEQUENCE_CHECKPOINT_DATA 0x21
+
+-/* */
+
+ #define YAFFS_MAX_SHORT_OP_CACHES 20
+
+@@ -113,18 +113,14 @@
+
+ /* ChunkCache is used for short read/write operations.*/
+ typedef struct {
+- struct yaffs_ObjectStruct *object;
+- int chunkId;
+- int lastUse;
++ struct yaffs_obj_s *object;
++ int chunk_id;
++ int last_use;
+ int dirty;
+- int nBytes; /* Only valid if the cache is dirty */
++ int n_bytes; /* Only valid if the cache is dirty */
+ int locked; /* Can't push out or flush while locked. */
+-#ifdef CONFIG_YAFFS_YAFFS2
+ __u8 *data;
+-#else
+- __u8 data[YAFFS_BYTES_PER_CHUNK];
+-#endif
+-} yaffs_ChunkCache;
++} yaffs_cache_t;
+
+
+
+@@ -135,18 +131,18 @@ typedef struct {
+
+ #ifndef CONFIG_YAFFS_NO_YAFFS1
+ typedef struct {
+- unsigned chunkId:20;
+- unsigned serialNumber:2;
+- unsigned byteCountLSB:10;
+- unsigned objectId:18;
++ unsigned chunk_id:20;
++ unsigned serial_number:2;
++ unsigned n_bytes_lsb:10;
++ unsigned obj_id:18;
+ unsigned ecc:12;
+- unsigned byteCountMSB:2;
+-} yaffs_Tags;
++ unsigned n_bytes_msb:2;
++} yaffs_tags_t;
+
+ typedef union {
+- yaffs_Tags asTags;
+- __u8 asBytes[8];
+-} yaffs_TagsUnion;
++ yaffs_tags_t as_tags;
++ __u8 as_bytes[8];
++} yaffs_tags_union_t;
+
+ #endif
+
+@@ -157,7 +153,7 @@ typedef enum {
+ YAFFS_ECC_RESULT_NO_ERROR,
+ YAFFS_ECC_RESULT_FIXED,
+ YAFFS_ECC_RESULT_UNFIXED
+-} yaffs_ECCResult;
++} yaffs_ecc_result;
+
+ typedef enum {
+ YAFFS_OBJECT_TYPE_UNKNOWN,
+@@ -166,64 +162,64 @@ typedef enum {
+ YAFFS_OBJECT_TYPE_DIRECTORY,
+ YAFFS_OBJECT_TYPE_HARDLINK,
+ YAFFS_OBJECT_TYPE_SPECIAL
+-} yaffs_ObjectType;
++} yaffs_obj_type;
+
+ #define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
+
+ typedef struct {
+
+- unsigned validMarker0;
+- unsigned chunkUsed; /* Status of the chunk: used or unused */
+- unsigned objectId; /* If 0 then this is not part of an object (unused) */
+- unsigned chunkId; /* If 0 then this is a header, else a data chunk */
+- unsigned byteCount; /* Only valid for data chunks */
++ unsigned validity1;
++ unsigned chunk_used; /* Status of the chunk: used or unused */
++ unsigned obj_id; /* If 0 then this is not part of an object (unused) */
++ unsigned chunk_id; /* If 0 then this is a header, else a data chunk */
++ unsigned n_bytes; /* Only valid for data chunks */
+
+ /* The following stuff only has meaning when we read */
+- yaffs_ECCResult eccResult;
+- unsigned blockBad;
++ yaffs_ecc_result ecc_result;
++ unsigned block_bad;
+
+ /* YAFFS 1 stuff */
+- unsigned chunkDeleted; /* The chunk is marked deleted */
+- unsigned serialNumber; /* Yaffs1 2-bit serial number */
++ unsigned is_deleted; /* The chunk is marked deleted */
++ unsigned serial_number; /* Yaffs1 2-bit serial number */
+
+ /* YAFFS2 stuff */
+- unsigned sequenceNumber; /* The sequence number of this block */
++ unsigned seq_number; /* The sequence number of this block */
+
+ /* Extra info if this is an object header (YAFFS2 only) */
+
+- unsigned extraHeaderInfoAvailable; /* There is extra info available if this is not zero */
+- unsigned extraParentObjectId; /* The parent object */
+- unsigned extraIsShrinkHeader; /* Is it a shrink header? */
+- unsigned extraShadows; /* Does this shadow another object? */
++ unsigned extra_available; /* There is extra info available if this is not zero */
++ unsigned extra_parent_id; /* The parent object */
++ unsigned extra_is_shrink; /* Is it a shrink header? */
++ unsigned extra_shadows; /* Does this shadow another object? */
+
+- yaffs_ObjectType extraObjectType; /* What object type? */
++ yaffs_obj_type extra_obj_type; /* What object type? */
+
+- unsigned extraFileLength; /* Length if it is a file */
+- unsigned extraEquivalentObjectId; /* Equivalent object Id if it is a hard link */
++ unsigned extra_length; /* Length if it is a file */
++ unsigned extra_equiv_id; /* Equivalent object Id if it is a hard link */
+
+- unsigned validMarker1;
++ unsigned validty1;
+
+-} yaffs_ExtendedTags;
++} yaffs_ext_tags;
+
+ /* Spare structure for YAFFS1 */
+ typedef struct {
+- __u8 tagByte0;
+- __u8 tagByte1;
+- __u8 tagByte2;
+- __u8 tagByte3;
+- __u8 pageStatus; /* set to 0 to delete the chunk */
+- __u8 blockStatus;
+- __u8 tagByte4;
+- __u8 tagByte5;
++ __u8 tb0;
++ __u8 tb1;
++ __u8 tb2;
++ __u8 tb3;
++ __u8 page_status; /* set to 0 to delete the chunk */
++ __u8 block_status;
++ __u8 tb4;
++ __u8 tb5;
+ __u8 ecc1[3];
+- __u8 tagByte6;
+- __u8 tagByte7;
++ __u8 tb6;
++ __u8 tb7;
+ __u8 ecc2[3];
+-} yaffs_Spare;
++} yaffs_spare;
+
+ /*Special structure for passing through to mtd */
+-struct yaffs_NANDSpare {
+- yaffs_Spare spare;
++struct yaffs_nand_spare {
++ yaffs_spare spare;
+ int eccres1;
+ int eccres2;
+ };
+@@ -234,6 +230,8 @@ typedef enum {
+ YAFFS_BLOCK_STATE_UNKNOWN = 0,
+
+ YAFFS_BLOCK_STATE_SCANNING,
++ /* Being scanned */
++
+ YAFFS_BLOCK_STATE_NEEDS_SCANNING,
+ /* The block might have something on it (ie it is allocating or full, perhaps empty)
+ * but it needs to be scanned to determine its true state.
+@@ -249,67 +247,69 @@ typedef enum {
+ /* This block is partially allocated.
+ * At least one page holds valid data.
+ * This is the one currently being used for page
+- * allocation. Should never be more than one of these
++ * allocation. Should never be more than one of these.
++ * If a block is only partially allocated at mount it is treated as full.
+ */
+
+ YAFFS_BLOCK_STATE_FULL,
+ /* All the pages in this block have been allocated.
++ * If a block was only partially allocated when mounted we treat
++ * it as fully allocated.
+ */
+
+ YAFFS_BLOCK_STATE_DIRTY,
+- /* All pages have been allocated and deleted.
++ /* The block was full and now all chunks have been deleted.
+ * Erase me, reuse me.
+ */
+
+ YAFFS_BLOCK_STATE_CHECKPOINT,
+- /* This block is assigned to holding checkpoint data.
+- */
++ /* This block is assigned to holding checkpoint data. */
+
+ YAFFS_BLOCK_STATE_COLLECTING,
+ /* This block is being garbage collected */
+
+ YAFFS_BLOCK_STATE_DEAD
+ /* This block has failed and is not in use */
+-} yaffs_BlockState;
++} yaffs_block_state_t;
+
+ #define YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
+
+
+ typedef struct {
+
+- int softDeletions:10; /* number of soft deleted pages */
+- int pagesInUse:10; /* number of pages in use */
+- unsigned blockState:4; /* One of the above block states. NB use unsigned because enum is sometimes an int */
+- __u32 needsRetiring:1; /* Data has failed on this block, need to get valid data off */
++ int soft_del_pages:10; /* number of soft deleted pages */
++ int pages_in_use:10; /* number of pages in use */
++ unsigned block_state:4; /* One of the above block states. NB use unsigned because enum is sometimes an int */
++ __u32 needs_retiring:1; /* Data has failed on this block, need to get valid data off */
+ /* and retire the block. */
+- __u32 skipErasedCheck:1; /* If this is set we can skip the erased check on this block */
+- __u32 gcPrioritise:1; /* An ECC check or blank check has failed on this block.
++ __u32 skip_erased_check:1; /* If this is set we can skip the erased check on this block */
++ __u32 gc_prioritise:1; /* An ECC check or blank check has failed on this block.
+ It should be prioritised for GC */
+- __u32 chunkErrorStrikes:3; /* How many times we've had ecc etc failures on this block and tried to reuse it */
++ __u32 chunk_error_strikes:3; /* How many times we've had ecc etc failures on this block and tried to reuse it */
+
+ #ifdef CONFIG_YAFFS_YAFFS2
+- __u32 hasShrinkHeader:1; /* This block has at least one shrink object header */
+- __u32 sequenceNumber; /* block sequence number for yaffs2 */
++ __u32 has_shrink_hdr:1; /* This block has at least one shrink object header */
++ __u32 seq_number; /* block sequence number for yaffs2 */
+ #endif
+
+-} yaffs_BlockInfo;
++} yaffs_block_info_t;
+
+ /* -------------------------- Object structure -------------------------------*/
+ /* This is the object structure as stored on NAND */
+
+ typedef struct {
+- yaffs_ObjectType type;
++ yaffs_obj_type type;
+
+ /* Apply to everything */
+- int parentObjectId;
+- __u16 sum__NoLongerUsed; /* checksum of name. No longer used */
++ int parent_obj_id;
++ __u16 sum_no_longer_used; /* checksum of name. No longer used */
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ /* The following apply to directories, files, symlinks - not hard links */
+ __u32 yst_mode; /* protection */
+
+ #ifdef CONFIG_YAFFS_WINCE
+- __u32 notForWinCE[5];
++ __u32 not_for_wince[5];
+ #else
+ __u32 yst_uid;
+ __u32 yst_gid;
+@@ -319,10 +319,10 @@ typedef struct {
+ #endif
+
+ /* File size applies to files only */
+- int fileSize;
++ int file_size;
+
+ /* Equivalent object id applies to hard links only. */
+- int equivalentObjectId;
++ int equiv_id;
+
+ /* Alias is for symlinks only. */
+ YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
+@@ -334,40 +334,29 @@ typedef struct {
+ __u32 win_atime[2];
+ __u32 win_mtime[2];
+ #else
+- __u32 roomToGrow[6];
++ __u32 room_to_grow[6];
+
+ #endif
+- __u32 inbandShadowsObject;
+- __u32 inbandIsShrink;
++ __u32 inband_shadowed_obj_id;
++ __u32 inband_is_shrink;
+
+- __u32 reservedSpace[2];
+- int shadowsObject; /* This object header shadows the specified object if > 0 */
++ __u32 reserved[2];
++ int shadows_obj; /* This object header shadows the specified object if > 0 */
+
+- /* isShrink applies to object headers written when we shrink the file (ie resize) */
+- __u32 isShrink;
++ /* is_shrink applies to object headers written when we shrink the file (ie resize) */
++ __u32 is_shrink;
+
+-} yaffs_ObjectHeader;
++} yaffs_obj_header;
+
+ /*--------------------------- Tnode -------------------------- */
+
+-union yaffs_Tnode_union {
+-#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
+- union yaffs_Tnode_union *internal[YAFFS_NTNODES_INTERNAL + 1];
+-#else
+- union yaffs_Tnode_union *internal[YAFFS_NTNODES_INTERNAL];
+-#endif
+-/* __u16 level0[YAFFS_NTNODES_LEVEL0]; */
++union yaffs_tnode_union {
++ union yaffs_tnode_union *internal[YAFFS_NTNODES_INTERNAL];
+
+ };
+
+-typedef union yaffs_Tnode_union yaffs_Tnode;
++typedef union yaffs_tnode_union yaffs_tnode_t;
+
+-struct yaffs_TnodeList_struct {
+- struct yaffs_TnodeList_struct *next;
+- yaffs_Tnode *tnodes;
+-};
+-
+-typedef struct yaffs_TnodeList_struct yaffs_TnodeList;
+
+ /*------------------------ Object -----------------------------*/
+ /* An object can be one of:
+@@ -378,82 +367,85 @@ typedef struct yaffs_TnodeList_struct ya
+ */
+
+ typedef struct {
+- __u32 fileSize;
+- __u32 scannedFileSize;
+- __u32 shrinkSize;
+- int topLevel;
+- yaffs_Tnode *top;
+-} yaffs_FileStructure;
++ __u32 file_size;
++ __u32 scanned_size;
++ __u32 shrink_size;
++ int top_level;
++ yaffs_tnode_t *top;
++} yaffs_file_s;
+
+ typedef struct {
+ struct ylist_head children; /* list of child links */
+-} yaffs_DirectoryStructure;
++ struct ylist_head dirty; /* Entry for list of dirty directories */
++} yaffs_dir_s;
+
+ typedef struct {
+ YCHAR *alias;
+-} yaffs_SymLinkStructure;
++} yaffs_symlink_t;
+
+ typedef struct {
+- struct yaffs_ObjectStruct *equivalentObject;
+- __u32 equivalentObjectId;
+-} yaffs_HardLinkStructure;
++ struct yaffs_obj_s *equiv_obj;
++ __u32 equiv_id;
++} yaffs_hard_link_s;
+
+ typedef union {
+- yaffs_FileStructure fileVariant;
+- yaffs_DirectoryStructure directoryVariant;
+- yaffs_SymLinkStructure symLinkVariant;
+- yaffs_HardLinkStructure hardLinkVariant;
+-} yaffs_ObjectVariant;
++ yaffs_file_s file_variant;
++ yaffs_dir_s dir_variant;
++ yaffs_symlink_t symlink_variant;
++ yaffs_hard_link_s hardlink_variant;
++} yaffs_obj_variant;
++
++
+
+-struct yaffs_ObjectStruct {
++struct yaffs_obj_s {
+ __u8 deleted:1; /* This should only apply to unlinked files. */
+- __u8 softDeleted:1; /* it has also been soft deleted */
++ __u8 soft_del:1; /* it has also been soft deleted */
+ __u8 unlinked:1; /* An unlinked file. The file should be in the unlinked directory.*/
+ __u8 fake:1; /* A fake object has no presence on NAND. */
+- __u8 renameAllowed:1; /* Some objects are not allowed to be renamed. */
+- __u8 unlinkAllowed:1;
++ __u8 rename_allowed:1; /* Some objects are not allowed to be renamed. */
++ __u8 unlink_allowed:1;
+ __u8 dirty:1; /* the object needs to be written to flash */
+ __u8 valid:1; /* When the file system is being loaded up, this
+ * object might be created before the data
+ * is available (ie. file data records appear before the header).
+ */
+- __u8 lazyLoaded:1; /* This object has been lazy loaded and is missing some detail */
++ __u8 lazy_loaded:1; /* This object has been lazy loaded and is missing some detail */
+
+- __u8 deferedFree:1; /* For Linux kernel. Object is removed from NAND, but is
++ __u8 defered_free:1; /* For Linux kernel. Object is removed from NAND, but is
+ * still in the inode cache. Free of object is defered.
+ * until the inode is released.
+ */
+- __u8 beingCreated:1; /* This object is still being created so skip some checks. */
++ __u8 being_created:1; /* This object is still being created so skip some checks. */
++ __u8 is_shadowed:1; /* This object is shadowed on the way to being renamed. */
++
++	__u8 xattr_known:1;	/* We know if this object has xattribs or not. */
++ __u8 has_xattr:1; /* This object has xattribs. Valid if xattr_known. */
+
+ __u8 serial; /* serial number of chunk in NAND. Cached here */
+ __u16 sum; /* sum of the name to speed searching */
+
+- struct yaffs_DeviceStruct *myDev; /* The device I'm on */
++ struct yaffs_dev_s *my_dev; /* The device I'm on */
+
+- struct ylist_head hashLink; /* list of objects in this hash bucket */
++ struct ylist_head hash_link; /* list of objects in this hash bucket */
+
+- struct ylist_head hardLinks; /* all the equivalent hard linked objects */
++ struct ylist_head hard_links; /* all the equivalent hard linked objects */
+
+ /* directory structure stuff */
+ /* also used for linking up the free list */
+- struct yaffs_ObjectStruct *parent;
++ struct yaffs_obj_s *parent;
+ struct ylist_head siblings;
+
+ /* Where's my object header in NAND? */
+- int hdrChunk;
++ int hdr_chunk;
+
+- int nDataChunks; /* Number of data chunks attached to the file. */
++ int n_data_chunks; /* Number of data chunks attached to the file. */
+
+- __u32 objectId; /* the object id value */
++ __u32 obj_id; /* the object id value */
+
+ __u32 yst_mode;
+
+ #ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+- YCHAR shortName[YAFFS_SHORT_NAME_LENGTH + 1];
+-#endif
+-
+-#ifndef __KERNEL__
+- __u32 inUse;
++ YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1];
+ #endif
+
+ #ifdef CONFIG_YAFFS_WINCE
+@@ -470,53 +462,43 @@ struct yaffs_ObjectStruct {
+
+ __u32 yst_rdev;
+
+-#ifdef __KERNEL__
+- struct inode *myInode;
++ void *my_inode;
+
+-#endif
++ yaffs_obj_type variant_type;
+
+- yaffs_ObjectType variantType;
++ yaffs_obj_variant variant;
+
+- yaffs_ObjectVariant variant;
+-
+-};
+-
+-typedef struct yaffs_ObjectStruct yaffs_Object;
+-
+-struct yaffs_ObjectList_struct {
+- yaffs_Object *objects;
+- struct yaffs_ObjectList_struct *next;
+ };
+
+-typedef struct yaffs_ObjectList_struct yaffs_ObjectList;
++typedef struct yaffs_obj_s yaffs_obj_t;
+
+ typedef struct {
+ struct ylist_head list;
+ int count;
+-} yaffs_ObjectBucket;
++} yaffs_obj_bucket;
+
+
+-/* yaffs_CheckpointObject holds the definition of an object as dumped
++/* yaffs_checkpt_obj_t holds the definition of an object as dumped
+ * by checkpointing.
+ */
+
+ typedef struct {
+- int structType;
+- __u32 objectId;
+- __u32 parentId;
+- int hdrChunk;
+- yaffs_ObjectType variantType:3;
++ int struct_type;
++ __u32 obj_id;
++ __u32 parent_id;
++ int hdr_chunk;
++ yaffs_obj_type variant_type:3;
+ __u8 deleted:1;
+- __u8 softDeleted:1;
++ __u8 soft_del:1;
+ __u8 unlinked:1;
+ __u8 fake:1;
+- __u8 renameAllowed:1;
+- __u8 unlinkAllowed:1;
++ __u8 rename_allowed:1;
++ __u8 unlink_allowed:1;
+ __u8 serial;
+
+- int nDataChunks;
+- __u32 fileSizeOrEquivalentObjectId;
+-} yaffs_CheckpointObject;
++ int n_data_chunks;
++ __u32 size_or_equiv_obj;
++} yaffs_checkpt_obj_t;
+
+ /*--------------------- Temporary buffers ----------------
+ *
+@@ -526,379 +508,462 @@ typedef struct {
+ typedef struct {
+ __u8 *buffer;
+ int line; /* track from whence this buffer was allocated */
+- int maxLine;
+-} yaffs_TempBuffer;
++ int max_line;
++} yaffs_buffer_t;
+
+ /*----------------- Device ---------------------------------*/
+
+-struct yaffs_DeviceStruct {
+- struct ylist_head devList;
+- const char *name;
+-
+- /* Entry parameters set up way early. Yaffs sets up the rest.*/
+- int nDataBytesPerChunk; /* Should be a power of 2 >= 512 */
+- int nChunksPerBlock; /* does not need to be a power of 2 */
+- int spareBytesPerChunk; /* spare area size */
+- int startBlock; /* Start block we're allowed to use */
+- int endBlock; /* End block we're allowed to use */
+- int nReservedBlocks; /* We want this tuneable so that we can reduce */
+- /* reserved blocks on NOR and RAM. */
+-
+
+- /* Stuff used by the shared space checkpointing mechanism */
+- /* If this value is zero, then this mechanism is disabled */
++struct yaffs_param_s {
++ const YCHAR *name;
+
+-/* int nCheckpointReservedBlocks; */ /* Blocks to reserve for checkpoint data */
++ /*
++ * Entry parameters set up way early. Yaffs sets up the rest.
++ * The structure should be zeroed out before use so that unused
++ * and default values are zero.
++ */
++
++ int inband_tags; /* Use inband tags */
++ __u32 total_bytes_per_chunk; /* Should be >= 512, does not need to be a power of 2 */
++ int chunks_per_block; /* does not need to be a power of 2 */
++ int spare_bytes_per_chunk; /* spare area size */
++ int start_block; /* Start block we're allowed to use */
++ int end_block; /* End block we're allowed to use */
++ int n_reserved_blocks; /* We want this tuneable so that we can reduce */
++ /* reserved blocks on NOR and RAM. */
+
+
+- int nShortOpCaches; /* If <= 0, then short op caching is disabled, else
+- * the number of short op caches (don't use too many)
++ int n_caches; /* If <= 0, then short op caching is disabled, else
++ * the number of short op caches (don't use too many).
++ * 10 to 20 is a good bet.
+ */
++ int use_nand_ecc; /* Flag to decide whether or not to use NANDECC on data (yaffs1) */
++ int no_tags_ecc; /* Flag to decide whether or not to do ECC on packed tags (yaffs2) */
+
+- int useHeaderFileSize; /* Flag to determine if we should use file sizes from the header */
++ int is_yaffs2; /* Use yaffs2 mode on this device */
+
+- int useNANDECC; /* Flag to decide whether or not to use NANDECC */
++ int empty_lost_n_found; /* Auto-empty lost+found directory on mount */
+
+- void *genericDevice; /* Pointer to device context
+- * On an mtd this holds the mtd pointer.
+- */
+- void *superBlock;
++ int refresh_period; /* How often we should check to do a block refresh */
++
++ /* Checkpoint control. Can be set before or after initialisation */
++ __u8 skip_checkpt_rd;
++ __u8 skip_checkpt_wr;
++
++ int enable_xattr; /* Enable xattribs */
+
+ /* NAND access functions (Must be set before calling YAFFS)*/
+
+- int (*writeChunkToNAND) (struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, const __u8 *data,
+- const yaffs_Spare *spare);
+- int (*readChunkFromNAND) (struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, __u8 *data,
+- yaffs_Spare *spare);
+- int (*eraseBlockInNAND) (struct yaffs_DeviceStruct *dev,
+- int blockInNAND);
+- int (*initialiseNAND) (struct yaffs_DeviceStruct *dev);
+- int (*deinitialiseNAND) (struct yaffs_DeviceStruct *dev);
++ int (*write_chunk_fn) (struct yaffs_dev_s *dev,
++ int nand_chunk, const __u8 *data,
++ const yaffs_spare *spare);
++ int (*read_chunk_fn) (struct yaffs_dev_s *dev,
++ int nand_chunk, __u8 *data,
++ yaffs_spare *spare);
++ int (*erase_fn) (struct yaffs_dev_s *dev,
++ int flash_block);
++ int (*initialise_flash_fn) (struct yaffs_dev_s *dev);
++ int (*deinitialise_flash_fn) (struct yaffs_dev_s *dev);
+
+ #ifdef CONFIG_YAFFS_YAFFS2
+- int (*writeChunkWithTagsToNAND) (struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, const __u8 *data,
+- const yaffs_ExtendedTags *tags);
+- int (*readChunkWithTagsFromNAND) (struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, __u8 *data,
+- yaffs_ExtendedTags *tags);
+- int (*markNANDBlockBad) (struct yaffs_DeviceStruct *dev, int blockNo);
+- int (*queryNANDBlock) (struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState *state, __u32 *sequenceNumber);
+-#endif
+-
+- int isYaffs2;
+-
+- /* The removeObjectCallback function must be supplied by OS flavours that
+- * need it. The Linux kernel does not use this, but yaffs direct does use
+- * it to implement the faster readdir
++ int (*write_chunk_tags_fn) (struct yaffs_dev_s *dev,
++ int nand_chunk, const __u8 *data,
++ const yaffs_ext_tags *tags);
++ int (*read_chunk_tags_fn) (struct yaffs_dev_s *dev,
++ int nand_chunk, __u8 *data,
++ yaffs_ext_tags *tags);
++ int (*bad_block_fn) (struct yaffs_dev_s *dev, int block_no);
++ int (*query_block_fn) (struct yaffs_dev_s *dev, int block_no,
++ yaffs_block_state_t *state, __u32 *seq_number);
++#endif
++
++ /* The remove_obj_fn function must be supplied by OS flavours that
++ * need it.
++ * yaffs direct uses it to implement the faster readdir.
++ * Linux uses it to protect the directory during unlocking.
+ */
+- void (*removeObjectCallback)(struct yaffs_ObjectStruct *obj);
++ void (*remove_obj_fn)(struct yaffs_obj_s *obj);
+
+- /* Callback to mark the superblock dirsty */
+- void (*markSuperBlockDirty)(void *superblock);
++ /* Callback to mark the superblock dirty */
++ void (*sb_dirty_fn)(struct yaffs_dev_s *dev);
++
++ /* Callback to control garbage collection. */
++ unsigned (*gc_control)(struct yaffs_dev_s *dev);
++
++ /* Debug control flags. Don't use unless you know what you're doing */
++ int use_header_file_size; /* Flag to determine if we should use file sizes from the header */
++ int disable_lazy_load; /* Disable lazy loading on this device */
++ int wide_tnodes_disabled; /* Set to disable wide tnodes */
++ int disable_soft_del; /* yaffs 1 only: Set to disable the use of soft deletion. */
++
++ int defered_dir_update; /* Set to defer directory updates */
+
+- int wideTnodesDisabled; /* Set to disable wide tnodes */
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++ int auto_unicode;
++#endif
++ int always_check_erased; /* Force chunk erased check always on */
++};
+
+- YCHAR *pathDividers; /* String of legal path dividers */
++typedef struct yaffs_param_s yaffs_param_t;
+
++struct yaffs_dev_s {
++ struct yaffs_param_s param;
+
+- /* End of stuff that must be set before initialisation. */
++ /* Context storage. Holds extra OS specific data for this device */
+
+- /* Checkpoint control. Can be set before or after initialisation */
+- __u8 skipCheckpointRead;
+- __u8 skipCheckpointWrite;
++ void *os_context;
++ void *driver_context;
++
++ struct ylist_head dev_list;
+
+ /* Runtime parameters. Set up by YAFFS. */
++ int data_bytes_per_chunk;
+
+- __u16 chunkGroupBits; /* 0 for devices <= 32MB. else log2(nchunks) - 16 */
+- __u16 chunkGroupSize; /* == 2^^chunkGroupBits */
++ /* Non-wide tnode stuff */
++ __u16 chunk_grp_bits; /* Number of bits that need to be resolved if
++ * the tnodes are not wide enough.
++ */
++ __u16 chunk_grp_size; /* == 2^^chunk_grp_bits */
+
+ /* Stuff to support wide tnodes */
+- __u32 tnodeWidth;
+- __u32 tnodeMask;
++ __u32 tnode_width;
++ __u32 tnode_mask;
++ __u32 tnode_size;
+
+ /* Stuff for figuring out file offset to chunk conversions */
+- __u32 chunkShift; /* Shift value */
+- __u32 chunkDiv; /* Divisor after shifting: 1 for power-of-2 sizes */
+- __u32 chunkMask; /* Mask to use for power-of-2 case */
+-
+- /* Stuff to handle inband tags */
+- int inbandTags;
+- __u32 totalBytesPerChunk;
+-
+-#ifdef __KERNEL__
+-
+- struct semaphore sem; /* Semaphore for waiting on erasure.*/
+- struct semaphore grossLock; /* Gross locking semaphore */
+- __u8 *spareBuffer; /* For mtdif2 use. Don't know the size of the buffer
+- * at compile time so we have to allocate it.
+- */
+- void (*putSuperFunc) (struct super_block *sb);
+-#endif
++ __u32 chunk_shift; /* Shift value */
++ __u32 chunk_div; /* Divisor after shifting: 1 for power-of-2 sizes */
++ __u32 chunk_mask; /* Mask to use for power-of-2 case */
+
+- int isMounted;
+
+- int isCheckpointed;
++
++ int is_mounted;
++ int read_only;
++ int is_checkpointed;
+
+
+ /* Stuff to support block offsetting to support start block zero */
+- int internalStartBlock;
+- int internalEndBlock;
+- int blockOffset;
+- int chunkOffset;
++ int internal_start_block;
++ int internal_end_block;
++ int block_offset;
++ int chunk_offset;
+
+
+ /* Runtime checkpointing stuff */
+- int checkpointPageSequence; /* running sequence number of checkpoint pages */
+- int checkpointByteCount;
+- int checkpointByteOffset;
+- __u8 *checkpointBuffer;
+- int checkpointOpenForWrite;
+- int blocksInCheckpoint;
+- int checkpointCurrentChunk;
+- int checkpointCurrentBlock;
+- int checkpointNextBlock;
+- int *checkpointBlockList;
+- int checkpointMaxBlocks;
+- __u32 checkpointSum;
+- __u32 checkpointXor;
++ int checkpt_page_seq; /* running sequence number of checkpoint pages */
++ int checkpt_byte_count;
++ int checkpt_byte_offs;
++ __u8 *checkpt_buffer;
++ int checkpt_open_write;
++ int blocks_in_checkpt;
++ int checkpt_cur_chunk;
++ int checkpt_cur_block;
++ int checkpt_next_block;
++ int *checkpt_block_list;
++ int checkpt_max_blocks;
++ __u32 checkpt_sum;
++ __u32 checkpt_xor;
+
+- int nCheckpointBlocksRequired; /* Number of blocks needed to store current checkpoint set */
++ int checkpoint_blocks_required; /* Number of blocks needed to store current checkpoint set */
+
+ /* Block Info */
+- yaffs_BlockInfo *blockInfo;
+- __u8 *chunkBits; /* bitmap of chunks in use */
+- unsigned blockInfoAlt:1; /* was allocated using alternative strategy */
+- unsigned chunkBitsAlt:1; /* was allocated using alternative strategy */
+- int chunkBitmapStride; /* Number of bytes of chunkBits per block.
+- * Must be consistent with nChunksPerBlock.
++ yaffs_block_info_t *block_info;
++ __u8 *chunk_bits; /* bitmap of chunks in use */
++ unsigned block_info_alt:1; /* was allocated using alternative strategy */
++ unsigned chunk_bits_alt:1; /* was allocated using alternative strategy */
++ int chunk_bit_stride; /* Number of bytes of chunk_bits per block.
++ * Must be consistent with chunks_per_block.
+ */
+
+- int nErasedBlocks;
+- int allocationBlock; /* Current block being allocated off */
+- __u32 allocationPage;
+- int allocationBlockFinder; /* Used to search for next allocation block */
+-
+- /* Runtime state */
+- int nTnodesCreated;
+- yaffs_Tnode *freeTnodes;
+- int nFreeTnodes;
+- yaffs_TnodeList *allocatedTnodeList;
+-
+- int isDoingGC;
+- int gcBlock;
+- int gcChunk;
+-
+- int nObjectsCreated;
+- yaffs_Object *freeObjects;
+- int nFreeObjects;
+-
+- int nHardLinks;
+-
+- yaffs_ObjectList *allocatedObjectList;
+-
+- yaffs_ObjectBucket objectBucket[YAFFS_NOBJECT_BUCKETS];
+-
+- int nFreeChunks;
+-
+- int currentDirtyChecker; /* Used to find current dirtiest block */
+-
+- __u32 *gcCleanupList; /* objects to delete at the end of a GC. */
+- int nonAggressiveSkip; /* GC state/mode */
+-
+- /* Statistcs */
+- int nPageWrites;
+- int nPageReads;
+- int nBlockErasures;
+- int nErasureFailures;
+- int nGCCopies;
+- int garbageCollections;
+- int passiveGarbageCollections;
+- int nRetriedWrites;
+- int nRetiredBlocks;
+- int eccFixed;
+- int eccUnfixed;
+- int tagsEccFixed;
+- int tagsEccUnfixed;
+- int nDeletions;
+- int nUnmarkedDeletions;
+-
+- int hasPendingPrioritisedGCs; /* We think this device might have pending prioritised gcs */
++ int n_erased_blocks;
++ int alloc_block; /* Current block being allocated off */
++ __u32 alloc_page;
++ int alloc_block_finder; /* Used to search for next allocation block */
++
++ /* Object and Tnode memory management */
++ void *allocator;
++ int n_obj;
++ int n_tnodes;
++
++ int n_hardlinks;
++
++ yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS];
++ __u32 bucket_finder;
++
++ int n_free_chunks;
++
++ /* Garbage collection control */
++ __u32 *gc_cleanup_list; /* objects to delete at the end of a GC. */
++ __u32 n_clean_ups;
++
++ unsigned has_pending_prioritised_gc; /* We think this device might have pending prioritised gcs */
++ unsigned gc_disable;
++ unsigned gc_block_finder;
++ unsigned gc_dirtiest;
++ unsigned gc_pages_in_use;
++ unsigned gc_not_done;
++ unsigned gc_block;
++ unsigned gc_chunk;
++ unsigned gc_skip;
+
+ /* Special directories */
+- yaffs_Object *rootDir;
+- yaffs_Object *lostNFoundDir;
++ yaffs_obj_t *root_dir;
++ yaffs_obj_t *lost_n_found;
+
+ /* Buffer areas for storing data to recover from write failures TODO
+- * __u8 bufferedData[YAFFS_CHUNKS_PER_BLOCK][YAFFS_BYTES_PER_CHUNK];
+- * yaffs_Spare bufferedSpare[YAFFS_CHUNKS_PER_BLOCK];
++ * __u8 buffered_data[YAFFS_CHUNKS_PER_BLOCK][YAFFS_BYTES_PER_CHUNK];
++ * yaffs_spare buffered_spare[YAFFS_CHUNKS_PER_BLOCK];
+ */
+
+- int bufferedBlock; /* Which block is buffered here? */
+- int doingBufferedBlockRewrite;
+-
+- yaffs_ChunkCache *srCache;
+- int srLastUse;
++ int buffered_block; /* Which block is buffered here? */
++ int doing_buffered_block_rewrite;
+
+- int cacheHits;
++ yaffs_cache_t *cache;
++ int cache_last_use;
+
+ /* Stuff for background deletion and unlinked files.*/
+- yaffs_Object *unlinkedDir; /* Directory where unlinked and deleted files live. */
+- yaffs_Object *deletedDir; /* Directory where deleted objects are sent to disappear. */
+- yaffs_Object *unlinkedDeletion; /* Current file being background deleted.*/
+- int nDeletedFiles; /* Count of files awaiting deletion;*/
+- int nUnlinkedFiles; /* Count of unlinked files. */
+- int nBackgroundDeletions; /* Count of background deletions. */
+-
++ yaffs_obj_t *unlinked_dir; /* Directory where unlinked and deleted files live. */
++ yaffs_obj_t *del_dir; /* Directory where deleted objects are sent to disappear. */
++ yaffs_obj_t *unlinked_deletion; /* Current file being background deleted.*/
++ int n_deleted_files; /* Count of files awaiting deletion;*/
++ int n_unlinked_files; /* Count of unlinked files. */
++ int n_bg_deletions; /* Count of background deletions. */
+
+ /* Temporary buffer management */
+- yaffs_TempBuffer tempBuffer[YAFFS_N_TEMP_BUFFERS];
+- int maxTemp;
+- int tempInUse;
+- int unmanagedTempAllocations;
+- int unmanagedTempDeallocations;
++ yaffs_buffer_t temp_buffer[YAFFS_N_TEMP_BUFFERS];
++ int max_temp;
++ int temp_in_use;
++ int unmanaged_buffer_allocs;
++ int unmanaged_buffer_deallocs;
+
+ /* yaffs2 runtime stuff */
+- unsigned sequenceNumber; /* Sequence number of currently allocating block */
+- unsigned oldestDirtySequence;
++ unsigned seq_number; /* Sequence number of currently allocating block */
++ unsigned oldest_dirty_seq;
++ unsigned oldest_dirty_block;
++
++ /* Block refreshing */
++ int refresh_skip; /* A skip down counter. Refresh happens when this gets to zero. */
++
++ /* Dirty directory handling */
++ struct ylist_head dirty_dirs; /* List of dirty directories */
++
++
++ /* Statistics */
++ __u32 n_page_writes;
++ __u32 n_page_reads;
++ __u32 n_erasures;
++ __u32 n_erase_failures;
++ __u32 n_gc_copies;
++ __u32 all_gcs;
++ __u32 passive_gc_count;
++ __u32 oldest_dirty_gc_count;
++ __u32 n_gc_blocks;
++ __u32 bg_gcs;
++ __u32 n_retired_writes;
++ __u32 n_retired_blocks;
++ __u32 n_ecc_fixed;
++ __u32 n_ecc_unfixed;
++ __u32 n_tags_ecc_fixed;
++ __u32 n_tags_ecc_unfixed;
++ __u32 n_deletions;
++ __u32 n_unmarked_deletions;
++ __u32 refresh_count;
++ __u32 cache_hits;
+
+ };
+
+-typedef struct yaffs_DeviceStruct yaffs_Device;
++typedef struct yaffs_dev_s yaffs_dev_t;
+
+ /* The static layout of block usage etc is stored in the super block header */
+ typedef struct {
+ int StructType;
+ int version;
+- int checkpointStartBlock;
+- int checkpointEndBlock;
+- int startBlock;
+- int endBlock;
++ int checkpt_start_block;
++ int checkpt_end_block;
++ int start_block;
++ int end_block;
+ int rfu[100];
+-} yaffs_SuperBlockHeader;
++} yaffs_sb_header;
+
+ /* The CheckpointDevice structure holds the device information that changes at runtime and
+ * must be preserved over unmount/mount cycles.
+ */
+ typedef struct {
+- int structType;
+- int nErasedBlocks;
+- int allocationBlock; /* Current block being allocated off */
+- __u32 allocationPage;
+- int nFreeChunks;
+-
+- int nDeletedFiles; /* Count of files awaiting deletion;*/
+- int nUnlinkedFiles; /* Count of unlinked files. */
+- int nBackgroundDeletions; /* Count of background deletions. */
++ int struct_type;
++ int n_erased_blocks;
++ int alloc_block; /* Current block being allocated off */
++ __u32 alloc_page;
++ int n_free_chunks;
++
++ int n_deleted_files; /* Count of files awaiting deletion;*/
++ int n_unlinked_files; /* Count of unlinked files. */
++ int n_bg_deletions; /* Count of background deletions. */
+
+ /* yaffs2 runtime stuff */
+- unsigned sequenceNumber; /* Sequence number of currently allocating block */
+- unsigned oldestDirtySequence;
++ unsigned seq_number; /* Sequence number of currently allocating block */
+
+-} yaffs_CheckpointDevice;
++} yaffs_checkpt_dev_t;
+
+
+ typedef struct {
+- int structType;
++ int struct_type;
+ __u32 magic;
+ __u32 version;
+ __u32 head;
+-} yaffs_CheckpointValidity;
++} yaffs_checkpt_validty_t;
++
++
++struct yaffs_shadow_fixer_s {
++ int obj_id;
++ int shadowed_id;
++ struct yaffs_shadow_fixer_s *next;
++};
++
++/* Structure for doing xattr modifications */
++typedef struct {
++ int set; /* If 0 then this is a deletion */
++ const YCHAR *name;
++ const void *data;
++ int size;
++ int flags;
++ int result;
++} yaffs_xattr_mod;
+
+
+ /*----------------------- YAFFS Functions -----------------------*/
+
+-int yaffs_GutsInitialise(yaffs_Device *dev);
+-void yaffs_Deinitialise(yaffs_Device *dev);
++int yaffs_guts_initialise(yaffs_dev_t *dev);
++void yaffs_deinitialise(yaffs_dev_t *dev);
+
+-int yaffs_GetNumberOfFreeChunks(yaffs_Device *dev);
++int yaffs_get_n_free_chunks(yaffs_dev_t *dev);
+
+-int yaffs_RenameObject(yaffs_Object *oldDir, const YCHAR *oldName,
+- yaffs_Object *newDir, const YCHAR *newName);
++int yaffs_rename_obj(yaffs_obj_t *old_dir, const YCHAR *old_name,
++ yaffs_obj_t *new_dir, const YCHAR *new_name);
+
+-int yaffs_Unlink(yaffs_Object *dir, const YCHAR *name);
+-int yaffs_DeleteObject(yaffs_Object *obj);
++int yaffs_unlinker(yaffs_obj_t *dir, const YCHAR *name);
++int yaffs_del_obj(yaffs_obj_t *obj);
+
+-int yaffs_GetObjectName(yaffs_Object *obj, YCHAR *name, int buffSize);
+-int yaffs_GetObjectFileLength(yaffs_Object *obj);
+-int yaffs_GetObjectInode(yaffs_Object *obj);
+-unsigned yaffs_GetObjectType(yaffs_Object *obj);
+-int yaffs_GetObjectLinkCount(yaffs_Object *obj);
++int yaffs_get_obj_name(yaffs_obj_t *obj, YCHAR *name, int buffer_size);
++int yaffs_get_obj_length(yaffs_obj_t *obj);
++int yaffs_get_obj_inode(yaffs_obj_t *obj);
++unsigned yaffs_get_obj_type(yaffs_obj_t *obj);
++int yaffs_get_obj_link_count(yaffs_obj_t *obj);
+
+-int yaffs_SetAttributes(yaffs_Object *obj, struct iattr *attr);
+-int yaffs_GetAttributes(yaffs_Object *obj, struct iattr *attr);
++int yaffs_set_attribs(yaffs_obj_t *obj, struct iattr *attr);
++int yaffs_get_attribs(yaffs_obj_t *obj, struct iattr *attr);
+
+ /* File operations */
+-int yaffs_ReadDataFromFile(yaffs_Object *obj, __u8 *buffer, loff_t offset,
+- int nBytes);
+-int yaffs_WriteDataToFile(yaffs_Object *obj, const __u8 *buffer, loff_t offset,
+- int nBytes, int writeThrough);
+-int yaffs_ResizeFile(yaffs_Object *obj, loff_t newSize);
++int yaffs_file_rd(yaffs_obj_t *obj, __u8 *buffer, loff_t offset,
++ int n_bytes);
++int yaffs_wr_file(yaffs_obj_t *obj, const __u8 *buffer, loff_t offset,
++ int n_bytes, int write_trhrough);
++int yaffs_resize_file(yaffs_obj_t *obj, loff_t new_size);
+
+-yaffs_Object *yaffs_MknodFile(yaffs_Object *parent, const YCHAR *name,
++yaffs_obj_t *yaffs_create_file(yaffs_obj_t *parent, const YCHAR *name,
+ __u32 mode, __u32 uid, __u32 gid);
+-int yaffs_FlushFile(yaffs_Object *obj, int updateTime);
++
++int yaffs_flush_file(yaffs_obj_t *obj, int update_time, int data_sync);
+
+ /* Flushing and checkpointing */
+-void yaffs_FlushEntireDeviceCache(yaffs_Device *dev);
++void yaffs_flush_whole_cache(yaffs_dev_t *dev);
+
+-int yaffs_CheckpointSave(yaffs_Device *dev);
+-int yaffs_CheckpointRestore(yaffs_Device *dev);
++int yaffs_checkpoint_save(yaffs_dev_t *dev);
++int yaffs_checkpoint_restore(yaffs_dev_t *dev);
+
+ /* Directory operations */
+-yaffs_Object *yaffs_MknodDirectory(yaffs_Object *parent, const YCHAR *name,
++yaffs_obj_t *yaffs_create_dir(yaffs_obj_t *parent, const YCHAR *name,
+ __u32 mode, __u32 uid, __u32 gid);
+-yaffs_Object *yaffs_FindObjectByName(yaffs_Object *theDir, const YCHAR *name);
+-int yaffs_ApplyToDirectoryChildren(yaffs_Object *theDir,
+- int (*fn) (yaffs_Object *));
++yaffs_obj_t *yaffs_find_by_name(yaffs_obj_t *the_dir, const YCHAR *name);
++int yaffs_ApplyToDirectoryChildren(yaffs_obj_t *the_dir,
++ int (*fn) (yaffs_obj_t *));
+
+-yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device *dev, __u32 number);
++yaffs_obj_t *yaffs_find_by_number(yaffs_dev_t *dev, __u32 number);
+
+ /* Link operations */
+-yaffs_Object *yaffs_Link(yaffs_Object *parent, const YCHAR *name,
+- yaffs_Object *equivalentObject);
++yaffs_obj_t *yaffs_link_obj(yaffs_obj_t *parent, const YCHAR *name,
++ yaffs_obj_t *equiv_obj);
+
+-yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object *obj);
++yaffs_obj_t *yaffs_get_equivalent_obj(yaffs_obj_t *obj);
+
+ /* Symlink operations */
+-yaffs_Object *yaffs_MknodSymLink(yaffs_Object *parent, const YCHAR *name,
++yaffs_obj_t *yaffs_create_symlink(yaffs_obj_t *parent, const YCHAR *name,
+ __u32 mode, __u32 uid, __u32 gid,
+ const YCHAR *alias);
+-YCHAR *yaffs_GetSymlinkAlias(yaffs_Object *obj);
++YCHAR *yaffs_get_symlink_alias(yaffs_obj_t *obj);
+
+ /* Special inodes (fifos, sockets and devices) */
+-yaffs_Object *yaffs_MknodSpecial(yaffs_Object *parent, const YCHAR *name,
++yaffs_obj_t *yaffs_create_special(yaffs_obj_t *parent, const YCHAR *name,
+ __u32 mode, __u32 uid, __u32 gid, __u32 rdev);
+
++
++int yaffs_set_xattrib(yaffs_obj_t *obj, const YCHAR *name, const void * value, int size, int flags);
++int yaffs_get_xattrib(yaffs_obj_t *obj, const YCHAR *name, void *value, int size);
++int yaffs_list_xattrib(yaffs_obj_t *obj, char *buffer, int size);
++int yaffs_remove_xattrib(yaffs_obj_t *obj, const YCHAR *name);
++
+ /* Special directories */
+-yaffs_Object *yaffs_Root(yaffs_Device *dev);
+-yaffs_Object *yaffs_LostNFound(yaffs_Device *dev);
++yaffs_obj_t *yaffs_root(yaffs_dev_t *dev);
++yaffs_obj_t *yaffs_lost_n_found(yaffs_dev_t *dev);
+
+ #ifdef CONFIG_YAFFS_WINCE
+ /* CONFIG_YAFFS_WINCE special stuff */
+-void yfsd_WinFileTimeNow(__u32 target[2]);
++void yfsd_win_file_time_now(__u32 target[2]);
+ #endif
+
+-#ifdef __KERNEL__
++void yaffs_handle_defered_free(yaffs_obj_t *obj);
+
+-void yaffs_HandleDeferedFree(yaffs_Object *obj);
+-#endif
++void yaffs_update_dirty_dirs(yaffs_dev_t *dev);
++
++int yaffs_bg_gc(yaffs_dev_t *dev, unsigned urgency);
+
+ /* Debug dump */
+-int yaffs_DumpObject(yaffs_Object *obj);
++int yaffs_dump_obj(yaffs_obj_t *obj);
+
+-void yaffs_GutsTest(yaffs_Device *dev);
++void yaffs_guts_test(yaffs_dev_t *dev);
+
+-/* A few useful functions */
+-void yaffs_InitialiseTags(yaffs_ExtendedTags *tags);
+-void yaffs_DeleteChunk(yaffs_Device *dev, int chunkId, int markNAND, int lyn);
+-int yaffs_CheckFF(__u8 *buffer, int nBytes);
+-void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi);
++/* A few useful functions to be used within the core files*/
++void yaffs_chunk_del(yaffs_dev_t *dev, int chunk_id, int mark_flash, int lyn);
++int yaffs_check_ff(__u8 *buffer, int n_bytes);
++void yaffs_handle_chunk_error(yaffs_dev_t *dev, yaffs_block_info_t *bi);
++
++__u8 *yaffs_get_temp_buffer(yaffs_dev_t *dev, int line_no);
++void yaffs_release_temp_buffer(yaffs_dev_t *dev, __u8 *buffer, int line_no);
++
++yaffs_obj_t *yaffs_find_or_create_by_number(yaffs_dev_t *dev,
++ int number,
++ yaffs_obj_type type);
++int yaffs_put_chunk_in_file(yaffs_obj_t *in, int inode_chunk,
++ int nand_chunk, int in_scan);
++void yaffs_set_obj_name(yaffs_obj_t *obj, const YCHAR *name);
++void yaffs_set_obj_name_from_oh(yaffs_obj_t *obj, const yaffs_obj_header *oh);
++void yaffs_add_obj_to_dir(yaffs_obj_t *directory,
++ yaffs_obj_t *obj);
++YCHAR *yaffs_clone_str(const YCHAR *str);
++void yaffs_link_fixup(yaffs_dev_t *dev, yaffs_obj_t *hard_list);
++void yaffs_block_became_dirty(yaffs_dev_t *dev, int block_no);
++int yaffs_update_oh(yaffs_obj_t *in, const YCHAR *name,
++ int force, int is_shrink, int shadows,
++ yaffs_xattr_mod *xop);
++void yaffs_handle_shadowed_obj(yaffs_dev_t *dev, int obj_id,
++ int backward_scanning);
++int yaffs_check_alloc_available(yaffs_dev_t *dev, int n_chunks);
++yaffs_tnode_t *yaffs_get_tnode(yaffs_dev_t *dev);
++yaffs_tnode_t *yaffs_add_find_tnode_0(yaffs_dev_t *dev,
++ yaffs_file_s *file_struct,
++ __u32 chunk_id,
++ yaffs_tnode_t *passed_tn);
++
++int yaffs_do_file_wr(yaffs_obj_t *in, const __u8 *buffer, loff_t offset,
++ int n_bytes, int write_trhrough);
++void yaffs_resize_file_down(yaffs_obj_t *obj, loff_t new_size);
++void yaffs_skip_rest_of_block(yaffs_dev_t *dev);
++
++int yaffs_count_free_chunks(yaffs_dev_t *dev);
++
++yaffs_tnode_t *yaffs_find_tnode_0(yaffs_dev_t *dev,
++ yaffs_file_s *file_struct,
++ __u32 chunk_id);
+
+-__u8 *yaffs_GetTempBuffer(yaffs_Device *dev, int lineNo);
+-void yaffs_ReleaseTempBuffer(yaffs_Device *dev, __u8 *buffer, int lineNo);
++__u32 yaffs_get_group_base(yaffs_dev_t *dev, yaffs_tnode_t *tn, unsigned pos);
+
+ #endif
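
For orientation, a minimal and entirely hypothetical sketch of how a driver could fill in the new split parameter block declared above before mounting; every my_* callback is a placeholder, only field names visible in struct yaffs_param_s / yaffs_dev_s are used, and the usual kernel headers are assumed.

/*
 * Hypothetical sketch (not part of the patch): populate dev->param and the
 * context pointers, then hand the device to the guts code.
 */
static int example_setup_dev(yaffs_dev_t *dev, struct mtd_info *mtd)
{
	memset(dev, 0, sizeof(*dev));	/* unused/default parameters must be zero */

	dev->param.total_bytes_per_chunk = 2048;	/* >= 512, need not be a power of 2 */
	dev->param.chunks_per_block = 64;
	dev->param.spare_bytes_per_chunk = 64;
	dev->param.start_block = 0;
	dev->param.end_block = 1023;
	dev->param.n_reserved_blocks = 5;
	dev->param.n_caches = 10;	/* short op caching: 10 to 20 is a good bet */
	dev->param.is_yaffs2 = 1;

	/* NAND access functions (must be set before calling YAFFS) */
	dev->param.write_chunk_tags_fn = my_write_chunk_tags;
	dev->param.read_chunk_tags_fn = my_read_chunk_tags;
	dev->param.bad_block_fn = my_mark_block_bad;
	dev->param.query_block_fn = my_query_block;
	dev->param.erase_fn = my_erase_block;
	dev->param.initialise_flash_fn = my_init_flash;

	dev->driver_context = mtd;	/* retrieved later via yaffs_dev_to_mtd() */

	return (yaffs_guts_initialise(dev) == YAFFS_OK) ? 0 : -1;
}
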
+--- a/fs/yaffs2/yaffsinterface.h
++++ b/fs/yaffs2/yaffsinterface.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -16,6 +16,6 @@
+ #ifndef __YAFFSINTERFACE_H__
+ #define __YAFFSINTERFACE_H__
+
+-int yaffs_Initialise(unsigned nBlocks);
++int yaffs_initialise(unsigned nBlocks);
+
+ #endif
+--- /dev/null
++++ b/fs/yaffs2/yaffs_linux_allocator.c
+@@ -0,0 +1,200 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ *
++ * Note: This code is currently unused. Being checked in in case it becomes useful.
++ */
++
++
++#include "yaffs_allocator.h"
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++#include "yportenv.h"
++#include "yaffs_linux.h"
++/*
++ * Start out with the same allocator as yaffs direct.
++ * Todo: Change to Linux slab allocator.
++ */
++
++
++
++#define NAMELEN 20
++struct yaffs_AllocatorStruct {
++ char tnode_name[NAMELEN+1];
++ char object_name[NAMELEN+1];
++ struct kmem_cache *tnode_cache;
++ struct kmem_cache *object_cache;
++};
++
++typedef struct yaffs_AllocatorStruct yaffs_Allocator;
++
++int mount_id;
++
++void yaffs_deinit_raw_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator = (yaffs_Allocator *)dev->allocator;
++
++ T(YAFFS_TRACE_ALLOCATE,(TSTR("Deinitialising yaffs allocator\n")));
++
++ if(allocator){
++ if(allocator->tnode_cache){
++ kmem_cache_destroy(allocator->tnode_cache);
++ allocator->tnode_cache = NULL;
++ } else {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("NULL tnode cache\n")));
++ YBUG();
++ }
++
++ if(allocator->object_cache){
++ kmem_cache_destroy(allocator->object_cache);
++ allocator->object_cache = NULL;
++ } else {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("NULL object cache\n")));
++ YBUG();
++ }
++
++ YFREE(allocator);
++
++ } else {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("Deinitialising NULL allocator\n")));
++ YBUG();
++ }
++ dev->allocator = NULL;
++}
++
++
++static void fake_ctor0(void *data){data = data;}
++static void fake_ctor1(void *data){data = data;}
++static void fake_ctor2(void *data){data = data;}
++static void fake_ctor3(void *data){data = data;}
++static void fake_ctor4(void *data){data = data;}
++static void fake_ctor5(void *data){data = data;}
++static void fake_ctor6(void *data){data = data;}
++static void fake_ctor7(void *data){data = data;}
++static void fake_ctor8(void *data){data = data;}
++static void fake_ctor9(void *data){data = data;}
++
++static void (*fake_ctor_list[10]) (void *) = {
++ fake_ctor0,
++ fake_ctor1,
++ fake_ctor2,
++ fake_ctor3,
++ fake_ctor4,
++ fake_ctor5,
++ fake_ctor6,
++ fake_ctor7,
++ fake_ctor8,
++ fake_ctor9,
++};
++
++void yaffs_init_raw_tnodes_and_objs(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator;
++ unsigned mount_id = yaffs_dev_to_lc(dev)->mount_id;
++
++ T(YAFFS_TRACE_ALLOCATE,(TSTR("Initialising yaffs allocator\n")));
++
++ if(dev->allocator)
++ YBUG();
++ else if(mount_id >= 10){
++ T(YAFFS_TRACE_ALWAYS,(TSTR("Bad mount_id %u\n"),mount_id));
++ } else {
++ allocator = YMALLOC(sizeof(yaffs_Allocator));
++ memset(allocator,0,sizeof(yaffs_Allocator));
++ dev->allocator = allocator;
++
++ if(!dev->allocator){
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs allocator creation failed\n")));
++ YBUG();
++ return;
++
++ }
++
++ sprintf(allocator->tnode_name,"yaffs_t_%u",mount_id);
++ sprintf(allocator->object_name,"yaffs_o_%u",mount_id);
++
++ allocator->tnode_cache =
++ kmem_cache_create(allocator->tnode_name,
++ dev->tnode_size,
++ 0, 0,
++ fake_ctor_list[mount_id]);
++ if(allocator->tnode_cache)
++ T(YAFFS_TRACE_ALLOCATE,
++ (TSTR("tnode cache \"%s\" %p\n"),
++ allocator->tnode_name,allocator->tnode_cache));
++ else {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs cache creation failed\n")));
++ YBUG();
++ }
++
++
++ allocator->object_cache =
++ kmem_cache_create(allocator->object_name,
++ sizeof(yaffs_obj_t),
++ 0, 0,
++ fake_ctor_list[mount_id]);
++
++ if(allocator->object_cache)
++ T(YAFFS_TRACE_ALLOCATE,
++ (TSTR("object cache \"%s\" %p\n"),
++ allocator->object_name,allocator->object_cache));
++
++ else {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs cache creation failed\n")));
++ YBUG();
++ }
++ }
++}
++
++
++yaffs_tnode_t *yaffs_alloc_raw_tnode(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++ if(!allocator || !allocator->tnode_cache){
++ YBUG();
++ return NULL;
++ }
++ return kmem_cache_alloc(allocator->tnode_cache, GFP_NOFS);
++}
++
++void yaffs_free_raw_tnode(yaffs_dev_t *dev, yaffs_tnode_t *tn)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++ kmem_cache_free(allocator->tnode_cache,tn);
++}
++
++yaffs_obj_t *yaffs_alloc_raw_obj(yaffs_dev_t *dev)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++ if(!allocator){
++ YBUG();
++ return NULL;
++ }
++ if(!allocator->object_cache){
++ YBUG();
++ return NULL;
++ }
++ return kmem_cache_alloc(allocator->object_cache, GFP_NOFS);
++}
++
++void yaffs_free_raw_obj(yaffs_dev_t *dev, yaffs_obj_t *obj)
++{
++ yaffs_Allocator *allocator = dev->allocator;
++ kmem_cache_free(allocator->object_cache,obj);
++}
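
The ten identical fake_ctorN functions above presumably exist so that each mount_id gets a distinct constructor pointer for its kmem_caches, which are also named per mount ("yaffs_t_<id>" / "yaffs_o_<id>"). A rough, illustrative life cycle of the allocator (not part of the patch):

/*
 * Illustration only: the core code drives this allocator purely through
 * dev->allocator using the functions defined above.
 */
static void example_allocator_cycle(yaffs_dev_t *dev)
{
	yaffs_obj_t *obj;
	yaffs_tnode_t *tn;

	yaffs_init_raw_tnodes_and_objs(dev);	/* creates the tnode and object caches */

	obj = yaffs_alloc_raw_obj(dev);
	tn = yaffs_alloc_raw_tnode(dev);

	/* ... obj and tn are used by the guts code ... */

	yaffs_free_raw_tnode(dev, tn);
	yaffs_free_raw_obj(dev, obj);

	yaffs_deinit_raw_tnodes_and_objs(dev);	/* destroys both caches */
}
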
+--- /dev/null
++++ b/fs/yaffs2/yaffs_linux.h
+@@ -0,0 +1,43 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_LINUX_H__
++#define __YAFFS_LINUX_H__
++
++#include "devextras.h"
++#include "yportenv.h"
++
++struct yaffs_LinuxContext {
++ struct ylist_head contextList; /* List of these we have mounted */
++ struct yaffs_dev_s *dev;
++ struct super_block * superBlock;
++ struct task_struct *bgThread; /* Background thread for this device */
++ int bgRunning;
++ struct semaphore grossLock; /* Gross locking semaphore */
++ __u8 *spareBuffer; /* For mtdif2 use. Don't know the size of the buffer
++ * at compile time so we have to allocate it.
++ */
++ struct ylist_head searchContexts;
++ void (*putSuperFunc)(struct super_block *sb);
++
++ struct task_struct *readdirProcess;
++ unsigned mount_id;
++};
++
++#define yaffs_dev_to_lc(dev) ((struct yaffs_LinuxContext *)((dev)->os_context))
++#define yaffs_dev_to_mtd(dev) ((struct mtd_info *)((dev)->driver_context))
++
++#endif
++
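
As an illustration (not part of the patch), Linux glue code reaches its per-mount state through the two accessor macros above; the generic yaffs_dev_s only carries the opaque os_context and driver_context pointers.

/*
 * Illustration only: the context would be allocated and attached to
 * dev->os_context by the mount code, which is not shown here.
 */
static void example_context_access(yaffs_dev_t *dev)
{
	struct yaffs_LinuxContext *lc = yaffs_dev_to_lc(dev);
	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);

	lc->dev = dev;		/* back-pointer to the generic device */
	lc->mount_id = 0;	/* also used to name the per-mount slab caches */
	(void)mtd;		/* whatever was stored in dev->driver_context */
}
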
+--- /dev/null
++++ b/fs/yaffs2/yaffs_list.h
+@@ -0,0 +1,127 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/*
++ * This file just holds extra declarations of macros that would normally
++ * be provided in the Linux kernel. These macros have been written from
++ * scratch but are functionally equivalent to the Linux ones.
++ *
++ */
++
++#ifndef __YAFFS_LIST_H__
++#define __YAFFS_LIST_H__
++
++
++#include "yportenv.h"
++
++/*
++ * This is a simple doubly linked list implementation that matches the
++ * way the Linux kernel doubly linked list implementation works.
++ */
++
++struct ylist_head {
++ struct ylist_head *next; /* next in chain */
++ struct ylist_head *prev; /* previous in chain */
++};
++
++
++/* Initialise a static list */
++#define YLIST_HEAD(name) \
++struct ylist_head name = { &(name), &(name)}
++
++
++
++/* Initialise a list head to an empty list */
++#define YINIT_LIST_HEAD(p) \
++do { \
++ (p)->next = (p);\
++ (p)->prev = (p); \
++} while (0)
++
++
++/* Add an element to a list */
++static Y_INLINE void ylist_add(struct ylist_head *newEntry,
++ struct ylist_head *list)
++{
++ struct ylist_head *listNext = list->next;
++
++ list->next = newEntry;
++ newEntry->prev = list;
++ newEntry->next = listNext;
++ listNext->prev = newEntry;
++
++}
++
++static Y_INLINE void ylist_add_tail(struct ylist_head *newEntry,
++ struct ylist_head *list)
++{
++ struct ylist_head *listPrev = list->prev;
++
++ list->prev = newEntry;
++ newEntry->next = list;
++ newEntry->prev = listPrev;
++ listPrev->next = newEntry;
++
++}
++
++
++/* Take an element out of its current list, with or without
++ * reinitialising the links of the entry. */
++static Y_INLINE void ylist_del(struct ylist_head *entry)
++{
++ struct ylist_head *listNext = entry->next;
++ struct ylist_head *listPrev = entry->prev;
++
++ listNext->prev = listPrev;
++ listPrev->next = listNext;
++
++}
++
++static Y_INLINE void ylist_del_init(struct ylist_head *entry)
++{
++ ylist_del(entry);
++ entry->next = entry->prev = entry;
++}
++
++
++/* Test if the list is empty */
++static Y_INLINE int ylist_empty(struct ylist_head *entry)
++{
++ return (entry->next == entry);
++}
++
++
++/* ylist_entry takes a pointer to a list entry and offsets it so that
++ * we can find a pointer to the object it is embedded in.
++ */
++
++
++#define ylist_entry(entry, type, member) \
++ ((type *)((char *)(entry)-(unsigned long)(&((type *)NULL)->member)))
++
++
++/* ylist_for_each and ylist_for_each_safe iterate over lists.
++ * ylist_for_each_safe uses temporary storage to make the list delete safe
++ */
++
++#define ylist_for_each(itervar, list) \
++ for (itervar = (list)->next; itervar != (list); itervar = itervar->next)
++
++#define ylist_for_each_safe(itervar, saveVar, list) \
++ for (itervar = (list)->next, saveVar = (list)->next->next; \
++ itervar != (list); itervar = saveVar, saveVar = saveVar->next)
++
++
++#endif
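
A small usage illustration (not part of the patch): objects are chained into the hash buckets of yaffs_dev_s through their hash_link member, so a bucket can be walked with ylist_for_each and ylist_entry; ylist_for_each_safe would be used instead if entries may be removed while iterating.

/*
 * Illustration only: walk one object hash bucket with the macros above.
 */
static void example_walk_bucket(yaffs_dev_t *dev, int bucket)
{
	struct ylist_head *lh;
	yaffs_obj_t *obj;

	ylist_for_each(lh, &dev->obj_bucket[bucket].list) {
		obj = ylist_entry(lh, yaffs_obj_t, hash_link);
		/* ... inspect obj, e.g. obj->obj_id ... */
		(void)obj;
	}
}
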
+--- a/fs/yaffs2/yaffs_mtdif1.c
++++ b/fs/yaffs2/yaffs_mtdif1.c
+@@ -2,7 +2,7 @@
+ * YAFFS: Yet another FFS. A NAND-flash specific file system.
+ * yaffs_mtdif1.c NAND mtd interface functions for small-page NAND.
+ *
+- * Copyright (C) 2002 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * This program is free software; you can redistribute it and/or modify
+@@ -18,15 +18,17 @@
+ *
+ * These functions are invoked via function pointers in yaffs_nand.c.
+ * This replaces functionality provided by functions in yaffs_mtdif.c
+- * and the yaffs_TagsCompatability functions in yaffs_tagscompat.c that are
++ * and the yaffs_tags_tCompatability functions in yaffs_tagscompat.c that are
+ * called in yaffs_mtdif.c when the function pointers are NULL.
+- * We assume the MTD layer is performing ECC (useNANDECC is true).
++ * We assume the MTD layer is performing ECC (use_nand_ecc is true).
+ */
+
+ #include "yportenv.h"
++#include "yaffs_trace.h"
+ #include "yaffs_guts.h"
+ #include "yaffs_packedtags1.h"
+-#include "yaffs_tagscompat.h" /* for yaffs_CalcTagsECC */
++#include "yaffs_tagscompat.h" /* for yaffs_calc_tags_ecc */
++#include "yaffs_linux.h"
+
+ #include "linux/kernel.h"
+ #include "linux/version.h"
+@@ -36,8 +38,6 @@
+ /* Don't compile this module if we don't have MTD's mtd_oob_ops interface */
+ #if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+
+-const char *yaffs_mtdif1_c_version = "$Id: yaffs_mtdif1.c,v 1.10 2009-03-09 07:41:10 charles Exp $";
+-
+ #ifndef CONFIG_YAFFS_9BYTE_TAGS
+ # define YTAG1_SIZE 8
+ #else
+@@ -51,12 +51,12 @@ const char *yaffs_mtdif1_c_version = "$I
+ * adjust 'oobfree' to match your existing Yaffs data.
+ *
+ * This nand_ecclayout scatters/gathers to/from the old-yaffs layout with the
+- * pageStatus byte (at NAND spare offset 4) scattered/gathered from/to
++ * page_status byte (at NAND spare offset 4) scattered/gathered from/to
+ * the 9th byte.
+ *
+ * Old-style on-NAND format: T0,T1,T2,T3,P,B,T4,T5,E0,E1,E2,T6,T7,E3,E4,E5
+- * We have/need PackedTags1 plus pageStatus: T0,T1,T2,T3,T4,T5,T6,T7,P
+- * where Tn are the tag bytes, En are MTD's ECC bytes, P is the pageStatus
++ * We have/need PackedTags1 plus page_status: T0,T1,T2,T3,T4,T5,T6,T7,P
++ * where Tn are the tag bytes, En are MTD's ECC bytes, P is the page_status
+ * byte and B is the small-page bad-block indicator byte.
+ */
+ static struct nand_ecclayout nand_oob_16 = {
+@@ -88,42 +88,40 @@ static struct nand_ecclayout nand_oob_16
+ * Any underlying MTD error results in YAFFS_FAIL.
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+-int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev,
+- int chunkInNAND, const __u8 *data, const yaffs_ExtendedTags *etags)
++int nandmtd1_WriteChunkWithTagsToNAND(yaffs_dev_t *dev,
++ int nand_chunk, const __u8 *data, const yaffs_ext_tags *etags)
+ {
+- struct mtd_info *mtd = dev->genericDevice;
+- int chunkBytes = dev->nDataBytesPerChunk;
+- loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ int chunkBytes = dev->data_bytes_per_chunk;
++ loff_t addr = ((loff_t)nand_chunk) * chunkBytes;
+ struct mtd_oob_ops ops;
+ yaffs_PackedTags1 pt1;
+ int retval;
+
+- /* we assume that PackedTags1 and yaffs_Tags are compatible */
++ /* we assume that PackedTags1 and yaffs_tags_t are compatible */
+ compile_time_assertion(sizeof(yaffs_PackedTags1) == 12);
+- compile_time_assertion(sizeof(yaffs_Tags) == 8);
+-
+- dev->nPageWrites++;
++ compile_time_assertion(sizeof(yaffs_tags_t) == 8);
+
+ yaffs_PackTags1(&pt1, etags);
+- yaffs_CalcTagsECC((yaffs_Tags *)&pt1);
++ yaffs_calc_tags_ecc((yaffs_tags_t *)&pt1);
+
+ /* When deleting a chunk, the upper layer provides only skeletal
+- * etags, one with chunkDeleted set. However, we need to update the
++ * etags, one with is_deleted set. However, we need to update the
+ * tags, not erase them completely. So we use the NAND write property
+ * that only zeroed-bits stick and set tag bytes to all-ones and
+ * zero just the (not) deleted bit.
+ */
+ #ifndef CONFIG_YAFFS_9BYTE_TAGS
+- if (etags->chunkDeleted) {
++ if (etags->is_deleted) {
+ memset(&pt1, 0xff, 8);
+ /* clear delete status bit to indicate deleted */
+ pt1.deleted = 0;
+ }
+ #else
+ ((__u8 *)&pt1)[8] = 0xff;
+- if (etags->chunkDeleted) {
++ if (etags->is_deleted) {
+ memset(&pt1, 0xff, 8);
+- /* zero pageStatus byte to indicate deleted */
++ /* zero page_status byte to indicate deleted */
+ ((__u8 *)&pt1)[8] = 0;
+ }
+ #endif
+@@ -137,20 +135,20 @@ int nandmtd1_WriteChunkWithTagsToNAND(ya
+
+ retval = mtd->write_oob(mtd, addr, &ops);
+ if (retval) {
+- yaffs_trace(YAFFS_TRACE_MTD,
+- "write_oob failed, chunk %d, mtd error %d\n",
+- chunkInNAND, retval);
++ T(YAFFS_TRACE_MTD,
++ (TSTR("write_oob failed, chunk %d, mtd error %d"TENDSTR),
++ nand_chunk, retval));
+ }
+ return retval ? YAFFS_FAIL : YAFFS_OK;
+ }
+
+-/* Return with empty ExtendedTags but add eccResult.
++/* Return with empty ExtendedTags but add ecc_result.
+ */
+-static int rettags(yaffs_ExtendedTags *etags, int eccResult, int retval)
++static int rettags(yaffs_ext_tags *etags, int ecc_result, int retval)
+ {
+ if (etags) {
+ memset(etags, 0, sizeof(*etags));
+- etags->eccResult = eccResult;
++ etags->ecc_result = ecc_result;
+ }
+ return retval;
+ }
+@@ -158,30 +156,28 @@ static int rettags(yaffs_ExtendedTags *e
+ /* Read a chunk (page) from NAND.
+ *
+ * Caller expects ExtendedTags data to be usable even on error; that is,
+- * all members except eccResult and blockBad are zeroed.
++ * all members except ecc_result and block_bad are zeroed.
+ *
+ * - Check ECC results for data (if applicable)
+ * - Check for blank/erased block (return empty ExtendedTags if blank)
+ * - Check the PackedTags1 mini-ECC (correct if necessary/possible)
+ * - Convert PackedTags1 to ExtendedTags
+- * - Update eccResult and blockBad members to refect state.
++ * - Update ecc_result and block_bad members to reflect state.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+-int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev,
+- int chunkInNAND, __u8 *data, yaffs_ExtendedTags *etags)
++int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_dev_t *dev,
++ int nand_chunk, __u8 *data, yaffs_ext_tags *etags)
+ {
+- struct mtd_info *mtd = dev->genericDevice;
+- int chunkBytes = dev->nDataBytesPerChunk;
+- loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ int chunkBytes = dev->data_bytes_per_chunk;
++ loff_t addr = ((loff_t)nand_chunk) * chunkBytes;
+ int eccres = YAFFS_ECC_RESULT_NO_ERROR;
+ struct mtd_oob_ops ops;
+ yaffs_PackedTags1 pt1;
+ int retval;
+ int deleted;
+
+- dev->nPageReads++;
+-
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = (data) ? chunkBytes : 0;
+@@ -200,9 +196,9 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
+ */
+ retval = mtd->read_oob(mtd, addr, &ops);
+ if (retval) {
+- yaffs_trace(YAFFS_TRACE_MTD,
+- "read_oob failed, chunk %d, mtd error %d\n",
+- chunkInNAND, retval);
++ T(YAFFS_TRACE_MTD,
++ (TSTR("read_oob failed, chunk %d, mtd error %d"TENDSTR),
++ nand_chunk, retval));
+ }
+
+ switch (retval) {
+@@ -213,23 +209,23 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
+ case -EUCLEAN:
+ /* MTD's ECC fixed the data */
+ eccres = YAFFS_ECC_RESULT_FIXED;
+- dev->eccFixed++;
++ dev->n_ecc_fixed++;
+ break;
+
+ case -EBADMSG:
+ /* MTD's ECC could not fix the data */
+- dev->eccUnfixed++;
++ dev->n_ecc_unfixed++;
+ /* fall into... */
+ default:
+ rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
+- etags->blockBad = (mtd->block_isbad)(mtd, addr);
++ etags->block_bad = (mtd->block_isbad)(mtd, addr);
+ return YAFFS_FAIL;
+ }
+
+ /* Check for a blank/erased chunk.
+ */
+- if (yaffs_CheckFF((__u8 *)&pt1, 8)) {
+- /* when blank, upper layers want eccResult to be <= NO_ERROR */
++ if (yaffs_check_ff((__u8 *)&pt1, 8)) {
++ /* when blank, upper layers want ecc_result to be <= NO_ERROR */
+ return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK);
+ }
+
+@@ -241,37 +237,37 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
+ deleted = !pt1.deleted;
+ pt1.deleted = 1;
+ #else
+- deleted = (yaffs_CountBits(((__u8 *)&pt1)[8]) < 7);
++ deleted = (yaffs_count_bits(((__u8 *)&pt1)[8]) < 7);
+ #endif
+
+ /* Check the packed tags mini-ECC and correct if necessary/possible.
+ */
+- retval = yaffs_CheckECCOnTags((yaffs_Tags *)&pt1);
++ retval = yaffs_check_tags_ecc((yaffs_tags_t *)&pt1);
+ switch (retval) {
+ case 0:
+ /* no tags error, use MTD result */
+ break;
+ case 1:
+ /* recovered tags-ECC error */
+- dev->tagsEccFixed++;
++ dev->n_tags_ecc_fixed++;
+ if (eccres == YAFFS_ECC_RESULT_NO_ERROR)
+ eccres = YAFFS_ECC_RESULT_FIXED;
+ break;
+ default:
+ /* unrecovered tags-ECC error */
+- dev->tagsEccUnfixed++;
++ dev->n_tags_ecc_unfixed++;
+ return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL);
+ }
+
+ /* Unpack the tags to extended form and set ECC result.
+- * [set shouldBeFF just to keep yaffs_UnpackTags1 happy]
++ * [set shouldBeFF just to keep yaffs_unpack_tags1 happy]
+ */
+ pt1.shouldBeFF = 0xFFFFFFFF;
+- yaffs_UnpackTags1(etags, &pt1);
+- etags->eccResult = eccres;
++ yaffs_unpack_tags1(etags, &pt1);
++ etags->ecc_result = eccres;
+
+ /* Set deleted state */
+- etags->chunkDeleted = deleted;
++ etags->is_deleted = deleted;
+ return YAFFS_OK;
+ }
+
+@@ -282,15 +278,15 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+-int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
++int nandmtd1_MarkNANDBlockBad(struct yaffs_dev_s *dev, int block_no)
+ {
+- struct mtd_info *mtd = dev->genericDevice;
+- int blocksize = dev->nChunksPerBlock * dev->nDataBytesPerChunk;
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ int blocksize = dev->param.chunks_per_block * dev->data_bytes_per_chunk;
+ int retval;
+
+- yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad\n", blockNo);
++ T(YAFFS_TRACE_BAD_BLOCKS,(TSTR("marking block %d bad"TENDSTR), block_no));
+
+- retval = mtd->block_markbad(mtd, (loff_t)blocksize * blockNo);
++ retval = mtd->block_markbad(mtd, (loff_t)blocksize * block_no);
+ return (retval) ? YAFFS_FAIL : YAFFS_OK;
+ }
+
+@@ -305,9 +301,9 @@ static int nandmtd1_TestPrerequists(stru
+ int oobavail = mtd->ecclayout->oobavail;
+
+ if (oobavail < YTAG1_SIZE) {
+- yaffs_trace(YAFFS_TRACE_ERROR,
+- "mtd device has only %d bytes for tags, need %d\n",
+- oobavail, YTAG1_SIZE);
++ T(YAFFS_TRACE_ERROR,
++ (TSTR("mtd device has only %d bytes for tags, need %d"TENDSTR),
++ oobavail, YTAG1_SIZE));
+ return YAFFS_FAIL;
+ }
+ return YAFFS_OK;
+@@ -322,13 +318,13 @@ static int nandmtd1_TestPrerequists(stru
+ *
+ * Always returns YAFFS_OK.
+ */
+-int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState *pState, __u32 *pSequenceNumber)
++int nandmtd1_QueryNANDBlock(struct yaffs_dev_s *dev, int block_no,
++ yaffs_block_state_t *pState, __u32 *pSequenceNumber)
+ {
+- struct mtd_info *mtd = dev->genericDevice;
+- int chunkNo = blockNo * dev->nChunksPerBlock;
+- loff_t addr = (loff_t)chunkNo * dev->nDataBytesPerChunk;
+- yaffs_ExtendedTags etags;
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ int chunkNo = block_no * dev->param.chunks_per_block;
++ loff_t addr = (loff_t)chunkNo * dev->data_bytes_per_chunk;
++ yaffs_ext_tags etags;
+ int state = YAFFS_BLOCK_STATE_DEAD;
+ int seqnum = 0;
+ int retval;
+@@ -340,17 +336,17 @@ int nandmtd1_QueryNANDBlock(struct yaffs
+ return YAFFS_FAIL;
+
+ retval = nandmtd1_ReadChunkWithTagsFromNAND(dev, chunkNo, NULL, &etags);
+- etags.blockBad = (mtd->block_isbad)(mtd, addr);
+- if (etags.blockBad) {
+- yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+- "block %d is marked bad\n", blockNo);
++ etags.block_bad = (mtd->block_isbad)(mtd, addr);
++ if (etags.block_bad) {
++ T(YAFFS_TRACE_BAD_BLOCKS,
++ (TSTR("block %d is marked bad"TENDSTR), block_no));
+ state = YAFFS_BLOCK_STATE_DEAD;
+- } else if (etags.eccResult != YAFFS_ECC_RESULT_NO_ERROR) {
++ } else if (etags.ecc_result != YAFFS_ECC_RESULT_NO_ERROR) {
+ /* bad tags, need to look more closely */
+ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+- } else if (etags.chunkUsed) {
++ } else if (etags.chunk_used) {
+ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+- seqnum = etags.sequenceNumber;
++ seqnum = etags.seq_number;
+ } else {
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ }
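
For reference, a hypothetical restatement (not part of the patch) of the address arithmetic the nandmtd1_* functions above use when talking to MTD: chunk and block numbers become byte offsets via the per-chunk data size and dev->param.chunks_per_block.

/*
 * Illustration only: chunk/block number to MTD byte-offset conversions.
 */
static loff_t example_chunk_addr(yaffs_dev_t *dev, int nand_chunk)
{
	return ((loff_t)nand_chunk) * dev->data_bytes_per_chunk;
}

static loff_t example_block_addr(yaffs_dev_t *dev, int block_no)
{
	int block_bytes = dev->param.chunks_per_block * dev->data_bytes_per_chunk;

	return ((loff_t)block_no) * block_bytes;
}
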
+--- a/fs/yaffs2/yaffs_mtdif1-compat.c
++++ /dev/null
+@@ -1,434 +0,0 @@
+-From ian@brightstareng.com Fri May 18 15:06:49 2007
+-From ian@brightstareng.com Fri May 18 15:08:21 2007
+-Received: from 206.173.66.57.ptr.us.xo.net ([206.173.66.57] helo=zebra.brightstareng.com)
+- by apollo.linkchoose.co.uk with esmtp (Exim 4.60)
+- (envelope-from <ian@brightstareng.com>)
+- id 1Hp380-00011e-T6
+- for david.goodenough@linkchoose.co.uk; Fri, 18 May 2007 15:08:21 +0100
+-Received: from localhost (localhost.localdomain [127.0.0.1])
+- by zebra.brightstareng.com (Postfix) with ESMTP
+- id 4819F28C004; Fri, 18 May 2007 10:07:49 -0400 (EDT)
+-Received: from zebra.brightstareng.com ([127.0.0.1])
+- by localhost (zebra [127.0.0.1]) (amavisd-new, port 10024) with ESMTP
+- id 05328-06; Fri, 18 May 2007 10:07:16 -0400 (EDT)
+-Received: from pippin (unknown [192.168.1.25])
+- by zebra.brightstareng.com (Postfix) with ESMTP
+- id 8BEF528C1BC; Fri, 18 May 2007 10:06:53 -0400 (EDT)
+-From: Ian McDonnell <ian@brightstareng.com>
+-To: David Goodenough <david.goodenough@linkchoose.co.uk>
+-Subject: Re: something tested this time -- yaffs_mtdif1-compat.c
+-Date: Fri, 18 May 2007 10:06:49 -0400
+-User-Agent: KMail/1.9.1
+-References: <200705142207.06909.ian@brightstareng.com> <200705171131.53536.ian@brightstareng.com> <200705181334.32166.david.goodenough@linkchoose.co.uk>
+-In-Reply-To: <200705181334.32166.david.goodenough@linkchoose.co.uk>
+-Cc: Andrea Conti <alyf@alyf.net>,
+- Charles Manning <manningc2@actrix.gen.nz>
+-MIME-Version: 1.0
+-Content-Type: Multipart/Mixed;
+- boundary="Boundary-00=_5LbTGmt62YoutxM"
+-Message-Id: <200705181006.49860.ian@brightstareng.com>
+-X-Virus-Scanned: by amavisd-new at brightstareng.com
+-Status: R
+-X-Status: NT
+-X-KMail-EncryptionState:
+-X-KMail-SignatureState:
+-X-KMail-MDN-Sent:
+-
+---Boundary-00=_5LbTGmt62YoutxM
+-Content-Type: text/plain;
+- charset="iso-8859-15"
+-Content-Transfer-Encoding: 7bit
+-Content-Disposition: inline
+-
+-David, Andrea,
+-
+-On Friday 18 May 2007 08:34, you wrote:
+-> Yea team. With this fix in place (I put it in the wrong place
+-> at first) I can now mount and ls the Yaffs partition without
+-> an error messages!
+-
+-Good news!
+-
+-Attached is a newer yaffs_mtdif1.c with a bandaid to help the
+-2.6.18 and 2.6.19 versions of MTD not trip on the oob read.
+-See the LINUX_VERSION_CODE conditional in
+-nandmtd1_ReadChunkWithTagsFromNAND.
+-
+--imcd
+-
+---Boundary-00=_5LbTGmt62YoutxM
+-Content-Type: text/x-csrc;
+- charset="iso-8859-15";
+- name="yaffs_mtdif1.c"
+-Content-Transfer-Encoding: 7bit
+-Content-Disposition: attachment;
+- filename="yaffs_mtdif1.c"
+-
+-/*
+- * YAFFS: Yet another FFS. A NAND-flash specific file system.
+- * yaffs_mtdif1.c NAND mtd interface functions for small-page NAND.
+- *
+- * Copyright (C) 2002 Aleph One Ltd.
+- * for Toby Churchill Ltd and Brightstar Engineering
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-/*
+- * This module provides the interface between yaffs_nand.c and the
+- * MTD API. This version is used when the MTD interface supports the
+- * 'mtd_oob_ops' style calls to read_oob and write_oob, circa 2.6.17,
+- * and we have small-page NAND device.
+- *
+- * These functions are invoked via function pointers in yaffs_nand.c.
+- * This replaces functionality provided by functions in yaffs_mtdif.c
+- * and the yaffs_TagsCompatability functions in yaffs_tagscompat.c that are
+- * called in yaffs_mtdif.c when the function pointers are NULL.
+- * We assume the MTD layer is performing ECC (useNANDECC is true).
+- */
+-
+-#include "yportenv.h"
+-#include "yaffs_guts.h"
+-#include "yaffs_packedtags1.h"
+-#include "yaffs_tagscompat.h" // for yaffs_CalcTagsECC
+-
+-#include "linux/kernel.h"
+-#include "linux/version.h"
+-#include "linux/types.h"
+-#include "linux/mtd/mtd.h"
+-
+-/* Don't compile this module if we don't have MTD's mtd_oob_ops interface */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
+-
+-const char *yaffs_mtdif1_c_version = "$Id$";
+-
+-#ifndef CONFIG_YAFFS_9BYTE_TAGS
+-# define YTAG1_SIZE 8
+-#else
+-# define YTAG1_SIZE 9
+-#endif
+-
+-#if 0
+-/* Use the following nand_ecclayout with MTD when using
+- * CONFIG_YAFFS_9BYTE_TAGS and the older on-NAND tags layout.
+- * If you have existing Yaffs images and the byte order differs from this,
+- * adjust 'oobfree' to match your existing Yaffs data.
+- *
+- * This nand_ecclayout scatters/gathers to/from the old-yaffs layout with the
+- * pageStatus byte (at NAND spare offset 4) scattered/gathered from/to
+- * the 9th byte.
+- *
+- * Old-style on-NAND format: T0,T1,T2,T3,P,B,T4,T5,E0,E1,E2,T6,T7,E3,E4,E5
+- * We have/need PackedTags1 plus pageStatus: T0,T1,T2,T3,T4,T5,T6,T7,P
+- * where Tn are the tag bytes, En are MTD's ECC bytes, P is the pageStatus
+- * byte and B is the small-page bad-block indicator byte.
+- */
+-static struct nand_ecclayout nand_oob_16 = {
+- .eccbytes = 6,
+- .eccpos = { 8, 9, 10, 13, 14, 15 },
+- .oobavail = 9,
+- .oobfree = { { 0, 4 }, { 6, 2 }, { 11, 2 }, { 4, 1 } }
+-};
+-#endif
+-
+-/* Write a chunk (page) of data to NAND.
+- *
+- * Caller always provides ExtendedTags data which are converted to a more
+- * compact (packed) form for storage in NAND. A mini-ECC runs over the
+- * contents of the tags meta-data; used to valid the tags when read.
+- *
+- * - Pack ExtendedTags to PackedTags1 form
+- * - Compute mini-ECC for PackedTags1
+- * - Write data and packed tags to NAND.
+- *
+- * Note: Due to the use of the PackedTags1 meta-data which does not include
+- * a full sequence number (as found in the larger PackedTags2 form) it is
+- * necessary for Yaffs to re-write a chunk/page (just once) to mark it as
+- * discarded and dirty. This is not ideal: newer NAND parts are supposed
+- * to be written just once. When Yaffs performs this operation, this
+- * function is called with a NULL data pointer -- calling MTD write_oob
+- * without data is valid usage (2.6.17).
+- *
+- * Any underlying MTD error results in YAFFS_FAIL.
+- * Returns YAFFS_OK or YAFFS_FAIL.
+- */
+-int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev,
+- int chunkInNAND, const __u8 * data, const yaffs_ExtendedTags * etags)
+-{
+- struct mtd_info * mtd = dev->genericDevice;
+- int chunkBytes = dev->nDataBytesPerChunk;
+- loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
+- struct mtd_oob_ops ops;
+- yaffs_PackedTags1 pt1;
+- int retval;
+-
+- /* we assume that PackedTags1 and yaffs_Tags are compatible */
+- compile_time_assertion(sizeof(yaffs_PackedTags1) == 12);
+- compile_time_assertion(sizeof(yaffs_Tags) == 8);
+-
+- yaffs_PackTags1(&pt1, etags);
+- yaffs_CalcTagsECC((yaffs_Tags *)&pt1);
+-
+- /* When deleting a chunk, the upper layer provides only skeletal
+- * etags, one with chunkDeleted set. However, we need to update the
+- * tags, not erase them completely. So we use the NAND write property
+- * that only zeroed-bits stick and set tag bytes to all-ones and
+- * zero just the (not) deleted bit.
+- */
+-#ifndef CONFIG_YAFFS_9BYTE_TAGS
+- if (etags->chunkDeleted) {
+- memset(&pt1, 0xff, 8);
+- /* clear delete status bit to indicate deleted */
+- pt1.deleted = 0;
+- }
+-#else
+- ((__u8 *)&pt1)[8] = 0xff;
+- if (etags->chunkDeleted) {
+- memset(&pt1, 0xff, 8);
+- /* zero pageStatus byte to indicate deleted */
+- ((__u8 *)&pt1)[8] = 0;
+- }
+-#endif
+-
+- memset(&ops, 0, sizeof(ops));
+- ops.mode = MTD_OOB_AUTO;
+- ops.len = (data) ? chunkBytes : 0;
+- ops.ooblen = YTAG1_SIZE;
+- ops.datbuf = (__u8 *)data;
+- ops.oobbuf = (__u8 *)&pt1;
+-
+- retval = mtd->write_oob(mtd, addr, &ops);
+- if (retval) {
+- yaffs_trace(YAFFS_TRACE_MTD,
+- "write_oob failed, chunk %d, mtd error %d\n",
+- chunkInNAND, retval);
+- }
+- return retval ? YAFFS_FAIL : YAFFS_OK;
+-}
+-
+-/* Return with empty ExtendedTags but add eccResult.
+- */
+-static int rettags(yaffs_ExtendedTags * etags, int eccResult, int retval)
+-{
+- if (etags) {
+- memset(etags, 0, sizeof(*etags));
+- etags->eccResult = eccResult;
+- }
+- return retval;
+-}
+-
+-/* Read a chunk (page) from NAND.
+- *
+- * Caller expects ExtendedTags data to be usable even on error; that is,
+- * all members except eccResult and blockBad are zeroed.
+- *
+- * - Check ECC results for data (if applicable)
+- * - Check for blank/erased block (return empty ExtendedTags if blank)
+- * - Check the PackedTags1 mini-ECC (correct if necessary/possible)
+- * - Convert PackedTags1 to ExtendedTags
+- * - Update eccResult and blockBad members to reflect state.
+- *
+- * Returns YAFFS_OK or YAFFS_FAIL.
+- */
+-int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev,
+- int chunkInNAND, __u8 * data, yaffs_ExtendedTags * etags)
+-{
+- struct mtd_info * mtd = dev->genericDevice;
+- int chunkBytes = dev->nDataBytesPerChunk;
+- loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
+- int eccres = YAFFS_ECC_RESULT_NO_ERROR;
+- struct mtd_oob_ops ops;
+- yaffs_PackedTags1 pt1;
+- int retval;
+- int deleted;
+-
+- memset(&ops, 0, sizeof(ops));
+- ops.mode = MTD_OOB_AUTO;
+- ops.len = (data) ? chunkBytes : 0;
+- ops.ooblen = YTAG1_SIZE;
+- ops.datbuf = data;
+- ops.oobbuf = (__u8 *)&pt1;
+-
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
+- /* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
+- * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
+- */
+- ops.len = (ops.datbuf) ? ops.len : ops.ooblen;
+-#endif
+- /* Read page and oob using MTD.
+- * Check status and determine ECC result.
+- */
+- retval = mtd->read_oob(mtd, addr, &ops);
+- if (retval) {
+- yaffs_trace(YAFFS_TRACE_MTD,
+- "read_oob failed, chunk %d, mtd error %d\n",
+- chunkInNAND, retval);
+- }
+-
+- switch (retval) {
+- case 0:
+- /* no error */
+- break;
+-
+- case -EUCLEAN:
+- /* MTD's ECC fixed the data */
+- eccres = YAFFS_ECC_RESULT_FIXED;
+- dev->eccFixed++;
+- break;
+-
+- case -EBADMSG:
+- /* MTD's ECC could not fix the data */
+- dev->eccUnfixed++;
+- /* fall into... */
+- default:
+- rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
+- etags->blockBad = (mtd->block_isbad)(mtd, addr);
+- return YAFFS_FAIL;
+- }
+-
+- /* Check for a blank/erased chunk.
+- */
+- if (yaffs_CheckFF((__u8 *)&pt1, 8)) {
+- /* when blank, upper layers want eccResult to be <= NO_ERROR */
+- return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK);
+- }
+-
+-#ifndef CONFIG_YAFFS_9BYTE_TAGS
+-	/* Read deleted status (bit) then return it to its non-deleted
+- * state before performing tags mini-ECC check. pt1.deleted is
+- * inverted.
+- */
+- deleted = !pt1.deleted;
+- pt1.deleted = 1;
+-#else
+- (void) deleted; /* not used */
+-#endif
+-
+- /* Check the packed tags mini-ECC and correct if necessary/possible.
+- */
+- retval = yaffs_CheckECCOnTags((yaffs_Tags *)&pt1);
+- switch (retval) {
+- case 0:
+- /* no tags error, use MTD result */
+- break;
+- case 1:
+- /* recovered tags-ECC error */
+- dev->tagsEccFixed++;
+- eccres = YAFFS_ECC_RESULT_FIXED;
+- break;
+- default:
+- /* unrecovered tags-ECC error */
+- dev->tagsEccUnfixed++;
+- return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL);
+- }
+-
+- /* Unpack the tags to extended form and set ECC result.
+- * [set shouldBeFF just to keep yaffs_UnpackTags1 happy]
+- */
+- pt1.shouldBeFF = 0xFFFFFFFF;
+- yaffs_UnpackTags1(etags, &pt1);
+- etags->eccResult = eccres;
+-
+- /* Set deleted state.
+- */
+-#ifndef CONFIG_YAFFS_9BYTE_TAGS
+- etags->chunkDeleted = deleted;
+-#else
+- etags->chunkDeleted = (yaffs_CountBits(((__u8 *)&pt1)[8]) < 7);
+-#endif
+- return YAFFS_OK;
+-}
+-
+-/* Mark a block bad.
+- *
+- * This is a persistent state.
+- * Use of this function should be rare.
+- *
+- * Returns YAFFS_OK or YAFFS_FAIL.
+- */
+-int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
+-{
+- struct mtd_info * mtd = dev->genericDevice;
+- int blocksize = dev->nChunksPerBlock * dev->nDataBytesPerChunk;
+- int retval;
+-
+- yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", blockNo);
+-
+- retval = mtd->block_markbad(mtd, (loff_t)blocksize * blockNo);
+- return (retval) ? YAFFS_FAIL : YAFFS_OK;
+-}
+-
+-/* Check any MTD prerequisites.
+- *
+- * Returns YAFFS_OK or YAFFS_FAIL.
+- */
+-static int nandmtd1_TestPrerequists(struct mtd_info * mtd)
+-{
+- /* 2.6.18 has mtd->ecclayout->oobavail */
+- /* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */
+- int oobavail = mtd->ecclayout->oobavail;
+-
+- if (oobavail < YTAG1_SIZE) {
+- yaffs_trace(YAFFS_TRACE_ERROR,
+- "mtd device has only %d bytes for tags, need %d",
+- oobavail, YTAG1_SIZE);
+- return YAFFS_FAIL;
+- }
+- return YAFFS_OK;
+-}
+-
+-/* Query for the current state of a specific block.
+- *
+- * Examine the tags of the first chunk of the block and return the state:
+- * - YAFFS_BLOCK_STATE_DEAD, the block is marked bad
+- * - YAFFS_BLOCK_STATE_NEEDS_SCANNING, the block is in use
+- * - YAFFS_BLOCK_STATE_EMPTY, the block is clean
+- *
+- * Always returns YAFFS_OK.
+- */
+-int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState * pState, int *pSequenceNumber)
+-{
+- struct mtd_info * mtd = dev->genericDevice;
+- int chunkNo = blockNo * dev->nChunksPerBlock;
+- yaffs_ExtendedTags etags;
+- int state = YAFFS_BLOCK_STATE_DEAD;
+- int seqnum = 0;
+- int retval;
+-
+-	/* We don't yet have a good place to test for MTD config prerequisites.
+- * Do it here as we are called during the initial scan.
+- */
+- if (nandmtd1_TestPrerequists(mtd) != YAFFS_OK) {
+- return YAFFS_FAIL;
+- }
+-
+- retval = nandmtd1_ReadChunkWithTagsFromNAND(dev, chunkNo, NULL, &etags);
+- if (etags.blockBad) {
+- yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+- "block %d is marked bad", blockNo);
+- state = YAFFS_BLOCK_STATE_DEAD;
+- }
+- else if (etags.chunkUsed) {
+- state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+- seqnum = etags.sequenceNumber;
+- }
+- else {
+- state = YAFFS_BLOCK_STATE_EMPTY;
+- }
+-
+- *pState = state;
+- *pSequenceNumber = seqnum;
+-
+- /* query always succeeds */
+- return YAFFS_OK;
+-}
+-
+-#endif /*KERNEL_VERSION*/
+-
+---Boundary-00=_5LbTGmt62YoutxM--
+-
+-
+-
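
The write path removed above packs the tags, adds the mini-ECC and writes data plus tags in a single MTD write_oob call; marking a chunk deleted then relies on the NAND property that programming can only clear bits. The standalone C sketch below is illustrative only and is not part of the patch (the simulated deleted-bit position is hypothetical); it shows why re-writing the tag bytes as all 0xFF with just that bit zeroed updates the delete marker without disturbing the rest.

#include <stdio.h>
#include <string.h>

/* NAND programming can only turn 1 bits into 0 bits: new = old & written. */
static void nand_program(unsigned char *flash, const unsigned char *data, int n)
{
	int i;

	for (i = 0; i < n; i++)
		flash[i] &= data[i];
}

int main(void)
{
	unsigned char oob[8];		/* simulated spare area holding packed tags */
	unsigned char tags[8] = { 0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF1 };
	unsigned char update[8];
	unsigned char deleted_bit = 0x01;	/* hypothetical bit position */

	memset(oob, 0xFF, sizeof(oob));		/* erased state */
	nand_program(oob, tags, sizeof(oob));	/* first write: packed tags */

	/* Delete marking: all ones except the deleted bit, so only that bit changes. */
	memset(update, 0xFF, sizeof(update));
	update[7] &= (unsigned char)~deleted_bit;
	nand_program(oob, update, sizeof(oob));

	printf("tag byte 7: 0x%02X -> only the deleted bit was cleared\n", oob[7]);
	return 0;
}
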
+--- a/fs/yaffs2/yaffs_mtdif1.h
++++ b/fs/yaffs2/yaffs_mtdif1.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * This program is free software; you can redistribute it and/or modify
+@@ -14,15 +14,15 @@
+ #ifndef __YAFFS_MTDIF1_H__
+ #define __YAFFS_MTDIF1_H__
+
+-int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND,
+- const __u8 *data, const yaffs_ExtendedTags *tags);
++int nandmtd1_WriteChunkWithTagsToNAND(yaffs_dev_t *dev, int nand_chunk,
++ const __u8 *data, const yaffs_ext_tags *tags);
+
+-int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
+- __u8 *data, yaffs_ExtendedTags *tags);
++int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_dev_t *dev, int nand_chunk,
++ __u8 *data, yaffs_ext_tags *tags);
+
+-int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
++int nandmtd1_MarkNANDBlockBad(struct yaffs_dev_s *dev, int block_no);
+
+-int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState *state, __u32 *sequenceNumber);
++int nandmtd1_QueryNANDBlock(struct yaffs_dev_s *dev, int block_no,
++ yaffs_block_state_t *state, __u32 *seq_number);
+
+ #endif
+--- a/fs/yaffs2/yaffs_mtdif2.c
++++ b/fs/yaffs2/yaffs_mtdif2.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -13,11 +13,8 @@
+
+ /* mtd interface for YAFFS2 */
+
+-const char *yaffs_mtdif2_c_version =
+- "$Id: yaffs_mtdif2.c,v 1.23 2009-03-06 17:20:53 wookey Exp $";
+-
+ #include "yportenv.h"
+-
++#include "yaffs_trace.h"
+
+ #include "yaffs_mtdif2.h"
+
+@@ -27,15 +24,17 @@ const char *yaffs_mtdif2_c_version =
+
+ #include "yaffs_packedtags2.h"
+
++#include "yaffs_linux.h"
++
+ /* NB For use with inband tags....
+ * We assume that the data buffer is of size totalBytesPerChunk so that we can also
+ * use it to load the tags.
+ */
+-int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND,
++int nandmtd2_WriteChunkWithTagsToNAND(yaffs_dev_t *dev, int nand_chunk,
+ const __u8 *data,
+- const yaffs_ExtendedTags *tags)
++ const yaffs_ext_tags *tags)
+ {
+- struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ #if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ struct mtd_oob_ops ops;
+ #else
+@@ -47,13 +46,16 @@ int nandmtd2_WriteChunkWithTagsToNAND(ya
+
+ yaffs_PackedTags2 pt;
+
++ int packed_tags_size = dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
++ void * packed_tags_ptr = dev->param.no_tags_ecc ? (void *) &pt.t : (void *)&pt;
++
+ T(YAFFS_TRACE_MTD,
+ (TSTR
+ ("nandmtd2_WriteChunkWithTagsToNAND chunk %d data %p tags %p"
+- TENDSTR), chunkInNAND, data, tags));
++ TENDSTR), nand_chunk, data, tags));
+
+
+- addr = ((loff_t) chunkInNAND) * dev->totalBytesPerChunk;
++ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+
+ /* For yaffs2 writing there must be both data and tags.
+ * If we're using inband tags, then the tags are stuffed into
+@@ -61,30 +63,30 @@ int nandmtd2_WriteChunkWithTagsToNAND(ya
+ */
+ if (!data || !tags)
+ BUG();
+- else if (dev->inbandTags) {
++ else if (dev->param.inband_tags) {
+ yaffs_PackedTags2TagsPart *pt2tp;
+- pt2tp = (yaffs_PackedTags2TagsPart *)(data + dev->nDataBytesPerChunk);
++ pt2tp = (yaffs_PackedTags2TagsPart *)(data + dev->data_bytes_per_chunk);
+ yaffs_PackTags2TagsPart(pt2tp, tags);
+ } else
+- yaffs_PackTags2(&pt, tags);
++ yaffs_PackTags2(&pt, tags, !dev->param.no_tags_ecc);
+
+ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ ops.mode = MTD_OOB_AUTO;
+- ops.ooblen = (dev->inbandTags) ? 0 : sizeof(pt);
+- ops.len = dev->totalBytesPerChunk;
++ ops.ooblen = (dev->param.inband_tags) ? 0 : packed_tags_size;
++ ops.len = dev->param.total_bytes_per_chunk;
+ ops.ooboffs = 0;
+ ops.datbuf = (__u8 *)data;
+- ops.oobbuf = (dev->inbandTags) ? NULL : (void *)&pt;
++ ops.oobbuf = (dev->param.inband_tags) ? NULL : packed_tags_ptr;
+ retval = mtd->write_oob(mtd, addr, &ops);
+
+ #else
+- if (!dev->inbandTags) {
++ if (!dev->param.inband_tags) {
+ retval =
+- mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, (__u8 *) &pt, NULL);
++ mtd->write_ecc(mtd, addr, dev->data_bytes_per_chunk,
++ &dummy, data, (__u8 *) packed_tags_ptr, NULL);
+ } else {
+ retval =
+- mtd->write(mtd, addr, dev->totalBytesPerChunk, &dummy,
++ mtd->write(mtd, addr, dev->param.total_bytes_per_chunk, &dummy,
+ data);
+ }
+ #endif
+@@ -95,10 +97,10 @@ int nandmtd2_WriteChunkWithTagsToNAND(ya
+ return YAFFS_FAIL;
+ }
+
+-int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
+- __u8 *data, yaffs_ExtendedTags *tags)
++int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_dev_t *dev, int nand_chunk,
++ __u8 *data, yaffs_ext_tags *tags)
+ {
+- struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ #if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+ struct mtd_oob_ops ops;
+ #endif
+@@ -106,20 +108,23 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y
+ int retval = 0;
+ int localData = 0;
+
+- loff_t addr = ((loff_t) chunkInNAND) * dev->totalBytesPerChunk;
++ loff_t addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+
+ yaffs_PackedTags2 pt;
+
++ int packed_tags_size = dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
++ void * packed_tags_ptr = dev->param.no_tags_ecc ? (void *) &pt.t: (void *)&pt;
++
+ T(YAFFS_TRACE_MTD,
+ (TSTR
+ ("nandmtd2_ReadChunkWithTagsFromNAND chunk %d data %p tags %p"
+- TENDSTR), chunkInNAND, data, tags));
++ TENDSTR), nand_chunk, data, tags));
+
+- if (dev->inbandTags) {
++ if (dev->param.inband_tags) {
+
+ if (!data) {
+ localData = 1;
+- data = yaffs_GetTempBuffer(dev, __LINE__);
++ data = yaffs_get_temp_buffer(dev, __LINE__);
+ }
+
+
+@@ -127,30 +132,30 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y
+
+
+ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+- if (dev->inbandTags || (data && !tags))
+- retval = mtd->read(mtd, addr, dev->totalBytesPerChunk,
++ if (dev->param.inband_tags || (data && !tags))
++ retval = mtd->read(mtd, addr, dev->param.total_bytes_per_chunk,
+ &dummy, data);
+ else if (tags) {
+ ops.mode = MTD_OOB_AUTO;
+- ops.ooblen = sizeof(pt);
+- ops.len = data ? dev->nDataBytesPerChunk : sizeof(pt);
++ ops.ooblen = packed_tags_size;
++ ops.len = data ? dev->data_bytes_per_chunk : packed_tags_size;
+ ops.ooboffs = 0;
+ ops.datbuf = data;
+- ops.oobbuf = dev->spareBuffer;
++ ops.oobbuf = yaffs_dev_to_lc(dev)->spareBuffer;
+ retval = mtd->read_oob(mtd, addr, &ops);
+ }
+ #else
+- if (!dev->inbandTags && data && tags) {
++ if (!dev->param.inband_tags && data && tags) {
+
+- retval = mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
++ retval = mtd->read_ecc(mtd, addr, dev->data_bytes_per_chunk,
+ &dummy, data, dev->spareBuffer,
+ NULL);
+ } else {
+ if (data)
+ retval =
+- mtd->read(mtd, addr, dev->nDataBytesPerChunk, &dummy,
++ mtd->read(mtd, addr, dev->data_bytes_per_chunk, &dummy,
+ data);
+- if (!dev->inbandTags && tags)
++ if (!dev->param.inband_tags && tags)
+ retval =
+ mtd->read_oob(mtd, addr, mtd->oobsize, &dummy,
+ dev->spareBuffer);
+@@ -158,41 +163,47 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y
+ #endif
+
+
+- if (dev->inbandTags) {
++ if (dev->param.inband_tags) {
+ if (tags) {
+ yaffs_PackedTags2TagsPart *pt2tp;
+- pt2tp = (yaffs_PackedTags2TagsPart *)&data[dev->nDataBytesPerChunk];
+- yaffs_UnpackTags2TagsPart(tags, pt2tp);
++ pt2tp = (yaffs_PackedTags2TagsPart *)&data[dev->data_bytes_per_chunk];
++ yaffs_unpack_tags2tags_part(tags, pt2tp);
+ }
+ } else {
+ if (tags) {
+- memcpy(&pt, dev->spareBuffer, sizeof(pt));
+- yaffs_UnpackTags2(tags, &pt);
++ memcpy(packed_tags_ptr, yaffs_dev_to_lc(dev)->spareBuffer, packed_tags_size);
++ yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc);
+ }
+ }
+
+ if (localData)
+- yaffs_ReleaseTempBuffer(dev, data, __LINE__);
++ yaffs_release_temp_buffer(dev, data, __LINE__);
+
+- if (tags && retval == -EBADMSG && tags->eccResult == YAFFS_ECC_RESULT_NO_ERROR)
+- tags->eccResult = YAFFS_ECC_RESULT_UNFIXED;
++ if (tags && retval == -EBADMSG && tags->ecc_result == YAFFS_ECC_RESULT_NO_ERROR) {
++ tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++ dev->n_ecc_unfixed++;
++ }
++ if(tags && retval == -EUCLEAN && tags->ecc_result == YAFFS_ECC_RESULT_NO_ERROR) {
++ tags->ecc_result = YAFFS_ECC_RESULT_FIXED;
++ dev->n_ecc_fixed++;
++ }
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+ }
+
+-int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
++int nandmtd2_MarkNANDBlockBad(struct yaffs_dev_s *dev, int block_no)
+ {
+- struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int retval;
+ T(YAFFS_TRACE_MTD,
+- (TSTR("nandmtd2_MarkNANDBlockBad %d" TENDSTR), blockNo));
++ (TSTR("nandmtd2_MarkNANDBlockBad %d" TENDSTR), block_no));
+
+ retval =
+ mtd->block_markbad(mtd,
+- blockNo * dev->nChunksPerBlock *
+- dev->totalBytesPerChunk);
++ block_no * dev->param.chunks_per_block *
++ dev->param.total_bytes_per_chunk);
+
+ if (retval == 0)
+ return YAFFS_OK;
+@@ -201,41 +212,41 @@ int nandmtd2_MarkNANDBlockBad(struct yaf
+
+ }
+
+-int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState *state, __u32 *sequenceNumber)
++int nandmtd2_QueryNANDBlock(struct yaffs_dev_s *dev, int block_no,
++ yaffs_block_state_t *state, __u32 *seq_number)
+ {
+- struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int retval;
+
+ T(YAFFS_TRACE_MTD,
+- (TSTR("nandmtd2_QueryNANDBlock %d" TENDSTR), blockNo));
++ (TSTR("nandmtd2_QueryNANDBlock %d" TENDSTR), block_no));
+ retval =
+ mtd->block_isbad(mtd,
+- blockNo * dev->nChunksPerBlock *
+- dev->totalBytesPerChunk);
++ block_no * dev->param.chunks_per_block *
++ dev->param.total_bytes_per_chunk);
+
+ if (retval) {
+ T(YAFFS_TRACE_MTD, (TSTR("block is bad" TENDSTR)));
+
+ *state = YAFFS_BLOCK_STATE_DEAD;
+- *sequenceNumber = 0;
++ *seq_number = 0;
+ } else {
+- yaffs_ExtendedTags t;
++ yaffs_ext_tags t;
+ nandmtd2_ReadChunkWithTagsFromNAND(dev,
+- blockNo *
+- dev->nChunksPerBlock, NULL,
++ block_no *
++ dev->param.chunks_per_block, NULL,
+ &t);
+
+- if (t.chunkUsed) {
+- *sequenceNumber = t.sequenceNumber;
++ if (t.chunk_used) {
++ *seq_number = t.seq_number;
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ } else {
+- *sequenceNumber = 0;
++ *seq_number = 0;
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+ }
+ T(YAFFS_TRACE_MTD,
+- (TSTR("block is bad seq %d state %d" TENDSTR), *sequenceNumber,
++ (TSTR("block is bad seq %d state %d" TENDSTR), *seq_number,
+ *state));
+
+ if (retval == 0)
+--- a/fs/yaffs2/yaffs_mtdif2.h
++++ b/fs/yaffs2/yaffs_mtdif2.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -17,13 +17,13 @@
+ #define __YAFFS_MTDIF2_H__
+
+ #include "yaffs_guts.h"
+-int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND,
++int nandmtd2_WriteChunkWithTagsToNAND(yaffs_dev_t *dev, int nand_chunk,
+ const __u8 *data,
+- const yaffs_ExtendedTags *tags);
+-int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
+- __u8 *data, yaffs_ExtendedTags *tags);
+-int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
+-int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState *state, __u32 *sequenceNumber);
++ const yaffs_ext_tags *tags);
++int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_dev_t *dev, int nand_chunk,
++ __u8 *data, yaffs_ext_tags *tags);
++int nandmtd2_MarkNANDBlockBad(struct yaffs_dev_s *dev, int block_no);
++int nandmtd2_QueryNANDBlock(struct yaffs_dev_s *dev, int block_no,
++ yaffs_block_state_t *state, __u32 *seq_number);
+
+ #endif
+--- a/fs/yaffs2/yaffs_mtdif.c
++++ b/fs/yaffs2/yaffs_mtdif.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -11,9 +11,6 @@
+ * published by the Free Software Foundation.
+ */
+
+-const char *yaffs_mtdif_c_version =
+- "$Id: yaffs_mtdif.c,v 1.22 2009-03-06 17:20:51 wookey Exp $";
+-
+ #include "yportenv.h"
+
+
+@@ -24,208 +21,26 @@ const char *yaffs_mtdif_c_version =
+ #include "linux/time.h"
+ #include "linux/mtd/nand.h"
+
+-#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 18))
+-static struct nand_oobinfo yaffs_oobinfo = {
+- .useecc = 1,
+- .eccbytes = 6,
+- .eccpos = {8, 9, 10, 13, 14, 15}
+-};
+-
+-static struct nand_oobinfo yaffs_noeccinfo = {
+- .useecc = 0,
+-};
+-#endif
+-
+-#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+-static inline void translate_spare2oob(const yaffs_Spare *spare, __u8 *oob)
+-{
+- oob[0] = spare->tagByte0;
+- oob[1] = spare->tagByte1;
+- oob[2] = spare->tagByte2;
+- oob[3] = spare->tagByte3;
+- oob[4] = spare->tagByte4;
+- oob[5] = spare->tagByte5 & 0x3f;
+- oob[5] |= spare->blockStatus == 'Y' ? 0 : 0x80;
+- oob[5] |= spare->pageStatus == 0 ? 0 : 0x40;
+- oob[6] = spare->tagByte6;
+- oob[7] = spare->tagByte7;
+-}
+-
+-static inline void translate_oob2spare(yaffs_Spare *spare, __u8 *oob)
+-{
+- struct yaffs_NANDSpare *nspare = (struct yaffs_NANDSpare *)spare;
+- spare->tagByte0 = oob[0];
+- spare->tagByte1 = oob[1];
+- spare->tagByte2 = oob[2];
+- spare->tagByte3 = oob[3];
+- spare->tagByte4 = oob[4];
+- spare->tagByte5 = oob[5] == 0xff ? 0xff : oob[5] & 0x3f;
+- spare->blockStatus = oob[5] & 0x80 ? 0xff : 'Y';
+- spare->pageStatus = oob[5] & 0x40 ? 0xff : 0;
+- spare->ecc1[0] = spare->ecc1[1] = spare->ecc1[2] = 0xff;
+- spare->tagByte6 = oob[6];
+- spare->tagByte7 = oob[7];
+- spare->ecc2[0] = spare->ecc2[1] = spare->ecc2[2] = 0xff;
+-
+- nspare->eccres1 = nspare->eccres2 = 0; /* FIXME */
+-}
+-#endif
+-
+-int nandmtd_WriteChunkToNAND(yaffs_Device *dev, int chunkInNAND,
+- const __u8 *data, const yaffs_Spare *spare)
+-{
+- struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+-#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+- struct mtd_oob_ops ops;
+-#endif
+- size_t dummy;
+- int retval = 0;
+-
+- loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
+-#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+- __u8 spareAsBytes[8]; /* OOB */
+-
+- if (data && !spare)
+- retval = mtd->write(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data);
+- else if (spare) {
+- if (dev->useNANDECC) {
+- translate_spare2oob(spare, spareAsBytes);
+- ops.mode = MTD_OOB_AUTO;
+- ops.ooblen = 8; /* temp hack */
+- } else {
+- ops.mode = MTD_OOB_RAW;
+- ops.ooblen = YAFFS_BYTES_PER_SPARE;
+- }
+- ops.len = data ? dev->nDataBytesPerChunk : ops.ooblen;
+- ops.datbuf = (u8 *)data;
+- ops.ooboffs = 0;
+- ops.oobbuf = spareAsBytes;
+- retval = mtd->write_oob(mtd, addr, &ops);
+- }
+-#else
+- __u8 *spareAsBytes = (__u8 *) spare;
+-
+- if (data && spare) {
+- if (dev->useNANDECC)
+- retval =
+- mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, spareAsBytes,
+- &yaffs_oobinfo);
+- else
+- retval =
+- mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, spareAsBytes,
+- &yaffs_noeccinfo);
+- } else {
+- if (data)
+- retval =
+- mtd->write(mtd, addr, dev->nDataBytesPerChunk, &dummy,
+- data);
+- if (spare)
+- retval =
+- mtd->write_oob(mtd, addr, YAFFS_BYTES_PER_SPARE,
+- &dummy, spareAsBytes);
+- }
+-#endif
+-
+- if (retval == 0)
+- return YAFFS_OK;
+- else
+- return YAFFS_FAIL;
+-}
+-
+-int nandmtd_ReadChunkFromNAND(yaffs_Device *dev, int chunkInNAND, __u8 *data,
+- yaffs_Spare *spare)
+-{
+- struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
+-#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+- struct mtd_oob_ops ops;
+-#endif
+- size_t dummy;
+- int retval = 0;
++#include "yaffs_linux.h"
+
+- loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
+-#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+- __u8 spareAsBytes[8]; /* OOB */
+-
+- if (data && !spare)
+- retval = mtd->read(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data);
+- else if (spare) {
+- if (dev->useNANDECC) {
+- ops.mode = MTD_OOB_AUTO;
+- ops.ooblen = 8; /* temp hack */
+- } else {
+- ops.mode = MTD_OOB_RAW;
+- ops.ooblen = YAFFS_BYTES_PER_SPARE;
+- }
+- ops.len = data ? dev->nDataBytesPerChunk : ops.ooblen;
+- ops.datbuf = data;
+- ops.ooboffs = 0;
+- ops.oobbuf = spareAsBytes;
+- retval = mtd->read_oob(mtd, addr, &ops);
+- if (dev->useNANDECC)
+- translate_oob2spare(spare, spareAsBytes);
+- }
+-#else
+- __u8 *spareAsBytes = (__u8 *) spare;
+-
+- if (data && spare) {
+- if (dev->useNANDECC) {
+- /* Careful, this call adds 2 ints */
+- /* to the end of the spare data. Calling function */
+- /* should allocate enough memory for spare, */
+- /* i.e. [YAFFS_BYTES_PER_SPARE+2*sizeof(int)]. */
+- retval =
+- mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, spareAsBytes,
+- &yaffs_oobinfo);
+- } else {
+- retval =
+- mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
+- &dummy, data, spareAsBytes,
+- &yaffs_noeccinfo);
+- }
+- } else {
+- if (data)
+- retval =
+- mtd->read(mtd, addr, dev->nDataBytesPerChunk, &dummy,
+- data);
+- if (spare)
+- retval =
+- mtd->read_oob(mtd, addr, YAFFS_BYTES_PER_SPARE,
+- &dummy, spareAsBytes);
+- }
+-#endif
+-
+- if (retval == 0)
+- return YAFFS_OK;
+- else
+- return YAFFS_FAIL;
+-}
+-
+-int nandmtd_EraseBlockInNAND(yaffs_Device *dev, int blockNumber)
++int nandmtd_EraseBlockInNAND(yaffs_dev_t *dev, int blockNumber)
+ {
+- struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ __u32 addr =
+- ((loff_t) blockNumber) * dev->nDataBytesPerChunk
+- * dev->nChunksPerBlock;
++ ((loff_t) blockNumber) * dev->param.total_bytes_per_chunk
++ * dev->param.chunks_per_block;
+ struct erase_info ei;
++
+ int retval = 0;
+
+ ei.mtd = mtd;
+ ei.addr = addr;
+- ei.len = dev->nDataBytesPerChunk * dev->nChunksPerBlock;
++ ei.len = dev->param.total_bytes_per_chunk * dev->param.chunks_per_block;
+ ei.time = 1000;
+ ei.retries = 2;
+ ei.callback = NULL;
+ ei.priv = (u_long) dev;
+
+- /* Todo finish off the ei if required */
+-
+- sema_init(&dev->sem, 0);
+-
+ retval = mtd->erase(mtd, &ei);
+
+ if (retval == 0)
+@@ -234,7 +49,7 @@ int nandmtd_EraseBlockInNAND(yaffs_Devic
+ return YAFFS_FAIL;
+ }
+
+-int nandmtd_InitialiseNAND(yaffs_Device *dev)
++int nandmtd_InitialiseNAND(yaffs_dev_t *dev)
+ {
+ return YAFFS_OK;
+ }
+--- a/fs/yaffs2/yaffs_mtdif.h
++++ b/fs/yaffs2/yaffs_mtdif.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -22,11 +22,6 @@
+ extern struct nand_oobinfo yaffs_oobinfo;
+ extern struct nand_oobinfo yaffs_noeccinfo;
+ #endif
+-
+-int nandmtd_WriteChunkToNAND(yaffs_Device *dev, int chunkInNAND,
+- const __u8 *data, const yaffs_Spare *spare);
+-int nandmtd_ReadChunkFromNAND(yaffs_Device *dev, int chunkInNAND, __u8 *data,
+- yaffs_Spare *spare);
+-int nandmtd_EraseBlockInNAND(yaffs_Device *dev, int blockNumber);
+-int nandmtd_InitialiseNAND(yaffs_Device *dev);
++int nandmtd_EraseBlockInNAND(yaffs_dev_t *dev, int blockNumber);
++int nandmtd_InitialiseNAND(yaffs_dev_t *dev);
+ #endif
+--- /dev/null
++++ b/fs/yaffs2/yaffs_nameval.c
+@@ -0,0 +1,197 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * This simple implementation of a name-value store assumes a small number of
++ * values that fit into a small, finite buffer.
++ *
++ * Each attribute is stored as a record:
++ *   sizeof(int) bytes: record size.
++ *   strnlen+1 bytes:   name, NUL terminated.
++ *   nbytes:            value.
++ * The record size field holds the total size of the record.
++ *
++ * This code has not been tested with unicode yet.
++ */
++
++
++#include "yaffs_nameval.h"
++
++#include "yportenv.h"
++
++static int nval_find(const char *xb, int xb_size, const YCHAR *name,
++ int *exist_size)
++{
++ int pos=0;
++ int size;
++
++ memcpy(&size,xb,sizeof(int));
++ while(size > 0 && (size < xb_size) && (pos + size < xb_size)){
++ if(yaffs_strncmp((YCHAR *)(xb+pos+sizeof(int)),name,size) == 0){
++ if(exist_size)
++ *exist_size = size;
++ return pos;
++ }
++ pos += size;
++ if(pos < xb_size -sizeof(int))
++ memcpy(&size,xb + pos,sizeof(int));
++ else
++ size = 0;
++ }
++ if(exist_size)
++ *exist_size = 0;
++ return -1;
++}
++
++static int nval_used(const char *xb, int xb_size)
++{
++ int pos=0;
++ int size;
++
++ memcpy(&size,xb + pos,sizeof(int));
++ while(size > 0 && (size < xb_size) && (pos + size < xb_size)){
++ pos += size;
++ if(pos < xb_size -sizeof(int))
++ memcpy(&size,xb + pos,sizeof(int));
++ else
++ size = 0;
++ }
++ return pos;
++}
++
++int nval_del(char *xb, int xb_size, const YCHAR *name)
++{
++ int pos = nval_find(xb, xb_size, name, NULL);
++ int size;
++
++ if(pos >= 0 && pos < xb_size){
++ /* Find size, shift rest over this record, then zero out the rest of buffer */
++ memcpy(&size,xb+pos,sizeof(int));
++ memcpy(xb + pos, xb + pos + size, xb_size - (pos + size));
++ memset(xb + (xb_size - size),0,size);
++ return 0;
++ } else
++ return -ENODATA;
++}
++
++int nval_set(char *xb, int xb_size, const YCHAR *name, const char *buf, int bsize, int flags)
++{
++ int pos;
++ int namelen = yaffs_strnlen(name,xb_size);
++ int reclen;
++ int size_exist = 0;
++ int space;
++ int start;
++
++ pos = nval_find(xb,xb_size,name, &size_exist);
++
++ if(flags & XATTR_CREATE && pos >= 0)
++ return -EEXIST;
++ if(flags & XATTR_REPLACE && pos < 0)
++ return -ENODATA;
++
++ start = nval_used(xb,xb_size);
++ space = xb_size - start + size_exist;
++
++ reclen = (sizeof(int) + namelen + 1 + bsize);
++
++ if(reclen > space)
++ return -ENOSPC;
++
++ if(pos >= 0){
++ nval_del(xb,xb_size,name);
++ start = nval_used(xb, xb_size);
++ }
++
++ pos = start;
++
++ memcpy(xb + pos,&reclen,sizeof(int));
++ pos +=sizeof(int);
++ yaffs_strncpy((YCHAR *)(xb + pos), name, reclen);
++ pos+= (namelen+1);
++ memcpy(xb + pos,buf,bsize);
++ return 0;
++}
++
++int nval_get(const char *xb, int xb_size, const YCHAR *name, char *buf, int bsize)
++{
++ int pos = nval_find(xb,xb_size,name,NULL);
++ int size;
++
++ if(pos >= 0 && pos< xb_size){
++
++ memcpy(&size,xb +pos,sizeof(int));
++ pos+=sizeof(int); /* advance past record length */
++ size -= sizeof(int);
++
++ /* Advance over name string */
++ while(xb[pos] && size > 0 && pos < xb_size){
++ pos++;
++ size--;
++ }
++		/* Advance over NUL */
++ pos++;
++ size--;
++
++ if(size <= bsize){
++ memcpy(buf,xb + pos,size);
++ return size;
++ }
++
++ }
++ if(pos >= 0)
++ return -ERANGE;
++ else
++ return -ENODATA;
++}
++
++int nval_list(const char *xb, int xb_size, char *buf, int bsize)
++{
++ int pos = 0;
++ int size;
++ int name_len;
++ int ncopied = 0;
++ int filled = 0;
++
++ memcpy(&size,xb + pos,sizeof(int));
++ while(size > sizeof(int) && size <= xb_size && (pos + size) < xb_size && !filled){
++ pos+= sizeof(int);
++ size-=sizeof(int);
++ name_len = yaffs_strnlen((YCHAR *)(xb + pos), size);
++ if(ncopied + name_len + 1 < bsize){
++ memcpy(buf,xb+pos,name_len * sizeof(YCHAR));
++ buf+= name_len;
++ *buf = '\0';
++ buf++;
++ if(sizeof(YCHAR) > 1){
++ *buf = '\0';
++ buf++;
++ }
++ ncopied += (name_len+1);
++ } else
++ filled = 1;
++ pos+=size;
++ if(pos < xb_size -sizeof(int))
++ memcpy(&size,xb + pos,sizeof(int));
++ else
++ size = 0;
++ }
++ return ncopied;
++}
++
++
++int nval_hasvalues(const char *xb, int xb_size)
++{
++ return nval_used(xb, xb_size) > 0;
++}
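
The new yaffs_nameval.c above stores extended attributes as back-to-back records of the form [record size][name, NUL terminated][value]. The following standalone sketch is not part of the patch; the attribute name and value are made up, and YCHAR is assumed to be plain char as in a kernel build. It builds one such record by hand and walks it back the way nval_find()/nval_get() do.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char xb[64];			/* small xattr buffer, as in yaffs_nameval.c */
	const char *name = "user.mode";	/* hypothetical attribute name */
	const char value[] = "0644";	/* hypothetical attribute value */
	int pos = 0, reclen;

	memset(xb, 0, sizeof(xb));

	/* Build one record: total size, then name + NUL, then the value bytes. */
	reclen = sizeof(int) + strlen(name) + 1 + sizeof(value);
	memcpy(xb + pos, &reclen, sizeof(int));
	pos += sizeof(int);
	strcpy(xb + pos, name);
	pos += strlen(name) + 1;
	memcpy(xb + pos, value, sizeof(value));

	/* Walk it back: read the size field, then the name, then the value. */
	memcpy(&reclen, xb, sizeof(int));
	printf("record size %d, name \"%s\", value \"%s\"\n",
	       reclen, xb + sizeof(int),
	       xb + sizeof(int) + strlen(xb + sizeof(int)) + 1);
	return 0;
}
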
+--- /dev/null
++++ b/fs/yaffs2/yaffs_nameval.h
+@@ -0,0 +1,25 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++#ifndef __NAMEVAL_H__
++#define __NAMEVAL_H__
++
++#include "yportenv.h"
++
++int nval_del(char *xb, int xb_size, const YCHAR *name);
++int nval_set(char *xb, int xb_size, const YCHAR *name, const char *buf, int bsize, int flags);
++int nval_get(const char *xb, int xb_size, const YCHAR *name, char *buf, int bsize);
++int nval_list(const char *xb, int xb_size, char *buf, int bsize);
++int nval_hasvalues(const char *xb, int xb_size);
++#endif
+--- a/fs/yaffs2/yaffs_nand.c
++++ b/fs/yaffs2/yaffs_nand.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -11,124 +11,129 @@
+ * published by the Free Software Foundation.
+ */
+
+-const char *yaffs_nand_c_version =
+- "$Id: yaffs_nand.c,v 1.10 2009-03-06 17:20:54 wookey Exp $";
+-
+ #include "yaffs_nand.h"
+ #include "yaffs_tagscompat.h"
+ #include "yaffs_tagsvalidity.h"
+
+ #include "yaffs_getblockinfo.h"
+
+-int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++int yaffs_rd_chunk_tags_nand(yaffs_dev_t *dev, int nand_chunk,
+ __u8 *buffer,
+- yaffs_ExtendedTags *tags)
++ yaffs_ext_tags *tags)
+ {
+ int result;
+- yaffs_ExtendedTags localTags;
++ yaffs_ext_tags localTags;
++
++ int realignedChunkInNAND = nand_chunk - dev->chunk_offset;
+
+- int realignedChunkInNAND = chunkInNAND - dev->chunkOffset;
++ dev->n_page_reads++;
+
+ /* If there are no tags provided, use local tags to get prioritised gc working */
+ if (!tags)
+ tags = &localTags;
+
+- if (dev->readChunkWithTagsFromNAND)
+- result = dev->readChunkWithTagsFromNAND(dev, realignedChunkInNAND, buffer,
++ if (dev->param.read_chunk_tags_fn)
++ result = dev->param.read_chunk_tags_fn(dev, realignedChunkInNAND, buffer,
+ tags);
+ else
+- result = yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(dev,
++ result = yaffs_tags_compat_rd(dev,
+ realignedChunkInNAND,
+ buffer,
+ tags);
+ if (tags &&
+- tags->eccResult > YAFFS_ECC_RESULT_NO_ERROR) {
++ tags->ecc_result > YAFFS_ECC_RESULT_NO_ERROR) {
+
+- yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, chunkInNAND/dev->nChunksPerBlock);
+- yaffs_HandleChunkError(dev, bi);
++ yaffs_block_info_t *bi;
++ bi = yaffs_get_block_info(dev, nand_chunk/dev->param.chunks_per_block);
++ yaffs_handle_chunk_error(dev, bi);
+ }
+
+ return result;
+ }
+
+-int yaffs_WriteChunkWithTagsToNAND(yaffs_Device *dev,
+- int chunkInNAND,
++int yaffs_wr_chunk_tags_nand(yaffs_dev_t *dev,
++ int nand_chunk,
+ const __u8 *buffer,
+- yaffs_ExtendedTags *tags)
++ yaffs_ext_tags *tags)
+ {
+- chunkInNAND -= dev->chunkOffset;
++
++ dev->n_page_writes++;
++
++ nand_chunk -= dev->chunk_offset;
+
+
+ if (tags) {
+- tags->sequenceNumber = dev->sequenceNumber;
+- tags->chunkUsed = 1;
+- if (!yaffs_ValidateTags(tags)) {
++ tags->seq_number = dev->seq_number;
++ tags->chunk_used = 1;
++ if (!yaffs_validate_tags(tags)) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("Writing uninitialised tags" TENDSTR)));
+ YBUG();
+ }
+ T(YAFFS_TRACE_WRITE,
+- (TSTR("Writing chunk %d tags %d %d" TENDSTR), chunkInNAND,
+- tags->objectId, tags->chunkId));
++ (TSTR("Writing chunk %d tags %d %d" TENDSTR), nand_chunk,
++ tags->obj_id, tags->chunk_id));
+ } else {
+ T(YAFFS_TRACE_ERROR, (TSTR("Writing with no tags" TENDSTR)));
+ YBUG();
+ }
+
+- if (dev->writeChunkWithTagsToNAND)
+- return dev->writeChunkWithTagsToNAND(dev, chunkInNAND, buffer,
++ if (dev->param.write_chunk_tags_fn)
++ return dev->param.write_chunk_tags_fn(dev, nand_chunk, buffer,
+ tags);
+ else
+- return yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(dev,
+- chunkInNAND,
++ return yaffs_tags_compat_wr(dev,
++ nand_chunk,
+ buffer,
+ tags);
+ }
+
+-int yaffs_MarkBlockBad(yaffs_Device *dev, int blockNo)
++int yaffs_mark_bad(yaffs_dev_t *dev, int block_no)
+ {
+- blockNo -= dev->blockOffset;
++ block_no -= dev->block_offset;
++
+
+-;
+- if (dev->markNANDBlockBad)
+- return dev->markNANDBlockBad(dev, blockNo);
++ if (dev->param.bad_block_fn)
++ return dev->param.bad_block_fn(dev, block_no);
+ else
+- return yaffs_TagsCompatabilityMarkNANDBlockBad(dev, blockNo);
++ return yaffs_tags_compat_mark_bad(dev, block_no);
+ }
+
+-int yaffs_QueryInitialBlockState(yaffs_Device *dev,
+- int blockNo,
+- yaffs_BlockState *state,
+- __u32 *sequenceNumber)
++int yaffs_query_init_block_state(yaffs_dev_t *dev,
++ int block_no,
++ yaffs_block_state_t *state,
++ __u32 *seq_number)
+ {
+- blockNo -= dev->blockOffset;
++ block_no -= dev->block_offset;
+
+- if (dev->queryNANDBlock)
+- return dev->queryNANDBlock(dev, blockNo, state, sequenceNumber);
++ if (dev->param.query_block_fn)
++ return dev->param.query_block_fn(dev, block_no, state, seq_number);
+ else
+- return yaffs_TagsCompatabilityQueryNANDBlock(dev, blockNo,
++ return yaffs_tags_compat_query_block(dev, block_no,
+ state,
+- sequenceNumber);
++ seq_number);
+ }
+
+
+-int yaffs_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
+- int blockInNAND)
++int yaffs_erase_block(struct yaffs_dev_s *dev,
++ int flash_block)
+ {
+ int result;
+
+- blockInNAND -= dev->blockOffset;
++ flash_block -= dev->block_offset;
+
++ dev->n_erasures++;
+
+- dev->nBlockErasures++;
+- result = dev->eraseBlockInNAND(dev, blockInNAND);
++ result = dev->param.erase_fn(dev, flash_block);
+
+ return result;
+ }
+
+-int yaffs_InitialiseNAND(struct yaffs_DeviceStruct *dev)
++int yaffs_init_nand(struct yaffs_dev_s *dev)
+ {
+- return dev->initialiseNAND(dev);
++ if(dev->param.initialise_flash_fn)
++ return dev->param.initialise_flash_fn(dev);
++ return YAFFS_OK;
+ }
+
+
+--- a/fs/yaffs2/yaffs_nandemul2k.h
++++ b/fs/yaffs2/yaffs_nandemul2k.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -20,18 +20,18 @@
+
+ #include "yaffs_guts.h"
+
+-int nandemul2k_WriteChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, const __u8 *data,
+- const yaffs_ExtendedTags *tags);
+-int nandemul2k_ReadChunkWithTagsFromNAND(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, __u8 *data,
+- yaffs_ExtendedTags *tags);
+-int nandemul2k_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
+-int nandemul2k_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
+- yaffs_BlockState *state, __u32 *sequenceNumber);
+-int nandemul2k_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
+- int blockInNAND);
+-int nandemul2k_InitialiseNAND(struct yaffs_DeviceStruct *dev);
++int nandemul2k_WriteChunkWithTagsToNAND(struct yaffs_dev_s *dev,
++ int nand_chunk, const __u8 *data,
++ const yaffs_ext_tags *tags);
++int nandemul2k_ReadChunkWithTagsFromNAND(struct yaffs_dev_s *dev,
++ int nand_chunk, __u8 *data,
++ yaffs_ext_tags *tags);
++int nandemul2k_MarkNANDBlockBad(struct yaffs_dev_s *dev, int block_no);
++int nandemul2k_QueryNANDBlock(struct yaffs_dev_s *dev, int block_no,
++ yaffs_block_state_t *state, __u32 *seq_number);
++int nandemul2k_EraseBlockInNAND(struct yaffs_dev_s *dev,
++ int flash_block);
++int nandemul2k_InitialiseNAND(struct yaffs_dev_s *dev);
+ int nandemul2k_GetBytesPerChunk(void);
+ int nandemul2k_GetChunksPerBlock(void);
+ int nandemul2k_GetNumberOfBlocks(void);
+--- a/fs/yaffs2/yaffs_nand.h
++++ b/fs/yaffs2/yaffs_nand.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -19,26 +19,26 @@
+
+
+
+-int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++int yaffs_rd_chunk_tags_nand(yaffs_dev_t *dev, int nand_chunk,
+ __u8 *buffer,
+- yaffs_ExtendedTags *tags);
++ yaffs_ext_tags *tags);
+
+-int yaffs_WriteChunkWithTagsToNAND(yaffs_Device *dev,
+- int chunkInNAND,
++int yaffs_wr_chunk_tags_nand(yaffs_dev_t *dev,
++ int nand_chunk,
+ const __u8 *buffer,
+- yaffs_ExtendedTags *tags);
++ yaffs_ext_tags *tags);
+
+-int yaffs_MarkBlockBad(yaffs_Device *dev, int blockNo);
++int yaffs_mark_bad(yaffs_dev_t *dev, int block_no);
+
+-int yaffs_QueryInitialBlockState(yaffs_Device *dev,
+- int blockNo,
+- yaffs_BlockState *state,
+- unsigned *sequenceNumber);
++int yaffs_query_init_block_state(yaffs_dev_t *dev,
++ int block_no,
++ yaffs_block_state_t *state,
++ unsigned *seq_number);
+
+-int yaffs_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
+- int blockInNAND);
++int yaffs_erase_block(struct yaffs_dev_s *dev,
++ int flash_block);
+
+-int yaffs_InitialiseNAND(struct yaffs_DeviceStruct *dev);
++int yaffs_init_nand(struct yaffs_dev_s *dev);
+
+ #endif
+
+--- a/fs/yaffs2/yaffs_packedtags1.c
++++ b/fs/yaffs2/yaffs_packedtags1.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -14,37 +14,37 @@
+ #include "yaffs_packedtags1.h"
+ #include "yportenv.h"
+
+-void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ExtendedTags *t)
++void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ext_tags *t)
+ {
+- pt->chunkId = t->chunkId;
+- pt->serialNumber = t->serialNumber;
+- pt->byteCount = t->byteCount;
+- pt->objectId = t->objectId;
++ pt->chunk_id = t->chunk_id;
++ pt->serial_number = t->serial_number;
++ pt->n_bytes = t->n_bytes;
++ pt->obj_id = t->obj_id;
+ pt->ecc = 0;
+- pt->deleted = (t->chunkDeleted) ? 0 : 1;
++ pt->deleted = (t->is_deleted) ? 0 : 1;
+ pt->unusedStuff = 0;
+ pt->shouldBeFF = 0xFFFFFFFF;
+
+ }
+
+-void yaffs_UnpackTags1(yaffs_ExtendedTags *t, const yaffs_PackedTags1 *pt)
++void yaffs_unpack_tags1(yaffs_ext_tags *t, const yaffs_PackedTags1 *pt)
+ {
+ static const __u8 allFF[] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff };
+
+ if (memcmp(allFF, pt, sizeof(yaffs_PackedTags1))) {
+- t->blockBad = 0;
++ t->block_bad = 0;
+ if (pt->shouldBeFF != 0xFFFFFFFF)
+- t->blockBad = 1;
+- t->chunkUsed = 1;
+- t->objectId = pt->objectId;
+- t->chunkId = pt->chunkId;
+- t->byteCount = pt->byteCount;
+- t->eccResult = YAFFS_ECC_RESULT_NO_ERROR;
+- t->chunkDeleted = (pt->deleted) ? 0 : 1;
+- t->serialNumber = pt->serialNumber;
++ t->block_bad = 1;
++ t->chunk_used = 1;
++ t->obj_id = pt->obj_id;
++ t->chunk_id = pt->chunk_id;
++ t->n_bytes = pt->n_bytes;
++ t->ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++ t->is_deleted = (pt->deleted) ? 0 : 1;
++ t->serial_number = pt->serial_number;
+ } else {
+- memset(t, 0, sizeof(yaffs_ExtendedTags));
++ memset(t, 0, sizeof(yaffs_ext_tags));
+ }
+ }
+--- a/fs/yaffs2/yaffs_packedtags1.h
++++ b/fs/yaffs2/yaffs_packedtags1.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -21,10 +21,10 @@
+ #include "yaffs_guts.h"
+
+ typedef struct {
+- unsigned chunkId:20;
+- unsigned serialNumber:2;
+- unsigned byteCount:10;
+- unsigned objectId:18;
++ unsigned chunk_id:20;
++ unsigned serial_number:2;
++ unsigned n_bytes:10;
++ unsigned obj_id:18;
+ unsigned ecc:12;
+ unsigned deleted:1;
+ unsigned unusedStuff:1;
+@@ -32,6 +32,6 @@ typedef struct {
+
+ } yaffs_PackedTags1;
+
+-void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ExtendedTags *t);
+-void yaffs_UnpackTags1(yaffs_ExtendedTags *t, const yaffs_PackedTags1 *pt);
++void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ext_tags *t);
++void yaffs_unpack_tags1(yaffs_ext_tags *t, const yaffs_PackedTags1 *pt);
+ #endif
+--- a/fs/yaffs2/yaffs_packedtags2.c
++++ b/fs/yaffs2/yaffs_packedtags2.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -13,6 +13,7 @@
+
+ #include "yaffs_packedtags2.h"
+ #include "yportenv.h"
++#include "yaffs_trace.h"
+ #include "yaffs_tagsvalidity.h"
+
+ /* This code packs a set of extended tags into a binary structure for
+@@ -24,7 +25,7 @@
+ * This is defined by having the EXTRA_HEADER_INFO_FLAG set.
+ */
+
+-/* Extra flags applied to chunkId */
++/* Extra flags applied to chunk_id */
+
+ #define EXTRA_HEADER_INFO_FLAG 0x80000000
+ #define EXTRA_SHRINK_FLAG 0x40000000
+@@ -42,53 +43,53 @@ static void yaffs_DumpPackedTags2TagsPar
+ {
+ T(YAFFS_TRACE_MTD,
+ (TSTR("packed tags obj %d chunk %d byte %d seq %d" TENDSTR),
+- ptt->objectId, ptt->chunkId, ptt->byteCount,
+- ptt->sequenceNumber));
++ ptt->obj_id, ptt->chunk_id, ptt->n_bytes,
++ ptt->seq_number));
+ }
+ static void yaffs_DumpPackedTags2(const yaffs_PackedTags2 *pt)
+ {
+ yaffs_DumpPackedTags2TagsPart(&pt->t);
+ }
+
+-static void yaffs_DumpTags2(const yaffs_ExtendedTags *t)
++static void yaffs_DumpTags2(const yaffs_ext_tags *t)
+ {
+ T(YAFFS_TRACE_MTD,
+ (TSTR
+ ("ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d"
+- TENDSTR), t->eccResult, t->blockBad, t->chunkUsed, t->objectId,
+- t->chunkId, t->byteCount, t->chunkDeleted, t->serialNumber,
+- t->sequenceNumber));
++ TENDSTR), t->ecc_result, t->block_bad, t->chunk_used, t->obj_id,
++ t->chunk_id, t->n_bytes, t->is_deleted, t->serial_number,
++ t->seq_number));
+
+ }
+
+ void yaffs_PackTags2TagsPart(yaffs_PackedTags2TagsPart *ptt,
+- const yaffs_ExtendedTags *t)
++ const yaffs_ext_tags *t)
+ {
+- ptt->chunkId = t->chunkId;
+- ptt->sequenceNumber = t->sequenceNumber;
+- ptt->byteCount = t->byteCount;
+- ptt->objectId = t->objectId;
++ ptt->chunk_id = t->chunk_id;
++ ptt->seq_number = t->seq_number;
++ ptt->n_bytes = t->n_bytes;
++ ptt->obj_id = t->obj_id;
+
+- if (t->chunkId == 0 && t->extraHeaderInfoAvailable) {
++ if (t->chunk_id == 0 && t->extra_available) {
+ /* Store the extra header info instead */
+- /* We save the parent object in the chunkId */
+- ptt->chunkId = EXTRA_HEADER_INFO_FLAG
+- | t->extraParentObjectId;
+- if (t->extraIsShrinkHeader)
+- ptt->chunkId |= EXTRA_SHRINK_FLAG;
+- if (t->extraShadows)
+- ptt->chunkId |= EXTRA_SHADOWS_FLAG;
+-
+- ptt->objectId &= ~EXTRA_OBJECT_TYPE_MASK;
+- ptt->objectId |=
+- (t->extraObjectType << EXTRA_OBJECT_TYPE_SHIFT);
+-
+- if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK)
+- ptt->byteCount = t->extraEquivalentObjectId;
+- else if (t->extraObjectType == YAFFS_OBJECT_TYPE_FILE)
+- ptt->byteCount = t->extraFileLength;
++ /* We save the parent object in the chunk_id */
++ ptt->chunk_id = EXTRA_HEADER_INFO_FLAG
++ | t->extra_parent_id;
++ if (t->extra_is_shrink)
++ ptt->chunk_id |= EXTRA_SHRINK_FLAG;
++ if (t->extra_shadows)
++ ptt->chunk_id |= EXTRA_SHADOWS_FLAG;
++
++ ptt->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
++ ptt->obj_id |=
++ (t->extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT);
++
++ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ ptt->n_bytes = t->extra_equiv_id;
++ else if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
++ ptt->n_bytes = t->extra_length;
+ else
+- ptt->byteCount = 0;
++ ptt->n_bytes = 0;
+ }
+
+ yaffs_DumpPackedTags2TagsPart(ptt);
+@@ -96,59 +97,56 @@ void yaffs_PackTags2TagsPart(yaffs_Packe
+ }
+
+
+-void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ExtendedTags *t)
++void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ext_tags *t, int tagsECC)
+ {
+ yaffs_PackTags2TagsPart(&pt->t, t);
+
+-#ifndef YAFFS_IGNORE_TAGS_ECC
+- {
+- yaffs_ECCCalculateOther((unsigned char *)&pt->t,
++ if(tagsECC)
++ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(yaffs_PackedTags2TagsPart),
+ &pt->ecc);
+- }
+-#endif
+ }
+
+
+-void yaffs_UnpackTags2TagsPart(yaffs_ExtendedTags *t,
++void yaffs_unpack_tags2tags_part(yaffs_ext_tags *t,
+ yaffs_PackedTags2TagsPart *ptt)
+ {
+
+- memset(t, 0, sizeof(yaffs_ExtendedTags));
++ memset(t, 0, sizeof(yaffs_ext_tags));
+
+- yaffs_InitialiseTags(t);
++ yaffs_init_tags(t);
+
+- if (ptt->sequenceNumber != 0xFFFFFFFF) {
+- t->blockBad = 0;
+- t->chunkUsed = 1;
+- t->objectId = ptt->objectId;
+- t->chunkId = ptt->chunkId;
+- t->byteCount = ptt->byteCount;
+- t->chunkDeleted = 0;
+- t->serialNumber = 0;
+- t->sequenceNumber = ptt->sequenceNumber;
++ if (ptt->seq_number != 0xFFFFFFFF) {
++ t->block_bad = 0;
++ t->chunk_used = 1;
++ t->obj_id = ptt->obj_id;
++ t->chunk_id = ptt->chunk_id;
++ t->n_bytes = ptt->n_bytes;
++ t->is_deleted = 0;
++ t->serial_number = 0;
++ t->seq_number = ptt->seq_number;
+
+ /* Do extra header info stuff */
+
+- if (ptt->chunkId & EXTRA_HEADER_INFO_FLAG) {
+- t->chunkId = 0;
+- t->byteCount = 0;
+-
+- t->extraHeaderInfoAvailable = 1;
+- t->extraParentObjectId =
+- ptt->chunkId & (~(ALL_EXTRA_FLAGS));
+- t->extraIsShrinkHeader =
+- (ptt->chunkId & EXTRA_SHRINK_FLAG) ? 1 : 0;
+- t->extraShadows =
+- (ptt->chunkId & EXTRA_SHADOWS_FLAG) ? 1 : 0;
+- t->extraObjectType =
+- ptt->objectId >> EXTRA_OBJECT_TYPE_SHIFT;
+- t->objectId &= ~EXTRA_OBJECT_TYPE_MASK;
++ if (ptt->chunk_id & EXTRA_HEADER_INFO_FLAG) {
++ t->chunk_id = 0;
++ t->n_bytes = 0;
++
++ t->extra_available = 1;
++ t->extra_parent_id =
++ ptt->chunk_id & (~(ALL_EXTRA_FLAGS));
++ t->extra_is_shrink =
++ (ptt->chunk_id & EXTRA_SHRINK_FLAG) ? 1 : 0;
++ t->extra_shadows =
++ (ptt->chunk_id & EXTRA_SHADOWS_FLAG) ? 1 : 0;
++ t->extra_obj_type =
++ ptt->obj_id >> EXTRA_OBJECT_TYPE_SHIFT;
++ t->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+
+- if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK)
+- t->extraEquivalentObjectId = ptt->byteCount;
++ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ t->extra_equiv_id = ptt->n_bytes;
+ else
+- t->extraFileLength = ptt->byteCount;
++ t->extra_length = ptt->n_bytes;
+ }
+ }
+
+@@ -158,49 +156,43 @@ void yaffs_UnpackTags2TagsPart(yaffs_Ext
+ }
+
+
+-void yaffs_UnpackTags2(yaffs_ExtendedTags *t, yaffs_PackedTags2 *pt)
++void yaffs_unpack_tags2(yaffs_ext_tags *t, yaffs_PackedTags2 *pt, int tagsECC)
+ {
+
+- yaffs_ECCResult eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++ yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+- if (pt->t.sequenceNumber != 0xFFFFFFFF) {
+- /* Page is in use */
+-#ifndef YAFFS_IGNORE_TAGS_ECC
+- {
+- yaffs_ECCOther ecc;
+- int result;
+- yaffs_ECCCalculateOther((unsigned char *)&pt->t,
+- sizeof
+- (yaffs_PackedTags2TagsPart),
+- &ecc);
+- result =
+- yaffs_ECCCorrectOther((unsigned char *)&pt->t,
+- sizeof
+- (yaffs_PackedTags2TagsPart),
+- &pt->ecc, &ecc);
+- switch (result) {
++ if (pt->t.seq_number != 0xFFFFFFFF &&
++ tagsECC){
++ /* Chunk is in use and we need to do ECC */
++
++ yaffs_ECCOther ecc;
++ int result;
++ yaffs_ecc_calc_other((unsigned char *)&pt->t,
++ sizeof(yaffs_PackedTags2TagsPart),
++ &ecc);
++ result = yaffs_ecc_correct_other((unsigned char *)&pt->t,
++ sizeof(yaffs_PackedTags2TagsPart),
++ &pt->ecc, &ecc);
++ switch (result) {
+ case 0:
+- eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++ ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ break;
+ case 1:
+- eccResult = YAFFS_ECC_RESULT_FIXED;
++ ecc_result = YAFFS_ECC_RESULT_FIXED;
+ break;
+ case -1:
+- eccResult = YAFFS_ECC_RESULT_UNFIXED;
++ ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ break;
+ default:
+- eccResult = YAFFS_ECC_RESULT_UNKNOWN;
+- }
++ ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+ }
+-#endif
+ }
+
+- yaffs_UnpackTags2TagsPart(t, &pt->t);
++ yaffs_unpack_tags2tags_part(t, &pt->t);
+
+- t->eccResult = eccResult;
++ t->ecc_result = ecc_result;
+
+ yaffs_DumpPackedTags2(pt);
+ yaffs_DumpTags2(t);
+-
+ }
+
+--- a/fs/yaffs2/yaffs_packedtags2.h
++++ b/fs/yaffs2/yaffs_packedtags2.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -22,10 +22,10 @@
+ #include "yaffs_ecc.h"
+
+ typedef struct {
+- unsigned sequenceNumber;
+- unsigned objectId;
+- unsigned chunkId;
+- unsigned byteCount;
++ unsigned seq_number;
++ unsigned obj_id;
++ unsigned chunk_id;
++ unsigned n_bytes;
+ } yaffs_PackedTags2TagsPart;
+
+ typedef struct {
+@@ -34,10 +34,10 @@ typedef struct {
+ } yaffs_PackedTags2;
+
+ /* Full packed tags with ECC, used for oob tags */
+-void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ExtendedTags *t);
+-void yaffs_UnpackTags2(yaffs_ExtendedTags *t, yaffs_PackedTags2 *pt);
++void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ext_tags *t, int tagsECC);
++void yaffs_unpack_tags2(yaffs_ext_tags *t, yaffs_PackedTags2 *pt, int tagsECC);
+
+ /* Only the tags part (no ECC), for use with inband tags */
+-void yaffs_PackTags2TagsPart(yaffs_PackedTags2TagsPart *pt, const yaffs_ExtendedTags *t);
+-void yaffs_UnpackTags2TagsPart(yaffs_ExtendedTags *t, yaffs_PackedTags2TagsPart *pt);
++void yaffs_PackTags2TagsPart(yaffs_PackedTags2TagsPart *pt, const yaffs_ext_tags *t);
++void yaffs_unpack_tags2tags_part(yaffs_ext_tags *t, yaffs_PackedTags2TagsPart *pt);
+ #endif
+--- a/fs/yaffs2/yaffs_qsort.h
++++ b/fs/yaffs2/yaffs_qsort.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -17,7 +17,18 @@
+ #ifndef __YAFFS_QSORT_H__
+ #define __YAFFS_QSORT_H__
+
++#ifdef __KERNEL__
++#include <linux/sort.h>
++
++extern void yaffs_qsort(void *const base, size_t total_elems, size_t size,
++ int (*cmp)(const void *, const void *)){
++ sort(base, total_elems, size, cmp, NULL);
++}
++
++#else
++
+ extern void yaffs_qsort(void *const base, size_t total_elems, size_t size,
+ int (*cmp)(const void *, const void *));
+
+ #endif
++#endif
+--- a/fs/yaffs2/yaffs_tagscompat.c
++++ b/fs/yaffs2/yaffs_tagscompat.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -15,19 +15,20 @@
+ #include "yaffs_tagscompat.h"
+ #include "yaffs_ecc.h"
+ #include "yaffs_getblockinfo.h"
++#include "yaffs_trace.h"
+
+-static void yaffs_HandleReadDataError(yaffs_Device *dev, int chunkInNAND);
++static void yaffs_handle_rd_data_error(yaffs_dev_t *dev, int nand_chunk);
+ #ifdef NOTYET
+-static void yaffs_CheckWrittenBlock(yaffs_Device *dev, int chunkInNAND);
+-static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++static void yaffs_check_written_block(yaffs_dev_t *dev, int nand_chunk);
++static void yaffs_handle_chunk_wr_ok(yaffs_dev_t *dev, int nand_chunk,
+ const __u8 *data,
+- const yaffs_Spare *spare);
+-static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
+- const yaffs_Spare *spare);
+-static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND);
++ const yaffs_spare *spare);
++static void yaffs_handle_chunk_update(yaffs_dev_t *dev, int nand_chunk,
++ const yaffs_spare *spare);
++static void yaffs_handle_chunk_wr_error(yaffs_dev_t *dev, int nand_chunk);
+ #endif
+
+-static const char yaffs_countBitsTable[256] = {
++static const char yaffs_count_bits_table[256] = {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+@@ -46,26 +47,26 @@ static const char yaffs_countBitsTable[2
+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
+ };
+
+-int yaffs_CountBits(__u8 x)
++int yaffs_count_bits(__u8 x)
+ {
+ int retVal;
+- retVal = yaffs_countBitsTable[x];
++ retVal = yaffs_count_bits_table[x];
+ return retVal;
+ }
+
+ /********** Tags ECC calculations *********/
+
+-void yaffs_CalcECC(const __u8 *data, yaffs_Spare *spare)
++void yaffs_calc_ecc(const __u8 *data, yaffs_spare *spare)
+ {
+- yaffs_ECCCalculate(data, spare->ecc1);
+- yaffs_ECCCalculate(&data[256], spare->ecc2);
++ yaffs_ecc_cacl(data, spare->ecc1);
++ yaffs_ecc_cacl(&data[256], spare->ecc2);
+ }
+
+-void yaffs_CalcTagsECC(yaffs_Tags *tags)
++void yaffs_calc_tags_ecc(yaffs_tags_t *tags)
+ {
+ /* Calculate an ecc */
+
+- unsigned char *b = ((yaffs_TagsUnion *) tags)->asBytes;
++ unsigned char *b = ((yaffs_tags_union_t *) tags)->as_bytes;
+ unsigned i, j;
+ unsigned ecc = 0;
+ unsigned bit = 0;
+@@ -84,24 +85,24 @@ void yaffs_CalcTagsECC(yaffs_Tags *tags)
+
+ }
+
+-int yaffs_CheckECCOnTags(yaffs_Tags *tags)
++int yaffs_check_tags_ecc(yaffs_tags_t *tags)
+ {
+ unsigned ecc = tags->ecc;
+
+- yaffs_CalcTagsECC(tags);
++ yaffs_calc_tags_ecc(tags);
+
+ ecc ^= tags->ecc;
+
+ if (ecc && ecc <= 64) {
+ /* TODO: Handle the failure better. Retire? */
+- unsigned char *b = ((yaffs_TagsUnion *) tags)->asBytes;
++ unsigned char *b = ((yaffs_tags_union_t *) tags)->as_bytes;
+
+ ecc--;
+
+ b[ecc / 8] ^= (1 << (ecc & 7));
+
+ /* Now recvalc the ecc */
+- yaffs_CalcTagsECC(tags);
++ yaffs_calc_tags_ecc(tags);
+
+ return 1; /* recovered error */
+ } else if (ecc) {
+@@ -115,76 +116,73 @@ int yaffs_CheckECCOnTags(yaffs_Tags *tag
+
+ /********** Tags **********/
+
+-static void yaffs_LoadTagsIntoSpare(yaffs_Spare *sparePtr,
+- yaffs_Tags *tagsPtr)
++static void yaffs_load_tags_to_spare(yaffs_spare *sparePtr,
++ yaffs_tags_t *tagsPtr)
+ {
+- yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr;
++ yaffs_tags_union_t *tu = (yaffs_tags_union_t *) tagsPtr;
+
+- yaffs_CalcTagsECC(tagsPtr);
++ yaffs_calc_tags_ecc(tagsPtr);
+
+- sparePtr->tagByte0 = tu->asBytes[0];
+- sparePtr->tagByte1 = tu->asBytes[1];
+- sparePtr->tagByte2 = tu->asBytes[2];
+- sparePtr->tagByte3 = tu->asBytes[3];
+- sparePtr->tagByte4 = tu->asBytes[4];
+- sparePtr->tagByte5 = tu->asBytes[5];
+- sparePtr->tagByte6 = tu->asBytes[6];
+- sparePtr->tagByte7 = tu->asBytes[7];
++ sparePtr->tb0 = tu->as_bytes[0];
++ sparePtr->tb1 = tu->as_bytes[1];
++ sparePtr->tb2 = tu->as_bytes[2];
++ sparePtr->tb3 = tu->as_bytes[3];
++ sparePtr->tb4 = tu->as_bytes[4];
++ sparePtr->tb5 = tu->as_bytes[5];
++ sparePtr->tb6 = tu->as_bytes[6];
++ sparePtr->tb7 = tu->as_bytes[7];
+ }
+
+-static void yaffs_GetTagsFromSpare(yaffs_Device *dev, yaffs_Spare *sparePtr,
+- yaffs_Tags *tagsPtr)
++static void yaffs_get_tags_from_spare(yaffs_dev_t *dev, yaffs_spare *sparePtr,
++ yaffs_tags_t *tagsPtr)
+ {
+- yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr;
++ yaffs_tags_union_t *tu = (yaffs_tags_union_t *) tagsPtr;
+ int result;
+
+- tu->asBytes[0] = sparePtr->tagByte0;
+- tu->asBytes[1] = sparePtr->tagByte1;
+- tu->asBytes[2] = sparePtr->tagByte2;
+- tu->asBytes[3] = sparePtr->tagByte3;
+- tu->asBytes[4] = sparePtr->tagByte4;
+- tu->asBytes[5] = sparePtr->tagByte5;
+- tu->asBytes[6] = sparePtr->tagByte6;
+- tu->asBytes[7] = sparePtr->tagByte7;
++ tu->as_bytes[0] = sparePtr->tb0;
++ tu->as_bytes[1] = sparePtr->tb1;
++ tu->as_bytes[2] = sparePtr->tb2;
++ tu->as_bytes[3] = sparePtr->tb3;
++ tu->as_bytes[4] = sparePtr->tb4;
++ tu->as_bytes[5] = sparePtr->tb5;
++ tu->as_bytes[6] = sparePtr->tb6;
++ tu->as_bytes[7] = sparePtr->tb7;
+
+- result = yaffs_CheckECCOnTags(tagsPtr);
++ result = yaffs_check_tags_ecc(tagsPtr);
+ if (result > 0)
+- dev->tagsEccFixed++;
++ dev->n_tags_ecc_fixed++;
+ else if (result < 0)
+- dev->tagsEccUnfixed++;
++ dev->n_tags_ecc_unfixed++;
+ }
+
+-static void yaffs_SpareInitialise(yaffs_Spare *spare)
++static void yaffs_spare_init(yaffs_spare *spare)
+ {
+- memset(spare, 0xFF, sizeof(yaffs_Spare));
++ memset(spare, 0xFF, sizeof(yaffs_spare));
+ }
+
+-static int yaffs_WriteChunkToNAND(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND, const __u8 *data,
+- yaffs_Spare *spare)
++static int yaffs_wr_nand(struct yaffs_dev_s *dev,
++ int nand_chunk, const __u8 *data,
++ yaffs_spare *spare)
+ {
+- if (chunkInNAND < dev->startBlock * dev->nChunksPerBlock) {
++ if (nand_chunk < dev->param.start_block * dev->param.chunks_per_block) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR("**>> yaffs chunk %d is not valid" TENDSTR),
+- chunkInNAND));
++ nand_chunk));
+ return YAFFS_FAIL;
+ }
+
+- dev->nPageWrites++;
+- return dev->writeChunkToNAND(dev, chunkInNAND, data, spare);
++ return dev->param.write_chunk_fn(dev, nand_chunk, data, spare);
+ }
+
+-static int yaffs_ReadChunkFromNAND(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND,
++static int yaffs_rd_chunk_nand(struct yaffs_dev_s *dev,
++ int nand_chunk,
+ __u8 *data,
+- yaffs_Spare *spare,
+- yaffs_ECCResult *eccResult,
++ yaffs_spare *spare,
++ yaffs_ecc_result *ecc_result,
+ int doErrorCorrection)
+ {
+ int retVal;
+- yaffs_Spare localSpare;
+-
+- dev->nPageReads++;
++ yaffs_spare localSpare;
+
+ if (!spare && data) {
+ /* If we don't have a real spare, then we use a local one. */
+@@ -192,107 +190,107 @@ static int yaffs_ReadChunkFromNAND(struc
+ spare = &localSpare;
+ }
+
+- if (!dev->useNANDECC) {
+- retVal = dev->readChunkFromNAND(dev, chunkInNAND, data, spare);
++ if (!dev->param.use_nand_ecc) {
++ retVal = dev->param.read_chunk_fn(dev, nand_chunk, data, spare);
+ if (data && doErrorCorrection) {
+ /* Do ECC correction */
+ /* Todo handle any errors */
+- int eccResult1, eccResult2;
++ int ecc_result1, ecc_result2;
+ __u8 calcEcc[3];
+
+- yaffs_ECCCalculate(data, calcEcc);
+- eccResult1 =
+- yaffs_ECCCorrect(data, spare->ecc1, calcEcc);
+- yaffs_ECCCalculate(&data[256], calcEcc);
+- eccResult2 =
+- yaffs_ECCCorrect(&data[256], spare->ecc2, calcEcc);
++ yaffs_ecc_cacl(data, calcEcc);
++ ecc_result1 =
++ yaffs_ecc_correct(data, spare->ecc1, calcEcc);
++ yaffs_ecc_cacl(&data[256], calcEcc);
++ ecc_result2 =
++ yaffs_ecc_correct(&data[256], spare->ecc2, calcEcc);
+
+- if (eccResult1 > 0) {
++ if (ecc_result1 > 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>yaffs ecc error fix performed on chunk %d:0"
+- TENDSTR), chunkInNAND));
+- dev->eccFixed++;
+- } else if (eccResult1 < 0) {
++ TENDSTR), nand_chunk));
++ dev->n_ecc_fixed++;
++ } else if (ecc_result1 < 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>yaffs ecc error unfixed on chunk %d:0"
+- TENDSTR), chunkInNAND));
+- dev->eccUnfixed++;
++ TENDSTR), nand_chunk));
++ dev->n_ecc_unfixed++;
+ }
+
+- if (eccResult2 > 0) {
++ if (ecc_result2 > 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>yaffs ecc error fix performed on chunk %d:1"
+- TENDSTR), chunkInNAND));
+- dev->eccFixed++;
+- } else if (eccResult2 < 0) {
++ TENDSTR), nand_chunk));
++ dev->n_ecc_fixed++;
++ } else if (ecc_result2 < 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>yaffs ecc error unfixed on chunk %d:1"
+- TENDSTR), chunkInNAND));
+- dev->eccUnfixed++;
++ TENDSTR), nand_chunk));
++ dev->n_ecc_unfixed++;
+ }
+
+- if (eccResult1 || eccResult2) {
++ if (ecc_result1 || ecc_result2) {
+ /* We had a data problem on this page */
+- yaffs_HandleReadDataError(dev, chunkInNAND);
++ yaffs_handle_rd_data_error(dev, nand_chunk);
+ }
+
+- if (eccResult1 < 0 || eccResult2 < 0)
+- *eccResult = YAFFS_ECC_RESULT_UNFIXED;
+- else if (eccResult1 > 0 || eccResult2 > 0)
+- *eccResult = YAFFS_ECC_RESULT_FIXED;
++ if (ecc_result1 < 0 || ecc_result2 < 0)
++ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++ else if (ecc_result1 > 0 || ecc_result2 > 0)
++ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+- *eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ }
+ } else {
+ /* Must allocate enough memory for spare+2*sizeof(int) */
+ /* for ecc results from device. */
+- struct yaffs_NANDSpare nspare;
++ struct yaffs_nand_spare nspare;
+
+ memset(&nspare, 0, sizeof(nspare));
+
+- retVal = dev->readChunkFromNAND(dev, chunkInNAND, data,
+- (yaffs_Spare *) &nspare);
+- memcpy(spare, &nspare, sizeof(yaffs_Spare));
++ retVal = dev->param.read_chunk_fn(dev, nand_chunk, data,
++ (yaffs_spare *) &nspare);
++ memcpy(spare, &nspare, sizeof(yaffs_spare));
+ if (data && doErrorCorrection) {
+ if (nspare.eccres1 > 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>mtd ecc error fix performed on chunk %d:0"
+- TENDSTR), chunkInNAND));
++ TENDSTR), nand_chunk));
+ } else if (nspare.eccres1 < 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>mtd ecc error unfixed on chunk %d:0"
+- TENDSTR), chunkInNAND));
++ TENDSTR), nand_chunk));
+ }
+
+ if (nspare.eccres2 > 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>mtd ecc error fix performed on chunk %d:1"
+- TENDSTR), chunkInNAND));
++ TENDSTR), nand_chunk));
+ } else if (nspare.eccres2 < 0) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>>mtd ecc error unfixed on chunk %d:1"
+- TENDSTR), chunkInNAND));
++ TENDSTR), nand_chunk));
+ }
+
+ if (nspare.eccres1 || nspare.eccres2) {
+ /* We had a data problem on this page */
+- yaffs_HandleReadDataError(dev, chunkInNAND);
++ yaffs_handle_rd_data_error(dev, nand_chunk);
+ }
+
+ if (nspare.eccres1 < 0 || nspare.eccres2 < 0)
+- *eccResult = YAFFS_ECC_RESULT_UNFIXED;
++ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ else if (nspare.eccres1 > 0 || nspare.eccres2 > 0)
+- *eccResult = YAFFS_ECC_RESULT_FIXED;
++ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+- *eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ }
+ }
+@@ -300,17 +298,17 @@ static int yaffs_ReadChunkFromNAND(struc
+ }
+
+ #ifdef NOTYET
+-static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
+- int chunkInNAND)
++static int yaffs_check_chunk_erased(struct yaffs_dev_s *dev,
++ int nand_chunk)
+ {
+ static int init;
+ static __u8 cmpbuf[YAFFS_BYTES_PER_CHUNK];
+ static __u8 data[YAFFS_BYTES_PER_CHUNK];
+ /* Might as well always allocate the larger size for */
+- /* dev->useNANDECC == true; */
+- static __u8 spare[sizeof(struct yaffs_NANDSpare)];
++ /* dev->param.use_nand_ecc == true; */
++ static __u8 spare[sizeof(struct yaffs_nand_spare)];
+
+- dev->readChunkFromNAND(dev, chunkInNAND, data, (yaffs_Spare *) spare);
++ dev->param.read_chunk_fn(dev, nand_chunk, data, (yaffs_spare *) spare);
+
+ if (!init) {
+ memset(cmpbuf, 0xff, YAFFS_BYTES_PER_CHUNK);
+@@ -331,14 +329,14 @@ static int yaffs_CheckChunkErased(struct
+ * Functions for robustisizing
+ */
+
+-static void yaffs_HandleReadDataError(yaffs_Device *dev, int chunkInNAND)
++static void yaffs_handle_rd_data_error(yaffs_dev_t *dev, int nand_chunk)
+ {
+- int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
++ int flash_block = nand_chunk / dev->param.chunks_per_block;
+
+ /* Mark the block for retirement */
+- yaffs_GetBlockInfo(dev, blockInNAND + dev->blockOffset)->needsRetiring = 1;
++ yaffs_get_block_info(dev, flash_block + dev->block_offset)->needs_retiring = 1;
+ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+- (TSTR("**>>Block %d marked for retirement" TENDSTR), blockInNAND));
++ (TSTR("**>>Block %d marked for retirement" TENDSTR), flash_block));
+
+ /* TODO:
+ * Just do a garbage collection on the affected block
+@@ -348,44 +346,44 @@ static void yaffs_HandleReadDataError(ya
+ }
+
+ #ifdef NOTYET
+-static void yaffs_CheckWrittenBlock(yaffs_Device *dev, int chunkInNAND)
++static void yaffs_check_written_block(yaffs_dev_t *dev, int nand_chunk)
+ {
+ }
+
+-static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++static void yaffs_handle_chunk_wr_ok(yaffs_dev_t *dev, int nand_chunk,
+ const __u8 *data,
+- const yaffs_Spare *spare)
++ const yaffs_spare *spare)
+ {
+ }
+
+-static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
+- const yaffs_Spare *spare)
++static void yaffs_handle_chunk_update(yaffs_dev_t *dev, int nand_chunk,
++ const yaffs_spare *spare)
+ {
+ }
+
+-static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND)
++static void yaffs_handle_chunk_wr_error(yaffs_dev_t *dev, int nand_chunk)
+ {
+- int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
++ int flash_block = nand_chunk / dev->param.chunks_per_block;
+
+ /* Mark the block for retirement */
+- yaffs_GetBlockInfo(dev, blockInNAND)->needsRetiring = 1;
++ yaffs_get_block_info(dev, flash_block)->needs_retiring = 1;
+ /* Delete the chunk */
+- yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
++ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ }
+
+-static int yaffs_VerifyCompare(const __u8 *d0, const __u8 *d1,
+- const yaffs_Spare *s0, const yaffs_Spare *s1)
++static int yaffs_verify_cmp(const __u8 *d0, const __u8 *d1,
++ const yaffs_spare *s0, const yaffs_spare *s1)
+ {
+
+ if (memcmp(d0, d1, YAFFS_BYTES_PER_CHUNK) != 0 ||
+- s0->tagByte0 != s1->tagByte0 ||
+- s0->tagByte1 != s1->tagByte1 ||
+- s0->tagByte2 != s1->tagByte2 ||
+- s0->tagByte3 != s1->tagByte3 ||
+- s0->tagByte4 != s1->tagByte4 ||
+- s0->tagByte5 != s1->tagByte5 ||
+- s0->tagByte6 != s1->tagByte6 ||
+- s0->tagByte7 != s1->tagByte7 ||
++ s0->tb0 != s1->tb0 ||
++ s0->tb1 != s1->tb1 ||
++ s0->tb2 != s1->tb2 ||
++ s0->tb3 != s1->tb3 ||
++ s0->tb4 != s1->tb4 ||
++ s0->tb5 != s1->tb5 ||
++ s0->tb6 != s1->tb6 ||
++ s0->tb7 != s1->tb7 ||
+ s0->ecc1[0] != s1->ecc1[0] ||
+ s0->ecc1[1] != s1->ecc1[1] ||
+ s0->ecc1[2] != s1->ecc1[2] ||
+@@ -398,53 +396,53 @@ static int yaffs_VerifyCompare(const __u
+ }
+ #endif /* NOTYET */
+
+-int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device *dev,
+- int chunkInNAND,
++int yaffs_tags_compat_wr(yaffs_dev_t *dev,
++ int nand_chunk,
+ const __u8 *data,
+- const yaffs_ExtendedTags *eTags)
++ const yaffs_ext_tags *eTags)
+ {
+- yaffs_Spare spare;
+- yaffs_Tags tags;
++ yaffs_spare spare;
++ yaffs_tags_t tags;
+
+- yaffs_SpareInitialise(&spare);
++ yaffs_spare_init(&spare);
+
+- if (eTags->chunkDeleted)
+- spare.pageStatus = 0;
++ if (eTags->is_deleted)
++ spare.page_status = 0;
+ else {
+- tags.objectId = eTags->objectId;
+- tags.chunkId = eTags->chunkId;
++ tags.obj_id = eTags->obj_id;
++ tags.chunk_id = eTags->chunk_id;
+
+- tags.byteCountLSB = eTags->byteCount & 0x3ff;
++ tags.n_bytes_lsb = eTags->n_bytes & 0x3ff;
+
+- if (dev->nDataBytesPerChunk >= 1024)
+- tags.byteCountMSB = (eTags->byteCount >> 10) & 3;
++ if (dev->data_bytes_per_chunk >= 1024)
++ tags.n_bytes_msb = (eTags->n_bytes >> 10) & 3;
+ else
+- tags.byteCountMSB = 3;
++ tags.n_bytes_msb = 3;
+
+
+- tags.serialNumber = eTags->serialNumber;
++ tags.serial_number = eTags->serial_number;
+
+- if (!dev->useNANDECC && data)
+- yaffs_CalcECC(data, &spare);
++ if (!dev->param.use_nand_ecc && data)
++ yaffs_calc_ecc(data, &spare);
+
+- yaffs_LoadTagsIntoSpare(&spare, &tags);
++ yaffs_load_tags_to_spare(&spare, &tags);
+
+ }
+
+- return yaffs_WriteChunkToNAND(dev, chunkInNAND, data, &spare);
++ return yaffs_wr_nand(dev, nand_chunk, data, &spare);
+ }
+
+-int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device *dev,
+- int chunkInNAND,
++int yaffs_tags_compat_rd(yaffs_dev_t *dev,
++ int nand_chunk,
+ __u8 *data,
+- yaffs_ExtendedTags *eTags)
++ yaffs_ext_tags *eTags)
+ {
+
+- yaffs_Spare spare;
+- yaffs_Tags tags;
+- yaffs_ECCResult eccResult = YAFFS_ECC_RESULT_UNKNOWN;
++ yaffs_spare spare;
++ yaffs_tags_t tags;
++ yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+
+- static yaffs_Spare spareFF;
++ static yaffs_spare spareFF;
+ static int init;
+
+ if (!init) {
+@@ -452,33 +450,33 @@ int yaffs_TagsCompatabilityReadChunkWith
+ init = 1;
+ }
+
+- if (yaffs_ReadChunkFromNAND
+- (dev, chunkInNAND, data, &spare, &eccResult, 1)) {
++ if (yaffs_rd_chunk_nand
++ (dev, nand_chunk, data, &spare, &ecc_result, 1)) {
+ /* eTags may be NULL */
+ if (eTags) {
+
+ int deleted =
+- (yaffs_CountBits(spare.pageStatus) < 7) ? 1 : 0;
++ (yaffs_count_bits(spare.page_status) < 7) ? 1 : 0;
+
+- eTags->chunkDeleted = deleted;
+- eTags->eccResult = eccResult;
+- eTags->blockBad = 0; /* We're reading it */
++ eTags->is_deleted = deleted;
++ eTags->ecc_result = ecc_result;
++ eTags->block_bad = 0; /* We're reading it */
+ /* therefore it is not a bad block */
+- eTags->chunkUsed =
++ eTags->chunk_used =
+ (memcmp(&spareFF, &spare, sizeof(spareFF)) !=
+ 0) ? 1 : 0;
+
+- if (eTags->chunkUsed) {
+- yaffs_GetTagsFromSpare(dev, &spare, &tags);
++ if (eTags->chunk_used) {
++ yaffs_get_tags_from_spare(dev, &spare, &tags);
+
+- eTags->objectId = tags.objectId;
+- eTags->chunkId = tags.chunkId;
+- eTags->byteCount = tags.byteCountLSB;
++ eTags->obj_id = tags.obj_id;
++ eTags->chunk_id = tags.chunk_id;
++ eTags->n_bytes = tags.n_bytes_lsb;
+
+- if (dev->nDataBytesPerChunk >= 1024)
+- eTags->byteCount |= (((unsigned) tags.byteCountMSB) << 10);
++ if (dev->data_bytes_per_chunk >= 1024)
++ eTags->n_bytes |= (((unsigned) tags.n_bytes_msb) << 10);
+
+- eTags->serialNumber = tags.serialNumber;
++ eTags->serial_number = tags.serial_number;
+ }
+ }
+
+@@ -488,49 +486,49 @@ int yaffs_TagsCompatabilityReadChunkWith
+ }
+ }
+
+-int yaffs_TagsCompatabilityMarkNANDBlockBad(struct yaffs_DeviceStruct *dev,
+- int blockInNAND)
++int yaffs_tags_compat_mark_bad(struct yaffs_dev_s *dev,
++ int flash_block)
+ {
+
+- yaffs_Spare spare;
++ yaffs_spare spare;
+
+- memset(&spare, 0xff, sizeof(yaffs_Spare));
++ memset(&spare, 0xff, sizeof(yaffs_spare));
+
+- spare.blockStatus = 'Y';
++ spare.block_status = 'Y';
+
+- yaffs_WriteChunkToNAND(dev, blockInNAND * dev->nChunksPerBlock, NULL,
++ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL,
+ &spare);
+- yaffs_WriteChunkToNAND(dev, blockInNAND * dev->nChunksPerBlock + 1,
++ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1,
+ NULL, &spare);
+
+ return YAFFS_OK;
+
+ }
+
+-int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev,
+- int blockNo,
+- yaffs_BlockState *state,
+- __u32 *sequenceNumber)
++int yaffs_tags_compat_query_block(struct yaffs_dev_s *dev,
++ int block_no,
++ yaffs_block_state_t *state,
++ __u32 *seq_number)
+ {
+
+- yaffs_Spare spare0, spare1;
+- static yaffs_Spare spareFF;
++ yaffs_spare spare0, spare1;
++ static yaffs_spare spareFF;
+ static int init;
+- yaffs_ECCResult dummy;
++ yaffs_ecc_result dummy;
+
+ if (!init) {
+ memset(&spareFF, 0xFF, sizeof(spareFF));
+ init = 1;
+ }
+
+- *sequenceNumber = 0;
++ *seq_number = 0;
+
+- yaffs_ReadChunkFromNAND(dev, blockNo * dev->nChunksPerBlock, NULL,
++ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block, NULL,
+ &spare0, &dummy, 1);
+- yaffs_ReadChunkFromNAND(dev, blockNo * dev->nChunksPerBlock + 1, NULL,
++ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1, NULL,
+ &spare1, &dummy, 1);
+
+- if (yaffs_CountBits(spare0.blockStatus & spare1.blockStatus) < 7)
++ if (yaffs_count_bits(spare0.block_status & spare1.block_status) < 7)
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ else if (memcmp(&spareFF, &spare0, sizeof(spareFF)) == 0)
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+--- a/fs/yaffs2/yaffs_tagscompat.h
++++ b/fs/yaffs2/yaffs_tagscompat.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -17,23 +17,23 @@
+ #define __YAFFS_TAGSCOMPAT_H__
+
+ #include "yaffs_guts.h"
+-int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device *dev,
+- int chunkInNAND,
++int yaffs_tags_compat_wr(yaffs_dev_t *dev,
++ int nand_chunk,
+ const __u8 *data,
+- const yaffs_ExtendedTags *tags);
+-int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device *dev,
+- int chunkInNAND,
++ const yaffs_ext_tags *tags);
++int yaffs_tags_compat_rd(yaffs_dev_t *dev,
++ int nand_chunk,
+ __u8 *data,
+- yaffs_ExtendedTags *tags);
+-int yaffs_TagsCompatabilityMarkNANDBlockBad(struct yaffs_DeviceStruct *dev,
+- int blockNo);
+-int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev,
+- int blockNo,
+- yaffs_BlockState *state,
+- __u32 *sequenceNumber);
++ yaffs_ext_tags *tags);
++int yaffs_tags_compat_mark_bad(struct yaffs_dev_s *dev,
++ int block_no);
++int yaffs_tags_compat_query_block(struct yaffs_dev_s *dev,
++ int block_no,
++ yaffs_block_state_t *state,
++ __u32 *seq_number);
+
+-void yaffs_CalcTagsECC(yaffs_Tags *tags);
+-int yaffs_CheckECCOnTags(yaffs_Tags *tags);
+-int yaffs_CountBits(__u8 byte);
++void yaffs_calc_tags_ecc(yaffs_tags_t *tags);
++int yaffs_check_tags_ecc(yaffs_tags_t *tags);
++int yaffs_count_bits(__u8 byte);
+
+ #endif
+--- a/fs/yaffs2/yaffs_tagsvalidity.c
++++ b/fs/yaffs2/yaffs_tagsvalidity.c
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -13,16 +13,16 @@
+
+ #include "yaffs_tagsvalidity.h"
+
+-void yaffs_InitialiseTags(yaffs_ExtendedTags *tags)
++void yaffs_init_tags(yaffs_ext_tags *tags)
+ {
+- memset(tags, 0, sizeof(yaffs_ExtendedTags));
+- tags->validMarker0 = 0xAAAAAAAA;
+- tags->validMarker1 = 0x55555555;
++ memset(tags, 0, sizeof(yaffs_ext_tags));
++ tags->validity0 = 0xAAAAAAAA;
++ tags->validity1 = 0x55555555;
+ }
+
+-int yaffs_ValidateTags(yaffs_ExtendedTags *tags)
++int yaffs_validate_tags(yaffs_ext_tags *tags)
+ {
+- return (tags->validMarker0 == 0xAAAAAAAA &&
+- tags->validMarker1 == 0x55555555);
++ return (tags->validity0 == 0xAAAAAAAA &&
++ tags->validity1 == 0x55555555);
+
+ }
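The two marker words written by yaffs_init_tags() act as a simple canary: yaffs_validate_tags() only succeeds for a tags structure that has been initialised and not overwritten since. A minimal round-trip sketch, using only the two functions declared in yaffs_tagsvalidity.h:

yaffs_ext_tags tags;

yaffs_init_tags(&tags);			/* zero the struct, then set both markers */

if (!yaffs_validate_tags(&tags)) {
	/* one of the markers was clobbered: treat the tags as corrupt */
}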
+--- a/fs/yaffs2/yaffs_tagsvalidity.h
++++ b/fs/yaffs2/yaffs_tagsvalidity.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -19,6 +19,6 @@
+
+ #include "yaffs_guts.h"
+
+-void yaffs_InitialiseTags(yaffs_ExtendedTags *tags);
+-int yaffs_ValidateTags(yaffs_ExtendedTags *tags);
++void yaffs_init_tags(yaffs_ext_tags *tags);
++int yaffs_validate_tags(yaffs_ext_tags *tags);
+ #endif
+--- /dev/null
++++ b/fs/yaffs2/yaffs_trace.h
+@@ -0,0 +1,60 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++
++#ifndef __YTRACE_H__
++#define __YTRACE_H__
++
++extern unsigned int yaffs_trace_mask;
++extern unsigned int yaffs_wr_attempts;
++
++/*
++ * Tracing flags.
++ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
++ */
++
++#define YAFFS_TRACE_OS 0x00000002
++#define YAFFS_TRACE_ALLOCATE 0x00000004
++#define YAFFS_TRACE_SCAN 0x00000008
++#define YAFFS_TRACE_BAD_BLOCKS 0x00000010
++#define YAFFS_TRACE_ERASE 0x00000020
++#define YAFFS_TRACE_GC 0x00000040
++#define YAFFS_TRACE_WRITE 0x00000080
++#define YAFFS_TRACE_TRACING 0x00000100
++#define YAFFS_TRACE_DELETION 0x00000200
++#define YAFFS_TRACE_BUFFERS 0x00000400
++#define YAFFS_TRACE_NANDACCESS 0x00000800
++#define YAFFS_TRACE_GC_DETAIL 0x00001000
++#define YAFFS_TRACE_SCAN_DEBUG 0x00002000
++#define YAFFS_TRACE_MTD 0x00004000
++#define YAFFS_TRACE_CHECKPOINT 0x00008000
++
++#define YAFFS_TRACE_VERIFY 0x00010000
++#define YAFFS_TRACE_VERIFY_NAND 0x00020000
++#define YAFFS_TRACE_VERIFY_FULL 0x00040000
++#define YAFFS_TRACE_VERIFY_ALL 0x000F0000
++
++#define YAFFS_TRACE_SYNC 0x00100000
++#define YAFFS_TRACE_BACKGROUND 0x00200000
++#define YAFFS_TRACE_LOCK 0x00400000
++
++#define YAFFS_TRACE_ERROR 0x40000000
++#define YAFFS_TRACE_BUG 0x80000000
++#define YAFFS_TRACE_ALWAYS 0xF0000000
++
++
++#define T(mask, p) do { if ((mask) & (yaffs_trace_mask | YAFFS_TRACE_ALWAYS)) TOUT(p); } while (0)
++
++#endif
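T() only emits its message when the caller's flag overlaps yaffs_trace_mask (or lies in YAFFS_TRACE_ALWAYS), so trace points stay compiled in and are enabled at run time, for example through the yaffs_trace_mask module parameter registered in the VFS glue further down. A short sketch of two trace points; TSTR()/TENDSTR come from yportenv.h (not shown in this patch) and block_no/pages_in_use are placeholder variables:

/* Trace GC and bad-block events in addition to the always-on bits. */
yaffs_trace_mask = YAFFS_TRACE_GC | YAFFS_TRACE_BAD_BLOCKS;

T(YAFFS_TRACE_GC,
  (TSTR("collecting block %d, %d pages in use" TENDSTR),
   block_no, pages_in_use));			/* printed: bit is set above */

T(YAFFS_TRACE_SCAN,
  (TSTR("scanning block %d" TENDSTR), block_no));	/* suppressed */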
+--- /dev/null
++++ b/fs/yaffs2/yaffs_verify.c
+@@ -0,0 +1,626 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++
++#include "yaffs_verify.h"
++#include "yaffs_trace.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_nand.h"
++
++int yaffs_skip_verification(yaffs_dev_t *dev)
++{
++ dev=dev;
++ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
++}
++
++static int yaffs_skip_full_verification(yaffs_dev_t *dev)
++{
++ dev=dev;
++ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL));
++}
++
++static int yaffs_skip_nand_verification(yaffs_dev_t *dev)
++{
++ dev=dev;
++ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND));
++}
++
++
++static const char *block_stateName[] = {
++"Unknown",
++"Needs scanning",
++"Scanning",
++"Empty",
++"Allocating",
++"Full",
++"Dirty",
++"Checkpoint",
++"Collecting",
++"Dead"
++};
++
++
++void yaffs_verify_blk(yaffs_dev_t *dev, yaffs_block_info_t *bi, int n)
++{
++ int actuallyUsed;
++ int inUse;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ /* Report illegal runtime states */
++ if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES)
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has undefined state %d"TENDSTR), n, bi->block_state));
++
++ switch (bi->block_state) {
++ case YAFFS_BLOCK_STATE_UNKNOWN:
++ case YAFFS_BLOCK_STATE_SCANNING:
++ case YAFFS_BLOCK_STATE_NEEDS_SCANNING:
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has bad run-state %s"TENDSTR),
++ n, block_stateName[bi->block_state]));
++ }
++
++ /* Check pages in use and soft deletions are legal */
++
++ actuallyUsed = bi->pages_in_use - bi->soft_del_pages;
++
++ if (bi->pages_in_use < 0 || bi->pages_in_use > dev->param.chunks_per_block ||
++ bi->soft_del_pages < 0 || bi->soft_del_pages > dev->param.chunks_per_block ||
++ actuallyUsed < 0 || actuallyUsed > dev->param.chunks_per_block)
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has illegal values pages_in_used %d soft_del_pages %d"TENDSTR),
++ n, bi->pages_in_use, bi->soft_del_pages));
++
++
++ /* Check chunk bitmap legal */
++ inUse = yaffs_count_chunk_bits(dev, n);
++ if (inUse != bi->pages_in_use)
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has inconsistent values pages_in_use %d counted chunk bits %d"TENDSTR),
++ n, bi->pages_in_use, inUse));
++
++}
++
++
++
++void yaffs_verify_collected_blk(yaffs_dev_t *dev, yaffs_block_info_t *bi, int n)
++{
++ yaffs_verify_blk(dev, bi, n);
++
++ /* After collection the block should be in the erased state */
++
++ if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING &&
++ bi->block_state != YAFFS_BLOCK_STATE_EMPTY) {
++ T(YAFFS_TRACE_ERROR, (TSTR("Block %d is in state %d after gc, should be erased"TENDSTR),
++ n, bi->block_state));
++ }
++}
++
++void yaffs_verify_blocks(yaffs_dev_t *dev)
++{
++ int i;
++ int nBlocksPerState[YAFFS_NUMBER_OF_BLOCK_STATES];
++ int nIllegalBlockStates = 0;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ memset(nBlocksPerState, 0, sizeof(nBlocksPerState));
++
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ yaffs_block_info_t *bi = yaffs_get_block_info(dev, i);
++ yaffs_verify_blk(dev, bi, i);
++
++ if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES)
++ nBlocksPerState[bi->block_state]++;
++ else
++ nIllegalBlockStates++;
++ }
++
++ T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR)));
++ T(YAFFS_TRACE_VERIFY, (TSTR("Block summary"TENDSTR)));
++
++ T(YAFFS_TRACE_VERIFY, (TSTR("%d blocks have illegal states"TENDSTR), nIllegalBlockStates));
++ if (nBlocksPerState[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
++ T(YAFFS_TRACE_VERIFY, (TSTR("Too many allocating blocks"TENDSTR)));
++
++ for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("%s %d blocks"TENDSTR),
++ block_stateName[i], nBlocksPerState[i]));
++
++ if (dev->blocks_in_checkpt != nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT])
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Checkpoint block count wrong dev %d count %d"TENDSTR),
++ dev->blocks_in_checkpt, nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT]));
++
++ if (dev->n_erased_blocks != nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY])
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Erased block count wrong dev %d count %d"TENDSTR),
++ dev->n_erased_blocks, nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY]));
++
++ if (nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING] > 1)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Too many collecting blocks %d (max is 1)"TENDSTR),
++ nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING]));
++
++ T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR)));
++
++}
++
++/*
++ * Verify the object header. oh must be valid, but obj and tags may be NULL in which
++ * case those tests will not be performed.
++ */
++void yaffs_verify_oh(yaffs_obj_t *obj, yaffs_obj_header *oh, yaffs_ext_tags *tags, int parentCheck)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++
++ if (!(tags && obj && oh)) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Verifying object header tags %p obj %p oh %p"TENDSTR),
++ tags, obj, oh));
++ return;
++ }
++
++ if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
++ oh->type > YAFFS_OBJECT_TYPE_MAX)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header type is illegal value 0x%x"TENDSTR),
++ tags->obj_id, oh->type));
++
++ if (tags->obj_id != obj->obj_id)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header mismatch obj_id %d"TENDSTR),
++ tags->obj_id, obj->obj_id));
++
++
++ /*
++ * Check that the object's parent ids match if parentCheck requested.
++ *
++ * Tests do not apply to the root object.
++ */
++
++ if (parentCheck && tags->obj_id > 1 && !obj->parent)
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header mismatch parent_id %d obj->parent is NULL"TENDSTR),
++ tags->obj_id, oh->parent_obj_id));
++
++ if (parentCheck && obj->parent &&
++ oh->parent_obj_id != obj->parent->obj_id &&
++ (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED ||
++ obj->parent->obj_id != YAFFS_OBJECTID_DELETED))
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header mismatch parent_id %d parent_obj_id %d"TENDSTR),
++ tags->obj_id, oh->parent_obj_id, obj->parent->obj_id));
++
++ if (tags->obj_id > 1 && oh->name[0] == 0) /* Null name */
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header name is NULL"TENDSTR),
++ obj->obj_id));
++
++ if (tags->obj_id > 1 && ((__u8)(oh->name[0])) == 0xff) /* Trashed name */
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d header name is 0xFF"TENDSTR),
++ obj->obj_id));
++}
++
++
++#if 0
++/* Not being used, but don't want to throw away yet */
++int yaffs_verify_tnode_worker(yaffs_obj_t *obj, yaffs_tnode_t *tn,
++ __u32 level, int chunk_offset)
++{
++ int i;
++ yaffs_dev_t *dev = obj->my_dev;
++ int ok = 1;
++
++ if (tn) {
++ if (level > 0) {
++
++ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
++ if (tn->internal[i]) {
++ ok = yaffs_verify_tnode_worker(obj,
++ tn->internal[i],
++ level - 1,
++ (chunk_offset<<YAFFS_TNODES_INTERNAL_BITS) + i);
++ }
++ }
++ } else if (level == 0) {
++ yaffs_ext_tags tags;
++ __u32 obj_id = obj->obj_id;
++
++ chunk_offset <<= YAFFS_TNODES_LEVEL0_BITS;
++
++ for (i = 0; i < YAFFS_NTNODES_LEVEL0; i++) {
++ __u32 theChunk = yaffs_get_group_base(dev, tn, i);
++
++ if (theChunk > 0) {
++ /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),tags.obj_id,tags.chunk_id,theChunk)); */
++ yaffs_rd_chunk_tags_nand(dev, theChunk, NULL, &tags);
++ if (tags.obj_id != obj_id || tags.chunk_id != chunk_offset) {
++ T(~0, (TSTR("Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
++ obj_id, chunk_offset, theChunk,
++ tags.obj_id, tags.chunk_id));
++ }
++ }
++ chunk_offset++;
++ }
++ }
++ }
++
++ return ok;
++
++}
++
++#endif
++
++void yaffs_verify_file(yaffs_obj_t *obj)
++{
++ int requiredTallness;
++ int actualTallness;
++ __u32 lastChunk;
++ __u32 x;
++ __u32 i;
++ yaffs_dev_t *dev;
++ yaffs_ext_tags tags;
++ yaffs_tnode_t *tn;
++ __u32 obj_id;
++
++ if (!obj)
++ return;
++
++ if (yaffs_skip_verification(obj->my_dev))
++ return;
++
++ dev = obj->my_dev;
++ obj_id = obj->obj_id;
++
++ /* Check file size is consistent with tnode depth */
++ lastChunk = obj->variant.file_variant.file_size / dev->data_bytes_per_chunk + 1;
++ x = lastChunk >> YAFFS_TNODES_LEVEL0_BITS;
++ requiredTallness = 0;
++ while (x > 0) {
++ x >>= YAFFS_TNODES_INTERNAL_BITS;
++ requiredTallness++;
++ }
++
++ actualTallness = obj->variant.file_variant.top_level;
++
++ /* Check that the chunks in the tnode tree are all correct.
++ * We do this by scanning through the tnode tree and
++ * checking the tags for every chunk match.
++ */
++
++ if (yaffs_skip_nand_verification(dev))
++ return;
++
++ for (i = 1; i <= lastChunk; i++) {
++ tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i);
++
++ if (tn) {
++ __u32 theChunk = yaffs_get_group_base(dev, tn, i);
++ if (theChunk > 0) {
++ /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),obj_id,i,theChunk)); */
++ yaffs_rd_chunk_tags_nand(dev, theChunk, NULL, &tags);
++ if (tags.obj_id != obj_id || tags.chunk_id != i) {
++ T(~0, (TSTR("Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
++ obj_id, i, theChunk,
++ tags.obj_id, tags.chunk_id));
++ }
++ }
++ }
++ }
++}
++
++
++void yaffs_verify_link(yaffs_obj_t *obj)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++
++ /* Verify sane equivalent object */
++}
++
++void yaffs_verify_symlink(yaffs_obj_t *obj)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++
++ /* Verify symlink string */
++}
++
++void yaffs_verify_special(yaffs_obj_t *obj)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++}
++
++void yaffs_verify_obj(yaffs_obj_t *obj)
++{
++ yaffs_dev_t *dev;
++
++ __u32 chunkMin;
++ __u32 chunkMax;
++
++ __u32 chunk_idOk;
++ __u32 chunkInRange;
++ __u32 chunkShouldNotBeDeleted;
++ __u32 chunkValid;
++
++ if (!obj)
++ return;
++
++ if (obj->being_created)
++ return;
++
++ dev = obj->my_dev;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ /* Check sane object header chunk */
++
++ chunkMin = dev->internal_start_block * dev->param.chunks_per_block;
++ chunkMax = (dev->internal_end_block+1) * dev->param.chunks_per_block - 1;
++
++ chunkInRange = (((unsigned)(obj->hdr_chunk)) >= chunkMin && ((unsigned)(obj->hdr_chunk)) <= chunkMax);
++ chunk_idOk = chunkInRange || (obj->hdr_chunk == 0);
++ chunkValid = chunkInRange &&
++ yaffs_check_chunk_bit(dev,
++ obj->hdr_chunk / dev->param.chunks_per_block,
++ obj->hdr_chunk % dev->param.chunks_per_block);
++ chunkShouldNotBeDeleted = chunkInRange && !chunkValid;
++
++ if (!obj->fake &&
++ (!chunk_idOk || chunkShouldNotBeDeleted)) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d has chunk_id %d %s %s"TENDSTR),
++ obj->obj_id, obj->hdr_chunk,
++ chunk_idOk ? "" : ",out of range",
++ chunkShouldNotBeDeleted ? ",marked as deleted" : ""));
++ }
++
++ if (chunkValid && !yaffs_skip_nand_verification(dev)) {
++ yaffs_ext_tags tags;
++ yaffs_obj_header *oh;
++ __u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
++
++ oh = (yaffs_obj_header *)buffer;
++
++ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer,
++ &tags);
++
++ yaffs_verify_oh(obj, oh, &tags, 1);
++
++ yaffs_release_temp_buffer(dev, buffer, __LINE__);
++ }
++
++ /* Verify it has a parent */
++ if (obj && !obj->fake &&
++ (!obj->parent || obj->parent->my_dev != dev)) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d has parent pointer %p which does not look like an object"TENDSTR),
++ obj->obj_id, obj->parent));
++ }
++
++ /* Verify parent is a directory */
++ if (obj->parent && obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d's parent is not a directory (type %d)"TENDSTR),
++ obj->obj_id, obj->parent->variant_type));
++ }
++
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ yaffs_verify_file(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ yaffs_verify_symlink(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ yaffs_verify_dir(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ yaffs_verify_link(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ yaffs_verify_special(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ default:
++ T(YAFFS_TRACE_VERIFY,
++ (TSTR("Obj %d has illegaltype %d"TENDSTR),
++ obj->obj_id, obj->variant_type));
++ break;
++ }
++}
++
++void yaffs_verify_objects(yaffs_dev_t *dev)
++{
++ yaffs_obj_t *obj;
++ int i;
++ struct ylist_head *lh;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ /* Iterate through the objects in each hash entry */
++
++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++ ylist_for_each(lh, &dev->obj_bucket[i].list) {
++ if (lh) {
++ obj = ylist_entry(lh, yaffs_obj_t, hash_link);
++ yaffs_verify_obj(obj);
++ }
++ }
++ }
++}
++
++
++void yaffs_verify_obj_in_dir(yaffs_obj_t *obj)
++{
++ struct ylist_head *lh;
++ yaffs_obj_t *listObj;
++
++ int count = 0;
++
++ if (!obj) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("No object to verify" TENDSTR)));
++ YBUG();
++ return;
++ }
++
++ if (yaffs_skip_verification(obj->my_dev))
++ return;
++
++ if (!obj->parent) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Object does not have parent" TENDSTR)));
++ YBUG();
++ return;
++ }
++
++ if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Parent is not directory" TENDSTR)));
++ YBUG();
++ }
++
++ /* Iterate through the objects in each hash entry */
++
++ ylist_for_each(lh, &obj->parent->variant.dir_variant.children) {
++ if (lh) {
++ listObj = ylist_entry(lh, yaffs_obj_t, siblings);
++ yaffs_verify_obj(listObj);
++ if (obj == listObj)
++ count++;
++ }
++ }
++
++ if (count != 1) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory %d times" TENDSTR), count));
++ YBUG();
++ }
++}
++
++void yaffs_verify_dir(yaffs_obj_t *directory)
++{
++ struct ylist_head *lh;
++ yaffs_obj_t *listObj;
++
++ if (!directory) {
++ YBUG();
++ return;
++ }
++
++ if (yaffs_skip_full_verification(directory->my_dev))
++ return;
++
++ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Directory has wrong type: %d" TENDSTR), directory->variant_type));
++ YBUG();
++ }
++
++ /* Iterate through the objects in each hash entry */
++
++ ylist_for_each(lh, &directory->variant.dir_variant.children) {
++ if (lh) {
++ listObj = ylist_entry(lh, yaffs_obj_t, siblings);
++ if (listObj->parent != directory) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory list has wrong parent %p" TENDSTR), listObj->parent));
++ YBUG();
++ }
++ yaffs_verify_obj_in_dir(listObj);
++ }
++ }
++}
++
++static int yaffs_free_verification_failures;
++
++void yaffs_verify_free_chunks(yaffs_dev_t *dev)
++{
++ int counted;
++ int difference;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ counted = yaffs_count_free_chunks(dev);
++
++ difference = dev->n_free_chunks - counted;
++
++ if (difference) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("Freechunks verification failure %d %d %d" TENDSTR),
++ dev->n_free_chunks, counted, difference));
++ yaffs_free_verification_failures++;
++ }
++}
++
++int yaffs_verify_file_sane(yaffs_obj_t *in)
++{
++#if 0
++ int chunk;
++ int n_chunks;
++ int fSize;
++ int failed = 0;
++ int obj_id;
++ yaffs_tnode_t *tn;
++ yaffs_tags_t localTags;
++ yaffs_tags_t *tags = &localTags;
++ int theChunk;
++ int is_deleted;
++
++ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
++ return YAFFS_FAIL;
++
++ obj_id = in->obj_id;
++ fSize = in->variant.file_variant.file_size;
++ n_chunks =
++ (fSize + in->my_dev->data_bytes_per_chunk - 1) / in->my_dev->data_bytes_per_chunk;
++
++ for (chunk = 1; chunk <= n_chunks; chunk++) {
++ tn = yaffs_find_tnode_0(in->my_dev, &in->variant.file_variant,
++ chunk);
++
++ if (tn) {
++
++ theChunk = yaffs_get_group_base(dev, tn, chunk);
++
++ if (yaffs_check_chunk_bits
++ (dev, theChunk / dev->param.chunks_per_block,
++ theChunk % dev->param.chunks_per_block)) {
++
++ yaffs_rd_chunk_tags_nand(in->my_dev, theChunk,
++ tags,
++ &is_deleted);
++ if (yaffs_tags_match
++ (tags, in->obj_id, chunk, is_deleted)) {
++ /* found it; */
++
++ }
++ } else {
++
++ failed = 1;
++ }
++
++ } else {
++ /* T(("No level 0 found for %d\n", chunk)); */
++ }
++ }
++
++ return failed ? YAFFS_FAIL : YAFFS_OK;
++#else
++ in=in;
++ return YAFFS_OK;
++#endif
++}
+--- /dev/null
++++ b/fs/yaffs2/yaffs_verify.h
+@@ -0,0 +1,39 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __YAFFS_VERIFY_H__
++#define __YAFFS_VERIFY_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_verify_blk(yaffs_dev_t *dev, yaffs_block_info_t *bi, int n);
++void yaffs_verify_collected_blk(yaffs_dev_t *dev, yaffs_block_info_t *bi, int n);
++void yaffs_verify_blocks(yaffs_dev_t *dev);
++
++void yaffs_verify_oh(yaffs_obj_t *obj, yaffs_obj_header *oh, yaffs_ext_tags *tags, int parentCheck);
++void yaffs_verify_file(yaffs_obj_t *obj);
++void yaffs_verify_link(yaffs_obj_t *obj);
++void yaffs_verify_symlink(yaffs_obj_t *obj);
++void yaffs_verify_special(yaffs_obj_t *obj);
++void yaffs_verify_obj(yaffs_obj_t *obj);
++void yaffs_verify_objects(yaffs_dev_t *dev);
++void yaffs_verify_obj_in_dir(yaffs_obj_t *obj);
++void yaffs_verify_dir(yaffs_obj_t *directory);
++void yaffs_verify_free_chunks(yaffs_dev_t *dev);
++
++int yaffs_verify_file_sane(yaffs_obj_t *obj);
++
++int yaffs_skip_verification(yaffs_dev_t *dev);
++
++#endif
++
+--- /dev/null
++++ b/fs/yaffs2/yaffs_vfs_glue.c
+@@ -0,0 +1,3576 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ * Acknowledgements:
++ * Luc van OostenRyck for numerous patches.
++ * Nick Bane for numerous patches.
++ * Nick Bane for 2.5/2.6 integration.
++ * Andras Toth for mknod rdev issue.
++ * Michael Fischer for finding the problem with inode inconsistency.
++ * Some code bodily lifted from JFFS
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ *
++ * This is the file system front-end to YAFFS that hooks it up to
++ * the VFS.
++ *
++ * Special notes:
++ * >> 2.4: sb->u.generic_sbp points to the yaffs_dev_t associated with
++ * this superblock
++ * >> 2.6: sb->s_fs_info points to the yaffs_dev_t associated with this
++ * superblock
++ * >> inode->u.generic_ip points to the associated yaffs_obj_t.
++ */
++
++/*
++ * There are two variants of the VFS glue code. This variant should compile
++ * for any version of Linux.
++ */
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10))
++#define YAFFS_COMPILE_BACKGROUND
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6, 23))
++#define YAFFS_COMPILE_FREEZER
++#endif
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
++#define YAFFS_COMPILE_EXPORTFS
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35))
++#define YAFFS_USE_SETATTR_COPY
++#define YAFFS_USE_TRUNCATE_SETSIZE
++#endif
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35))
++#define YAFFS_HAS_EVICT_INODE
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
++#define YAFFS_NEW_FOLLOW_LINK 1
++#else
++#define YAFFS_NEW_FOLLOW_LINK 0
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++#include <linux/config.h>
++#endif
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/smp_lock.h>
++#include <linux/pagemap.h>
++#include <linux/mtd/mtd.h>
++#include <linux/interrupt.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++#include <linux/namei.h>
++#endif
++
++#ifdef YAFFS_COMPILE_EXPORTFS
++#include <linux/exportfs.h>
++#endif
++
++#ifdef YAFFS_COMPILE_BACKGROUND
++#include <linux/kthread.h>
++#include <linux/delay.h>
++#endif
++#ifdef YAFFS_COMPILE_FREEZER
++#include <linux/freezer.h>
++#endif
++
++#include <asm/div64.h>
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++
++#include <linux/statfs.h>
++
++#define UnlockPage(p) unlock_page(p)
++#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
++
++/* FIXME: use sb->s_id instead ? */
++#define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf)
++
++#else
++
++#include <linux/locks.h>
++#define BDEVNAME_SIZE 0
++#define yaffs_devname(sb, buf) kdevname(sb->s_dev)
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
++/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
++#define __user
++#endif
++
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
++#define YPROC_ROOT (&proc_root)
++#else
++#define YPROC_ROOT NULL
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
++#define Y_INIT_TIMER(a) init_timer(a)
++#else
++#define Y_INIT_TIMER(a) init_timer_on_stack(a)
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++#define WRITE_SIZE_STR "writesize"
++#define WRITE_SIZE(mtd) ((mtd)->writesize)
++#else
++#define WRITE_SIZE_STR "oobblock"
++#define WRITE_SIZE(mtd) ((mtd)->oobblock)
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27))
++#define YAFFS_USE_WRITE_BEGIN_END 1
++#else
++#define YAFFS_USE_WRITE_BEGIN_END 0
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28))
++static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
++{
++ uint64_t result = partition_size;
++ do_div(result, block_size);
++ return (uint32_t)result;
++}
++#else
++#define YCALCBLOCKS(s, b) ((s)/(b))
++#endif
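YCALCBLOCKS() exists because a straight 64-bit division is not available on every 32-bit target; do_div() from <asm/div64.h> divides a u64 in place and returns the remainder, so the quotient is read back from the variable. A small sketch of the kind of call the mount code makes (mtd is a struct mtd_info pointer, n_blocks is an illustrative local):

uint32_t n_blocks;

/* Number of erase blocks in the partition backing this yaffs instance. */
n_blocks = YCALCBLOCKS(mtd->size, mtd->erasesize);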
++
++#include <linux/uaccess.h>
++#include <linux/mtd/mtd.h>
++
++#include "yportenv.h"
++#include "yaffs_trace.h"
++#include "yaffs_guts.h"
++
++#include "yaffs_linux.h"
++
++#include "yaffs_mtdif.h"
++#include "yaffs_mtdif1.h"
++#include "yaffs_mtdif2.h"
++
++unsigned int yaffs_trace_mask = YAFFS_TRACE_BAD_BLOCKS | YAFFS_TRACE_ALWAYS;
++unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
++unsigned int yaffs_auto_checkpoint = 1;
++unsigned int yaffs_gc_control = 1;
++unsigned int yaffs_bg_enable = 1;
++
++/* Module Parameters */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++module_param(yaffs_trace_mask, uint, 0644);
++module_param(yaffs_wr_attempts, uint, 0644);
++module_param(yaffs_auto_checkpoint, uint, 0644);
++module_param(yaffs_gc_control, uint, 0644);
++module_param(yaffs_bg_enable, uint, 0644);
++#else
++MODULE_PARM(yaffs_trace_mask, "i");
++MODULE_PARM(yaffs_wr_attempts, "i");
++MODULE_PARM(yaffs_auto_checkpoint, "i");
++MODULE_PARM(yaffs_gc_control, "i");
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
++/* use iget and read_inode */
++#define Y_IGET(sb, inum) iget((sb), (inum))
++static void yaffs_read_inode(struct inode *inode);
++
++#else
++/* Call local equivalent */
++#define YAFFS_USE_OWN_IGET
++#define Y_IGET(sb, inum) yaffs_iget((sb), (inum))
++
++static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino);
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
++#define yaffs_InodeToObjectLV(iptr) ((iptr)->i_private)
++#else
++#define yaffs_InodeToObjectLV(iptr) ((iptr)->u.generic_ip)
++#endif
++
++#define yaffs_InodeToObject(iptr) ((yaffs_obj_t *)(yaffs_InodeToObjectLV(iptr)))
++#define yaffs_dentry_to_obj(dptr) yaffs_InodeToObject((dptr)->d_inode)
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++#define yaffs_SuperToDevice(sb) ((yaffs_dev_t *)sb->s_fs_info)
++#else
++#define yaffs_SuperToDevice(sb) ((yaffs_dev_t *)sb->u.generic_sbp)
++#endif
++
++
++#define update_dir_time(dir) do {\
++ (dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \
++ } while(0)
++
++static void yaffs_put_super(struct super_block *sb);
++
++static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
++ loff_t *pos);
++static ssize_t yaffs_hold_space(struct file *f);
++static void yaffs_release_space(struct file *f);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_file_flush(struct file *file, fl_owner_t id);
++#else
++static int yaffs_file_flush(struct file *file);
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
++static int yaffs_sync_object(struct file *file, int datasync);
++#else
++static int yaffs_sync_object(struct file *file, struct dentry *dentry,
++ int datasync);
++#endif
++
++static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
++ struct nameidata *n);
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
++ struct nameidata *n);
++#else
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode);
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry);
++#endif
++static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
++ struct dentry *dentry);
++static int yaffs_unlink(struct inode *dir, struct dentry *dentry);
++static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
++ const char *symname);
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++ dev_t dev);
++#else
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++ int dev);
++#endif
++static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
++ struct inode *new_dir, struct dentry *new_dentry);
++static int yaffs_setattr(struct dentry *dentry, struct iattr *attr);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_sync_fs(struct super_block *sb, int wait);
++static void yaffs_write_super(struct super_block *sb);
++#else
++static int yaffs_sync_fs(struct super_block *sb);
++static int yaffs_write_super(struct super_block *sb);
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf);
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf);
++#else
++static int yaffs_statfs(struct super_block *sb, struct statfs *buf);
++#endif
++
++#ifdef YAFFS_HAS_PUT_INODE
++static void yaffs_put_inode(struct inode *inode);
++#endif
++
++#ifdef YAFFS_HAS_EVICT_INODE
++static void yaffs_evict_inode(struct inode *);
++#else
++static void yaffs_delete_inode(struct inode *);
++static void yaffs_clear_inode(struct inode *);
++#endif
++
++static int yaffs_readpage(struct file *file, struct page *page);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_writepage(struct page *page, struct writeback_control *wbc);
++#else
++static int yaffs_writepage(struct page *page);
++#endif
++
++#ifdef CONFIG_YAFFS_XATTR
++int yaffs_setxattr(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags);
++ssize_t yaffs_getxattr(struct dentry *dentry, const char *name, void *buff,
++ size_t size);
++int yaffs_removexattr(struct dentry *dentry, const char *name);
++ssize_t yaffs_listxattr(struct dentry *dentry, char *buff, size_t size);
++#endif
++
++
++#if (YAFFS_USE_WRITE_BEGIN_END != 0)
++static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned flags,
++ struct page **pagep, void **fsdata);
++static int yaffs_write_end(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned copied,
++ struct page *pg, void *fsdadata);
++#else
++static int yaffs_prepare_write(struct file *f, struct page *pg,
++ unsigned offset, unsigned to);
++static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
++ unsigned to);
++
++#endif
++
++static int yaffs_readlink(struct dentry *dentry, char __user *buffer,
++ int buflen);
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias);
++static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
++#else
++static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
++#endif
++
++static void yaffs_touch_super(yaffs_dev_t *dev);
++
++static loff_t yaffs_dir_llseek(struct file *file, loff_t offset, int origin);
++
++static int yaffs_vfs_setattr(struct inode *, struct iattr *);
++
++
++static struct address_space_operations yaffs_file_address_operations = {
++ .readpage = yaffs_readpage,
++ .writepage = yaffs_writepage,
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++ .write_begin = yaffs_write_begin,
++ .write_end = yaffs_write_end,
++#else
++ .prepare_write = yaffs_prepare_write,
++ .commit_write = yaffs_commit_write,
++#endif
++};
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
++static const struct file_operations yaffs_file_operations = {
++ .read = do_sync_read,
++ .write = do_sync_write,
++ .aio_read = generic_file_aio_read,
++ .aio_write = generic_file_aio_write,
++ .mmap = generic_file_mmap,
++ .flush = yaffs_file_flush,
++ .fsync = yaffs_sync_object,
++ .splice_read = generic_file_splice_read,
++ .splice_write = generic_file_splice_write,
++ .llseek = generic_file_llseek,
++};
++
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
++
++static const struct file_operations yaffs_file_operations = {
++ .read = do_sync_read,
++ .write = do_sync_write,
++ .aio_read = generic_file_aio_read,
++ .aio_write = generic_file_aio_write,
++ .mmap = generic_file_mmap,
++ .flush = yaffs_file_flush,
++ .fsync = yaffs_sync_object,
++ .sendfile = generic_file_sendfile,
++};
++
++#else
++
++static const struct file_operations yaffs_file_operations = {
++ .read = generic_file_read,
++ .write = generic_file_write,
++ .mmap = generic_file_mmap,
++ .flush = yaffs_file_flush,
++ .fsync = yaffs_sync_object,
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ .sendfile = generic_file_sendfile,
++#endif
++};
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
++static void zero_user_segment(struct page *page, unsigned start, unsigned end)
++{
++ void * kaddr = kmap_atomic(page, KM_USER0);
++ memset(kaddr + start, 0, end - start);
++ kunmap_atomic(kaddr, KM_USER0);
++ flush_dcache_page(page);
++}
++#endif
++
++
++static const struct inode_operations yaffs_file_inode_operations = {
++ .setattr = yaffs_setattr,
++#ifdef CONFIG_YAFFS_XATTR
++ .setxattr = yaffs_setxattr,
++ .getxattr = yaffs_getxattr,
++ .listxattr = yaffs_listxattr,
++ .removexattr = yaffs_removexattr,
++#endif
++};
++
++static const struct inode_operations yaffs_symlink_inode_operations = {
++ .readlink = yaffs_readlink,
++ .follow_link = yaffs_follow_link,
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++ .put_link = yaffs_put_link,
++#endif
++ .setattr = yaffs_setattr,
++#ifdef CONFIG_YAFFS_XATTR
++ .setxattr = yaffs_setxattr,
++ .getxattr = yaffs_getxattr,
++ .listxattr = yaffs_listxattr,
++ .removexattr = yaffs_removexattr,
++#endif
++};
++
++static const struct inode_operations yaffs_dir_inode_operations = {
++ .create = yaffs_create,
++ .lookup = yaffs_lookup,
++ .link = yaffs_link,
++ .unlink = yaffs_unlink,
++ .symlink = yaffs_symlink,
++ .mkdir = yaffs_mkdir,
++ .rmdir = yaffs_unlink,
++ .mknod = yaffs_mknod,
++ .rename = yaffs_rename,
++ .setattr = yaffs_setattr,
++#ifdef CONFIG_YAFFS_XATTR
++ .setxattr = yaffs_setxattr,
++ .getxattr = yaffs_getxattr,
++ .listxattr = yaffs_listxattr,
++ .removexattr = yaffs_removexattr,
++#endif
++};
++
++static const struct file_operations yaffs_dir_operations = {
++ .read = generic_read_dir,
++ .readdir = yaffs_readdir,
++ .fsync = yaffs_sync_object,
++ .llseek = yaffs_dir_llseek,
++};
++
++static const struct super_operations yaffs_super_ops = {
++ .statfs = yaffs_statfs,
++
++#ifndef YAFFS_USE_OWN_IGET
++ .read_inode = yaffs_read_inode,
++#endif
++#ifdef YAFFS_HAS_PUT_INODE
++ .put_inode = yaffs_put_inode,
++#endif
++ .put_super = yaffs_put_super,
++#ifdef YAFFS_HAS_EVICT_INODE
++ .evict_inode = yaffs_evict_inode,
++#else
++ .delete_inode = yaffs_delete_inode,
++ .clear_inode = yaffs_clear_inode,
++#endif
++ .sync_fs = yaffs_sync_fs,
++ .write_super = yaffs_write_super,
++};
++
++
++static int yaffs_vfs_setattr(struct inode *inode, struct iattr *attr)
++{
++#ifdef YAFFS_USE_SETATTR_COPY
++ setattr_copy(inode,attr);
++ return 0;
++#else
++ return inode_setattr(inode, attr);
++#endif
++
++}
++
++static int yaffs_vfs_setsize(struct inode *inode, loff_t newsize)
++{
++#ifdef YAFFS_USE_TRUNCATE_SETSIZE
++ truncate_setsize(inode,newsize);
++ return 0;
++#else
++ truncate_inode_pages(&inode->i_data,newsize);
++ return 0;
++#endif
++
++}
++
++static unsigned yaffs_gc_control_callback(yaffs_dev_t *dev)
++{
++ return yaffs_gc_control;
++}
++
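++/* The "gross lock" is a single per-device semaphore that serialises
++ * all yaffs operations on that device.
++ */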
++static void yaffs_gross_lock(yaffs_dev_t *dev)
++{
++ T(YAFFS_TRACE_LOCK, (TSTR("yaffs locking %p\n"), current));
++ down(&(yaffs_dev_to_lc(dev)->grossLock));
++ T(YAFFS_TRACE_LOCK, (TSTR("yaffs locked %p\n"), current));
++}
++
++static void yaffs_gross_unlock(yaffs_dev_t *dev)
++{
++ T(YAFFS_TRACE_LOCK, (TSTR("yaffs unlocking %p\n"), current));
++ up(&(yaffs_dev_to_lc(dev)->grossLock));
++}
++
++#ifdef YAFFS_COMPILE_EXPORTFS
++
++static struct inode *
++yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino, uint32_t generation)
++{
++ return Y_IGET(sb, ino);
++}
++
++static struct dentry *
++yaffs2_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type)
++{
++ return generic_fh_to_dentry(sb, fid, fh_len, fh_type, yaffs2_nfs_get_inode) ;
++}
++
++static struct dentry *
++ yaffs2_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type)
++{
++ return generic_fh_to_parent(sb, fid, fh_len, fh_type, yaffs2_nfs_get_inode);
++}
++
++struct dentry *yaffs2_get_parent(struct dentry *dentry)
++{
++
++ struct super_block *sb = dentry->d_inode->i_sb;
++ struct dentry *parent = ERR_PTR(-ENOENT);
++ struct inode *inode;
++ unsigned long parent_ino;
++ yaffs_obj_t *d_obj;
++ yaffs_obj_t *parent_obj;
++
++ d_obj = yaffs_InodeToObject(dentry->d_inode);
++
++ if (d_obj) {
++ parent_obj = d_obj->parent;
++ if (parent_obj) {
++ parent_ino = yaffs_get_obj_inode(parent_obj);
++ inode = Y_IGET(sb, parent_ino);
++
++ if (IS_ERR(inode)) {
++ parent = ERR_CAST(inode);
++ } else {
++ parent = d_obtain_alias(inode);
++ /* d_obtain_alias() consumes the inode reference and
++ * returns an ERR_PTR on failure, so no further error
++ * handling is needed here.
++ */
++ }
++ }
++ }
++
++ return parent;
++}
++
++/* Just declare a zero structure, as a NULL value implies
++ * using the default functions of exportfs.
++ */
++
++static struct export_operations yaffs_export_ops =
++{
++ .fh_to_dentry = yaffs2_fh_to_dentry,
++ .fh_to_parent = yaffs2_fh_to_parent,
++ .get_parent = yaffs2_get_parent,
++} ;
++
++#endif
++
++/*-----------------------------------------------------------------*/
++/* Directory search context allows us to unlock access to yaffs during
++ * filldir without causing problems with the directory being modified.
++ * This is similar to the tried and tested mechanism used in yaffs direct.
++ *
++ * A search context iterates along a doubly linked list of siblings in the
++ * directory. If the iterating object is deleted then this would corrupt
++ * the list iteration, likely causing a crash. The search context avoids
++ * this by using the remove_obj_fn to move the search context to the
++ * next object before the object is deleted.
++ *
++ * Many readdirs (and thus search contexts) may be alive simultaneously, so
++ * each yaffs_dev_t has a list of these.
++ *
++ * A search context lives for the duration of a readdir.
++ *
++ * All these functions must be called while yaffs is locked.
++ */
++
++struct yaffs_SearchContext {
++ yaffs_dev_t *dev;
++ yaffs_obj_t *dirObj;
++ yaffs_obj_t *nextReturn;
++ struct ylist_head others;
++};
++
++/*
++ * yaffs_NewSearch() creates a new search context, initialises it and
++ * adds it to the device's search context list.
++ *
++ * Called at start of readdir.
++ */
++static struct yaffs_SearchContext * yaffs_NewSearch(yaffs_obj_t *dir)
++{
++ yaffs_dev_t *dev = dir->my_dev;
++ struct yaffs_SearchContext *sc = YMALLOC(sizeof(struct yaffs_SearchContext));
++ if(sc){
++ sc->dirObj = dir;
++ sc->dev = dev;
++ if( ylist_empty(&sc->dirObj->variant.dir_variant.children))
++ sc->nextReturn = NULL;
++ else
++ sc->nextReturn = ylist_entry(
++ dir->variant.dir_variant.children.next,
++ yaffs_obj_t,siblings);
++ YINIT_LIST_HEAD(&sc->others);
++ ylist_add(&sc->others,&(yaffs_dev_to_lc(dev)->searchContexts));
++ }
++ return sc;
++}
++
++/*
++ * yaffs_search_end() disposes of a search context and cleans up.
++ */
++static void yaffs_search_end(struct yaffs_SearchContext * sc)
++{
++ if(sc){
++ ylist_del(&sc->others);
++ YFREE(sc);
++ }
++}
++
++/*
++ * yaffs_search_advance() moves a search context to the next object.
++ * Called when the search iterates or when an object removal causes
++ * the search context to be moved to the next object.
++ */
++static void yaffs_search_advance(struct yaffs_SearchContext *sc)
++{
++ if(!sc)
++ return;
++
++ if( sc->nextReturn == NULL ||
++ ylist_empty(&sc->dirObj->variant.dir_variant.children))
++ sc->nextReturn = NULL;
++ else {
++ struct ylist_head *next = sc->nextReturn->siblings.next;
++
++ if( next == &sc->dirObj->variant.dir_variant.children)
++ sc->nextReturn = NULL; /* end of list */
++ else
++ sc->nextReturn = ylist_entry(next,yaffs_obj_t,siblings);
++ }
++}
++
++/*
++ * yaffs_remove_obj_callback() is called when an object is unlinked.
++ * We check open search contexts and advance any which are currently
++ * on the object being iterated.
++ */
++static void yaffs_remove_obj_callback(yaffs_obj_t *obj)
++{
++
++ struct ylist_head *i;
++ struct yaffs_SearchContext *sc;
++ struct ylist_head *search_contexts = &(yaffs_dev_to_lc(obj->my_dev)->searchContexts);
++
++
++ /* Iterate through the directory search contexts.
++ * If any are currently on the object being removed, then advance
++ * the search context to the next object to prevent a hanging pointer.
++ */
++ ylist_for_each(i, search_contexts) {
++ if (i) {
++ sc = ylist_entry(i, struct yaffs_SearchContext,others);
++ if(sc->nextReturn == obj)
++ yaffs_search_advance(sc);
++ }
++ }
++
++}
++
++
++/*-----------------------------------------------------------------*/
++
++static int yaffs_readlink(struct dentry *dentry, char __user *buffer,
++ int buflen)
++{
++ unsigned char *alias;
++ int ret;
++
++ yaffs_dev_t *dev = yaffs_dentry_to_obj(dentry)->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
++
++ yaffs_gross_unlock(dev);
++
++ if (!alias)
++ return -ENOMEM;
++
++ ret = vfs_readlink(dentry, buffer, buflen, alias);
++ kfree(alias);
++ return ret;
++}
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
++#else
++static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
++#endif
++{
++ unsigned char *alias;
++ int ret;
++ yaffs_dev_t *dev = yaffs_dentry_to_obj(dentry)->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
++ yaffs_gross_unlock(dev);
++
++ if (!alias) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++ nd_set_link(nd, alias);
++ ret = (int)alias;
++out:
++ return ERR_PTR(ret);
++#else
++ ret = vfs_follow_link(nd, alias);
++ kfree(alias);
++out:
++ return ret;
++#endif
++}
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias) {
++ kfree(alias);
++}
++#endif
++
++struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
++ yaffs_obj_t *obj);
++
++/*
++ * Lookup is used to find objects in the fs
++ */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
++ struct nameidata *n)
++#else
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
++#endif
++{
++ yaffs_obj_t *obj;
++ struct inode *inode = NULL; /* NCB 2.5/2.6 needs NULL here */
++
++ yaffs_dev_t *dev = yaffs_InodeToObject(dir)->my_dev;
++
++ if(current != yaffs_dev_to_lc(dev)->readdirProcess)
++ yaffs_gross_lock(dev);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_lookup for %d:%s\n"),
++ yaffs_InodeToObject(dir)->obj_id, dentry->d_name.name));
++
++ obj = yaffs_find_by_name(yaffs_InodeToObject(dir),
++ dentry->d_name.name);
++
++ obj = yaffs_get_equivalent_obj(obj); /* in case it was a hardlink */
++
++ /* Can't hold gross lock when calling yaffs_get_inode() */
++ if(current != yaffs_dev_to_lc(dev)->readdirProcess)
++ yaffs_gross_unlock(dev);
++
++ if (obj) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_lookup found %d\n"), obj->obj_id));
++
++ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
++
++ if (inode) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_loookup dentry \n")));
++/* #if 0 asserted by NCB for 2.5/6 compatibility - falls through to
++ * d_add even if NULL inode */
++#if 0
++ /*dget(dentry); // try to solve directory bug */
++ d_add(dentry, inode);
++
++ /* return dentry; */
++ return NULL;
++#endif
++ }
++
++ } else {
++ T(YAFFS_TRACE_OS,(TSTR("yaffs_lookup not found\n")));
++
++ }
++
++/* added NCB for 2.5/6 compatibility - forces add even if inode is
++ * NULL which creates dentry hash */
++ d_add(dentry, inode);
++
++ return NULL;
++}
++
++
++#ifdef YAFFS_HAS_PUT_INODE
++
++/* For now put inode is just for debugging
++ * Put inode is called when the inode **structure** is put.
++ */
++static void yaffs_put_inode(struct inode *inode)
++{
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_put_inode: ino %d, count %d\n"), (int)inode->i_ino,
++ atomic_read(&inode->i_count)));
++
++}
++#endif
++
++
++static void yaffs_unstitch_obj(struct inode *inode, yaffs_obj_t *obj)
++{
++ /* Clear the association between the inode and
++ * the yaffs_obj_t.
++ */
++ obj->my_inode = NULL;
++ yaffs_InodeToObjectLV(inode) = NULL;
++
++ /* If the object freeing was deferred, then the real
++ * free happens now.
++ * This should fix the inode inconsistency problem.
++ */
++ yaffs_handle_defered_free(obj);
++}
++
++#ifdef YAFFS_HAS_EVICT_INODE
++/* yaffs_evict_inode combines into one operation what was previously done in
++ * yaffs_clear_inode() and yaffs_delete_inode()
++ *
++ */
++static void yaffs_evict_inode( struct inode *inode)
++{
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev;
++ int deleteme = 0;
++
++ obj = yaffs_InodeToObject(inode);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_evict_inode: ino %d, count %d %s\n"), (int)inode->i_ino,
++ atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object"));
++
++ if (!inode->i_nlink && !is_bad_inode(inode))
++ deleteme = 1;
++ truncate_inode_pages(&inode->i_data,0);
++ end_writeback(inode);
++
++ if(deleteme && obj){
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_del_obj(obj);
++ yaffs_gross_unlock(dev);
++ }
++ if (obj) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_unstitch_obj(inode,obj);
++ yaffs_gross_unlock(dev);
++ }
++
++
++}
++#else
++
++/* clear is called to tell the fs to release any per-inode data it holds.
++ * The object might still exist on disk and is just being thrown out of the cache
++ * or else the object has actually been deleted and we're being called via
++ * the chain
++ * yaffs_delete_inode() -> clear_inode()->yaffs_clear_inode()
++ */
++
++static void yaffs_clear_inode(struct inode *inode)
++{
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev;
++
++ obj = yaffs_InodeToObject(inode);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_clear_inode: ino %d, count %d %s\n"), (int)inode->i_ino,
++ atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object"));
++
++ if (obj) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_unstitch_obj(inode,obj);
++ yaffs_gross_unlock(dev);
++ }
++
++}
++
++/* delete is called when the link count is zero and the inode
++ * is put (ie. nobody wants to know about it anymore, time to
++ * delete the file).
++ * NB Must call clear_inode()
++ */
++static void yaffs_delete_inode(struct inode *inode)
++{
++ yaffs_obj_t *obj = yaffs_InodeToObject(inode);
++ yaffs_dev_t *dev;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_delete_inode: ino %d, count %d %s\n"), (int)inode->i_ino,
++ atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object"));
++
++ if (obj) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_del_obj(obj);
++ yaffs_gross_unlock(dev);
++ }
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++ truncate_inode_pages(&inode->i_data, 0);
++#endif
++ clear_inode(inode);
++}
++#endif
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_file_flush(struct file *file, fl_owner_t id)
++#else
++static int yaffs_file_flush(struct file *file)
++#endif
++{
++ yaffs_obj_t *obj = yaffs_dentry_to_obj(file->f_dentry);
++
++ yaffs_dev_t *dev = obj->my_dev;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_file_flush object %d (%s)\n"), obj->obj_id,
++ obj->dirty ? "dirty" : "clean"));
++
++ yaffs_gross_lock(dev);
++
++ yaffs_flush_file(obj, 1, 0);
++
++ yaffs_gross_unlock(dev);
++
++ return 0;
++}
++
++static int yaffs_readpage_nolock(struct file *f, struct page *pg)
++{
++ /* Lifted from jffs2 */
++
++ yaffs_obj_t *obj;
++ unsigned char *pg_buf;
++ int ret;
++
++ yaffs_dev_t *dev;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_readpage_nolock at %08x, size %08x\n"),
++ (unsigned)(pg->index << PAGE_CACHE_SHIFT),
++ (unsigned)PAGE_CACHE_SIZE));
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ dev = obj->my_dev;
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ BUG_ON(!PageLocked(pg));
++#else
++ if (!PageLocked(pg))
++ PAGE_BUG(pg);
++#endif
++
++ pg_buf = kmap(pg);
++ /* FIXME: Can kmap fail? */
++
++ yaffs_gross_lock(dev);
++
++ ret = yaffs_file_rd(obj, pg_buf,
++ pg->index << PAGE_CACHE_SHIFT,
++ PAGE_CACHE_SIZE);
++
++ yaffs_gross_unlock(dev);
++
++ if (ret >= 0)
++ ret = 0;
++
++ if (ret) {
++ ClearPageUptodate(pg);
++ SetPageError(pg);
++ } else {
++ SetPageUptodate(pg);
++ ClearPageError(pg);
++ }
++
++ flush_dcache_page(pg);
++ kunmap(pg);
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_readpage_nolock done\n")));
++ return ret;
++}
++
++static int yaffs_readpage_unlock(struct file *f, struct page *pg)
++{
++ int ret = yaffs_readpage_nolock(f, pg);
++ UnlockPage(pg);
++ return ret;
++}
++
++static int yaffs_readpage(struct file *f, struct page *pg)
++{
++ int ret;
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_readpage\n")));
++ ret=yaffs_readpage_unlock(f, pg);
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_readpage done\n")));
++ return ret;
++}
++
++/* writepage inspired by/stolen from smbfs */
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
++#else
++static int yaffs_writepage(struct page *page)
++#endif
++{
++ yaffs_dev_t *dev;
++ struct address_space *mapping = page->mapping;
++ struct inode *inode;
++ unsigned long end_index;
++ char *buffer;
++ yaffs_obj_t *obj;
++ int nWritten = 0;
++ unsigned n_bytes;
++ loff_t i_size;
++
++ if (!mapping)
++ BUG();
++ inode = mapping->host;
++ if (!inode)
++ BUG();
++ i_size = i_size_read(inode);
++
++ end_index = i_size >> PAGE_CACHE_SHIFT;
++
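++ /* Pages wholly inside i_size are written in full. For the final,
++ * partial page the unused tail is zeroed and only n_bytes are
++ * written; pages beyond i_size are discarded below.
++ */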
++ if(page->index < end_index)
++ n_bytes = PAGE_CACHE_SIZE;
++ else {
++ n_bytes = i_size & (PAGE_CACHE_SIZE -1);
++
++ if (page->index > end_index || !n_bytes) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_writepage at %08x, inode size = %08x!!!\n"),
++ (unsigned)(page->index << PAGE_CACHE_SHIFT),
++ (unsigned)inode->i_size));
++ T(YAFFS_TRACE_OS,
++ (TSTR(" -> don't care!!\n")));
++
++ zero_user_segment(page,0,PAGE_CACHE_SIZE);
++ set_page_writeback(page);
++ unlock_page(page);
++ end_page_writeback(page);
++ return 0;
++ }
++ }
++
++ if(n_bytes != PAGE_CACHE_SIZE)
++ zero_user_segment(page,n_bytes,PAGE_CACHE_SIZE);
++
++ get_page(page);
++
++ buffer = kmap(page);
++
++ obj = yaffs_InodeToObject(inode);
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_writepage at %08x, size %08x\n"),
++ (unsigned)(page->index << PAGE_CACHE_SHIFT), n_bytes));
++ T(YAFFS_TRACE_OS,
++ (TSTR("writepag0: obj = %05x, ino = %05x\n"),
++ (int)obj->variant.file_variant.file_size, (int)inode->i_size));
++
++ nWritten = yaffs_wr_file(obj, buffer,
++ page->index << PAGE_CACHE_SHIFT, n_bytes, 0);
++
++ yaffs_touch_super(dev);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("writepag1: obj = %05x, ino = %05x\n"),
++ (int)obj->variant.file_variant.file_size, (int)inode->i_size));
++
++ yaffs_gross_unlock(dev);
++
++ kunmap(page);
++ set_page_writeback(page);
++ unlock_page(page);
++ end_page_writeback(page);
++ put_page(page);
++
++ return (nWritten == n_bytes) ? 0 : -ENOSPC;
++}
++
++
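++/* Newer kernels use the write_begin/write_end address-space operations;
++ * older ones fall back to the prepare_write/commit_write pair below.
++ */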
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned flags,
++ struct page **pagep, void **fsdata)
++{
++ struct page *pg = NULL;
++ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
++
++ int ret = 0;
++ int space_held = 0;
++
++ /* Get a page */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++ pg = grab_cache_page_write_begin(mapping, index, flags);
++#else
++ pg = __grab_cache_page(mapping, index);
++#endif
++
++ *pagep = pg;
++ if (!pg) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ T(YAFFS_TRACE_OS,
++ (TSTR("start yaffs_write_begin index %d(%x) uptodate %d\n"),
++ (int)index,(int)index,Page_Uptodate(pg) ? 1 : 0));
++
++ /* Get fs space */
++ space_held = yaffs_hold_space(filp);
++
++ if (!space_held) {
++ ret = -ENOSPC;
++ goto out;
++ }
++
++ /* Update page if required */
++
++ if (!Page_Uptodate(pg))
++ ret = yaffs_readpage_nolock(filp, pg);
++
++ if (ret)
++ goto out;
++
++ /* Happy path return */
++ T(YAFFS_TRACE_OS, (TSTR("end yaffs_write_begin - ok\n")));
++
++ return 0;
++
++out:
++ T(YAFFS_TRACE_OS,
++ (TSTR("end yaffs_write_begin fail returning %d\n"), ret));
++ if (space_held)
++ yaffs_release_space(filp);
++ if (pg) {
++ unlock_page(pg);
++ page_cache_release(pg);
++ }
++ return ret;
++}
++
++#else
++
++static int yaffs_prepare_write(struct file *f, struct page *pg,
++ unsigned offset, unsigned to)
++{
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_prepair_write\n")));
++
++ if (!Page_Uptodate(pg))
++ return yaffs_readpage_nolock(f, pg);
++ return 0;
++}
++#endif
++
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_end(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned copied,
++ struct page *pg, void *fsdadata)
++{
++ int ret = 0;
++ void *addr, *kva;
++ uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
++
++ kva = kmap(pg);
++ addr = kva + offset_into_page;
++
++ T(YAFFS_TRACE_OS,
++ ("yaffs_write_end addr %p pos %x n_bytes %d\n",
++ addr,(unsigned)pos, copied));
++
++ ret = yaffs_file_write(filp, addr, copied, &pos);
++
++ if (ret != copied) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_write_end not same size ret %d copied %d\n"),
++ ret, copied));
++ SetPageError(pg);
++ } else {
++ /* Nothing */
++ }
++
++ kunmap(pg);
++
++ yaffs_release_space(filp);
++ unlock_page(pg);
++ page_cache_release(pg);
++ return ret;
++}
++#else
++
++static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
++ unsigned to)
++{
++ void *addr, *kva;
++
++ loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
++ int n_bytes = to - offset;
++ int nWritten;
++
++ unsigned spos = pos;
++ unsigned saddr;
++
++ kva = kmap(pg);
++ addr = kva + offset;
++
++ saddr = (unsigned) addr;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_commit_write addr %x pos %x n_bytes %d\n"),
++ saddr, spos, n_bytes));
++
++ nWritten = yaffs_file_write(f, addr, n_bytes, &pos);
++
++ if (nWritten != n_bytes) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_commit_write not same size nWritten %d n_bytes %d\n"),
++ nWritten, n_bytes));
++ SetPageError(pg);
++ } else {
++ /* Nothing */
++ }
++
++ kunmap(pg);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_commit_write returning %d\n"),
++ nWritten == n_bytes ? 0 : nWritten));
++
++ return nWritten == n_bytes ? 0 : nWritten;
++}
++#endif
++
++
++static void yaffs_fill_inode_from_obj(struct inode *inode, yaffs_obj_t *obj)
++{
++ if (inode && obj) {
++
++
++ /* Check mode against the variant type and attempt to repair if broken. */
++ __u32 mode = obj->yst_mode;
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ if (!S_ISREG(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFREG;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ if (!S_ISLNK(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFLNK;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ if (!S_ISDIR(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFDIR;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ default:
++ /* TODO? */
++ break;
++ }
++
++ inode->i_flags |= S_NOATIME;
++
++ inode->i_ino = obj->obj_id;
++ inode->i_mode = obj->yst_mode;
++ inode->i_uid = obj->yst_uid;
++ inode->i_gid = obj->yst_gid;
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++ inode->i_blksize = inode->i_sb->s_blocksize;
++#endif
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++
++ inode->i_rdev = old_decode_dev(obj->yst_rdev);
++ inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
++ inode->i_atime.tv_nsec = 0;
++ inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
++ inode->i_mtime.tv_nsec = 0;
++ inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
++ inode->i_ctime.tv_nsec = 0;
++#else
++ inode->i_rdev = obj->yst_rdev;
++ inode->i_atime = obj->yst_atime;
++ inode->i_mtime = obj->yst_mtime;
++ inode->i_ctime = obj->yst_ctime;
++#endif
++ inode->i_size = yaffs_get_obj_length(obj);
++ inode->i_blocks = (inode->i_size + 511) >> 9;
++
++ inode->i_nlink = yaffs_get_obj_link_count(obj);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_fill_inode mode %x uid %d gid %d size %d count %d\n"),
++ inode->i_mode, inode->i_uid, inode->i_gid,
++ (int)inode->i_size, atomic_read(&inode->i_count)));
++
++ switch (obj->yst_mode & S_IFMT) {
++ default: /* fifo, device or socket */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ init_special_inode(inode, obj->yst_mode,
++ old_decode_dev(obj->yst_rdev));
++#else
++ init_special_inode(inode, obj->yst_mode,
++ (dev_t) (obj->yst_rdev));
++#endif
++ break;
++ case S_IFREG: /* file */
++ inode->i_op = &yaffs_file_inode_operations;
++ inode->i_fop = &yaffs_file_operations;
++ inode->i_mapping->a_ops =
++ &yaffs_file_address_operations;
++ break;
++ case S_IFDIR: /* directory */
++ inode->i_op = &yaffs_dir_inode_operations;
++ inode->i_fop = &yaffs_dir_operations;
++ break;
++ case S_IFLNK: /* symlink */
++ inode->i_op = &yaffs_symlink_inode_operations;
++ break;
++ }
++
++ yaffs_InodeToObjectLV(inode) = obj;
++
++ obj->my_inode = inode;
++
++ } else {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_FileInode invalid parameters\n")));
++ }
++
++}
++
++struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
++ yaffs_obj_t *obj)
++{
++ struct inode *inode;
++
++ if (!sb) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_get_inode for NULL super_block!!\n")));
++ return NULL;
++
++ }
++
++ if (!obj) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_get_inode for NULL object!!\n")));
++ return NULL;
++
++ }
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_get_inode for object %d\n"), obj->obj_id));
++
++ inode = Y_IGET(sb, obj->obj_id);
++ if (IS_ERR(inode))
++ return NULL;
++
++ /* NB Side effect: iget calls back to yaffs_read_inode(). */
++ /* iget also increments the inode's i_count */
++ /* NB You can't be holding grossLock or deadlock will happen! */
++
++ return inode;
++}
++
++static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
++ loff_t *pos)
++{
++ yaffs_obj_t *obj;
++ int nWritten, ipos;
++ struct inode *inode;
++ yaffs_dev_t *dev;
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ inode = f->f_dentry->d_inode;
++
++ if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
++ ipos = inode->i_size;
++ else
++ ipos = *pos;
++
++ if (!obj)
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_file_write: hey obj is null!\n")));
++ else
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_file_write about to write writing %u(%x) bytes"
++ "to object %d at %d(%x)\n"),
++ (unsigned) n, (unsigned) n, obj->obj_id, ipos,ipos));
++
++ nWritten = yaffs_wr_file(obj, buf, ipos, n, 0);
++
++ yaffs_touch_super(dev);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_file_write: %d(%x) bytes written\n"),
++ (unsigned )n,(unsigned)n));
++
++ if (nWritten > 0) {
++ ipos += nWritten;
++ *pos = ipos;
++ if (ipos > inode->i_size) {
++ inode->i_size = ipos;
++ inode->i_blocks = (ipos + 511) >> 9;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_file_write size updated to %d bytes, "
++ "%d blocks\n"),
++ ipos, (int)(inode->i_blocks)));
++ }
++
++ }
++ yaffs_gross_unlock(dev);
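++ /* A zero-length result for a non-empty request means the device is full. */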
++ return (nWritten == 0) && (n > 0) ? -ENOSPC : nWritten;
++}
++
++/* Space holding and freeing is done to ensure we have space available for write_begin/end */
++/* For now we just assume few parallel writes and check against a small number. */
++/* Todo: need to do this with a counter to handle parallel reads better */
++
++static ssize_t yaffs_hold_space(struct file *f)
++{
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev;
++
++ int n_free_chunks;
++
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ n_free_chunks = yaffs_get_n_free_chunks(dev);
++
++ yaffs_gross_unlock(dev);
++
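++ /* Report space as held only while more than 20 free chunks remain. */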
++ return (n_free_chunks > 20) ? 1 : 0;
++}
++
++static void yaffs_release_space(struct file *f)
++{
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev;
++
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++
++ yaffs_gross_unlock(dev);
++}
++
++
++static loff_t yaffs_dir_llseek(struct file *file, loff_t offset, int origin)
++{
++ long long retval;
++
++ lock_kernel();
++
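++ /* origin: 0 = SEEK_SET, 1 = SEEK_CUR, 2 = SEEK_END */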
++ switch (origin){
++ case 2:
++ offset += i_size_read(file->f_path.dentry->d_inode);
++ break;
++ case 1:
++ offset += file->f_pos;
++ }
++ retval = -EINVAL;
++
++ if (offset >= 0){
++ if (offset != file->f_pos)
++ file->f_pos = offset;
++
++ retval = offset;
++ }
++ unlock_kernel();
++ return retval;
++}
++
++
++static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
++{
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev;
++ struct yaffs_SearchContext *sc;
++ struct inode *inode = f->f_dentry->d_inode;
++ unsigned long offset, curoffs;
++ yaffs_obj_t *l;
++ int retVal = 0;
++
++ char name[YAFFS_MAX_NAME_LENGTH + 1];
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ yaffs_dev_to_lc(dev)->readdirProcess = current;
++
++ offset = f->f_pos;
++
++ sc = yaffs_NewSearch(obj);
++ if(!sc){
++ retVal = -ENOMEM;
++ goto out;
++ }
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_readdir: starting at %d\n"), (int)offset));
++
++ if (offset == 0) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_readdir: entry . ino %d \n"),
++ (int)inode->i_ino));
++ yaffs_gross_unlock(dev);
++ if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0){
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++ yaffs_gross_lock(dev);
++ offset++;
++ f->f_pos++;
++ }
++ if (offset == 1) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_readdir: entry .. ino %d \n"),
++ (int)f->f_dentry->d_parent->d_inode->i_ino));
++ yaffs_gross_unlock(dev);
++ if (filldir(dirent, "..", 2, offset,
++ f->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0){
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++ yaffs_gross_lock(dev);
++ offset++;
++ f->f_pos++;
++ }
++
++ curoffs = 1;
++
++ /* If the directory has changed since the open or last call to
++ readdir, rewind to after the 2 canned entries. */
++ if (f->f_version != inode->i_version) {
++ offset = 2;
++ f->f_pos = offset;
++ f->f_version = inode->i_version;
++ }
++
++ while(sc->nextReturn){
++ curoffs++;
++ l = sc->nextReturn;
++ if (curoffs >= offset) {
++ int this_inode = yaffs_get_obj_inode(l);
++ int this_type = yaffs_get_obj_type(l);
++
++ yaffs_get_obj_name(l, name,
++ YAFFS_MAX_NAME_LENGTH + 1);
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_readdir: %s inode %d\n"),
++ name, yaffs_get_obj_inode(l)));
++
++ yaffs_gross_unlock(dev);
++
++ if (filldir(dirent,
++ name,
++ strlen(name),
++ offset,
++ this_inode,
++ this_type) < 0){
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++
++ yaffs_gross_lock(dev);
++
++ offset++;
++ f->f_pos++;
++ }
++ yaffs_search_advance(sc);
++ }
++
++out:
++ yaffs_search_end(sc);
++ yaffs_dev_to_lc(dev)->readdirProcess = NULL;
++ yaffs_gross_unlock(dev);
++
++ return retVal;
++}
++
++
++
++/*
++ * File creation. Allocate an inode, and we're done..
++ */
++
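++/* From 2.6.29 fsuid/fsgid live in the task's struct cred, hence YCRED(). */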
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++#define YCRED(x) x
++#else
++#define YCRED(x) (x->cred)
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++ dev_t rdev)
++#else
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++ int rdev)
++#endif
++{
++ struct inode *inode;
++
++ yaffs_obj_t *obj = NULL;
++ yaffs_dev_t *dev;
++
++ yaffs_obj_t *parent = yaffs_InodeToObject(dir);
++
++ int error = -ENOSPC;
++ uid_t uid = YCRED(current)->fsuid;
++ gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
++
++ if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
++ mode |= S_ISGID;
++
++ if (parent) {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_mknod: parent object %d type %d\n"),
++ parent->obj_id, parent->variant_type));
++ } else {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_mknod: could not get parent object\n")));
++ return -EPERM;
++ }
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_mknod: making oject for %s, "
++ "mode %x dev %x\n"),
++ dentry->d_name.name, mode, rdev));
++
++ dev = parent->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ switch (mode & S_IFMT) {
++ default:
++ /* Special (socket, fifo, device...) */
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_mknod: making special\n")));
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ obj = yaffs_create_special(parent, dentry->d_name.name, mode, uid,
++ gid, old_encode_dev(rdev));
++#else
++ obj = yaffs_create_special(parent, dentry->d_name.name, mode, uid,
++ gid, rdev);
++#endif
++ break;
++ case S_IFREG: /* file */
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_mknod: making file\n")));
++ obj = yaffs_create_file(parent, dentry->d_name.name, mode, uid,
++ gid);
++ break;
++ case S_IFDIR: /* directory */
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_mknod: making directory\n")));
++ obj = yaffs_create_dir(parent, dentry->d_name.name, mode,
++ uid, gid);
++ break;
++ case S_IFLNK: /* symlink */
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_mknod: making symlink\n")));
++ obj = NULL; /* Do we ever get here? */
++ break;
++ }
++
++ /* Can not call yaffs_get_inode() with gross lock held */
++ yaffs_gross_unlock(dev);
++
++ if (obj) {
++ inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
++ d_instantiate(dentry, inode);
++ update_dir_time(dir);
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_mknod created object %d count = %d\n"),
++ obj->obj_id, atomic_read(&inode->i_count)));
++ error = 0;
++ yaffs_fill_inode_from_obj(dir,parent);
++ } else {
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_mknod failed making object\n")));
++ error = -ENOMEM;
++ }
++
++ return error;
++}
++
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++{
++ int retVal;
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_mkdir\n")));
++ retVal = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
++ return retVal;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
++ struct nameidata *n)
++#else
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
++#endif
++{
++ T(YAFFS_TRACE_OS,(TSTR("yaffs_create\n")));
++ return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
++}
++
++static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
++{
++ int retVal;
++
++ yaffs_dev_t *dev;
++ yaffs_obj_t *obj;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_unlink %d:%s\n"),
++ (int)(dir->i_ino),
++ dentry->d_name.name));
++ obj = yaffs_InodeToObject(dir);
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ retVal = yaffs_unlinker(obj, dentry->d_name.name);
++
++ if (retVal == YAFFS_OK) {
++ dentry->d_inode->i_nlink--;
++ dir->i_version++;
++ yaffs_gross_unlock(dev);
++ mark_inode_dirty(dentry->d_inode);
++ update_dir_time(dir);
++ return 0;
++ }
++ yaffs_gross_unlock(dev);
++ return -ENOTEMPTY;
++}
++
++/*
++ * Create a link...
++ */
++static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
++ struct dentry *dentry)
++{
++ struct inode *inode = old_dentry->d_inode;
++ yaffs_obj_t *obj = NULL;
++ yaffs_obj_t *link = NULL;
++ yaffs_dev_t *dev;
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_link\n")));
++
++ obj = yaffs_InodeToObject(inode);
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
++ link = yaffs_link_obj(yaffs_InodeToObject(dir), dentry->d_name.name,
++ obj);
++
++ if (link) {
++ old_dentry->d_inode->i_nlink = yaffs_get_obj_link_count(obj);
++ d_instantiate(dentry, old_dentry->d_inode);
++ atomic_inc(&old_dentry->d_inode->i_count);
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_link link count %d i_count %d\n"),
++ old_dentry->d_inode->i_nlink,
++ atomic_read(&old_dentry->d_inode->i_count)));
++ }
++
++ yaffs_gross_unlock(dev);
++
++ if (link){
++ update_dir_time(dir);
++ return 0;
++ }
++
++ return -EPERM;
++}
++
++static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
++ const char *symname)
++{
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev;
++ uid_t uid = YCRED(current)->fsuid;
++ gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_symlink\n")));
++
++ dev = yaffs_InodeToObject(dir)->my_dev;
++ yaffs_gross_lock(dev);
++ obj = yaffs_create_symlink(yaffs_InodeToObject(dir), dentry->d_name.name,
++ S_IFLNK | S_IRWXUGO, uid, gid, symname);
++ yaffs_gross_unlock(dev);
++
++ if (obj) {
++ struct inode *inode;
++
++ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
++ d_instantiate(dentry, inode);
++ update_dir_time(dir);
++ T(YAFFS_TRACE_OS, (TSTR("symlink created OK\n")));
++ return 0;
++ } else {
++ T(YAFFS_TRACE_OS, (TSTR("symlink not created\n")));
++ }
++
++ return -ENOMEM;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
++static int yaffs_sync_object(struct file *file, int datasync)
++#else
++static int yaffs_sync_object(struct file *file, struct dentry *dentry,
++ int datasync)
++#endif
++{
++
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
++ struct dentry *dentry = file->f_path.dentry;
++#endif
++
++ obj = yaffs_dentry_to_obj(dentry);
++
++ dev = obj->my_dev;
++
++ T(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
++ (TSTR("yaffs_sync_object\n")));
++ yaffs_gross_lock(dev);
++ yaffs_flush_file(obj, 1, datasync);
++ yaffs_gross_unlock(dev);
++ return 0;
++}
++
++/*
++ * The VFS layer already does all the dentry stuff for rename.
++ *
++ * NB: POSIX says you can rename an object over an old object of the same name
++ */
++static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
++ struct inode *new_dir, struct dentry *new_dentry)
++{
++ yaffs_dev_t *dev;
++ int retVal = YAFFS_FAIL;
++ yaffs_obj_t *target;
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_rename\n")));
++ dev = yaffs_InodeToObject(old_dir)->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ /* Check if the target is an existing directory that is not empty. */
++ target = yaffs_find_by_name(yaffs_InodeToObject(new_dir),
++ new_dentry->d_name.name);
++
++
++
++ if (target && target->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
++ !ylist_empty(&target->variant.dir_variant.children)) {
++
++ T(YAFFS_TRACE_OS, (TSTR("target is non-empty dir\n")));
++
++ retVal = YAFFS_FAIL;
++ } else {
++ /* Now does unlinking internally using shadowing mechanism */
++ T(YAFFS_TRACE_OS, (TSTR("calling yaffs_rename_obj\n")));
++
++ retVal = yaffs_rename_obj(yaffs_InodeToObject(old_dir),
++ old_dentry->d_name.name,
++ yaffs_InodeToObject(new_dir),
++ new_dentry->d_name.name);
++ }
++ yaffs_gross_unlock(dev);
++
++ if (retVal == YAFFS_OK) {
++ if (target) {
++ new_dentry->d_inode->i_nlink--;
++ mark_inode_dirty(new_dentry->d_inode);
++ }
++
++ update_dir_time(old_dir);
++ if(old_dir != new_dir)
++ update_dir_time(new_dir);
++ return 0;
++ } else {
++ return -ENOTEMPTY;
++ }
++}
++
++static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ yaffs_dev_t *dev;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_setattr of object %d\n"),
++ yaffs_InodeToObject(inode)->obj_id));
++
++ /* Fail if a requested resize >= 2GB */
++ if (attr->ia_valid & ATTR_SIZE &&
++ (attr->ia_size >> 31))
++ error = -EINVAL;
++
++ if (error == 0)
++ error = inode_change_ok(inode, attr);
++ if (error == 0) {
++ int result;
++ if (!error){
++ error = yaffs_vfs_setattr(inode, attr);
++ T(YAFFS_TRACE_OS,(TSTR("inode_setattr called\n")));
++ if (attr->ia_valid & ATTR_SIZE){
++ yaffs_vfs_setsize(inode,attr->ia_size);
++ inode->i_blocks = (inode->i_size + 511) >> 9;
++ }
++ }
++ dev = yaffs_InodeToObject(inode)->my_dev;
++ if (attr->ia_valid & ATTR_SIZE){
++ T(YAFFS_TRACE_OS,(TSTR("resize to %d(%x)\n"),
++ (int)(attr->ia_size),(int)(attr->ia_size)));
++ }
++ yaffs_gross_lock(dev);
++ result = yaffs_set_attribs(yaffs_InodeToObject(inode), attr);
++ if(result == YAFFS_OK) {
++ error = 0;
++ } else {
++ error = -EPERM;
++ }
++ yaffs_gross_unlock(dev);
++
++ }
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_setattr done returning %d\n"),error));
++
++ return error;
++}
++
++#ifdef CONFIG_YAFFS_XATTR
++int yaffs_setxattr(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ yaffs_dev_t *dev;
++ yaffs_obj_t *obj = yaffs_InodeToObject(inode);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_setxattr of object %d\n"),
++ obj->obj_id));
++
++
++ if (error == 0) {
++ int result;
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ result = yaffs_set_xattrib(obj, name, value, size, flags);
++ if(result == YAFFS_OK)
++ error = 0;
++ else if(result < 0)
++ error = result;
++ yaffs_gross_unlock(dev);
++
++ }
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_setxattr done returning %d\n"),error));
++
++ return error;
++}
++
++
++ssize_t yaffs_getxattr(struct dentry *dentry, const char *name, void *buff,
++ size_t size)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ yaffs_dev_t *dev;
++ yaffs_obj_t *obj = yaffs_InodeToObject(inode);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_getxattr \"%s\" from object %d\n"),
++ name, obj->obj_id));
++
++ if (error == 0) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ error = yaffs_get_xattrib(obj, name, buff, size);
++ yaffs_gross_unlock(dev);
++
++ }
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_getxattr done returning %d\n"),error));
++
++ return error;
++}
++
++int yaffs_removexattr(struct dentry *dentry, const char *name)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ yaffs_dev_t *dev;
++ yaffs_obj_t *obj = yaffs_InodeToObject(inode);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_removexattr of object %d\n"),
++ obj->obj_id));
++
++
++ if (error == 0) {
++ int result;
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ result = yaffs_remove_xattrib(obj, name);
++ if(result == YAFFS_OK)
++ error = 0;
++ else if(result < 0)
++ error = result;
++ yaffs_gross_unlock(dev);
++
++ }
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_removexattr done returning %d\n"),error));
++
++ return error;
++}
++
++ssize_t yaffs_listxattr(struct dentry *dentry, char *buff, size_t size)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ yaffs_dev_t *dev;
++ yaffs_obj_t *obj = yaffs_InodeToObject(inode);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_listxattr of object %d\n"),
++ obj->obj_id));
++
++
++ if (error == 0) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ error = yaffs_list_xattrib(obj, buff, size);
++ yaffs_gross_unlock(dev);
++
++ }
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_listxattr done returning %d\n"),error));
++
++ return error;
++}
++
++#endif
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++ yaffs_dev_t *dev = yaffs_dentry_to_obj(dentry)->my_dev;
++ struct super_block *sb = dentry->d_sb;
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
++{
++ yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
++#else
++static int yaffs_statfs(struct super_block *sb, struct statfs *buf)
++{
++ yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
++#endif
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_statfs\n")));
++
++ yaffs_gross_lock(dev);
++
++ buf->f_type = YAFFS_MAGIC;
++ buf->f_bsize = sb->s_blocksize;
++ buf->f_namelen = 255;
++
++ if (dev->data_bytes_per_chunk & (dev->data_bytes_per_chunk - 1)) {
++ /* Do this if chunk size is not a power of 2 */
++
++ uint64_t bytesInDev;
++ uint64_t bytesFree;
++
++ bytesInDev = ((uint64_t)((dev->param.end_block - dev->param.start_block + 1))) *
++ ((uint64_t)(dev->param.chunks_per_block * dev->data_bytes_per_chunk));
++
++ do_div(bytesInDev, sb->s_blocksize); /* bytesInDev becomes the number of blocks */
++ buf->f_blocks = bytesInDev;
++
++ bytesFree = ((uint64_t)(yaffs_get_n_free_chunks(dev))) *
++ ((uint64_t)(dev->data_bytes_per_chunk));
++
++ do_div(bytesFree, sb->s_blocksize);
++
++ buf->f_bfree = bytesFree;
++
++ } else if (sb->s_blocksize > dev->data_bytes_per_chunk) {
++
++ buf->f_blocks =
++ (dev->param.end_block - dev->param.start_block + 1) *
++ dev->param.chunks_per_block /
++ (sb->s_blocksize / dev->data_bytes_per_chunk);
++ buf->f_bfree =
++ yaffs_get_n_free_chunks(dev) /
++ (sb->s_blocksize / dev->data_bytes_per_chunk);
++ } else {
++ buf->f_blocks =
++ (dev->param.end_block - dev->param.start_block + 1) *
++ dev->param.chunks_per_block *
++ (dev->data_bytes_per_chunk / sb->s_blocksize);
++
++ buf->f_bfree =
++ yaffs_get_n_free_chunks(dev) *
++ (dev->data_bytes_per_chunk / sb->s_blocksize);
++ }
++
++ buf->f_files = 0;
++ buf->f_ffree = 0;
++ buf->f_bavail = buf->f_bfree;
++
++ yaffs_gross_unlock(dev);
++ return 0;
++}
++
++
++
++static void yaffs_flush_inodes(struct super_block *sb)
++{
++ struct inode *iptr;
++ yaffs_obj_t *obj;
++
++ list_for_each_entry(iptr,&sb->s_inodes, i_sb_list){
++ obj = yaffs_InodeToObject(iptr);
++ if(obj){
++ T(YAFFS_TRACE_OS, (TSTR("flushing obj %d\n"),
++ obj->obj_id));
++ yaffs_flush_file(obj,1,0);
++ }
++ }
++}
++
++
++static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
++{
++ yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
++ if(!dev)
++ return;
++
++ yaffs_flush_inodes(sb);
++ yaffs_update_dirty_dirs(dev);
++ yaffs_flush_whole_cache(dev);
++ if(do_checkpoint)
++ yaffs_checkpoint_save(dev);
++}
++
++
++static unsigned yaffs_bg_gc_urgency(yaffs_dev_t *dev)
++{
++ unsigned erasedChunks = dev->n_erased_blocks * dev->param.chunks_per_block;
++ struct yaffs_LinuxContext *context = yaffs_dev_to_lc(dev);
++ unsigned scatteredFree = 0; /* Free chunks not in an erased block */
++
++ if(erasedChunks < dev->n_free_chunks)
++ scatteredFree = (dev->n_free_chunks - erasedChunks);
++
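++ /* Urgency: 0 = no background gc needed, 1 = moderate, 2 = urgent
++ * (erased chunks are only a small fraction of the free chunks).
++ */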
++ if(!context->bgRunning)
++ return 0;
++ else if(scatteredFree < (dev->param.chunks_per_block * 2))
++ return 0;
++ else if(erasedChunks > dev->n_free_chunks/2)
++ return 0;
++ else if(erasedChunks > dev->n_free_chunks/4)
++ return 1;
++ else
++ return 2;
++}
++
++static int yaffs_do_sync_fs(struct super_block *sb,
++ int request_checkpoint)
++{
++
++ yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
++ unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4);
++ unsigned gc_urgent = yaffs_bg_gc_urgency(dev);
++ int do_checkpoint;
++
++ T(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
++ (TSTR("yaffs_do_sync_fs: gc-urgency %d %s %s%s\n"),
++ gc_urgent,
++ sb->s_dirt ? "dirty" : "clean",
++ request_checkpoint ? "checkpoint requested" : "no checkpoint",
++ oneshot_checkpoint ? " one-shot" : "" ));
++
++ yaffs_gross_lock(dev);
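++ /* The 0x4 bit of yaffs_auto_checkpoint requests a one-shot checkpoint
++ * and is cleared again once that checkpoint has been taken.
++ */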
++ do_checkpoint = ((request_checkpoint && !gc_urgent) ||
++ oneshot_checkpoint) &&
++ !dev->is_checkpointed;
++
++ if (sb->s_dirt || do_checkpoint) {
++ yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint);
++ sb->s_dirt = 0;
++ if(oneshot_checkpoint)
++ yaffs_auto_checkpoint &= ~4;
++ }
++ yaffs_gross_unlock(dev);
++
++ return 0;
++}
++
++/*
++ * yaffs background thread functions.
++ * yaffs_bg_thread_fn() the thread function
++ * yaffs_bg_start() launches the background thread.
++ * yaffs_bg_stop() cleans up the background thread.
++ *
++ * NB:
++ * The thread should only run after yaffs is initialised.
++ * The thread should be stopped before yaffs is unmounted.
++ * The thread should not do any writing while the fs is read only.
++ */
++
++#ifdef YAFFS_COMPILE_BACKGROUND
++
++void yaffs_background_waker(unsigned long data)
++{
++ wake_up_process((struct task_struct *)data);
++}
++
++static int yaffs_bg_thread_fn(void *data)
++{
++ yaffs_dev_t *dev = (yaffs_dev_t *)data;
++ struct yaffs_LinuxContext *context = yaffs_dev_to_lc(dev);
++ unsigned long now = jiffies;
++ unsigned long next_dir_update = now;
++ unsigned long next_gc = now;
++ unsigned long expires;
++ unsigned int urgency;
++
++ int gcResult;
++ struct timer_list timer;
++
++ T(YAFFS_TRACE_BACKGROUND,
++ (TSTR("yaffs_background starting for dev %p\n"),
++ (void *)dev));
++
++#ifdef YAFFS_COMPILE_FREEZER
++ set_freezable();
++#endif
++ while(context->bgRunning){
++ T(YAFFS_TRACE_BACKGROUND,
++ (TSTR("yaffs_background\n")));
++
++ if(kthread_should_stop())
++ break;
++
++#ifdef YAFFS_COMPILE_FREEZER
++ if(try_to_freeze())
++ continue;
++#endif
++ yaffs_gross_lock(dev);
++
++ now = jiffies;
++
++ if(time_after(now, next_dir_update) && yaffs_bg_enable){
++ yaffs_update_dirty_dirs(dev);
++ next_dir_update = now + HZ;
++ }
++
++ if(time_after(now,next_gc) && yaffs_bg_enable){
++ if(!dev->is_checkpointed){
++ urgency = yaffs_bg_gc_urgency(dev);
++ gcResult = yaffs_bg_gc(dev, urgency);
++ if(urgency > 1)
++ next_gc = now + HZ/20+1;
++ else if(urgency > 0)
++ next_gc = now + HZ/10+1;
++ else
++ next_gc = now + HZ * 2;
++ } else /*
++ * gc not running so set to next_dir_update
++ * to cut down on wake ups
++ */
++ next_gc = next_dir_update;
++ }
++ yaffs_gross_unlock(dev);
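++ /* Sleep until whichever of the next directory update or next gc
++ * is due first, using a one-shot timer to wake this thread.
++ */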
++#if 1
++ expires = next_dir_update;
++ if (time_before(next_gc,expires))
++ expires = next_gc;
++ if(time_before(expires,now))
++ expires = now + HZ;
++
++ Y_INIT_TIMER(&timer);
++ timer.expires = expires+1;
++ timer.data = (unsigned long) current;
++ timer.function = yaffs_background_waker;
++
++ set_current_state(TASK_INTERRUPTIBLE);
++ add_timer(&timer);
++ schedule();
++ del_timer_sync(&timer);
++#else
++ msleep(10);
++#endif
++ }
++
++ return 0;
++}
++
++static int yaffs_bg_start(yaffs_dev_t *dev)
++{
++ int retval = 0;
++ struct yaffs_LinuxContext *context = yaffs_dev_to_lc(dev);
++
++ if(dev->read_only)
++ return -1;
++
++ context->bgRunning = 1;
++
++ context->bgThread = kthread_run(yaffs_bg_thread_fn,
++ (void *)dev,"yaffs-bg-%d",context->mount_id);
++
++ if(IS_ERR(context->bgThread)){
++ retval = PTR_ERR(context->bgThread);
++ context->bgThread = NULL;
++ context->bgRunning = 0;
++ }
++ return retval;
++}
++
++static void yaffs_bg_stop(yaffs_dev_t *dev)
++{
++ struct yaffs_LinuxContext *ctxt = yaffs_dev_to_lc(dev);
++
++ ctxt->bgRunning = 0;
++
++ if( ctxt->bgThread){
++ kthread_stop(ctxt->bgThread);
++ ctxt->bgThread = NULL;
++ }
++}
++#else
++static int yaffs_bg_thread_fn(void *data)
++{
++ return 0;
++}
++
++static int yaffs_bg_start(yaffs_dev_t *dev)
++{
++ return 0;
++}
++
++static void yaffs_bg_stop(yaffs_dev_t *dev)
++{
++}
++#endif
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static void yaffs_write_super(struct super_block *sb)
++#else
++static int yaffs_write_super(struct super_block *sb)
++#endif
++{
++ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 2);
++
++ T(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
++ (TSTR("yaffs_write_super%s\n"),
++ request_checkpoint ? " checkpt" : ""));
++
++ yaffs_do_sync_fs(sb, request_checkpoint);
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
++ return 0;
++#endif
++}
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_sync_fs(struct super_block *sb, int wait)
++#else
++static int yaffs_sync_fs(struct super_block *sb)
++#endif
++{
++ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 1);
++
++ T(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
++ (TSTR("yaffs_sync_fs%s\n"),
++ request_checkpoint ? " checkpt" : ""));
++
++ yaffs_do_sync_fs(sb, request_checkpoint);
++
++ return 0;
++}
++
++#ifdef YAFFS_USE_OWN_IGET
++
++static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
++{
++ struct inode *inode;
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_iget for %lu\n"), ino));
++
++ inode = iget_locked(sb, ino);
++ if (!inode)
++ return ERR_PTR(-ENOMEM);
++ if (!(inode->i_state & I_NEW))
++ return inode;
++
++ /* NB This is called as a side effect of other functions, but
++ * we had to release the lock to prevent deadlocks, so
++ * need to lock again.
++ */
++
++ yaffs_gross_lock(dev);
++
++ obj = yaffs_find_by_number(dev, inode->i_ino);
++
++ yaffs_fill_inode_from_obj(inode, obj);
++
++ yaffs_gross_unlock(dev);
++
++ unlock_new_inode(inode);
++ return inode;
++}
++
++#else
++
++static void yaffs_read_inode(struct inode *inode)
++{
++ /* NB This is called as a side effect of other functions, but
++ * we had to release the lock to prevent deadlocks, so
++ * need to lock again.
++ */
++
++ yaffs_obj_t *obj;
++ yaffs_dev_t *dev = yaffs_SuperToDevice(inode->i_sb);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_read_inode for %d\n"), (int)inode->i_ino));
++
++ if(current != yaffs_dev_to_lc(dev)->readdirProcess)
++ yaffs_gross_lock(dev);
++
++ obj = yaffs_find_by_number(dev, inode->i_ino);
++
++ yaffs_fill_inode_from_obj(inode, obj);
++
++ if(current != yaffs_dev_to_lc(dev)->readdirProcess)
++ yaffs_gross_unlock(dev);
++}
++
++#endif
++
++static YLIST_HEAD(yaffs_context_list);
++struct semaphore yaffs_context_lock;
++
++static void yaffs_put_super(struct super_block *sb)
++{
++ yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_put_super\n")));
++
++ T(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
++ (TSTR("Shutting down yaffs background thread\n")));
++ yaffs_bg_stop(dev);
++ T(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
++ (TSTR("yaffs background thread shut down\n")));
++
++ yaffs_gross_lock(dev);
++
++ yaffs_flush_super(sb,1);
++
++ if (yaffs_dev_to_lc(dev)->putSuperFunc)
++ yaffs_dev_to_lc(dev)->putSuperFunc(sb);
++
++
++ yaffs_deinitialise(dev);
++
++ yaffs_gross_unlock(dev);
++
++ down(&yaffs_context_lock);
++ ylist_del_init(&(yaffs_dev_to_lc(dev)->contextList));
++ up(&yaffs_context_lock);
++
++ if (yaffs_dev_to_lc(dev)->spareBuffer) {
++ YFREE(yaffs_dev_to_lc(dev)->spareBuffer);
++ yaffs_dev_to_lc(dev)->spareBuffer = NULL;
++ }
++
++ kfree(dev);
++}
++
++
++static void yaffs_MTDPutSuper(struct super_block *sb)
++{
++ struct mtd_info *mtd = yaffs_dev_to_mtd(yaffs_SuperToDevice(sb));
++
++ if (mtd->sync)
++ mtd->sync(mtd);
++
++ put_mtd_device(mtd);
++}
++
++
++static void yaffs_touch_super(yaffs_dev_t *dev)
++{
++ struct super_block *sb = yaffs_dev_to_lc(dev)->superBlock;
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_touch_super() sb = %p\n"), sb));
++ if (sb)
++ sb->s_dirt = 1;
++}
++
++typedef struct {
++ int inband_tags;
++ int skip_checkpoint_read;
++ int skip_checkpoint_write;
++ int no_cache;
++ int tags_ecc_on;
++ int tags_ecc_overridden;
++ int lazy_loading_enabled;
++ int lazy_loading_overridden;
++ int empty_lost_and_found;
++ int empty_lost_and_found_overridden;
++} yaffs_options;
++
++#define MAX_OPT_LEN 30
++static int yaffs_parse_options(yaffs_options *options, const char *options_str)
++{
++ char cur_opt[MAX_OPT_LEN + 1];
++ int p;
++ int error = 0;
++
++ /* Parse through the options, which are a comma-separated list */
++
++ while (options_str && *options_str && !error) {
++ memset(cur_opt, 0, MAX_OPT_LEN + 1);
++ p = 0;
++
++ while(*options_str == ',')
++ options_str++;
++
++ while (*options_str && *options_str != ',') {
++ if (p < MAX_OPT_LEN) {
++ cur_opt[p] = *options_str;
++ p++;
++ }
++ options_str++;
++ }
++
++ if (!strcmp(cur_opt, "inband-tags"))
++ options->inband_tags = 1;
++ else if (!strcmp(cur_opt, "tags-ecc-off")){
++ options->tags_ecc_on = 0;
++ options->tags_ecc_overridden=1;
++ } else if (!strcmp(cur_opt, "tags-ecc-on")){
++ options->tags_ecc_on = 1;
++ options->tags_ecc_overridden = 1;
++ } else if (!strcmp(cur_opt, "lazy-loading-off")){
++ options->lazy_loading_enabled = 0;
++ options->lazy_loading_overridden=1;
++ } else if (!strcmp(cur_opt, "lazy-loading-on")){
++ options->lazy_loading_enabled = 1;
++ options->lazy_loading_overridden = 1;
++ } else if (!strcmp(cur_opt, "empty-lost-and-found-off")){
++ options->empty_lost_and_found = 0;
++ options->empty_lost_and_found_overridden=1;
++ } else if (!strcmp(cur_opt, "empty-lost-and-found-on")){
++ options->empty_lost_and_found = 1;
++ options->empty_lost_and_found_overridden=1;
++ } else if (!strcmp(cur_opt, "no-cache"))
++ options->no_cache = 1;
++ else if (!strcmp(cur_opt, "no-checkpoint-read"))
++ options->skip_checkpoint_read = 1;
++ else if (!strcmp(cur_opt, "no-checkpoint-write"))
++ options->skip_checkpoint_write = 1;
++ else if (!strcmp(cur_opt, "no-checkpoint")) {
++ options->skip_checkpoint_read = 1;
++ options->skip_checkpoint_write = 1;
++ } else {
++ printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
++ cur_opt);
++ error = 1;
++ }
++ }
++
++ return error;
++}
++
++static struct super_block *yaffs_internal_read_super(int yaffs_version,
++ struct super_block *sb,
++ void *data, int silent)
++{
++ int nBlocks;
++ struct inode *inode = NULL;
++ struct dentry *root;
++ yaffs_dev_t *dev = 0;
++ char devname_buf[BDEVNAME_SIZE + 1];
++ struct mtd_info *mtd;
++ int err;
++ char *data_str = (char *)data;
++ struct yaffs_LinuxContext *context = NULL;
++ yaffs_param_t *param;
++
++ int read_only = 0;
++
++ yaffs_options options;
++
++ unsigned mount_id;
++ int found;
++ struct yaffs_LinuxContext *context_iterator;
++ struct ylist_head *l;
++
++ sb->s_magic = YAFFS_MAGIC;
++ sb->s_op = &yaffs_super_ops;
++ sb->s_flags |= MS_NOATIME;
++
++ read_only =((sb->s_flags & MS_RDONLY) != 0);
++
++
++#ifdef YAFFS_COMPILE_EXPORTFS
++ sb->s_export_op = &yaffs_export_ops;
++#endif
++
++ if (!sb)
++ printk(KERN_INFO "yaffs: sb is NULL\n");
++ else if (!sb->s_dev)
++ printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
++ else if (!yaffs_devname(sb, devname_buf))
++ printk(KERN_INFO "yaffs: devname is NULL\n");
++ else
++ printk(KERN_INFO "yaffs: dev is %d name is \"%s\" %s\n",
++ sb->s_dev,
++ yaffs_devname(sb, devname_buf),
++ read_only ? "ro" : "rw");
++
++ if (!data_str)
++ data_str = "";
++
++ printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
++
++ memset(&options, 0, sizeof(options));
++
++ if (yaffs_parse_options(&options, data_str)) {
++ /* Option parsing failed */
++ return NULL;
++ }
++
++
++ sb->s_blocksize = PAGE_CACHE_SIZE;
++ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_read_super: Using yaffs%d\n"), yaffs_version));
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_read_super: block size %d\n"),
++ (int)(sb->s_blocksize)));
++
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: Attempting MTD mount of %u.%u,\"%s\"\n"),
++ MAJOR(sb->s_dev), MINOR(sb->s_dev),
++ yaffs_devname(sb, devname_buf)));
++
++ /* Check it's an mtd device..... */
++ if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR)
++ return NULL; /* This isn't an mtd device */
++
++ /* Get the device */
++ mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
++ if (!mtd) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: MTD device #%u doesn't appear to exist\n"),
++ MINOR(sb->s_dev)));
++ return NULL;
++ }
++ /* Check it's NAND */
++ if (mtd->type != MTD_NANDFLASH) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: MTD device is not NAND it's type %d\n"),
++ mtd->type));
++ return NULL;
++ }
++
++ T(YAFFS_TRACE_OS, (TSTR(" erase %p\n"), mtd->erase));
++ T(YAFFS_TRACE_OS, (TSTR(" read %p\n"), mtd->read));
++ T(YAFFS_TRACE_OS, (TSTR(" write %p\n"), mtd->write));
++ T(YAFFS_TRACE_OS, (TSTR(" readoob %p\n"), mtd->read_oob));
++ T(YAFFS_TRACE_OS, (TSTR(" writeoob %p\n"), mtd->write_oob));
++ T(YAFFS_TRACE_OS, (TSTR(" block_isbad %p\n"), mtd->block_isbad));
++ T(YAFFS_TRACE_OS, (TSTR(" block_markbad %p\n"), mtd->block_markbad));
++ T(YAFFS_TRACE_OS, (TSTR(" %s %d\n"), WRITE_SIZE_STR, WRITE_SIZE(mtd)));
++ T(YAFFS_TRACE_OS, (TSTR(" oobsize %d\n"), mtd->oobsize));
++ T(YAFFS_TRACE_OS, (TSTR(" erasesize %d\n"), mtd->erasesize));
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++ T(YAFFS_TRACE_OS, (TSTR(" size %u\n"), mtd->size));
++#else
++ T(YAFFS_TRACE_OS, (TSTR(" size %lld\n"), mtd->size));
++#endif
++
++#ifdef CONFIG_YAFFS_AUTO_YAFFS2
++
++ if (yaffs_version == 1 && WRITE_SIZE(mtd) >= 2048) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: auto selecting yaffs2\n")));
++ yaffs_version = 2;
++ }
++
++ /* Added NCB 26/5/2006 for completeness */
++ if (yaffs_version == 2 && !options.inband_tags && WRITE_SIZE(mtd) == 512) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: auto selecting yaffs1\n")));
++ yaffs_version = 1;
++ }
++
++#endif
++
++ if (yaffs_version == 2) {
++ /* Check for version 2 style functions */
++ if (!mtd->erase ||
++ !mtd->block_isbad ||
++ !mtd->block_markbad ||
++ !mtd->read ||
++ !mtd->write ||
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ !mtd->read_oob || !mtd->write_oob) {
++#else
++ !mtd->write_ecc ||
++ !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
++#endif
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: MTD device does not support required "
++ "functions\n")));
++ return NULL;
++ }
++
++ if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
++ mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
++ !options.inband_tags) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: MTD device does not have the "
++ "right page sizes\n")));
++ return NULL;
++ }
++ } else {
++ /* Check for V1 style functions */
++ if (!mtd->erase ||
++ !mtd->read ||
++ !mtd->write ||
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ !mtd->read_oob || !mtd->write_oob) {
++#else
++ !mtd->write_ecc ||
++ !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
++#endif
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: MTD device does not support required "
++ "functions\n")));
++ return NULL;
++ }
++
++ if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
++ mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs: MTD device does not support have the "
++ "right page sizes\n")));
++ return NULL;
++ }
++ }
++
++ /* OK, so if we got here, we have an MTD that's NAND and looks
++ * like it has the right capabilities
++ * Set the yaffs_dev_t up for mtd
++ */
++
++ if (!read_only && !(mtd->flags & MTD_WRITEABLE)){
++ read_only = 1;
++ printk(KERN_INFO "yaffs: mtd is read only, setting superblock read only");
++ sb->s_flags |= MS_RDONLY;
++ }
++
++ dev = kmalloc(sizeof(yaffs_dev_t), GFP_KERNEL);
++ context = kmalloc(sizeof(struct yaffs_LinuxContext),GFP_KERNEL);
++
++ if(!dev || !context ){
++ if(dev)
++ kfree(dev);
++ if(context)
++ kfree(context);
++ dev = NULL;
++ context = NULL;
++ }
++
++ if (!dev) {
++ /* Deep shit could not allocate device structure */
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs_read_super: Failed trying to allocate "
++ "yaffs_dev_t. \n")));
++ return NULL;
++ }
++ memset(dev, 0, sizeof(yaffs_dev_t));
++ param = &(dev->param);
++
++ memset(context,0,sizeof(struct yaffs_LinuxContext));
++ dev->os_context = context;
++ YINIT_LIST_HEAD(&(context->contextList));
++ context->dev = dev;
++ context->superBlock = sb;
++
++ dev->read_only = read_only;
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ sb->s_fs_info = dev;
++#else
++ sb->u.generic_sbp = dev;
++#endif
++
++ dev->driver_context = mtd;
++ param->name = mtd->name;
++
++ /* Set up the memory size parameters.... */
++
++ nBlocks = YCALCBLOCKS(mtd->size, (YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK));
++
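++ /* These are yaffs1-style defaults; when yaffs2 is selected below they
++ * are recalculated from the MTD's page (writesize) and erase sizes. */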
++ param->start_block = 0;
++ param->end_block = nBlocks - 1;
++ param->chunks_per_block = YAFFS_CHUNKS_PER_BLOCK;
++ param->total_bytes_per_chunk = YAFFS_BYTES_PER_CHUNK;
++ param->n_reserved_blocks = 5;
++ param->n_caches = (options.no_cache) ? 0 : 10;
++ param->inband_tags = options.inband_tags;
++
++#ifdef CONFIG_YAFFS_DISABLE_LAZY_LOAD
++ param->disable_lazy_load = 1;
++#endif
++#ifdef CONFIG_YAFFS_XATTR
++ param->enable_xattr = 1;
++#endif
++ if(options.lazy_loading_overridden)
++ param->disable_lazy_load = !options.lazy_loading_enabled;
++
++#ifdef CONFIG_YAFFS_DISABLE_TAGS_ECC
++ param->no_tags_ecc = 1;
++#endif
++
++#ifdef CONFIG_YAFFS_DISABLE_BACKGROUND
++#else
++ param->defered_dir_update = 1;
++#endif
++
++ if(options.tags_ecc_overridden)
++ param->no_tags_ecc = !options.tags_ecc_on;
++
++#ifdef CONFIG_YAFFS_EMPTY_LOST_AND_FOUND
++ param->empty_lost_n_found = 1;
++#endif
++
++#ifdef CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING
++ param->refresh_period = 0;
++#else
++ param->refresh_period = 500;
++#endif
++
++#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
++ param->always_check_erased = 1;
++#endif
++
++ if(options.empty_lost_and_found_overridden)
++ param->empty_lost_n_found = options.empty_lost_and_found;
++
++ /* ... and the functions. */
++ if (yaffs_version == 2) {
++ param->write_chunk_tags_fn =
++ nandmtd2_WriteChunkWithTagsToNAND;
++ param->read_chunk_tags_fn =
++ nandmtd2_ReadChunkWithTagsFromNAND;
++ param->bad_block_fn = nandmtd2_MarkNANDBlockBad;
++ param->query_block_fn = nandmtd2_QueryNANDBlock;
++ yaffs_dev_to_lc(dev)->spareBuffer = YMALLOC(mtd->oobsize);
++ param->is_yaffs2 = 1;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ param->total_bytes_per_chunk = mtd->writesize;
++ param->chunks_per_block = mtd->erasesize / mtd->writesize;
++#else
++ param->total_bytes_per_chunk = mtd->oobblock;
++ param->chunks_per_block = mtd->erasesize / mtd->oobblock;
++#endif
++ nBlocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
++
++ param->start_block = 0;
++ param->end_block = nBlocks - 1;
++ } else {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ /* use the MTD interface in yaffs_mtdif1.c */
++ param->write_chunk_tags_fn =
++ nandmtd1_WriteChunkWithTagsToNAND;
++ param->read_chunk_tags_fn =
++ nandmtd1_ReadChunkWithTagsFromNAND;
++ param->bad_block_fn = nandmtd1_MarkNANDBlockBad;
++ param->query_block_fn = nandmtd1_QueryNANDBlock;
++#else
++ param->write_chunk_fn = nandmtd_WriteChunkToNAND;
++ param->read_chunk_fn = nandmtd_ReadChunkFromNAND;
++#endif
++ param->is_yaffs2 = 0;
++ }
++ /* ... and common functions */
++ param->erase_fn = nandmtd_EraseBlockInNAND;
++ param->initialise_flash_fn = nandmtd_InitialiseNAND;
++
++ yaffs_dev_to_lc(dev)->putSuperFunc = yaffs_MTDPutSuper;
++
++ param->sb_dirty_fn = yaffs_touch_super;
++ param->gc_control = yaffs_gc_control_callback;
++
++ yaffs_dev_to_lc(dev)->superBlock= sb;
++
++
++#ifndef CONFIG_YAFFS_DOES_ECC
++ param->use_nand_ecc = 1;
++#endif
++
++#ifdef CONFIG_YAFFS_DISABLE_WIDE_TNODES
++ param->wide_tnodes_disabled = 1;
++#endif
++
++ param->skip_checkpt_rd = options.skip_checkpoint_read;
++ param->skip_checkpt_wr = options.skip_checkpoint_write;
++
++ down(&yaffs_context_lock);
++ /* Get a mount id */
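++ /* Linear search for the lowest id not used by any existing context;
++ * the list is short, so the O(n^2) scan is acceptable. */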
++ found = 0;
++ for(mount_id=0; ! found; mount_id++){
++ found = 1;
++ ylist_for_each(l,&yaffs_context_list){
++ context_iterator = ylist_entry(l,struct yaffs_LinuxContext,contextList);
++ if(context_iterator->mount_id == mount_id)
++ found = 0;
++ }
++ }
++ context->mount_id = mount_id;
++
++ ylist_add_tail(&(yaffs_dev_to_lc(dev)->contextList), &yaffs_context_list);
++ up(&yaffs_context_lock);
++
++ /* Directory search handling...*/
++ YINIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->searchContexts));
++ param->remove_obj_fn = yaffs_remove_obj_callback;
++
++ init_MUTEX(&(yaffs_dev_to_lc(dev)->grossLock));
++
++ yaffs_gross_lock(dev);
++
++ err = yaffs_guts_initialise(dev);
++
++ T(YAFFS_TRACE_OS,
++ (TSTR("yaffs_read_super: guts initialised %s\n"),
++ (err == YAFFS_OK) ? "OK" : "FAILED"));
++
++ if(err == YAFFS_OK)
++ yaffs_bg_start(dev);
++
++ if(!context->bgThread)
++ param->defered_dir_update = 0;
++
++
++ /* Release lock before yaffs_get_inode() */
++ yaffs_gross_unlock(dev);
++
++ /* Create root inode */
++ if (err == YAFFS_OK)
++ inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0,
++ yaffs_root(dev));
++
++ if (!inode)
++ return NULL;
++
++ inode->i_op = &yaffs_dir_inode_operations;
++ inode->i_fop = &yaffs_dir_operations;
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_read_super: got root inode\n")));
++
++ root = d_alloc_root(inode);
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_read_super: d_alloc_root done\n")));
++
++ if (!root) {
++ iput(inode);
++ return NULL;
++ }
++ sb->s_root = root;
++ sb->s_dirt = !dev->is_checkpointed;
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs_read_super: is_checkpointed %d\n"),
++ dev->is_checkpointed));
++
++ T(YAFFS_TRACE_OS, (TSTR("yaffs_read_super: done\n")));
++ return sb;
++}
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
++ int silent)
++{
++ return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name,
++ void *data, struct vfsmount *mnt)
++{
++
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs_internal_read_super_mtd, mnt);
++}
++#else
++static struct super_block *yaffs_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name,
++ void *data)
++{
++
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs_internal_read_super_mtd);
++}
++#endif
++
++static struct file_system_type yaffs_fs_type = {
++ .owner = THIS_MODULE,
++ .name = "yaffs",
++ .get_sb = yaffs_read_super,
++ .kill_sb = kill_block_super,
++ .fs_flags = FS_REQUIRES_DEV,
++};
++#else
++static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
++ int silent)
++{
++ return yaffs_internal_read_super(1, sb, data, silent);
++}
++
++static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
++ FS_REQUIRES_DEV);
++#endif
++
++
++#ifdef CONFIG_YAFFS_YAFFS2
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
++ int silent)
++{
++ return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs2_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name, void *data,
++ struct vfsmount *mnt)
++{
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs2_internal_read_super_mtd, mnt);
++}
++#else
++static struct super_block *yaffs2_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name,
++ void *data)
++{
++
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs2_internal_read_super_mtd);
++}
++#endif
++
++static struct file_system_type yaffs2_fs_type = {
++ .owner = THIS_MODULE,
++ .name = "yaffs2",
++ .get_sb = yaffs2_read_super,
++ .kill_sb = kill_block_super,
++ .fs_flags = FS_REQUIRES_DEV,
++};
++#else
++static struct super_block *yaffs2_read_super(struct super_block *sb,
++ void *data, int silent)
++{
++ return yaffs_internal_read_super(2, sb, data, silent);
++}
++
++static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
++ FS_REQUIRES_DEV);
++#endif
++
++#endif /* CONFIG_YAFFS_YAFFS2 */
++
++static struct proc_dir_entry *my_proc_entry;
++static struct proc_dir_entry *debug_proc_entry;
++
++static char *yaffs_dump_dev_part0(char *buf, yaffs_dev_t * dev)
++{
++ buf += sprintf(buf, "start_block.......... %d\n", dev->param.start_block);
++ buf += sprintf(buf, "end_block............ %d\n", dev->param.end_block);
++ buf += sprintf(buf, "total_bytes_per_chunk %d\n", dev->param.total_bytes_per_chunk);
++ buf += sprintf(buf, "use_nand_ecc......... %d\n", dev->param.use_nand_ecc);
++ buf += sprintf(buf, "no_tags_ecc.......... %d\n", dev->param.no_tags_ecc);
++ buf += sprintf(buf, "is_yaffs2............ %d\n", dev->param.is_yaffs2);
++ buf += sprintf(buf, "inband_tags.......... %d\n", dev->param.inband_tags);
++ buf += sprintf(buf, "empty_lost_n_found... %d\n", dev->param.empty_lost_n_found);
++ buf += sprintf(buf, "disable_lazy_load.... %d\n", dev->param.disable_lazy_load);
++ buf += sprintf(buf, "refresh_period....... %d\n", dev->param.refresh_period);
++ buf += sprintf(buf, "n_caches............. %d\n", dev->param.n_caches);
++ buf += sprintf(buf, "n_reserved_blocks.... %d\n", dev->param.n_reserved_blocks);
++ buf += sprintf(buf, "always_check_erased.. %d\n", dev->param.always_check_erased);
++
++ buf += sprintf(buf, "\n");
++
++ return buf;
++}
++
++
++static char *yaffs_dump_dev_part1(char *buf, yaffs_dev_t * dev)
++{
++ buf += sprintf(buf, "data_bytes_per_chunk. %d\n", dev->data_bytes_per_chunk);
++ buf += sprintf(buf, "chunk_grp_bits....... %d\n", dev->chunk_grp_bits);
++ buf += sprintf(buf, "chunk_grp_size....... %d\n", dev->chunk_grp_size);
++ buf += sprintf(buf, "n_erased_blocks...... %d\n", dev->n_erased_blocks);
++ buf += sprintf(buf, "blocks_in_checkpt.... %d\n", dev->blocks_in_checkpt);
++ buf += sprintf(buf, "\n");
++ buf += sprintf(buf, "n_tnodes............. %d\n", dev->n_tnodes);
++ buf += sprintf(buf, "n_obj................ %d\n", dev->n_obj);
++ buf += sprintf(buf, "n_free_chunks........ %d\n", dev->n_free_chunks);
++ buf += sprintf(buf, "\n");
++ buf += sprintf(buf, "n_page_writes........ %u\n", dev->n_page_writes);
++ buf += sprintf(buf, "n_page_reads......... %u\n", dev->n_page_reads);
++ buf += sprintf(buf, "n_erasures........... %u\n", dev->n_erasures);
++ buf += sprintf(buf, "n_gc_copies.......... %u\n", dev->n_gc_copies);
++ buf += sprintf(buf, "all_gcs.............. %u\n", dev->all_gcs);
++ buf += sprintf(buf, "passive_gc_count..... %u\n", dev->passive_gc_count);
++ buf += sprintf(buf, "oldest_dirty_gc_count %u\n", dev->oldest_dirty_gc_count);
++ buf += sprintf(buf, "n_gc_blocks.......... %u\n", dev->n_gc_blocks);
++ buf += sprintf(buf, "bg_gcs............... %u\n", dev->bg_gcs);
++ buf += sprintf(buf, "n_retired_writes..... %u\n", dev->n_retired_writes);
++ buf += sprintf(buf, "nRetireBlocks........ %u\n", dev->n_retired_blocks);
++ buf += sprintf(buf, "n_ecc_fixed.......... %u\n", dev->n_ecc_fixed);
++ buf += sprintf(buf, "n_ecc_unfixed........ %u\n", dev->n_ecc_unfixed);
++ buf += sprintf(buf, "n_tags_ecc_fixed..... %u\n", dev->n_tags_ecc_fixed);
++ buf += sprintf(buf, "n_tags_ecc_unfixed... %u\n", dev->n_tags_ecc_unfixed);
++ buf += sprintf(buf, "cache_hits........... %u\n", dev->cache_hits);
++ buf += sprintf(buf, "n_deleted_files...... %u\n", dev->n_deleted_files);
++ buf += sprintf(buf, "n_unlinked_files..... %u\n", dev->n_unlinked_files);
++ buf += sprintf(buf, "refresh_count........ %u\n", dev->refresh_count);
++ buf += sprintf(buf, "n_bg_deletions....... %u\n", dev->n_bg_deletions);
++
++ return buf;
++}
++
++static int yaffs_proc_read(char *page,
++ char **start,
++ off_t offset, int count, int *eof, void *data)
++{
++ struct ylist_head *item;
++ char *buf = page;
++ int step = offset;
++ int n = 0;
++
++ /* Get proc_file_read() to step 'offset' by one on each successive call.
++ * We use 'offset' (*ppos) to indicate where we are in dev_list.
++ * This also assumes the user has posted a read buffer large
++ * enough to hold the complete output; but that's life in /proc.
++ */
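++ /* Step 0 prints the banner and step 1 a blank line; from step 2 on,
++ * each even/odd pair of steps emits part 0 and part 1 of the next
++ * device in the context list. */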
++
++ *(int *)start = 1;
++
++ /* Print header first */
++ if (step == 0)
++ buf += sprintf(buf, "Multi-version YAFFS built:" __DATE__ " " __TIME__"\n");
++ else if (step == 1)
++ buf += sprintf(buf,"\n");
++ else {
++ step-=2;
++
++ down(&yaffs_context_lock);
++
++ /* Locate and print the Nth entry. Order N-squared but N is small. */
++ ylist_for_each(item, &yaffs_context_list) {
++ struct yaffs_LinuxContext *dc = ylist_entry(item, struct yaffs_LinuxContext, contextList);
++ yaffs_dev_t *dev = dc->dev;
++
++ if (n < (step & ~1)) {
++ n+=2;
++ continue;
++ }
++ if((step & 1)==0){
++ buf += sprintf(buf, "\nDevice %d \"%s\"\n", n, dev->param.name);
++ buf = yaffs_dump_dev_part0(buf, dev);
++ } else
++ buf = yaffs_dump_dev_part1(buf, dev);
++
++ break;
++ }
++ up(&yaffs_context_lock);
++ }
++
++ return buf - page < count ? buf - page : count;
++}
++
++static int yaffs_stats_proc_read(char *page,
++ char **start,
++ off_t offset, int count, int *eof, void *data)
++{
++ struct ylist_head *item;
++ char *buf = page;
++ int n = 0;
++
++ down(&yaffs_context_lock);
++
++ /* Locate and print the Nth entry. Order N-squared but N is small. */
++ ylist_for_each(item, &yaffs_context_list) {
++ struct yaffs_LinuxContext *dc = ylist_entry(item, struct yaffs_LinuxContext, contextList);
++ yaffs_dev_t *dev = dc->dev;
++
++ int erasedChunks;
++
++ erasedChunks = dev->n_erased_blocks * dev->param.chunks_per_block;
++
++ buf += sprintf(buf,"%d, %d, %d, %u, %u, %u, %u\n",
++ n, dev->n_free_chunks, erasedChunks,
++ dev->bg_gcs, dev->oldest_dirty_gc_count,
++ dev->n_obj, dev->n_tnodes);
++ }
++ up(&yaffs_context_lock);
++
++
++ return buf - page < count ? buf - page : count;
++}
++
++/**
++ * Set the verbosity of the warnings and error messages.
++ *
++ * Note that the names can only be a..z or _ with the current code.
++ */
++
++static struct {
++ char *mask_name;
++ unsigned mask_bitfield;
++} mask_flags[] = {
++ {"allocate", YAFFS_TRACE_ALLOCATE},
++ {"always", YAFFS_TRACE_ALWAYS},
++ {"background", YAFFS_TRACE_BACKGROUND},
++ {"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
++ {"buffers", YAFFS_TRACE_BUFFERS},
++ {"bug", YAFFS_TRACE_BUG},
++ {"checkpt", YAFFS_TRACE_CHECKPOINT},
++ {"deletion", YAFFS_TRACE_DELETION},
++ {"erase", YAFFS_TRACE_ERASE},
++ {"error", YAFFS_TRACE_ERROR},
++ {"gc_detail", YAFFS_TRACE_GC_DETAIL},
++ {"gc", YAFFS_TRACE_GC},
++ {"lock", YAFFS_TRACE_LOCK},
++ {"mtd", YAFFS_TRACE_MTD},
++ {"nandaccess", YAFFS_TRACE_NANDACCESS},
++ {"os", YAFFS_TRACE_OS},
++ {"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
++ {"scan", YAFFS_TRACE_SCAN},
++ {"tracing", YAFFS_TRACE_TRACING},
++ {"sync", YAFFS_TRACE_SYNC},
++ {"write", YAFFS_TRACE_WRITE},
++
++ {"verify", YAFFS_TRACE_VERIFY},
++ {"verify_nand", YAFFS_TRACE_VERIFY_NAND},
++ {"verify_full", YAFFS_TRACE_VERIFY_FULL},
++ {"verify_all", YAFFS_TRACE_VERIFY_ALL},
++
++ {"all", 0xffffffff},
++ {"none", 0},
++ {NULL, 0},
++};
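++
++/*
++ * Example usage (a sketch; the exact path depends on where YPROC_ROOT
++ * places the "yaffs" entry, commonly /proc/yaffs):
++ *
++ *   echo "+mtd -os" > /proc/yaffs    add/remove named trace flags
++ *   echo "=0x0002" > /proc/yaffs     set the whole mask numerically
++ *
++ * The parser below accepts '+', '-' and '=' prefixes, bare names from
++ * the table above, and numerals; YAFFS_TRACE_ALWAYS is always kept set.
++ */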
++
++#define MAX_MASK_NAME_LENGTH 40
++static int yaffs_proc_write_trace_options(struct file *file, const char *buf,
++ unsigned long count, void *data)
++{
++ unsigned rg = 0, mask_bitfield;
++ char *end;
++ char *mask_name;
++ const char *x;
++ char substring[MAX_MASK_NAME_LENGTH + 1];
++ int i;
++ int done = 0;
++ int add, len = 0;
++ int pos = 0;
++
++ rg = yaffs_trace_mask;
++
++ while (!done && (pos < count)) {
++ done = 1;
++ while ((pos < count) && isspace(buf[pos]))
++ pos++;
++
++ switch (buf[pos]) {
++ case '+':
++ case '-':
++ case '=':
++ add = buf[pos];
++ pos++;
++ break;
++
++ default:
++ add = ' ';
++ break;
++ }
++ mask_name = NULL;
++
++ mask_bitfield = simple_strtoul(buf + pos, &end, 0);
++
++ if (end > buf + pos) {
++ mask_name = "numeral";
++ len = end - (buf + pos);
++ pos += len;
++ done = 0;
++ } else {
++ for (x = buf + pos, i = 0;
++ (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
++ i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
++ substring[i] = *x;
++ substring[i] = '\0';
++
++ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
++ if (strcmp(substring, mask_flags[i].mask_name) == 0) {
++ mask_name = mask_flags[i].mask_name;
++ mask_bitfield = mask_flags[i].mask_bitfield;
++ done = 0;
++ break;
++ }
++ }
++ }
++
++ if (mask_name != NULL) {
++ done = 0;
++ switch (add) {
++ case '-':
++ rg &= ~mask_bitfield;
++ break;
++ case '+':
++ rg |= mask_bitfield;
++ break;
++ case '=':
++ rg = mask_bitfield;
++ break;
++ default:
++ rg |= mask_bitfield;
++ break;
++ }
++ }
++ }
++
++ yaffs_trace_mask = rg | YAFFS_TRACE_ALWAYS;
++
++ printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_trace_mask);
++
++ if (rg & YAFFS_TRACE_ALWAYS) {
++ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
++ char flag;
++ flag = ((rg & mask_flags[i].mask_bitfield) ==
++ mask_flags[i].mask_bitfield) ? '+' : '-';
++ printk(KERN_DEBUG "%c%s\n", flag, mask_flags[i].mask_name);
++ }
++ }
++
++ return count;
++}
++
++
++static int yaffs_proc_write(struct file *file, const char *buf,
++ unsigned long count, void *data)
++{
++ return yaffs_proc_write_trace_options(file, buf, count, data);
++}
++
++/* Stuff to handle installation of file systems */
++struct file_system_to_install {
++ struct file_system_type *fst;
++ int installed;
++};
++
++static struct file_system_to_install fs_to_install[] = {
++ {&yaffs_fs_type, 0},
++ {&yaffs2_fs_type, 0},
++ {NULL, 0}
++};
++
++static int __init init_yaffs_fs(void)
++{
++ int error = 0;
++ struct file_system_to_install *fsinst;
++
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs built " __DATE__ " " __TIME__ " Installing. \n")));
++
++#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR(" \n\n\n\nYAFFS-WARNING CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED selected.\n\n\n\n")));
++#endif
++
++ init_MUTEX(&yaffs_context_lock);
++
++ /* Install the proc_fs entries */
++ my_proc_entry = create_proc_entry("yaffs",
++ S_IRUGO | S_IFREG,
++ YPROC_ROOT);
++
++ if (my_proc_entry) {
++ my_proc_entry->write_proc = yaffs_proc_write;
++ my_proc_entry->read_proc = yaffs_proc_read;
++ my_proc_entry->data = NULL;
++ } else
++ return -ENOMEM;
++
++ debug_proc_entry = create_proc_entry("yaffs_stats",
++ S_IRUGO | S_IFREG,
++ YPROC_ROOT);
++
++ if (debug_proc_entry) {
++ debug_proc_entry->write_proc = NULL;
++ debug_proc_entry->read_proc = yaffs_stats_proc_read;
++ debug_proc_entry->data = NULL;
++ } else
++ return -ENOMEM;
++
++ /* Now add the file system entries */
++
++ fsinst = fs_to_install;
++
++ while (fsinst->fst && !error) {
++ error = register_filesystem(fsinst->fst);
++ if (!error)
++ fsinst->installed = 1;
++ fsinst++;
++ }
++
++ /* Any errors? uninstall */
++ if (error) {
++ fsinst = fs_to_install;
++
++ while (fsinst->fst) {
++ if (fsinst->installed) {
++ unregister_filesystem(fsinst->fst);
++ fsinst->installed = 0;
++ }
++ fsinst++;
++ }
++ }
++
++ return error;
++}
++
++static void __exit exit_yaffs_fs(void)
++{
++
++ struct file_system_to_install *fsinst;
++
++ T(YAFFS_TRACE_ALWAYS,
++ (TSTR("yaffs built " __DATE__ " " __TIME__ " removing. \n")));
++
++ remove_proc_entry("yaffs", YPROC_ROOT);
++ remove_proc_entry("yaffs_stats", YPROC_ROOT);
++
++ fsinst = fs_to_install;
++
++ while (fsinst->fst) {
++ if (fsinst->installed) {
++ unregister_filesystem(fsinst->fst);
++ fsinst->installed = 0;
++ }
++ fsinst++;
++ }
++}
++
++module_init(init_yaffs_fs)
++module_exit(exit_yaffs_fs)
++
++MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
++MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2010");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/fs/yaffs2/yaffs_yaffs1.c
+@@ -0,0 +1,465 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#include "yaffs_yaffs1.h"
++#include "yportenv.h"
++#include "yaffs_trace.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_nand.h"
++
++
++int yaffs1_scan(yaffs_dev_t *dev)
++{
++ yaffs_ext_tags tags;
++ int blk;
++ int blockIterator;
++ int startIterator;
++ int endIterator;
++ int result;
++
++ int chunk;
++ int c;
++ int deleted;
++ yaffs_block_state_t state;
++ yaffs_obj_t *hard_list = NULL;
++ yaffs_block_info_t *bi;
++ __u32 seq_number;
++ yaffs_obj_header *oh;
++ yaffs_obj_t *in;
++ yaffs_obj_t *parent;
++
++ int alloc_failed = 0;
++
++ struct yaffs_shadow_fixer_s *shadowFixerList = NULL;
++
++
++ __u8 *chunkData;
++
++
++
++ T(YAFFS_TRACE_SCAN,
++ (TSTR("yaffs1_scan starts intstartblk %d intendblk %d..." TENDSTR),
++ dev->internal_start_block, dev->internal_end_block));
++
++ chunkData = yaffs_get_temp_buffer(dev, __LINE__);
++
++ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
++
++ /* Scan all the blocks to determine their state */
++ bi = dev->block_info;
++ for (blk = dev->internal_start_block; blk <= dev->internal_end_block; blk++) {
++ yaffs_clear_chunk_bits(dev, blk);
++ bi->pages_in_use = 0;
++ bi->soft_del_pages = 0;
++
++ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
++
++ bi->block_state = state;
++ bi->seq_number = seq_number;
++
++ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
++ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
++
++ T(YAFFS_TRACE_SCAN_DEBUG,
++ (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
++ state, seq_number));
++
++ if (state == YAFFS_BLOCK_STATE_DEAD) {
++ T(YAFFS_TRACE_BAD_BLOCKS,
++ (TSTR("block %d is bad" TENDSTR), blk));
++ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
++ T(YAFFS_TRACE_SCAN_DEBUG,
++ (TSTR("Block empty " TENDSTR)));
++ dev->n_erased_blocks++;
++ dev->n_free_chunks += dev->param.chunks_per_block;
++ }
++ bi++;
++ }
++
++ startIterator = dev->internal_start_block;
++ endIterator = dev->internal_end_block;
++
++ /* For each block.... */
++ for (blockIterator = startIterator; !alloc_failed && blockIterator <= endIterator;
++ blockIterator++) {
++
++ YYIELD();
++
++ YYIELD();
++
++ blk = blockIterator;
++
++ bi = yaffs_get_block_info(dev, blk);
++ state = bi->block_state;
++
++ deleted = 0;
++
++ /* For each chunk in each block that needs scanning....*/
++ for (c = 0; !alloc_failed && c < dev->param.chunks_per_block &&
++ state == YAFFS_BLOCK_STATE_NEEDS_SCANNING; c++) {
++ /* Read the tags and decide what to do */
++ chunk = blk * dev->param.chunks_per_block + c;
++
++ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
++ &tags);
++
++ /* Let's have a good look at this chunk... */
++
++ if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED || tags.is_deleted) {
++ /* YAFFS1 only...
++ * A deleted chunk
++ */
++ deleted++;
++ dev->n_free_chunks++;
++ /*T((" %d %d deleted\n",blk,c)); */
++ } else if (!tags.chunk_used) {
++ /* An unassigned chunk in the block
++ * This means that either the block is empty or
++ * this is the one being allocated from
++ */
++
++ if (c == 0) {
++ /* We're looking at the first chunk in the block so the block is unused */
++ state = YAFFS_BLOCK_STATE_EMPTY;
++ dev->n_erased_blocks++;
++ } else {
++ /* this is the block being allocated from */
++ T(YAFFS_TRACE_SCAN,
++ (TSTR
++ (" Allocating from %d %d" TENDSTR),
++ blk, c));
++ state = YAFFS_BLOCK_STATE_ALLOCATING;
++ dev->alloc_block = blk;
++ dev->alloc_page = c;
++ dev->alloc_block_finder = blk;
++ /* Set block finder here to encourage the allocator to go forth from here. */
++
++ }
++
++ dev->n_free_chunks += (dev->param.chunks_per_block - c);
++ } else if (tags.chunk_id > 0) {
++ /* chunk_id > 0 so it is a data chunk... */
++ unsigned int endpos;
++
++ yaffs_set_chunk_bit(dev, blk, c);
++ bi->pages_in_use++;
++
++ in = yaffs_find_or_create_by_number(dev,
++ tags.
++ obj_id,
++ YAFFS_OBJECT_TYPE_FILE);
++ /* PutChunkIntoFile checks for a clash (two data chunks with
++ * the same chunk_id).
++ */
++
++ if (!in)
++ alloc_failed = 1;
++
++ if (in) {
++ if (!yaffs_put_chunk_in_file(in, tags.chunk_id, chunk, 1))
++ alloc_failed = 1;
++ }
++
++ endpos =
++ (tags.chunk_id - 1) * dev->data_bytes_per_chunk +
++ tags.n_bytes;
++ if (in &&
++ in->variant_type == YAFFS_OBJECT_TYPE_FILE
++ && in->variant.file_variant.scanned_size <
++ endpos) {
++ in->variant.file_variant.
++ scanned_size = endpos;
++ if (!dev->param.use_header_file_size) {
++ in->variant.file_variant.
++ file_size =
++ in->variant.file_variant.
++ scanned_size;
++ }
++
++ }
++ /* T((" %d %d data %d %d\n",blk,c,tags.obj_id,tags.chunk_id)); */
++ } else {
++ /* chunk_id == 0, so it is an ObjectHeader.
++ * Thus, we read in the object header and make the object
++ */
++ yaffs_set_chunk_bit(dev, blk, c);
++ bi->pages_in_use++;
++
++ result = yaffs_rd_chunk_tags_nand(dev, chunk,
++ chunkData,
++ NULL);
++
++ oh = (yaffs_obj_header *) chunkData;
++
++ in = yaffs_find_by_number(dev,
++ tags.obj_id);
++ if (in && in->variant_type != oh->type) {
++ /* This should not happen, but somehow
++ * We've ended up with an obj_id that has been reused but not yet
++ * deleted, and worse still it has changed type. Delete the old object.
++ */
++
++ yaffs_del_obj(in);
++
++ in = 0;
++ }
++
++ in = yaffs_find_or_create_by_number(dev,
++ tags.
++ obj_id,
++ oh->type);
++
++ if (!in)
++ alloc_failed = 1;
++
++ if (in && oh->shadows_obj > 0) {
++
++ struct yaffs_shadow_fixer_s *fixer;
++ fixer = YMALLOC(sizeof(struct yaffs_shadow_fixer_s));
++ if (fixer) {
++ fixer->next = shadowFixerList;
++ shadowFixerList = fixer;
++ fixer->obj_id = tags.obj_id;
++ fixer->shadowed_id = oh->shadows_obj;
++ T(YAFFS_TRACE_SCAN,
++ (TSTR
++ (" Shadow fixer: %d shadows %d" TENDSTR),
++ fixer->obj_id, fixer->shadowed_id));
++
++ }
++
++ }
++
++ if (in && in->valid) {
++ /* We have already filled this one. We have a duplicate and need to resolve it. */
++
++ unsigned existingSerial = in->serial;
++ unsigned newSerial = tags.serial_number;
++
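++ /* The yaffs1 serial number is a 2-bit rolling counter: if the newly
++ * scanned chunk's serial is one ahead (mod 4) of the existing one,
++ * the new chunk is the more recent copy and wins. */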
++ if (((existingSerial + 1) & 3) == newSerial) {
++ /* Use new one - destroy the existing one */
++ yaffs_chunk_del(dev,
++ in->hdr_chunk,
++ 1, __LINE__);
++ in->valid = 0;
++ } else {
++ /* Use existing - destroy this one. */
++ yaffs_chunk_del(dev, chunk, 1,
++ __LINE__);
++ }
++ }
++
++ if (in && !in->valid &&
++ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
++ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND)) {
++ /* We only load some info, don't fiddle with directory structure */
++ in->valid = 1;
++ in->variant_type = oh->type;
++
++ in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++ in->win_atime[0] = oh->win_atime[0];
++ in->win_ctime[0] = oh->win_ctime[0];
++ in->win_mtime[0] = oh->win_mtime[0];
++ in->win_atime[1] = oh->win_atime[1];
++ in->win_ctime[1] = oh->win_ctime[1];
++ in->win_mtime[1] = oh->win_mtime[1];
++#else
++ in->yst_uid = oh->yst_uid;
++ in->yst_gid = oh->yst_gid;
++ in->yst_atime = oh->yst_atime;
++ in->yst_mtime = oh->yst_mtime;
++ in->yst_ctime = oh->yst_ctime;
++ in->yst_rdev = oh->yst_rdev;
++#endif
++ in->hdr_chunk = chunk;
++ in->serial = tags.serial_number;
++
++ } else if (in && !in->valid) {
++ /* we need to load this info */
++
++ in->valid = 1;
++ in->variant_type = oh->type;
++
++ in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++ in->win_atime[0] = oh->win_atime[0];
++ in->win_ctime[0] = oh->win_ctime[0];
++ in->win_mtime[0] = oh->win_mtime[0];
++ in->win_atime[1] = oh->win_atime[1];
++ in->win_ctime[1] = oh->win_ctime[1];
++ in->win_mtime[1] = oh->win_mtime[1];
++#else
++ in->yst_uid = oh->yst_uid;
++ in->yst_gid = oh->yst_gid;
++ in->yst_atime = oh->yst_atime;
++ in->yst_mtime = oh->yst_mtime;
++ in->yst_ctime = oh->yst_ctime;
++ in->yst_rdev = oh->yst_rdev;
++#endif
++ in->hdr_chunk = chunk;
++ in->serial = tags.serial_number;
++
++ yaffs_set_obj_name_from_oh(in, oh);
++ in->dirty = 0;
++
++ /* directory stuff...
++ * hook up to parent
++ */
++
++ parent =
++ yaffs_find_or_create_by_number
++ (dev, oh->parent_obj_id,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
++ if (!parent)
++ alloc_failed = 1;
++ if (parent && parent->variant_type ==
++ YAFFS_OBJECT_TYPE_UNKNOWN) {
++ /* Set up as a directory */
++ parent->variant_type =
++ YAFFS_OBJECT_TYPE_DIRECTORY;
++ YINIT_LIST_HEAD(&parent->variant.
++ dir_variant.
++ children);
++ } else if (!parent || parent->variant_type !=
++ YAFFS_OBJECT_TYPE_DIRECTORY) {
++ /* Hoosterman, another problem....
++ * We're trying to use a non-directory as a directory
++ */
++
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
++ TENDSTR)));
++ parent = dev->lost_n_found;
++ }
++
++ yaffs_add_obj_to_dir(parent, in);
++
++ if (0 && (parent == dev->del_dir ||
++ parent == dev->unlinked_dir)) {
++ in->deleted = 1; /* If it is unlinked at start up then it wants deleting */
++ dev->n_deleted_files++;
++ }
++ /* Note re hardlinks.
++ * Since we might scan a hardlink before its equivalent object is scanned
++ * we put them all in a list.
++ * After scanning is complete, we should have all the objects, so we run through this
++ * list and fix up all the chains.
++ */
++
++ switch (in->variant_type) {
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* Todo got a problem */
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++ if (dev->param.use_header_file_size)
++ in->variant.file_variant.file_size =
++ oh->file_size;
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ in->variant.hardlink_variant.
++ equiv_id =
++ oh->equiv_id;
++ in->hard_links.next =
++ (struct ylist_head *)
++ hard_list;
++ hard_list = in;
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ in->variant.symlink_variant.alias =
++ yaffs_clone_str(oh->alias);
++ if (!in->variant.symlink_variant.alias)
++ alloc_failed = 1;
++ break;
++ }
++
++ }
++ }
++ }
++
++ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++ /* If we got this far while scanning, then the block is fully allocated.*/
++ state = YAFFS_BLOCK_STATE_FULL;
++ }
++
++ if (state == YAFFS_BLOCK_STATE_ALLOCATING) {
++ /* If the block was partially allocated then treat it as fully allocated.*/
++ state = YAFFS_BLOCK_STATE_FULL;
++ dev->alloc_block = -1;
++ }
++
++ bi->block_state = state;
++
++ /* Now let's see if it was dirty */
++ if (bi->pages_in_use == 0 &&
++ !bi->has_shrink_hdr &&
++ bi->block_state == YAFFS_BLOCK_STATE_FULL) {
++ yaffs_block_became_dirty(dev, blk);
++ }
++
++ }
++
++
++ /* Ok, we've done all the scanning.
++ * Fix up the hard link chains.
++ * We should now have scanned all the objects, now it's time to add these
++ * hardlinks.
++ */
++
++ yaffs_link_fixup(dev, hard_list);
++
++ /* Fix up any shadowed objects */
++ {
++ struct yaffs_shadow_fixer_s *fixer;
++ yaffs_obj_t *obj;
++
++ while (shadowFixerList) {
++ fixer = shadowFixerList;
++ shadowFixerList = fixer->next;
++ /* Complete the rename transaction by deleting the shadowed object
++ * then setting the object header to unshadowed.
++ */
++ obj = yaffs_find_by_number(dev, fixer->shadowed_id);
++ if (obj)
++ yaffs_del_obj(obj);
++
++ obj = yaffs_find_by_number(dev, fixer->obj_id);
++
++ if (obj)
++ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
++
++ YFREE(fixer);
++ }
++ }
++
++ yaffs_release_temp_buffer(dev, chunkData, __LINE__);
++
++ if (alloc_failed)
++ return YAFFS_FAIL;
++
++ T(YAFFS_TRACE_SCAN, (TSTR("yaffs1_scan ends" TENDSTR)));
++
++
++ return YAFFS_OK;
++}
++
+--- /dev/null
++++ b/fs/yaffs2/yaffs_yaffs1.h
+@@ -0,0 +1,22 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_YAFFS1_H__
++#define __YAFFS_YAFFS1_H__
++
++#include "yaffs_guts.h"
++int yaffs1_scan(yaffs_dev_t *dev);
++
++#endif
+--- /dev/null
++++ b/fs/yaffs2/yaffs_yaffs2.c
+@@ -0,0 +1,1540 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++#include "yaffs_yaffs2.h"
++#include "yaffs_checkptrw.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_qsort.h"
++#include "yaffs_nand.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_verify.h"
++
++/*
++ * Checkpoints are really no benefit on very small partitions.
++ *
++ * To save space on small partitions don't bother with checkpoints unless
++ * the partition is at least this big.
++ */
++#define YAFFS_CHECKPOINT_MIN_BLOCKS 60
++
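++/* Holes smaller than this many chunks are filled with real zero-filled
++ * chunks; larger holes are instead recorded by writing a start-of-hole
++ * object header (see yaffs2_handle_hole() below). */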
++#define YAFFS_SMALL_HOLE_THRESHOLD 4
++
++
++/*
++ * Oldest Dirty Sequence Number handling.
++ */
++
++/* yaffs_calc_oldest_dirty_seq()
++ * yaffs2_find_oldest_dirty_seq()
++ * Calculate the oldest dirty sequence number if we don't know it.
++ */
++void yaffs_calc_oldest_dirty_seq(yaffs_dev_t *dev)
++{
++ int i;
++ unsigned seq;
++ unsigned block_no = 0;
++ yaffs_block_info_t *b;
++
++ if(!dev->param.is_yaffs2)
++ return;
++
++ /* Find the oldest dirty sequence number. */
++ seq = dev->seq_number + 1;
++ b = dev->block_info;
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ if (b->block_state == YAFFS_BLOCK_STATE_FULL &&
++ (b->pages_in_use - b->soft_del_pages) < dev->param.chunks_per_block &&
++ b->seq_number < seq) {
++ seq = b->seq_number;
++ block_no = i;
++ }
++ b++;
++ }
++
++ if(block_no){
++ dev->oldest_dirty_seq = seq;
++ dev->oldest_dirty_block = block_no;
++ }
++
++}
++
++
++void yaffs2_find_oldest_dirty_seq(yaffs_dev_t *dev)
++{
++ if(!dev->param.is_yaffs2)
++ return;
++
++ if(!dev->oldest_dirty_seq)
++ yaffs_calc_oldest_dirty_seq(dev);
++}
++
++/*
++ * yaffs_clear_oldest_dirty_seq()
++ * Called when a block is erased or marked bad (i.e. when its seq_number
++ * becomes invalid). If the value matches the oldest then we clear
++ * dev->oldest_dirty_seq to force its recomputation.
++ */
++void yaffs2_clear_oldest_dirty_seq(yaffs_dev_t *dev, yaffs_block_info_t *bi)
++{
++
++ if(!dev->param.is_yaffs2)
++ return;
++
++ if(!bi || bi->seq_number == dev->oldest_dirty_seq){
++ dev->oldest_dirty_seq = 0;
++ dev->oldest_dirty_block = 0;
++ }
++}
++
++/*
++ * yaffs2_update_oldest_dirty_seq()
++ * Update the oldest dirty sequence number whenever we dirty a block.
++ * Only do this if the oldest_dirty_seq is actually being tracked.
++ */
++void yaffs2_update_oldest_dirty_seq(yaffs_dev_t *dev, unsigned block_no, yaffs_block_info_t *bi)
++{
++ if(!dev->param.is_yaffs2)
++ return;
++
++ if(dev->oldest_dirty_seq){
++ if(dev->oldest_dirty_seq > bi->seq_number){
++ dev->oldest_dirty_seq = bi->seq_number;
++ dev->oldest_dirty_block = block_no;
++ }
++ }
++}
++
++int yaffs_block_ok_for_gc(yaffs_dev_t *dev,
++ yaffs_block_info_t *bi)
++{
++
++ if (!dev->param.is_yaffs2)
++ return 1; /* disqualification only applies to yaffs2. */
++
++ if (!bi->has_shrink_hdr)
++ return 1; /* can gc */
++
++ yaffs2_find_oldest_dirty_seq(dev);
++
++ /* Can't do gc of this block if there are any blocks older than this one that have
++ * discarded pages.
++ */
++ return (bi->seq_number <= dev->oldest_dirty_seq);
++}
++
++/*
++ * yaffs2_find_refresh_block()
++ * periodically finds the oldest full block by sequence number for refreshing.
++ * Only for yaffs2.
++ */
++__u32 yaffs2_find_refresh_block(yaffs_dev_t *dev)
++{
++ __u32 b ;
++
++ __u32 oldest = 0;
++ __u32 oldestSequence = 0;
++
++ yaffs_block_info_t *bi;
++
++ if(!dev->param.is_yaffs2)
++ return oldest;
++
++ /*
++ * If refresh period < 10 then refreshing is disabled.
++ */
++ if(dev->param.refresh_period < 10)
++ return oldest;
++
++ /*
++ * Fix broken values.
++ */
++ if(dev->refresh_skip > dev->param.refresh_period)
++ dev->refresh_skip = dev->param.refresh_period;
++
++ if(dev->refresh_skip > 0)
++ return oldest;
++
++ /*
++ * Refresh skip is now zero.
++ * We'll do a refresh this time around....
++ * Update the refresh skip and find the oldest block.
++ */
++ dev->refresh_skip = dev->param.refresh_period;
++ dev->refresh_count++;
++ bi = dev->block_info;
++ for (b = dev->internal_start_block; b <=dev->internal_end_block; b++){
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL){
++
++ if(oldest < 1 ||
++ bi->seq_number < oldestSequence){
++ oldest = b;
++ oldestSequence = bi->seq_number;
++ }
++ }
++ bi++;
++ }
++
++ if (oldest > 0) {
++ T(YAFFS_TRACE_GC,
++ (TSTR("GC refresh count %d selected block %d with seq_number %d" TENDSTR),
++ dev->refresh_count, oldest, oldestSequence));
++ }
++
++ return oldest;
++}
++
++int yaffs2_checkpt_required(yaffs_dev_t *dev)
++{
++ int nblocks;
++
++ if(!dev->param.is_yaffs2)
++ return 0;
++
++ nblocks = dev->internal_end_block - dev->internal_start_block + 1 ;
++
++ return !dev->param.skip_checkpt_wr &&
++ !dev->read_only &&
++ (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS);
++}
++
++int yaffs_calc_checkpt_blocks_required(yaffs_dev_t *dev)
++{
++ int retval;
++
++ if(!dev->param.is_yaffs2)
++ return 0;
++
++ if (!dev->checkpoint_blocks_required &&
++ yaffs2_checkpt_required(dev)){
++ /* Not a valid value so recalculate */
++ int n_bytes = 0;
++ int nBlocks;
++ int devBlocks = (dev->param.end_block - dev->param.start_block + 1);
++
++ n_bytes += sizeof(yaffs_checkpt_validty_t);
++ n_bytes += sizeof(yaffs_checkpt_dev_t);
++ n_bytes += devBlocks * sizeof(yaffs_block_info_t);
++ n_bytes += devBlocks * dev->chunk_bit_stride;
++ n_bytes += (sizeof(yaffs_checkpt_obj_t) + sizeof(__u32)) * (dev->n_obj);
++ n_bytes += (dev->tnode_size + sizeof(__u32)) * (dev->n_tnodes);
++ n_bytes += sizeof(yaffs_checkpt_validty_t);
++ n_bytes += sizeof(__u32); /* checksum*/
++
++ /* Round up (+1) and add 2 blocks to allow for some bad blocks, so add 3 in total */
++
++ nBlocks = (n_bytes/(dev->data_bytes_per_chunk * dev->param.chunks_per_block)) + 3;
++
++ dev->checkpoint_blocks_required = nBlocks;
++ }
++
++ retval = dev->checkpoint_blocks_required - dev->blocks_in_checkpt;
++ if(retval < 0)
++ retval = 0;
++ return retval;
++}
++
++/*--------------------- Checkpointing --------------------*/
++
++
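++/* A validity marker is written at both the head and the tail of the
++ * checkpoint stream; on restore both must be read back intact for the
++ * checkpoint to be trusted (see yaffs2_rd_checkpt_data() below). */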
++static int yaffs2_wr_checkpt_validity_marker(yaffs_dev_t *dev, int head)
++{
++ yaffs_checkpt_validty_t cp;
++
++ memset(&cp, 0, sizeof(cp));
++
++ cp.struct_type = sizeof(cp);
++ cp.magic = YAFFS_MAGIC;
++ cp.version = YAFFS_CHECKPOINT_VERSION;
++ cp.head = (head) ? 1 : 0;
++
++ return (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)) ?
++ 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_validity_marker(yaffs_dev_t *dev, int head)
++{
++ yaffs_checkpt_validty_t cp;
++ int ok;
++
++ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++ if (ok)
++ ok = (cp.struct_type == sizeof(cp)) &&
++ (cp.magic == YAFFS_MAGIC) &&
++ (cp.version == YAFFS_CHECKPOINT_VERSION) &&
++ (cp.head == ((head) ? 1 : 0));
++ return ok ? 1 : 0;
++}
++
++static void yaffs2_dev_to_checkpt_dev(yaffs_checkpt_dev_t *cp,
++ yaffs_dev_t *dev)
++{
++ cp->n_erased_blocks = dev->n_erased_blocks;
++ cp->alloc_block = dev->alloc_block;
++ cp->alloc_page = dev->alloc_page;
++ cp->n_free_chunks = dev->n_free_chunks;
++
++ cp->n_deleted_files = dev->n_deleted_files;
++ cp->n_unlinked_files = dev->n_unlinked_files;
++ cp->n_bg_deletions = dev->n_bg_deletions;
++ cp->seq_number = dev->seq_number;
++
++}
++
++static void yaffs_checkpt_dev_to_dev(yaffs_dev_t *dev,
++ yaffs_checkpt_dev_t *cp)
++{
++ dev->n_erased_blocks = cp->n_erased_blocks;
++ dev->alloc_block = cp->alloc_block;
++ dev->alloc_page = cp->alloc_page;
++ dev->n_free_chunks = cp->n_free_chunks;
++
++ dev->n_deleted_files = cp->n_deleted_files;
++ dev->n_unlinked_files = cp->n_unlinked_files;
++ dev->n_bg_deletions = cp->n_bg_deletions;
++ dev->seq_number = cp->seq_number;
++}
++
++
++static int yaffs2_wr_checkpt_dev(yaffs_dev_t *dev)
++{
++ yaffs_checkpt_dev_t cp;
++ __u32 n_bytes;
++ __u32 nBlocks = (dev->internal_end_block - dev->internal_start_block + 1);
++
++ int ok;
++
++ /* Write device runtime values*/
++ yaffs2_dev_to_checkpt_dev(&cp, dev);
++ cp.struct_type = sizeof(cp);
++
++ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++ /* Write block info */
++ if (ok) {
++ n_bytes = nBlocks * sizeof(yaffs_block_info_t);
++ ok = (yaffs2_checkpt_wr(dev, dev->block_info, n_bytes) == n_bytes);
++ }
++
++ /* Write chunk bits */
++ if (ok) {
++ n_bytes = nBlocks * dev->chunk_bit_stride;
++ ok = (yaffs2_checkpt_wr(dev, dev->chunk_bits, n_bytes) == n_bytes);
++ }
++ return ok ? 1 : 0;
++
++}
++
++static int yaffs2_rd_checkpt_dev(yaffs_dev_t *dev)
++{
++ yaffs_checkpt_dev_t cp;
++ __u32 n_bytes;
++ __u32 nBlocks = (dev->internal_end_block - dev->internal_start_block + 1);
++
++ int ok;
++
++ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (!ok)
++ return 0;
++
++ if (cp.struct_type != sizeof(cp))
++ return 0;
++
++
++ yaffs_checkpt_dev_to_dev(dev, &cp);
++
++ n_bytes = nBlocks * sizeof(yaffs_block_info_t);
++
++ ok = (yaffs2_checkpt_rd(dev, dev->block_info, n_bytes) == n_bytes);
++
++ if (!ok)
++ return 0;
++ n_bytes = nBlocks * dev->chunk_bit_stride;
++
++ ok = (yaffs2_checkpt_rd(dev, dev->chunk_bits, n_bytes) == n_bytes);
++
++ return ok ? 1 : 0;
++}
++
++static void yaffs2_obj_checkpt_obj(yaffs_checkpt_obj_t *cp,
++ yaffs_obj_t *obj)
++{
++
++ cp->obj_id = obj->obj_id;
++ cp->parent_id = (obj->parent) ? obj->parent->obj_id : 0;
++ cp->hdr_chunk = obj->hdr_chunk;
++ cp->variant_type = obj->variant_type;
++ cp->deleted = obj->deleted;
++ cp->soft_del = obj->soft_del;
++ cp->unlinked = obj->unlinked;
++ cp->fake = obj->fake;
++ cp->rename_allowed = obj->rename_allowed;
++ cp->unlink_allowed = obj->unlink_allowed;
++ cp->serial = obj->serial;
++ cp->n_data_chunks = obj->n_data_chunks;
++
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++ cp->size_or_equiv_obj = obj->variant.file_variant.file_size;
++ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ cp->size_or_equiv_obj = obj->variant.hardlink_variant.equiv_id;
++}
++
++static int yaffs2_checkpt_obj_to_obj(yaffs_obj_t *obj, yaffs_checkpt_obj_t *cp)
++{
++
++ yaffs_obj_t *parent;
++
++ if (obj->variant_type != cp->variant_type) {
++ T(YAFFS_TRACE_ERROR, (TSTR("Checkpoint read object %d type %d "
++ TCONT("chunk %d does not match existing object type %d")
++ TENDSTR), cp->obj_id, cp->variant_type, cp->hdr_chunk,
++ obj->variant_type));
++ return 0;
++ }
++
++ obj->obj_id = cp->obj_id;
++
++ if (cp->parent_id)
++ parent = yaffs_find_or_create_by_number(
++ obj->my_dev,
++ cp->parent_id,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
++ else
++ parent = NULL;
++
++ if (parent) {
++ if (parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ T(YAFFS_TRACE_ALWAYS, (TSTR("Checkpoint read object %d parent %d type %d"
++ TCONT(" chunk %d Parent type, %d, not directory")
++ TENDSTR),
++ cp->obj_id, cp->parent_id, cp->variant_type,
++ cp->hdr_chunk, parent->variant_type));
++ return 0;
++ }
++ yaffs_add_obj_to_dir(parent, obj);
++ }
++
++ obj->hdr_chunk = cp->hdr_chunk;
++ obj->variant_type = cp->variant_type;
++ obj->deleted = cp->deleted;
++ obj->soft_del = cp->soft_del;
++ obj->unlinked = cp->unlinked;
++ obj->fake = cp->fake;
++ obj->rename_allowed = cp->rename_allowed;
++ obj->unlink_allowed = cp->unlink_allowed;
++ obj->serial = cp->serial;
++ obj->n_data_chunks = cp->n_data_chunks;
++
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++ obj->variant.file_variant.file_size = cp->size_or_equiv_obj;
++ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ obj->variant.hardlink_variant.equiv_id = cp->size_or_equiv_obj;
++
++ if (obj->hdr_chunk > 0)
++ obj->lazy_loaded = 1;
++ return 1;
++}
++
++
++
++static int yaffs2_checkpt_tnode_worker(yaffs_obj_t *in, yaffs_tnode_t *tn,
++ __u32 level, int chunk_offset)
++{
++ int i;
++ yaffs_dev_t *dev = in->my_dev;
++ int ok = 1;
++
++ if (tn) {
++ if (level > 0) {
++
++ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
++ if (tn->internal[i]) {
++ ok = yaffs2_checkpt_tnode_worker(in,
++ tn->internal[i],
++ level - 1,
++ (chunk_offset<<YAFFS_TNODES_INTERNAL_BITS) + i);
++ }
++ }
++ } else if (level == 0) {
++ __u32 baseOffset = chunk_offset << YAFFS_TNODES_LEVEL0_BITS;
++ ok = (yaffs2_checkpt_wr(dev, &baseOffset, sizeof(baseOffset)) == sizeof(baseOffset));
++ if (ok)
++ ok = (yaffs2_checkpt_wr(dev, tn, dev->tnode_size) == dev->tnode_size);
++ }
++ }
++
++ return ok;
++
++}
++
++static int yaffs2_wr_checkpt_tnodes(yaffs_obj_t *obj)
++{
++ __u32 endMarker = ~0;
++ int ok = 1;
++
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) {
++ ok = yaffs2_checkpt_tnode_worker(obj,
++ obj->variant.file_variant.top,
++ obj->variant.file_variant.top_level,
++ 0);
++ if (ok)
++ ok = (yaffs2_checkpt_wr(obj->my_dev, &endMarker, sizeof(endMarker)) ==
++ sizeof(endMarker));
++ }
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_tnodes(yaffs_obj_t *obj)
++{
++ __u32 baseChunk;
++ int ok = 1;
++ yaffs_dev_t *dev = obj->my_dev;
++ yaffs_file_s *fileStructPtr = &obj->variant.file_variant;
++ yaffs_tnode_t *tn;
++ int nread = 0;
++
++ ok = (yaffs2_checkpt_rd(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk));
++
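++ /* A base chunk of all ones (~0) is the end marker written by
++ * yaffs2_wr_checkpt_tnodes(); keep reading level-0 tnode records
++ * until it appears. */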
++ while (ok && (~baseChunk)) {
++ nread++;
++ /* Read level 0 tnode */
++
++
++ tn = yaffs_get_tnode(dev);
++ if (tn){
++ ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) == dev->tnode_size);
++ } else
++ ok = 0;
++
++ if (tn && ok)
++ ok = yaffs_add_find_tnode_0(dev,
++ fileStructPtr,
++ baseChunk,
++ tn) ? 1 : 0;
++
++ if (ok)
++ ok = (yaffs2_checkpt_rd(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk));
++
++ }
++
++ T(YAFFS_TRACE_CHECKPOINT, (
++ TSTR("Checkpoint read tnodes %d records, last %d. ok %d" TENDSTR),
++ nread, baseChunk, ok));
++
++ return ok ? 1 : 0;
++}
++
++
++static int yaffs2_wr_checkpt_objs(yaffs_dev_t *dev)
++{
++ yaffs_obj_t *obj;
++ yaffs_checkpt_obj_t cp;
++ int i;
++ int ok = 1;
++ struct ylist_head *lh;
++
++
++ /* Iterate through the objects in each hash entry,
++ * dumping them to the checkpointing stream.
++ */
++
++ for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
++ ylist_for_each(lh, &dev->obj_bucket[i].list) {
++ if (lh) {
++ obj = ylist_entry(lh, yaffs_obj_t, hash_link);
++ if (!obj->defered_free) {
++ yaffs2_obj_checkpt_obj(&cp, obj);
++ cp.struct_type = sizeof(cp);
++
++ T(YAFFS_TRACE_CHECKPOINT, (
++ TSTR("Checkpoint write object %d parent %d type %d chunk %d obj addr %p" TENDSTR),
++ cp.obj_id, cp.parent_id, cp.variant_type, cp.hdr_chunk, obj));
++
++ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++ if (ok && obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++ ok = yaffs2_wr_checkpt_tnodes(obj);
++ }
++ }
++ }
++ }
++
++ /* Dump end of list */
++ memset(&cp, 0xFF, sizeof(yaffs_checkpt_obj_t));
++ cp.struct_type = sizeof(cp);
++
++ if (ok)
++ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_objs(yaffs_dev_t *dev)
++{
++ yaffs_obj_t *obj;
++ yaffs_checkpt_obj_t cp;
++ int ok = 1;
++ int done = 0;
++ yaffs_obj_t *hard_list = NULL;
++
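++ /* Object records are read until the all-ones terminator record
++ * (obj_id == ~0) written by yaffs2_wr_checkpt_objs() is seen. */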
++ while (ok && !done) {
++ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (cp.struct_type != sizeof(cp)) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("struct size %d instead of %d ok %d"TENDSTR),
++ cp.struct_type, (int)sizeof(cp), ok));
++ ok = 0;
++ }
++
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("Checkpoint read object %d parent %d type %d chunk %d " TENDSTR),
++ cp.obj_id, cp.parent_id, cp.variant_type, cp.hdr_chunk));
++
++ if (ok && cp.obj_id == ~0)
++ done = 1;
++ else if (ok) {
++ obj = yaffs_find_or_create_by_number(dev, cp.obj_id, cp.variant_type);
++ if (obj) {
++ ok = taffs2_checkpt_obj_to_obj(obj, &cp);
++ if (!ok)
++ break;
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) {
++ ok = yaffs2_rd_checkpt_tnodes(obj);
++ } else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
++ obj->hard_links.next =
++ (struct ylist_head *) hard_list;
++ hard_list = obj;
++ }
++ } else
++ ok = 0;
++ }
++ }
++
++ if (ok)
++ yaffs_link_fixup(dev, hard_list);
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_wr_checkpt_sum(yaffs_dev_t *dev)
++{
++ __u32 checkpt_sum;
++ int ok;
++
++ yaffs2_get_checkpt_sum(dev, &checkpt_sum);
++
++ ok = (yaffs2_checkpt_wr(dev, &checkpt_sum, sizeof(checkpt_sum)) == sizeof(checkpt_sum));
++
++ if (!ok)
++ return 0;
++
++ return 1;
++}
++
++static int yaffs2_rd_checkpt_sum(yaffs_dev_t *dev)
++{
++ __u32 checkpt_sum0;
++ __u32 checkpt_sum1;
++ int ok;
++
++ yaffs2_get_checkpt_sum(dev, &checkpt_sum0);
++
++ ok = (yaffs2_checkpt_rd(dev, &checkpt_sum1, sizeof(checkpt_sum1)) == sizeof(checkpt_sum1));
++
++ if (!ok)
++ return 0;
++
++ if (checkpt_sum0 != checkpt_sum1)
++ return 0;
++
++ return 1;
++}
++
++
++static int yaffs2_wr_checkpt_data(yaffs_dev_t *dev)
++{
++ int ok = 1;
++
++ if (!yaffs2_checkpt_required(dev)) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint write" TENDSTR)));
++ ok = 0;
++ }
++
++ if (ok)
++ ok = yaffs2_checkpt_open(dev, 1);
++
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR)));
++ ok = yaffs2_wr_checkpt_validity_marker(dev, 1);
++ }
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint device" TENDSTR)));
++ ok = yaffs2_wr_checkpt_dev(dev);
++ }
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint objects" TENDSTR)));
++ ok = yaffs2_wr_checkpt_objs(dev);
++ }
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR)));
++ ok = yaffs2_wr_checkpt_validity_marker(dev, 0);
++ }
++
++ if (ok)
++ ok = yaffs2_wr_checkpt_sum(dev);
++
++ if (!yaffs_checkpt_close(dev))
++ ok = 0;
++
++ if (ok)
++ dev->is_checkpointed = 1;
++ else
++ dev->is_checkpointed = 0;
++
++ return dev->is_checkpointed;
++}
++
++static int yaffs2_rd_checkpt_data(yaffs_dev_t *dev)
++{
++ int ok = 1;
++
++ if(!dev->param.is_yaffs2)
++ ok = 0;
++
++ if (ok && dev->param.skip_checkpt_rd) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint read" TENDSTR)));
++ ok = 0;
++ }
++
++ if (ok)
++ ok = yaffs2_checkpt_open(dev, 0); /* open for read */
++
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR)));
++ ok = yaffs2_rd_checkpt_validity_marker(dev, 1);
++ }
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint device" TENDSTR)));
++ ok = yaffs2_rd_checkpt_dev(dev);
++ }
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint objects" TENDSTR)));
++ ok = yaffs2_rd_checkpt_objs(dev);
++ }
++ if (ok) {
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR)));
++ ok = yaffs2_rd_checkpt_validity_marker(dev, 0);
++ }
++
++ if (ok) {
++ ok = yaffs2_rd_checkpt_sum(dev);
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint checksum %d" TENDSTR), ok));
++ }
++
++ if (!yaffs_checkpt_close(dev))
++ ok = 0;
++
++ if (ok)
++ dev->is_checkpointed = 1;
++ else
++ dev->is_checkpointed = 0;
++
++ return ok ? 1 : 0;
++
++}
++
++void yaffs2_checkpt_invalidate(yaffs_dev_t *dev)
++{
++ if (dev->is_checkpointed ||
++ dev->blocks_in_checkpt > 0) {
++ dev->is_checkpointed = 0;
++ yaffs2_checkpt_invalidate_stream(dev);
++ }
++ if (dev->param.sb_dirty_fn)
++ dev->param.sb_dirty_fn(dev);
++}
++
++
++int yaffs_checkpoint_save(yaffs_dev_t *dev)
++{
++
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("save entry: is_checkpointed %d"TENDSTR), dev->is_checkpointed));
++
++ yaffs_verify_objects(dev);
++ yaffs_verify_blocks(dev);
++ yaffs_verify_free_chunks(dev);
++
++ if (!dev->is_checkpointed) {
++ yaffs2_checkpt_invalidate(dev);
++ yaffs2_wr_checkpt_data(dev);
++ }
++
++ T(YAFFS_TRACE_ALWAYS, (TSTR("save exit: is_checkpointed %d"TENDSTR), dev->is_checkpointed));
++
++ return dev->is_checkpointed;
++}
++
++int yaffs2_checkpt_restore(yaffs_dev_t *dev)
++{
++ int retval;
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore entry: is_checkpointed %d"TENDSTR), dev->is_checkpointed));
++
++ retval = yaffs2_rd_checkpt_data(dev);
++
++ if (dev->is_checkpointed) {
++ yaffs_verify_objects(dev);
++ yaffs_verify_blocks(dev);
++ yaffs_verify_free_chunks(dev);
++ }
++
++ T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore exit: is_checkpointed %d"TENDSTR), dev->is_checkpointed));
++
++ return retval;
++}
++
++int yaffs2_handle_hole(yaffs_obj_t *obj, loff_t new_size)
++{
++ /* If new_size > oldFileSize then we are going to be writing a hole.
++ * If the hole is small then write zeros, otherwise write a start-of-hole marker.
++ */
++
++
++ loff_t oldFileSize;
++ int increase;
++ int smallHole ;
++ int result = YAFFS_OK;
++ yaffs_dev_t *dev = NULL;
++
++ __u8 *localBuffer = NULL;
++
++ int smallIncreaseOk = 0;
++
++ if(!obj)
++ return YAFFS_FAIL;
++
++ if(obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
++ return YAFFS_FAIL;
++
++ dev = obj->my_dev;
++
++ /* Bail out if not yaffs2 mode */
++ if(!dev->param.is_yaffs2)
++ return YAFFS_OK;
++
++ oldFileSize = obj->variant.file_variant.file_size;
++
++ if (new_size <= oldFileSize)
++ return YAFFS_OK;
++
++ increase = new_size - oldFileSize;
++
++ if(increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->data_bytes_per_chunk &&
++ yaffs_check_alloc_available(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1))
++ smallHole = 1;
++ else
++ smallHole = 0;
++
++ if(smallHole)
++ localBuffer= yaffs_get_temp_buffer(dev, __LINE__);
++
++ if(localBuffer){
++ /* fill hole with zero bytes */
++ int pos = oldFileSize;
++ int thisWrite;
++ int written;
++ memset(localBuffer,0,dev->data_bytes_per_chunk);
++ smallIncreaseOk = 1;
++
++ while(increase > 0 && smallIncreaseOk){
++ thisWrite = increase;
++ if(thisWrite > dev->data_bytes_per_chunk)
++ thisWrite = dev->data_bytes_per_chunk;
++ written = yaffs_do_file_wr(obj,localBuffer,pos,thisWrite,0);
++ if(written == thisWrite){
++ pos += thisWrite;
++ increase -= thisWrite;
++ } else
++ smallIncreaseOk = 0;
++ }
++
++ yaffs_release_temp_buffer(dev,localBuffer,__LINE__);
++
++ /* If we were out of space then reverse any chunks we've added */
++ if(!smallIncreaseOk)
++ yaffs_resize_file_down(obj, oldFileSize);
++ }
++
++ if (!smallIncreaseOk &&
++ obj->parent &&
++ obj->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
++ obj->parent->obj_id != YAFFS_OBJECTID_DELETED){
++ /* Write a hole start header with the old file size */
++ yaffs_update_oh(obj, NULL, 0, 1, 0, NULL);
++ }
++
++ return result;
++
++}
++
++
++typedef struct {
++ int seq;
++ int block;
++} yaffs_BlockIndex;
++
++
++static int yaffs2_ybicmp(const void *a, const void *b)
++{
++ register int aseq = ((yaffs_BlockIndex *)a)->seq;
++ register int bseq = ((yaffs_BlockIndex *)b)->seq;
++ register int ablock = ((yaffs_BlockIndex *)a)->block;
++ register int bblock = ((yaffs_BlockIndex *)b)->block;
++ if (aseq == bseq)
++ return ablock - bblock;
++ else
++ return aseq - bseq;
++}
++
++int yaffs2_scan_backwards(yaffs_dev_t *dev)
++{
++ yaffs_ext_tags tags;
++ int blk;
++ int blockIterator;
++ int startIterator;
++ int endIterator;
++ int nBlocksToScan = 0;
++
++ int chunk;
++ int result;
++ int c;
++ int deleted;
++ yaffs_block_state_t state;
++ yaffs_obj_t *hard_list = NULL;
++ yaffs_block_info_t *bi;
++ __u32 seq_number;
++ yaffs_obj_header *oh;
++ yaffs_obj_t *in;
++ yaffs_obj_t *parent;
++ int nBlocks = dev->internal_end_block - dev->internal_start_block + 1;
++ int itsUnlinked;
++ __u8 *chunkData;
++
++ int file_size;
++ int is_shrink;
++ int foundChunksInBlock;
++ int equiv_id;
++ int alloc_failed = 0;
++
++
++ yaffs_BlockIndex *blockIndex = NULL;
++ int altBlockIndex = 0;
++
++ T(YAFFS_TRACE_SCAN,
++ (TSTR
++ ("yaffs2_scan_backwards starts intstartblk %d intendblk %d..."
++ TENDSTR), dev->internal_start_block, dev->internal_end_block));
++
++
++ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
++
++ blockIndex = YMALLOC(nBlocks * sizeof(yaffs_BlockIndex));
++
++ if (!blockIndex) {
++ blockIndex = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockIndex));
++ altBlockIndex = 1;
++ }
++
++ if (!blockIndex) {
++ T(YAFFS_TRACE_SCAN,
++ (TSTR("yaffs2_scan_backwards() could not allocate block index!" TENDSTR)));
++ return YAFFS_FAIL;
++ }
++
++ dev->blocks_in_checkpt = 0;
++
++ chunkData = yaffs_get_temp_buffer(dev, __LINE__);
++
++ /* Scan all the blocks to determine their state */
++ bi = dev->block_info;
++ for (blk = dev->internal_start_block; blk <= dev->internal_end_block; blk++) {
++ yaffs_clear_chunk_bits(dev, blk);
++ bi->pages_in_use = 0;
++ bi->soft_del_pages = 0;
++
++ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
++
++ bi->block_state = state;
++ bi->seq_number = seq_number;
++
++ if (bi->seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA)
++ bi->block_state = state = YAFFS_BLOCK_STATE_CHECKPOINT;
++ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
++ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
++
++ T(YAFFS_TRACE_SCAN_DEBUG,
++ (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
++ state, seq_number));
++
++
++ if (state == YAFFS_BLOCK_STATE_CHECKPOINT) {
++ dev->blocks_in_checkpt++;
++
++ } else if (state == YAFFS_BLOCK_STATE_DEAD) {
++ T(YAFFS_TRACE_BAD_BLOCKS,
++ (TSTR("block %d is bad" TENDSTR), blk));
++ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
++ T(YAFFS_TRACE_SCAN_DEBUG,
++ (TSTR("Block empty " TENDSTR)));
++ dev->n_erased_blocks++;
++ dev->n_free_chunks += dev->param.chunks_per_block;
++ } else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++
++ /* Determine the highest sequence number */
++ if (seq_number >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
++ seq_number < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
++
++ blockIndex[nBlocksToScan].seq = seq_number;
++ blockIndex[nBlocksToScan].block = blk;
++
++ nBlocksToScan++;
++
++ if (seq_number >= dev->seq_number)
++ dev->seq_number = seq_number;
++ } else {
++ /* TODO: Nasty sequence number! */
++ T(YAFFS_TRACE_SCAN,
++ (TSTR
++ ("Block scanning block %d has bad sequence number %d"
++ TENDSTR), blk, seq_number));
++
++ }
++ }
++ bi++;
++ }
++
++ T(YAFFS_TRACE_SCAN,
++ (TSTR("%d blocks to be sorted..." TENDSTR), nBlocksToScan));
++
++
++
++ YYIELD();
++
++ /* Sort the blocks by sequence number*/
++ yaffs_qsort(blockIndex, nBlocksToScan, sizeof(yaffs_BlockIndex), yaffs2_ybicmp);
++
++ YYIELD();
++
++ T(YAFFS_TRACE_SCAN, (TSTR("...done" TENDSTR)));
++
++ /* Now scan the blocks looking at the data. */
++ startIterator = 0;
++ endIterator = nBlocksToScan - 1;
++ T(YAFFS_TRACE_SCAN_DEBUG,
++ (TSTR("%d blocks to be scanned" TENDSTR), nBlocksToScan));
++
++ /* For each block.... backwards */
++ for (blockIterator = endIterator; !alloc_failed && blockIterator >= startIterator;
++ blockIterator--) {
++ /* Cooperative multitasking! This loop can run for so
++ long that watchdog timers expire. */
++ YYIELD();
++
++ /* get the block to scan in the correct order */
++ blk = blockIndex[blockIterator].block;
++
++ bi = yaffs_get_block_info(dev, blk);
++
++
++ state = bi->block_state;
++
++ deleted = 0;
++
++ /* For each chunk in each block that needs scanning.... */
++ foundChunksInBlock = 0;
++ for (c = dev->param.chunks_per_block - 1;
++ !alloc_failed && c >= 0 &&
++ (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
++ state == YAFFS_BLOCK_STATE_ALLOCATING); c--) {
++ /* Scan backwards...
++ * Read the tags and decide what to do
++ */
++
++ chunk = blk * dev->param.chunks_per_block + c;
++
++ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
++ &tags);
++
++ /* Let's have a good look at this chunk... */
++
++ if (!tags.chunk_used) {
++ /* An unassigned chunk in the block.
++ * If there are used chunks after this one, then
++ * it is a chunk that was skipped due to failing the erased
++ * check. Just skip it so that it can be deleted.
++ * But, more typically, we get here when this is an unallocated
++ * chunk and this means that either the block is empty or
++ * this is the one being allocated from.
++ */
++
++ if (foundChunksInBlock) {
++ /* This is a chunk that was skipped due to failing the erased check */
++ } else if (c == 0) {
++ /* We're looking at the first chunk in the block so the block is unused */
++ state = YAFFS_BLOCK_STATE_EMPTY;
++ dev->n_erased_blocks++;
++ } else {
++ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
++ state == YAFFS_BLOCK_STATE_ALLOCATING) {
++ if (dev->seq_number == bi->seq_number) {
++ /* this is the block being allocated from */
++
++ T(YAFFS_TRACE_SCAN,
++ (TSTR
++ (" Allocating from %d %d"
++ TENDSTR), blk, c));
++
++ state = YAFFS_BLOCK_STATE_ALLOCATING;
++ dev->alloc_block = blk;
++ dev->alloc_page = c;
++ dev->alloc_block_finder = blk;
++ } else {
++ /* This is a partially written block that is not
++ * the current allocation block.
++ */
++
++ T(YAFFS_TRACE_SCAN,
++ (TSTR("Partially written block %d detected" TENDSTR),
++ blk));
++ }
++ }
++ }
++
++ dev->n_free_chunks++;
++
++ } else if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED) {
++ T(YAFFS_TRACE_SCAN,
++ (TSTR(" Unfixed ECC in chunk(%d:%d), chunk ignored"TENDSTR),
++ blk, c));
++
++ dev->n_free_chunks++;
++
++ } else if (tags.obj_id > YAFFS_MAX_OBJECT_ID ||
++ tags.chunk_id > YAFFS_MAX_CHUNK_ID ||
++ (tags.chunk_id > 0 && tags.n_bytes > dev->data_bytes_per_chunk) ||
++ tags.seq_number != bi->seq_number ) {
++ T(YAFFS_TRACE_SCAN,
++ (TSTR("Chunk (%d:%d) with bad tags:obj = %d, chunk_id = %d, n_bytes = %d, ignored"TENDSTR),
++ blk, c,tags.obj_id, tags.chunk_id, tags.n_bytes));
++
++ dev->n_free_chunks++;
++
++ } else if (tags.chunk_id > 0) {
++ /* chunk_id > 0 so it is a data chunk... */
++ unsigned int endpos;
++ __u32 chunkBase =
++ (tags.chunk_id - 1) * dev->data_bytes_per_chunk;
++
++ foundChunksInBlock = 1;
++
++
++ yaffs_set_chunk_bit(dev, blk, c);
++ bi->pages_in_use++;
++
++ in = yaffs_find_or_create_by_number(dev, tags.obj_id,
++ YAFFS_OBJECT_TYPE_FILE);
++ if (!in) {
++ /* Out of memory */
++ alloc_failed = 1;
++ }
++
++ if (in &&
++ in->variant_type == YAFFS_OBJECT_TYPE_FILE
++ && chunkBase < in->variant.file_variant.shrink_size) {
++ /* This has not been invalidated by a resize */
++ if (!yaffs_put_chunk_in_file(in, tags.chunk_id, chunk, -1)) {
++ alloc_failed = 1;
++ }
++
++ /* File size is calculated by looking at the data chunks if we have not
++ * seen an object header yet. Stop this practice once we find an object header.
++ */
++ endpos = chunkBase + tags.n_bytes;
++
++ if (!in->valid && /* have not got an object header yet */
++ in->variant.file_variant.scanned_size < endpos) {
++ in->variant.file_variant.scanned_size = endpos;
++ in->variant.file_variant.file_size = endpos;
++ }
++
++ } else if (in) {
++ /* This chunk has been invalidated by a resize or a past file
++ * deletion, so delete the chunk. */
++ yaffs_chunk_del(dev, chunk, 1, __LINE__);
++
++ }
++ } else {
++ /* chunk_id == 0, so it is an ObjectHeader.
++ * Thus, we read in the object header and make the object
++ */
++ foundChunksInBlock = 1;
++
++ yaffs_set_chunk_bit(dev, blk, c);
++ bi->pages_in_use++;
++
++ oh = NULL;
++ in = NULL;
++
++ if (tags.extra_available) {
++ in = yaffs_find_or_create_by_number(dev,
++ tags.obj_id,
++ tags.extra_obj_type);
++ if (!in)
++ alloc_failed = 1;
++ }
++
++ if (!in ||
++ (!in->valid && dev->param.disable_lazy_load) ||
++ tags.extra_shadows ||
++ (!in->valid &&
++ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
++ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND))) {
++
++ /* If we don't have valid info then we need to read the chunk
++ * TODO In future we can probably defer reading the chunk and
++ * live with invalid data until needed.
++ */
++
++ result = yaffs_rd_chunk_tags_nand(dev,
++ chunk,
++ chunkData,
++ NULL);
++
++ oh = (yaffs_obj_header *) chunkData;
++
++ if (dev->param.inband_tags) {
++ /* Fix up the header if they got corrupted by inband tags */
++ oh->shadows_obj = oh->inband_shadowed_obj_id;
++ oh->is_shrink = oh->inband_is_shrink;
++ }
++
++ if (!in) {
++ in = yaffs_find_or_create_by_number(dev, tags.obj_id, oh->type);
++ if (!in)
++ alloc_failed = 1;
++ }
++
++ }
++
++ if (!in) {
++ /* TODO Hoosterman we have a problem! */
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ ("yaffs tragedy: Could not make object for object %d at chunk %d during scan"
++ TENDSTR), tags.obj_id, chunk));
++ continue;
++ }
++
++ if (in->valid) {
++ /* We have already filled this one.
++ * We have a duplicate that will be discarded, but
++ * we first have to suck out resize info if it is a file.
++ */
++
++ if ((in->variant_type == YAFFS_OBJECT_TYPE_FILE) &&
++ ((oh &&
++ oh->type == YAFFS_OBJECT_TYPE_FILE) ||
++ (tags.extra_available &&
++ tags.extra_obj_type == YAFFS_OBJECT_TYPE_FILE))) {
++ __u32 thisSize = (oh) ? oh->file_size : tags.extra_length;
++ __u32 parent_obj_id = (oh) ? oh->parent_obj_id : tags.extra_parent_id;
++
++ is_shrink = (oh) ? oh->is_shrink : tags.extra_is_shrink;
++
++ /* If it is deleted (unlinked at start also means deleted)
++ * we treat the file size as being zeroed at this point.
++ */
++ if (parent_obj_id ==
++ YAFFS_OBJECTID_DELETED
++ || parent_obj_id ==
++ YAFFS_OBJECTID_UNLINKED) {
++ thisSize = 0;
++ is_shrink = 1;
++ }
++
++ if (is_shrink && in->variant.file_variant.shrink_size > thisSize)
++ in->variant.file_variant.shrink_size = thisSize;
++
++ if (is_shrink)
++ bi->has_shrink_hdr = 1;
++
++ }
++ /* Use existing - destroy this one. */
++ yaffs_chunk_del(dev, chunk, 1, __LINE__);
++
++ }
++
++ if (!in->valid && in->variant_type !=
++ (oh ? oh->type : tags.extra_obj_type))
++ T(YAFFS_TRACE_ERROR, (
++ TSTR("yaffs tragedy: Bad object type, "
++ TCONT("%d != %d, for object %d at chunk ")
++ TCONT("%d during scan")
++ TENDSTR), oh ?
++ oh->type : tags.extra_obj_type,
++ in->variant_type, tags.obj_id,
++ chunk));
++
++ if (!in->valid &&
++ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
++ tags.obj_id ==
++ YAFFS_OBJECTID_LOSTNFOUND)) {
++ /* We only load some info, don't fiddle with directory structure */
++ in->valid = 1;
++
++ if (oh) {
++
++ in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++ in->win_atime[0] = oh->win_atime[0];
++ in->win_ctime[0] = oh->win_ctime[0];
++ in->win_mtime[0] = oh->win_mtime[0];
++ in->win_atime[1] = oh->win_atime[1];
++ in->win_ctime[1] = oh->win_ctime[1];
++ in->win_mtime[1] = oh->win_mtime[1];
++#else
++ in->yst_uid = oh->yst_uid;
++ in->yst_gid = oh->yst_gid;
++ in->yst_atime = oh->yst_atime;
++ in->yst_mtime = oh->yst_mtime;
++ in->yst_ctime = oh->yst_ctime;
++ in->yst_rdev = oh->yst_rdev;
++
++ in->lazy_loaded = 0;
++
++#endif
++ } else
++ in->lazy_loaded = 1;
++
++ in->hdr_chunk = chunk;
++
++ } else if (!in->valid) {
++ /* we need to load this info */
++
++ in->valid = 1;
++ in->hdr_chunk = chunk;
++
++ if (oh) {
++ in->variant_type = oh->type;
++
++ in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++ in->win_atime[0] = oh->win_atime[0];
++ in->win_ctime[0] = oh->win_ctime[0];
++ in->win_mtime[0] = oh->win_mtime[0];
++ in->win_atime[1] = oh->win_atime[1];
++ in->win_ctime[1] = oh->win_ctime[1];
++ in->win_mtime[1] = oh->win_mtime[1];
++#else
++ in->yst_uid = oh->yst_uid;
++ in->yst_gid = oh->yst_gid;
++ in->yst_atime = oh->yst_atime;
++ in->yst_mtime = oh->yst_mtime;
++ in->yst_ctime = oh->yst_ctime;
++ in->yst_rdev = oh->yst_rdev;
++#endif
++
++ if (oh->shadows_obj > 0)
++ yaffs_handle_shadowed_obj(dev, oh->shadows_obj, 1);
++
++
++
++ yaffs_set_obj_name_from_oh(in, oh);
++ parent = yaffs_find_or_create_by_number(dev,
++ oh->parent_obj_id, YAFFS_OBJECT_TYPE_DIRECTORY);
++
++ file_size = oh->file_size;
++ is_shrink = oh->is_shrink;
++ equiv_id = oh->equiv_id;
++
++ } else {
++ in->variant_type = tags.extra_obj_type;
++ parent = yaffs_find_or_create_by_number(dev,
++ tags.extra_parent_id, YAFFS_OBJECT_TYPE_DIRECTORY);
++ file_size = tags.extra_length;
++ is_shrink = tags.extra_is_shrink;
++ equiv_id = tags.extra_equiv_id;
++ in->lazy_loaded = 1;
++
++ }
++ in->dirty = 0;
++
++ if (!parent)
++ alloc_failed = 1;
++
++ /* directory stuff...
++ * hook up to parent
++ */
++
++ if (parent && parent->variant_type ==
++ YAFFS_OBJECT_TYPE_UNKNOWN) {
++ /* Set up as a directory */
++ parent->variant_type =
++ YAFFS_OBJECT_TYPE_DIRECTORY;
++ YINIT_LIST_HEAD(&parent->variant.dir_variant.children);
++ } else if (!parent || parent->variant_type !=
++ YAFFS_OBJECT_TYPE_DIRECTORY) {
++ /* Hoosterman, another problem....
++ * We're trying to use a non-directory as a directory
++ */
++
++ T(YAFFS_TRACE_ERROR,
++ (TSTR
++ ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
++ TENDSTR)));
++ parent = dev->lost_n_found;
++ }
++
++ yaffs_add_obj_to_dir(parent, in);
++
++ itsUnlinked = (parent == dev->del_dir) ||
++ (parent == dev->unlinked_dir);
++
++ if (is_shrink) {
++ /* Mark the block as having a shrinkHeader */
++ bi->has_shrink_hdr = 1;
++ }
++
++ /* Note re hardlinks.
++ * Since we might scan a hardlink before its equivalent object is scanned
++ * we put them all in a list.
++ * After scanning is complete, we should have all the objects, so we run
++ * through this list and fix up all the chains.
++ */
++
++ switch (in->variant_type) {
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* Todo got a problem */
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++
++ if (in->variant.file_variant.scanned_size < file_size) {
++ /* This covers the case where the file size is greater
++ * than where the data is.
++ * This will happen if the file is resized to be larger
++ * than its current data extents.
++ */
++ in->variant.file_variant.file_size = file_size;
++ in->variant.file_variant.scanned_size = file_size;
++ }
++
++ if (in->variant.file_variant.shrink_size > file_size)
++ in->variant.file_variant.shrink_size = file_size;
++
++
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ if (!itsUnlinked) {
++ in->variant.hardlink_variant.equiv_id =
++ equiv_id;
++ in->hard_links.next =
++ (struct ylist_head *) hard_list;
++ hard_list = in;
++ }
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ if (oh) {
++ in->variant.symlink_variant.alias =
++ yaffs_clone_str(oh->alias);
++ if (!in->variant.symlink_variant.alias)
++ alloc_failed = 1;
++ }
++ break;
++ }
++
++ }
++
++ }
++
++ } /* End of scanning for each chunk */
++
++ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++ /* If we got this far while scanning, then the block is fully allocated. */
++ state = YAFFS_BLOCK_STATE_FULL;
++ }
++
++
++ bi->block_state = state;
++
++ /* Now let's see if it was dirty */
++ if (bi->pages_in_use == 0 &&
++ !bi->has_shrink_hdr &&
++ bi->block_state == YAFFS_BLOCK_STATE_FULL) {
++ yaffs_block_became_dirty(dev, blk);
++ }
++
++ }
++
++ yaffs_skip_rest_of_block(dev);
++
++ if (altBlockIndex)
++ YFREE_ALT(blockIndex);
++ else
++ YFREE(blockIndex);
++
++ /* Ok, we've done all the scanning.
++ * We should now have scanned all the objects, so now it's time to
++ * fix up the hard link chains.
++ */
++ yaffs_link_fixup(dev, hard_list);
++
++
++ yaffs_release_temp_buffer(dev, chunkData, __LINE__);
++
++ if (alloc_failed)
++ return YAFFS_FAIL;
++
++ T(YAFFS_TRACE_SCAN, (TSTR("yaffs2_scan_backwards ends" TENDSTR)));
++
++ return YAFFS_OK;
++}
+--- /dev/null
++++ b/fs/yaffs2/yaffs_yaffs2.h
+@@ -0,0 +1,36 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2010 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __YAFFS_YAFFS2_H__
++#define __YAFFS_YAFFS2_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_calc_oldest_dirty_seq(yaffs_dev_t *dev);
++void yaffs2_find_oldest_dirty_seq(yaffs_dev_t *dev);
++void yaffs2_clear_oldest_dirty_seq(yaffs_dev_t *dev, yaffs_block_info_t *bi);
++void yaffs2_update_oldest_dirty_seq(yaffs_dev_t *dev, unsigned block_no, yaffs_block_info_t *bi);
++int yaffs_block_ok_for_gc(yaffs_dev_t *dev, yaffs_block_info_t *bi);
++__u32 yaffs2_find_refresh_block(yaffs_dev_t *dev);
++int yaffs2_checkpt_required(yaffs_dev_t *dev);
++int yaffs_calc_checkpt_blocks_required(yaffs_dev_t *dev);
++
++
++void yaffs2_checkpt_invalidate(yaffs_dev_t *dev);
++int yaffs2_checkpt_save(yaffs_dev_t *dev);
++int yaffs2_checkpt_restore(yaffs_dev_t *dev);
++
++int yaffs2_handle_hole(yaffs_obj_t *obj, loff_t new_size);
++int yaffs2_scan_backwards(yaffs_dev_t *dev);
++
++#endif
+--- a/fs/yaffs2/yportenv.h
++++ b/fs/yaffs2/yportenv.h
+@@ -1,7 +1,7 @@
+ /*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+- * Copyright (C) 2002-2007 Aleph One Ltd.
++ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+@@ -41,12 +41,14 @@
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+ #include <linux/config.h>
+ #endif
++
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ #include <linux/sched.h>
+ #include <linux/string.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
++#include <linux/xattr.h>
+
+ #define YCHAR char
+ #define YUCHAR unsigned char
+@@ -55,11 +57,11 @@
+ #define yaffs_strcpy(a, b) strcpy(a, b)
+ #define yaffs_strncpy(a, b, c) strncpy(a, b, c)
+ #define yaffs_strncmp(a, b, c) strncmp(a, b, c)
+-#define yaffs_strlen(s) strlen(s)
++#define yaffs_strnlen(s,m) strnlen(s,m)
+ #define yaffs_sprintf sprintf
+ #define yaffs_toupper(a) toupper(a)
+
+-#define Y_INLINE inline
++#define Y_INLINE __inline__
+
+ #define YAFFS_LOSTNFOUND_NAME "lost+found"
+ #define YAFFS_LOSTNFOUND_PREFIX "obj"
+@@ -71,11 +73,11 @@
+ #define YFREE_ALT(x) vfree(x)
+ #define YMALLOC_DMA(x) YMALLOC(x)
+
+-/* KR - added for use in scan so processes aren't blocked indefinitely. */
+ #define YYIELD() schedule()
++#define Y_DUMP_STACK() dump_stack()
+
+-#define YAFFS_ROOT_MODE 0666
+-#define YAFFS_LOSTNFOUND_MODE 0666
++#define YAFFS_ROOT_MODE 0755
++#define YAFFS_LOSTNFOUND_MODE 0700
+
+ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ #define Y_CURRENT_TIME CURRENT_TIME.tv_sec
+@@ -85,19 +87,14 @@
+ #define Y_TIME_CONVERT(x) (x)
+ #endif
+
+-#define yaffs_SumCompare(x, y) ((x) == (y))
++#define yaffs_sum_cmp(x, y) ((x) == (y))
+ #define yaffs_strcmp(a, b) strcmp(a, b)
+
+ #define TENDSTR "\n"
+-#define TSTR(x) KERN_WARNING x
++#define TSTR(x) KERN_DEBUG x
+ #define TCONT(x) x
+ #define TOUT(p) printk p
+
+-#define yaffs_trace(mask, fmt, args...) \
+- do { if ((mask) & (yaffs_traceMask|YAFFS_TRACE_ERROR)) \
+- printk(KERN_WARNING "yaffs: " fmt, ## args); \
+- } while (0)
+-
+ #define compile_time_assertion(assertion) \
+ ({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
+
+@@ -116,7 +113,6 @@
+ #include "stdio.h"
+ #include "string.h"
+
+-#include "devextras.h"
+
+ #define YMALLOC(x) malloc(x)
+ #define YFREE(x) free(x)
+@@ -129,7 +125,7 @@
+ #define yaffs_strcat(a, b) strcat(a, b)
+ #define yaffs_strcpy(a, b) strcpy(a, b)
+ #define yaffs_strncpy(a, b, c) strncpy(a, b, c)
+-#define yaffs_strlen(s) strlen(s)
++#define yaffs_strnlen(s,m) strnlen(s,m)
+ #define yaffs_sprintf sprintf
+ #define yaffs_toupper(a) toupper(a)
+
+@@ -146,10 +142,10 @@
+ #define YAFFS_LOSTNFOUND_PREFIX "obj"
+ /* #define YPRINTF(x) printf x */
+
+-#define YAFFS_ROOT_MODE 0666
+-#define YAFFS_LOSTNFOUND_MODE 0666
++#define YAFFS_ROOT_MODE 0755
++#define YAFFS_LOSTNFOUND_MODE 0700
+
+-#define yaffs_SumCompare(x, y) ((x) == (y))
++#define yaffs_sum_cmp(x, y) ((x) == (y))
+ #define yaffs_strcmp(a, b) strcmp(a, b)
+
+ #else
+@@ -158,46 +154,180 @@
+
+ #endif
+
+-/* see yaffs_fs.c */
+-extern unsigned int yaffs_traceMask;
+-extern unsigned int yaffs_wr_attempts;
++#if defined(CONFIG_YAFFS_DIRECT) || defined(CONFIG_YAFFS_WINCE)
+
+-/*
+- * Tracing flags.
+- * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
+- */
++#ifdef CONFIG_YAFFSFS_PROVIDE_VALUES
++
++#ifndef O_RDONLY
++#define O_RDONLY 00
++#endif
++
++#ifndef O_WRONLY
++#define O_WRONLY 01
++#endif
++
++#ifndef O_RDWR
++#define O_RDWR 02
++#endif
++
++#ifndef O_CREAT
++#define O_CREAT 0100
++#endif
++
++#ifndef O_EXCL
++#define O_EXCL 0200
++#endif
++
++#ifndef O_TRUNC
++#define O_TRUNC 01000
++#endif
++
++#ifndef O_APPEND
++#define O_APPEND 02000
++#endif
++
++#ifndef SEEK_SET
++#define SEEK_SET 0
++#endif
++
++#ifndef SEEK_CUR
++#define SEEK_CUR 1
++#endif
++
++#ifndef SEEK_END
++#define SEEK_END 2
++#endif
++
++#ifndef EBUSY
++#define EBUSY 16
++#endif
++
++#ifndef ENODEV
++#define ENODEV 19
++#endif
++
++#ifndef EINVAL
++#define EINVAL 22
++#endif
++
++#ifndef EBADF
++#define EBADF 9
++#endif
++
++#ifndef EACCES
++#define EACCES 13
++#endif
++
++#ifndef EXDEV
++#define EXDEV 18
++#endif
++
++#ifndef ENOENT
++#define ENOENT 2
++#endif
++
++#ifndef ENOSPC
++#define ENOSPC 28
++#endif
++
++#ifndef ERANGE
++#define ERANGE 34
++#endif
++
++#ifndef ENODATA
++#define ENODATA 61
++#endif
++
++#ifndef ENOTEMPTY
++#define ENOTEMPTY 39
++#endif
++
++#ifndef ENAMETOOLONG
++#define ENAMETOOLONG 36
++#endif
++
++#ifndef ENOMEM
++#define ENOMEM 12
++#endif
++
++#ifndef EEXIST
++#define EEXIST 17
++#endif
++
++#ifndef ENOTDIR
++#define ENOTDIR 20
++#endif
++
++#ifndef EISDIR
++#define EISDIR 21
++#endif
++
++
++/* Mode flags */
++
++#ifndef S_IFMT
++#define S_IFMT 0170000
++#endif
++
++#ifndef S_IFLNK
++#define S_IFLNK 0120000
++#endif
+
+-#define YAFFS_TRACE_OS 0x00000002
+-#define YAFFS_TRACE_ALLOCATE 0x00000004
+-#define YAFFS_TRACE_SCAN 0x00000008
+-#define YAFFS_TRACE_BAD_BLOCKS 0x00000010
+-#define YAFFS_TRACE_ERASE 0x00000020
+-#define YAFFS_TRACE_GC 0x00000040
+-#define YAFFS_TRACE_WRITE 0x00000080
+-#define YAFFS_TRACE_TRACING 0x00000100
+-#define YAFFS_TRACE_DELETION 0x00000200
+-#define YAFFS_TRACE_BUFFERS 0x00000400
+-#define YAFFS_TRACE_NANDACCESS 0x00000800
+-#define YAFFS_TRACE_GC_DETAIL 0x00001000
+-#define YAFFS_TRACE_SCAN_DEBUG 0x00002000
+-#define YAFFS_TRACE_MTD 0x00004000
+-#define YAFFS_TRACE_CHECKPOINT 0x00008000
+-
+-#define YAFFS_TRACE_VERIFY 0x00010000
+-#define YAFFS_TRACE_VERIFY_NAND 0x00020000
+-#define YAFFS_TRACE_VERIFY_FULL 0x00040000
+-#define YAFFS_TRACE_VERIFY_ALL 0x000F0000
+-
+-
+-#define YAFFS_TRACE_ERROR 0x40000000
+-#define YAFFS_TRACE_BUG 0x80000000
+-#define YAFFS_TRACE_ALWAYS 0xF0000000
++#ifndef S_IFDIR
++#define S_IFDIR 0040000
++#endif
++
++#ifndef S_IFREG
++#define S_IFREG 0100000
++#endif
+
++#ifndef S_IREAD
++#define S_IREAD 0000400
++#endif
++
++#ifndef S_IWRITE
++#define S_IWRITE 0000200
++#endif
+
+-#define T(mask, p) do { if ((mask) & (yaffs_traceMask | YAFFS_TRACE_ALWAYS)) TOUT(p); } while (0)
++#ifndef S_IEXEC
++#define S_IEXEC 0000100
++#endif
++
++#ifndef XATTR_CREATE
++#define XATTR_CREATE 1
++#endif
++
++#ifndef XATTR_REPLACE
++#define XATTR_REPLACE 2
++#endif
++
++#ifndef R_OK
++#define R_OK 4
++#define W_OK 2
++#define X_OK 1
++#define F_OK 0
++#endif
++
++#else
++#include <errno.h>
++#include <sys/stat.h>
++#include <fcntl.h>
++#endif
++
++#endif
++
++#ifndef Y_DUMP_STACK
++#define Y_DUMP_STACK() do { } while (0)
++#endif
+
+ #ifndef YBUG
+-#define YBUG() do {T(YAFFS_TRACE_BUG, (TSTR("==>> yaffs bug: " __FILE__ " %d" TENDSTR), __LINE__)); } while (0)
++#define YBUG() do {\
++ T(YAFFS_TRACE_BUG,\
++ (TSTR("==>> yaffs bug: " __FILE__ " %d" TENDSTR),\
++ __LINE__));\
++ Y_DUMP_STACK();\
++} while (0)
+ #endif
+
++
+ #endif
diff --git a/target/linux/generic/patches-3.3/503-yaffs_symlink_bug.patch b/target/linux/generic/patches-3.3/503-yaffs_symlink_bug.patch
new file mode 100644
index 000000000..dabf2871b
--- /dev/null
+++ b/target/linux/generic/patches-3.3/503-yaffs_symlink_bug.patch
@@ -0,0 +1,17 @@
+--- a/fs/yaffs2/yaffs_guts.c
++++ b/fs/yaffs2/yaffs_guts.c
+@@ -1709,11 +1709,11 @@ static int yaffs_change_obj_name(yaffs_o
+ }
+
+ /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
+- if (obj->my_dev->param.is_yaffs2)
++ // if (obj->my_dev->param.is_yaffs2)
+ unlinkOp = (new_dir == obj->my_dev->unlinked_dir);
+- else
++ /* else
+ unlinkOp = (new_dir == obj->my_dev->unlinked_dir
+- && obj->variant_type == YAFFS_OBJECT_TYPE_FILE);
++ && obj->variant_type == YAFFS_OBJECT_TYPE_FILE); */
+
+ deleteOp = (new_dir == obj->my_dev->del_dir);
+
diff --git a/target/linux/generic/patches-3.3/504-yaffs_mutex_fix.patch b/target/linux/generic/patches-3.3/504-yaffs_mutex_fix.patch
new file mode 100644
index 000000000..b34b12f78
--- /dev/null
+++ b/target/linux/generic/patches-3.3/504-yaffs_mutex_fix.patch
@@ -0,0 +1,20 @@
+--- a/fs/yaffs2/yaffs_vfs_glue.c
++++ b/fs/yaffs2/yaffs_vfs_glue.c
+@@ -3036,7 +3036,7 @@ static struct super_block *yaffs_interna
+ YINIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->searchContexts));
+ param->remove_obj_fn = yaffs_remove_obj_callback;
+
+- init_MUTEX(&(yaffs_dev_to_lc(dev)->grossLock));
++ sema_init(&(yaffs_dev_to_lc(dev)->grossLock), 1);
+
+ yaffs_gross_lock(dev);
+
+@@ -3494,7 +3494,7 @@ static int __init init_yaffs_fs(void)
+
+
+
+- init_MUTEX(&yaffs_context_lock);
++ sema_init((&yaffs_context_lock), 1);
+
+ /* Install the proc_fs entries */
+ my_proc_entry = create_proc_entry("yaffs",
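
The 504 mutex fix above is a pure API substitution: init_MUTEX() was removed from later kernels, and sema_init(sem, 1) is its direct replacement, still producing a binary (count-of-one) semaphore taken with down() and released with up(). A minimal sketch of that pattern, using hypothetical names (demo_lock, demo_critical_section) rather than the yaffs ones:

#include <linux/semaphore.h>

static struct semaphore demo_lock;	/* hypothetical example lock */

static void demo_lock_init(void)
{
	/* init_MUTEX(&demo_lock) used to expand to exactly this call */
	sema_init(&demo_lock, 1);	/* count of 1 => binary semaphore */
}

static void demo_critical_section(void)
{
	down(&demo_lock);		/* may sleep while contended */
	/* ... work protected by the lock goes here ... */
	up(&demo_lock);
}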
diff --git a/target/linux/generic/patches-3.3/505-2.6.39_fix.patch b/target/linux/generic/patches-3.3/505-2.6.39_fix.patch
new file mode 100644
index 000000000..5108f7be3
--- /dev/null
+++ b/target/linux/generic/patches-3.3/505-2.6.39_fix.patch
@@ -0,0 +1,147 @@
+--- a/fs/yaffs2/yaffs_vfs_glue.c
++++ b/fs/yaffs2/yaffs_vfs_glue.c
+@@ -72,7 +72,7 @@
+ #include <linux/init.h>
+ #include <linux/fs.h>
+ #include <linux/proc_fs.h>
+-#include <linux/smp_lock.h>
++#include <linux/mutex.h>
+ #include <linux/pagemap.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/interrupt.h>
+@@ -97,6 +97,8 @@
+
+ #include <asm/div64.h>
+
++static DEFINE_MUTEX(yaffs_mutex);
++
+ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+ #include <linux/statfs.h>
+@@ -1538,7 +1540,7 @@ static loff_t yaffs_dir_llseek(struct fi
+ {
+ long long retval;
+
+- lock_kernel();
++ mutex_lock(&yaffs_mutex);
+
+ switch (origin){
+ case 2:
+@@ -1555,7 +1557,7 @@ static loff_t yaffs_dir_llseek(struct fi
+
+ retval = offset;
+ }
+- unlock_kernel();
++ mutex_unlock(&yaffs_mutex);
+ return retval;
+ }
+
+@@ -3087,98 +3089,52 @@ static struct super_block *yaffs_interna
+ return sb;
+ }
+
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+ {
+ return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs_read_super(struct file_system_type *fs,
++static struct dentry *yaffs_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+- void *data, struct vfsmount *mnt)
+-{
+-
+- return get_sb_bdev(fs, flags, dev_name, data,
+- yaffs_internal_read_super_mtd, mnt);
+-}
+-#else
+-static struct super_block *yaffs_read_super(struct file_system_type *fs,
+- int flags, const char *dev_name,
+- void *data)
++ void *data)
+ {
+
+- return get_sb_bdev(fs, flags, dev_name, data,
++ return mount_bdev(fs, flags, dev_name, data,
+ yaffs_internal_read_super_mtd);
+ }
+-#endif
+
+ static struct file_system_type yaffs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs",
+- .get_sb = yaffs_read_super,
++ .mount = yaffs_read_super,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
+-#else
+-static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
+- int silent)
+-{
+- return yaffs_internal_read_super(1, sb, data, silent);
+-}
+-
+-static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
+- FS_REQUIRES_DEV);
+-#endif
+-
+
+ #ifdef CONFIG_YAFFS_YAFFS2
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+ {
+ return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+-static int yaffs2_read_super(struct file_system_type *fs,
+- int flags, const char *dev_name, void *data,
+- struct vfsmount *mnt)
++static struct dentry *yaffs2_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name,
++ void *data)
+ {
+- return get_sb_bdev(fs, flags, dev_name, data,
+- yaffs2_internal_read_super_mtd, mnt);
++ return mount_bdev(fs, flags, dev_name, data,
++ yaffs_internal_read_super_mtd);
+ }
+-#else
+-static struct super_block *yaffs2_read_super(struct file_system_type *fs,
+- int flags, const char *dev_name,
+- void *data)
+-{
+-
+- return get_sb_bdev(fs, flags, dev_name, data,
+- yaffs2_internal_read_super_mtd);
+-}
+-#endif
+
+ static struct file_system_type yaffs2_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs2",
+- .get_sb = yaffs2_read_super,
++ .mount = yaffs2_read_super,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
+-#else
+-static struct super_block *yaffs2_read_super(struct super_block *sb,
+- void *data, int silent)
+-{
+- return yaffs_internal_read_super(2, sb, data, silent);
+-}
+-
+-static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
+- FS_REQUIRES_DEV);
+-#endif
+
+ #endif /* CONFIG_YAFFS_YAFFS2 */
+
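
The 2.6.39 fix above covers two unrelated API removals: lock_kernel()/unlock_kernel() (the big kernel lock) are replaced by a private mutex, and superblock mounting moves from the removed .get_sb/get_sb_bdev() interface to .mount/mount_bdev(), which hands back the root dentry directly instead of filling in a vfsmount. A minimal sketch of the new-style registration, with hypothetical names (demofs, demo_fill_super, demo_mount) rather than the yaffs ones:

#include <linux/fs.h>
#include <linux/module.h>

/* hypothetical fill_super callback; yaffs passes yaffs_internal_read_super_mtd() here */
static int demo_fill_super(struct super_block *sb, void *data, int silent)
{
	return -EINVAL;			/* stub: a real filesystem sets up sb here */
}

static struct dentry *demo_mount(struct file_system_type *fs, int flags,
				 const char *dev_name, void *data)
{
	/* mount_bdev() replaces get_sb_bdev(): no vfsmount argument,
	 * and the root dentry is returned on success */
	return mount_bdev(fs, flags, dev_name, data, demo_fill_super);
}

static struct file_system_type demo_fs_type = {
	.owner    = THIS_MODULE,
	.name     = "demofs",
	.mount    = demo_mount,
	.kill_sb  = kill_block_super,
	.fs_flags = FS_REQUIRES_DEV,
};

The type is then registered with register_filesystem(&demo_fs_type) at module init and removed with unregister_filesystem(), which is what the yaffs glue code already does.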
diff --git a/target/linux/generic/patches-3.3/506-yaffs2-3.2_fix.patch b/target/linux/generic/patches-3.3/506-yaffs2-3.2_fix.patch
new file mode 100644
index 000000000..f84332ab2
--- /dev/null
+++ b/target/linux/generic/patches-3.3/506-yaffs2-3.2_fix.patch
@@ -0,0 +1,289 @@
+--- a/fs/yaffs2/yaffs_vfs_glue.c
++++ b/fs/yaffs2/yaffs_vfs_glue.c
+@@ -220,11 +220,34 @@ static struct inode *yaffs_iget(struct s
+ #define yaffs_SuperToDevice(sb) ((yaffs_dev_t *)sb->u.generic_sbp)
+ #endif
+
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
++static inline void yaffs_set_nlink(struct inode *inode, unsigned int nlink)
++{
++ set_nlink(inode, nlink);
++}
++
++static inline void yaffs_dec_link_count(struct inode *inode)
++{
++ inode_dec_link_count(inode);
++}
++#else
++static inline void yaffs_set_nlink(struct inode *inode, unsigned int nlink)
++{
++ inode->i_nlink = nlink;
++}
++
++static inline void yaffs_dec_link_count(struct inode *inode)
++{
++ inode->i_nlink--;
++ mark_inode_dirty(inode);
++}
++#endif
++
+
+ #define update_dir_time(dir) do {\
+ (dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \
+ } while(0)
+-
++
+ static void yaffs_put_super(struct super_block *sb);
+
+ static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+@@ -238,7 +261,10 @@ static int yaffs_file_flush(struct file
+ static int yaffs_file_flush(struct file *file);
+ #endif
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
++static int yaffs_sync_object(struct file *file, loff_t start, loff_t end,
++ int datasync);
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
+ static int yaffs_sync_object(struct file *file, int datasync);
+ #else
+ static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+@@ -513,7 +539,7 @@ static unsigned yaffs_gc_control_callbac
+ {
+ return yaffs_gc_control;
+ }
+-
++
+ static void yaffs_gross_lock(yaffs_dev_t *dev)
+ {
+ T(YAFFS_TRACE_LOCK, (TSTR("yaffs locking %p\n"), current));
+@@ -1362,7 +1388,7 @@ static void yaffs_fill_inode_from_obj(st
+ inode->i_size = yaffs_get_obj_length(obj);
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+
+- inode->i_nlink = yaffs_get_obj_link_count(obj);
++ yaffs_set_nlink(inode, yaffs_get_obj_link_count(obj));
+
+ T(YAFFS_TRACE_OS,
+ (TSTR("yaffs_fill_inode mode %x uid %d gid %d size %d count %d\n"),
+@@ -1810,10 +1836,9 @@ static int yaffs_unlink(struct inode *di
+ retVal = yaffs_unlinker(obj, dentry->d_name.name);
+
+ if (retVal == YAFFS_OK) {
+- dentry->d_inode->i_nlink--;
++ yaffs_dec_link_count(dentry->d_inode);
+ dir->i_version++;
+ yaffs_gross_unlock(dev);
+- mark_inode_dirty(dentry->d_inode);
+ update_dir_time(dir);
+ return 0;
+ }
+@@ -1844,7 +1869,8 @@ static int yaffs_link(struct dentry *old
+ obj);
+
+ if (link) {
+- old_dentry->d_inode->i_nlink = yaffs_get_obj_link_count(obj);
++ yaffs_set_nlink(old_dentry->d_inode,
++ yaffs_get_obj_link_count(obj));
+ d_instantiate(dentry, old_dentry->d_inode);
+ atomic_inc(&old_dentry->d_inode->i_count);
+ T(YAFFS_TRACE_OS,
+@@ -1894,7 +1920,10 @@ static int yaffs_symlink(struct inode *d
+ return -ENOMEM;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
++static int yaffs_sync_object(struct file *file, loff_t start, loff_t end,
++ int datasync)
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
+ static int yaffs_sync_object(struct file *file, int datasync)
+ #else
+ static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+@@ -1961,11 +1990,9 @@ static int yaffs_rename(struct inode *ol
+ yaffs_gross_unlock(dev);
+
+ if (retVal == YAFFS_OK) {
+- if (target) {
+- new_dentry->d_inode->i_nlink--;
+- mark_inode_dirty(new_dentry->d_inode);
+- }
+-
++ if (target)
++ yaffs_dec_link_count(new_dentry->d_inode);
++
+ update_dir_time(old_dir);
+ if(old_dir != new_dir)
+ update_dir_time(new_dir);
+@@ -1985,7 +2012,7 @@ static int yaffs_setattr(struct dentry *
+ (TSTR("yaffs_setattr of object %d\n"),
+ yaffs_InodeToObject(inode)->obj_id));
+
+- /* Fail if a requested resize >= 2GB */
++ /* Fail if a requested resize >= 2GB */
+ if (attr->ia_valid & ATTR_SIZE &&
+ (attr->ia_size >> 31))
+ error = -EINVAL;
+@@ -2216,7 +2243,7 @@ static void yaffs_flush_inodes(struct su
+ {
+ struct inode *iptr;
+ yaffs_obj_t *obj;
+-
++
+ list_for_each_entry(iptr,&sb->s_inodes, i_sb_list){
+ obj = yaffs_InodeToObject(iptr);
+ if(obj){
+@@ -2230,10 +2257,10 @@ static void yaffs_flush_inodes(struct su
+
+ static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
+ {
+- yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
++ yaffs_dev_t *dev = yaffs_SuperToDevice(sb);
+ if(!dev)
+ return;
+-
++
+ yaffs_flush_inodes(sb);
+ yaffs_update_dirty_dirs(dev);
+ yaffs_flush_whole_cache(dev);
+@@ -2301,7 +2328,7 @@ static int yaffs_do_sync_fs(struct super
+ * yaffs_bg_start() launches the background thread.
+ * yaffs_bg_stop() cleans up the background thread.
+ *
+- * NB:
++ * NB:
+ * The thread should only run after the yaffs is initialised
+ * The thread should be stopped before yaffs is unmounted.
+ * The thread should not do any writing while the fs is in read only.
+@@ -2872,7 +2899,7 @@ static struct super_block *yaffs_interna
+
+ dev = kmalloc(sizeof(yaffs_dev_t), GFP_KERNEL);
+ context = kmalloc(sizeof(struct yaffs_LinuxContext),GFP_KERNEL);
+-
++
+ if(!dev || !context ){
+ if(dev)
+ kfree(dev);
+@@ -2905,7 +2932,7 @@ static struct super_block *yaffs_interna
+ #else
+ sb->u.generic_sbp = dev;
+ #endif
+-
++
+ dev->driver_context = mtd;
+ param->name = mtd->name;
+
+@@ -3005,7 +3032,7 @@ static struct super_block *yaffs_interna
+ param->gc_control = yaffs_gc_control_callback;
+
+ yaffs_dev_to_lc(dev)->superBlock= sb;
+-
++
+
+ #ifndef CONFIG_YAFFS_DOES_ECC
+ param->use_nand_ecc = 1;
+@@ -3047,10 +3074,10 @@ static struct super_block *yaffs_interna
+ T(YAFFS_TRACE_OS,
+ (TSTR("yaffs_read_super: guts initialised %s\n"),
+ (err == YAFFS_OK) ? "OK" : "FAILED"));
+-
++
+ if(err == YAFFS_OK)
+ yaffs_bg_start(dev);
+-
++
+ if(!context->bgThread)
+ param->defered_dir_update = 0;
+
+@@ -3125,7 +3152,7 @@ static struct dentry *yaffs2_read_super(
+ void *data)
+ {
+ return mount_bdev(fs, flags, dev_name, data,
+- yaffs_internal_read_super_mtd);
++ yaffs2_internal_read_super_mtd);
+ }
+
+ static struct file_system_type yaffs2_fs_type = {
+@@ -3223,7 +3250,7 @@ static int yaffs_proc_read(char *page,
+ buf += sprintf(buf,"\n");
+ else {
+ step-=2;
+-
++
+ down(&yaffs_context_lock);
+
+ /* Locate and print the Nth entry. Order N-squared but N is small. */
+@@ -3240,7 +3267,7 @@ static int yaffs_proc_read(char *page,
+ buf = yaffs_dump_dev_part0(buf, dev);
+ } else
+ buf = yaffs_dump_dev_part1(buf, dev);
+-
++
+ break;
+ }
+ up(&yaffs_context_lock);
+@@ -3267,7 +3294,7 @@ static int yaffs_stats_proc_read(char *p
+ int erasedChunks;
+
+ erasedChunks = dev->n_erased_blocks * dev->param.chunks_per_block;
+-
++
+ buf += sprintf(buf,"%d, %d, %d, %u, %u, %u, %u\n",
+ n, dev->n_free_chunks, erasedChunks,
+ dev->bg_gcs, dev->oldest_dirty_gc_count,
+--- a/fs/yaffs2/yaffs_mtdif1.c
++++ b/fs/yaffs2/yaffs_mtdif1.c
+@@ -34,6 +34,7 @@
+ #include "linux/version.h"
+ #include "linux/types.h"
+ #include "linux/mtd/mtd.h"
++#include "mtd/mtd-abi.h"
+
+ /* Don't compile this module if we don't have MTD's mtd_oob_ops interface */
+ #if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
+@@ -127,7 +128,7 @@ int nandmtd1_WriteChunkWithTagsToNAND(ya
+ #endif
+
+ memset(&ops, 0, sizeof(ops));
+- ops.mode = MTD_OOB_AUTO;
++ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = (data) ? chunkBytes : 0;
+ ops.ooblen = YTAG1_SIZE;
+ ops.datbuf = (__u8 *)data;
+@@ -179,7 +180,7 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y
+ int deleted;
+
+ memset(&ops, 0, sizeof(ops));
+- ops.mode = MTD_OOB_AUTO;
++ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = (data) ? chunkBytes : 0;
+ ops.ooblen = YTAG1_SIZE;
+ ops.datbuf = data;
+--- a/fs/yaffs2/yaffs_mtdif2.c
++++ b/fs/yaffs2/yaffs_mtdif2.c
+@@ -21,6 +21,7 @@
+ #include "linux/mtd/mtd.h"
+ #include "linux/types.h"
+ #include "linux/time.h"
++#include "mtd/mtd-abi.h"
+
+ #include "yaffs_packedtags2.h"
+
+@@ -71,7 +72,7 @@ int nandmtd2_WriteChunkWithTagsToNAND(ya
+ yaffs_PackTags2(&pt, tags, !dev->param.no_tags_ecc);
+
+ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+- ops.mode = MTD_OOB_AUTO;
++ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.ooblen = (dev->param.inband_tags) ? 0 : packed_tags_size;
+ ops.len = dev->param.total_bytes_per_chunk;
+ ops.ooboffs = 0;
+@@ -136,7 +137,7 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y
+ retval = mtd->read(mtd, addr, dev->param.total_bytes_per_chunk,
+ &dummy, data);
+ else if (tags) {
+- ops.mode = MTD_OOB_AUTO;
++ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.ooblen = packed_tags_size;
+ ops.len = data ? dev->data_bytes_per_chunk : packed_tags_size;
+ ops.ooboffs = 0;
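
The 3.2 fix above is driven by i_nlink becoming off-limits to direct writes from filesystems around kernel 3.2: link counts now go through set_nlink() and inode_dec_link_count(), and the MTD OOB mode constant was renamed from MTD_OOB_AUTO to MTD_OPS_AUTO_OOB. A version-guarded helper in the same spirit as the patch, shown as a sketch with the hypothetical name demo_drop_nlink():

#include <linux/fs.h>
#include <linux/version.h>

/* hypothetical wrapper mirroring yaffs_dec_link_count() from the patch */
static inline void demo_drop_nlink(struct inode *inode)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
	inode_dec_link_count(inode);	/* drops the count and marks the inode dirty */
#else
	inode->i_nlink--;		/* older kernels allowed the direct write */
	mark_inode_dirty(inode);
#endif
}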
diff --git a/target/linux/generic/patches-3.3/507-yaffs2-3.3_fix.patch b/target/linux/generic/patches-3.3/507-yaffs2-3.3_fix.patch
new file mode 100644
index 000000000..a269dc256
--- /dev/null
+++ b/target/linux/generic/patches-3.3/507-yaffs2-3.3_fix.patch
@@ -0,0 +1,71 @@
+--- a/fs/yaffs2/yaffs_vfs_glue.c
++++ b/fs/yaffs2/yaffs_vfs_glue.c
+@@ -274,8 +274,13 @@ static int yaffs_sync_object(struct file
+ static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir);
+
+ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
++ struct nameidata *n);
++#else
+ static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *n);
++#endif
+ static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+ struct nameidata *n);
+ #else
+@@ -287,9 +292,17 @@ static int yaffs_link(struct dentry *old
+ static int yaffs_unlink(struct inode *dir, struct dentry *dentry);
+ static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+ const char *symname);
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
++#else
+ static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
++#endif
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
++ dev_t dev);
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+ dev_t dev);
+ #else
+@@ -1708,7 +1721,10 @@ out:
+ #define YCRED(x) (x->cred)
+ #endif
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
++ dev_t rdev)
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+ dev_t rdev)
+ #else
+@@ -1798,7 +1814,11 @@ static int yaffs_mknod(struct inode *dir
+ return error;
+ }
+
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
++#else
+ static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++#endif
+ {
+ int retVal;
+ T(YAFFS_TRACE_OS, (TSTR("yaffs_mkdir\n")));
+@@ -1806,7 +1826,10 @@ static int yaffs_mkdir(struct inode *dir
+ return retVal;
+ }
+
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
++ struct nameidata *n)
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *n)
+ #else
diff --git a/target/linux/generic/patches-3.3/510-jffs2_make_lzma_available.patch b/target/linux/generic/patches-3.3/510-jffs2_make_lzma_available.patch
new file mode 100644
index 000000000..fa79ba313
--- /dev/null
+++ b/target/linux/generic/patches-3.3/510-jffs2_make_lzma_available.patch
@@ -0,0 +1,5142 @@
+--- a/fs/jffs2/Kconfig
++++ b/fs/jffs2/Kconfig
+@@ -139,6 +139,15 @@ config JFFS2_LZO
+ This feature was added in July, 2007. Say 'N' if you need
+ compatibility with older bootloaders or kernels.
+
++config JFFS2_LZMA
++ bool "JFFS2 LZMA compression support" if JFFS2_COMPRESSION_OPTIONS
++ select LZMA_COMPRESS
++ select LZMA_DECOMPRESS
++ depends on JFFS2_FS
++ default n
++ help
++ JFFS2 wrapper to the LZMA C SDK
++
+ config JFFS2_RTIME
+ bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS
+ depends on JFFS2_FS
+--- a/fs/jffs2/Makefile
++++ b/fs/jffs2/Makefile
+@@ -18,4 +18,7 @@ jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rub
+ jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o
+ jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o
+ jffs2-$(CONFIG_JFFS2_LZO) += compr_lzo.o
++jffs2-$(CONFIG_JFFS2_LZMA) += compr_lzma.o
+ jffs2-$(CONFIG_JFFS2_SUMMARY) += summary.o
++
++CFLAGS_compr_lzma.o += -Iinclude/linux -Ilib/lzma
+--- a/fs/jffs2/compr.c
++++ b/fs/jffs2/compr.c
+@@ -374,6 +374,9 @@ int __init jffs2_compressors_init(void)
+ #ifdef CONFIG_JFFS2_LZO
+ jffs2_lzo_init();
+ #endif
++#ifdef CONFIG_JFFS2_LZMA
++ jffs2_lzma_init();
++#endif
+ /* Setting default compression mode */
+ #ifdef CONFIG_JFFS2_CMODE_NONE
+ jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
+@@ -397,6 +400,9 @@ int __init jffs2_compressors_init(void)
+ int jffs2_compressors_exit(void)
+ {
+ /* Unregistering compressors */
++#ifdef CONFIG_JFFS2_LZMA
++ jffs2_lzma_exit();
++#endif
+ #ifdef CONFIG_JFFS2_LZO
+ jffs2_lzo_exit();
+ #endif
+--- a/fs/jffs2/compr.h
++++ b/fs/jffs2/compr.h
+@@ -29,9 +29,9 @@
+ #define JFFS2_DYNRUBIN_PRIORITY 20
+ #define JFFS2_LZARI_PRIORITY 30
+ #define JFFS2_RTIME_PRIORITY 50
+-#define JFFS2_ZLIB_PRIORITY 60
+-#define JFFS2_LZO_PRIORITY 80
+-
++#define JFFS2_LZMA_PRIORITY 70
++#define JFFS2_ZLIB_PRIORITY 80
++#define JFFS2_LZO_PRIORITY 90
+
+ #define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */
+ #define JFFS2_DYNRUBIN_DISABLED /* for decompression */
+@@ -101,5 +101,9 @@ void jffs2_zlib_exit(void);
+ int jffs2_lzo_init(void);
+ void jffs2_lzo_exit(void);
+ #endif
++#ifdef CONFIG_JFFS2_LZMA
++int jffs2_lzma_init(void);
++void jffs2_lzma_exit(void);
++#endif
+
+ #endif /* __JFFS2_COMPR_H__ */
+--- /dev/null
++++ b/fs/jffs2/compr_lzma.c
+@@ -0,0 +1,128 @@
++/*
++ * JFFS2 -- Journalling Flash File System, Version 2.
++ *
++ * For licensing information, see the file 'LICENCE' in this directory.
++ *
++ * JFFS2 wrapper to the LZMA C SDK
++ *
++ */
++
++#include <linux/lzma.h>
++#include "compr.h"
++
++#ifdef __KERNEL__
++ static DEFINE_MUTEX(deflate_mutex);
++#endif
++
++CLzmaEncHandle *p;
++Byte propsEncoded[LZMA_PROPS_SIZE];
++SizeT propsSize = sizeof(propsEncoded);
++
++STATIC void lzma_free_workspace(void)
++{
++ LzmaEnc_Destroy(p, &lzma_alloc, &lzma_alloc);
++}
++
++STATIC int INIT lzma_alloc_workspace(CLzmaEncProps *props)
++{
++ if ((p = (CLzmaEncHandle *)LzmaEnc_Create(&lzma_alloc)) == NULL)
++ {
++ PRINT_ERROR("Failed to allocate lzma deflate workspace\n");
++ return -ENOMEM;
++ }
++
++ if (LzmaEnc_SetProps(p, props) != SZ_OK)
++ {
++ lzma_free_workspace();
++ return -1;
++ }
++
++ if (LzmaEnc_WriteProperties(p, propsEncoded, &propsSize) != SZ_OK)
++ {
++ lzma_free_workspace();
++ return -1;
++ }
++
++ return 0;
++}
++
++STATIC int jffs2_lzma_compress(unsigned char *data_in, unsigned char *cpage_out,
++ uint32_t *sourcelen, uint32_t *dstlen)
++{
++ SizeT compress_size = (SizeT)(*dstlen);
++ int ret;
++
++ #ifdef __KERNEL__
++ mutex_lock(&deflate_mutex);
++ #endif
++
++ ret = LzmaEnc_MemEncode(p, cpage_out, &compress_size, data_in, *sourcelen,
++ 0, NULL, &lzma_alloc, &lzma_alloc);
++
++ #ifdef __KERNEL__
++ mutex_unlock(&deflate_mutex);
++ #endif
++
++ if (ret != SZ_OK)
++ return -1;
++
++ *dstlen = (uint32_t)compress_size;
++
++ return 0;
++}
++
++STATIC int jffs2_lzma_decompress(unsigned char *data_in, unsigned char *cpage_out,
++ uint32_t srclen, uint32_t destlen)
++{
++ int ret;
++ SizeT dl = (SizeT)destlen;
++ SizeT sl = (SizeT)srclen;
++ ELzmaStatus status;
++
++ ret = LzmaDecode(cpage_out, &dl, data_in, &sl, propsEncoded,
++ propsSize, LZMA_FINISH_ANY, &status, &lzma_alloc);
++
++ if (ret != SZ_OK || status == LZMA_STATUS_NOT_FINISHED || dl != (SizeT)destlen)
++ return -1;
++
++ return 0;
++}
++
++static struct jffs2_compressor jffs2_lzma_comp = {
++ .priority = JFFS2_LZMA_PRIORITY,
++ .name = "lzma",
++ .compr = JFFS2_COMPR_LZMA,
++ .compress = &jffs2_lzma_compress,
++ .decompress = &jffs2_lzma_decompress,
++ .disabled = 0,
++};
++
++int INIT jffs2_lzma_init(void)
++{
++ int ret;
++ CLzmaEncProps props;
++ LzmaEncProps_Init(&props);
++
++ props.dictSize = LZMA_BEST_DICT(0x2000);
++ props.level = LZMA_BEST_LEVEL;
++ props.lc = LZMA_BEST_LC;
++ props.lp = LZMA_BEST_LP;
++ props.pb = LZMA_BEST_PB;
++ props.fb = LZMA_BEST_FB;
++
++ ret = lzma_alloc_workspace(&props);
++ if (ret < 0)
++ return ret;
++
++ ret = jffs2_register_compressor(&jffs2_lzma_comp);
++ if (ret)
++ lzma_free_workspace();
++
++ return ret;
++}
++
++void jffs2_lzma_exit(void)
++{
++ jffs2_unregister_compressor(&jffs2_lzma_comp);
++ lzma_free_workspace();
++}
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -371,14 +371,41 @@ static int __init init_jffs2_fs(void)
+ BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
+ BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
+
+- printk(KERN_INFO "JFFS2 version 2.2."
++ printk(KERN_INFO "JFFS2 version 2.2"
+ #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ " (NAND)"
+ #endif
+ #ifdef CONFIG_JFFS2_SUMMARY
+- " (SUMMARY) "
++ " (SUMMARY)"
+ #endif
+- " © 2001-2006 Red Hat, Inc.\n");
++#ifdef CONFIG_JFFS2_ZLIB
++ " (ZLIB)"
++#endif
++#ifdef CONFIG_JFFS2_LZO
++ " (LZO)"
++#endif
++#ifdef CONFIG_JFFS2_LZMA
++ " (LZMA)"
++#endif
++#ifdef CONFIG_JFFS2_RTIME
++ " (RTIME)"
++#endif
++#ifdef CONFIG_JFFS2_RUBIN
++ " (RUBIN)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_NONE
++ " (CMODE_NONE)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_PRIORITY
++ " (CMODE_PRIORITY)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_SIZE
++ " (CMODE_SIZE)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO
++ " (CMODE_FAVOURLZO)"
++#endif
++ " (c) 2001-2006 Red Hat, Inc.\n");
+
+ jffs2_inode_cachep = kmem_cache_create("jffs2_i",
+ sizeof(struct jffs2_inode_info),
+--- a/include/linux/jffs2.h
++++ b/include/linux/jffs2.h
+@@ -46,6 +46,7 @@
+ #define JFFS2_COMPR_DYNRUBIN 0x05
+ #define JFFS2_COMPR_ZLIB 0x06
+ #define JFFS2_COMPR_LZO 0x07
++#define JFFS2_COMPR_LZMA 0x08
+ /* Compatibility flags. */
+ #define JFFS2_COMPAT_MASK 0xc000 /* What do to if an unknown nodetype is found */
+ #define JFFS2_NODE_ACCURATE 0x2000
+--- /dev/null
++++ b/include/linux/lzma.h
+@@ -0,0 +1,62 @@
++#ifndef __LZMA_H__
++#define __LZMA_H__
++
++#ifdef __KERNEL__
++ #include <linux/kernel.h>
++ #include <linux/sched.h>
++ #include <linux/slab.h>
++ #include <linux/vmalloc.h>
++ #include <linux/init.h>
++ #define LZMA_MALLOC vmalloc
++ #define LZMA_FREE vfree
++ #define PRINT_ERROR(msg) printk(KERN_WARNING #msg)
++ #define INIT __init
++ #define STATIC static
++#else
++ #include <stdint.h>
++ #include <stdlib.h>
++ #include <stdio.h>
++ #include <unistd.h>
++ #include <string.h>
++ #include <asm/types.h>
++ #include <errno.h>
++ #include <linux/jffs2.h>
++ #ifndef PAGE_SIZE
++ extern int page_size;
++ #define PAGE_SIZE page_size
++ #endif
++ #define LZMA_MALLOC malloc
++ #define LZMA_FREE free
++ #define PRINT_ERROR(msg) fprintf(stderr, msg)
++ #define INIT
++ #define STATIC
++#endif
++
++#include "lzma/LzmaDec.h"
++#include "lzma/LzmaEnc.h"
++
++#define LZMA_BEST_LEVEL (9)
++#define LZMA_BEST_LC (0)
++#define LZMA_BEST_LP (0)
++#define LZMA_BEST_PB (0)
++#define LZMA_BEST_FB (273)
++
++#define LZMA_BEST_DICT(n) (((int)((n) / 2)) * 2)
++
++static void *p_lzma_malloc(void *p, size_t size)
++{
++ if (size == 0)
++ return NULL;
++
++ return LZMA_MALLOC(size);
++}
++
++static void p_lzma_free(void *p, void *address)
++{
++ if (address != NULL)
++ LZMA_FREE(address);
++}
++
++static ISzAlloc lzma_alloc = {p_lzma_malloc, p_lzma_free};
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzFind.h
+@@ -0,0 +1,115 @@
++/* LzFind.h -- Match finder for LZ algorithms
++2009-04-22 : Igor Pavlov : Public domain */
++
++#ifndef __LZ_FIND_H
++#define __LZ_FIND_H
++
++#include "Types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++typedef UInt32 CLzRef;
++
++typedef struct _CMatchFinder
++{
++ Byte *buffer;
++ UInt32 pos;
++ UInt32 posLimit;
++ UInt32 streamPos;
++ UInt32 lenLimit;
++
++ UInt32 cyclicBufferPos;
++ UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */
++
++ UInt32 matchMaxLen;
++ CLzRef *hash;
++ CLzRef *son;
++ UInt32 hashMask;
++ UInt32 cutValue;
++
++ Byte *bufferBase;
++ ISeqInStream *stream;
++ int streamEndWasReached;
++
++ UInt32 blockSize;
++ UInt32 keepSizeBefore;
++ UInt32 keepSizeAfter;
++
++ UInt32 numHashBytes;
++ int directInput;
++ size_t directInputRem;
++ int btMode;
++ int bigHash;
++ UInt32 historySize;
++ UInt32 fixedHashSize;
++ UInt32 hashSizeSum;
++ UInt32 numSons;
++ SRes result;
++ UInt32 crc[256];
++} CMatchFinder;
++
++#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((p)->buffer)
++#define Inline_MatchFinder_GetIndexByte(p, index) ((p)->buffer[(Int32)(index)])
++
++#define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos)
++
++int MatchFinder_NeedMove(CMatchFinder *p);
++Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p);
++void MatchFinder_MoveBlock(CMatchFinder *p);
++void MatchFinder_ReadIfRequired(CMatchFinder *p);
++
++void MatchFinder_Construct(CMatchFinder *p);
++
++/* Conditions:
++ historySize <= 3 GB
++ keepAddBufferBefore + matchMaxLen + keepAddBufferAfter < 511MB
++*/
++int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
++ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
++ ISzAlloc *alloc);
++void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc);
++void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems);
++void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);
++
++UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue,
++ UInt32 *distances, UInt32 maxLen);
++
++/*
++Conditions:
++ Mf_GetNumAvailableBytes_Func must be called before each Mf_GetMatchLen_Func.
++ Mf_GetPointerToCurrentPos_Func's result must be used only before any other function
++*/
++
++typedef void (*Mf_Init_Func)(void *object);
++typedef Byte (*Mf_GetIndexByte_Func)(void *object, Int32 index);
++typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object);
++typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object);
++typedef UInt32 (*Mf_GetMatches_Func)(void *object, UInt32 *distances);
++typedef void (*Mf_Skip_Func)(void *object, UInt32);
++
++typedef struct _IMatchFinder
++{
++ Mf_Init_Func Init;
++ Mf_GetIndexByte_Func GetIndexByte;
++ Mf_GetNumAvailableBytes_Func GetNumAvailableBytes;
++ Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos;
++ Mf_GetMatches_Func GetMatches;
++ Mf_Skip_Func Skip;
++} IMatchFinder;
++
++void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable);
++
++void MatchFinder_Init(CMatchFinder *p);
++UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
++UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
++void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
++void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzHash.h
+@@ -0,0 +1,54 @@
++/* LzHash.h -- HASH functions for LZ algorithms
++2009-02-07 : Igor Pavlov : Public domain */
++
++#ifndef __LZ_HASH_H
++#define __LZ_HASH_H
++
++#define kHash2Size (1 << 10)
++#define kHash3Size (1 << 16)
++#define kHash4Size (1 << 20)
++
++#define kFix3HashSize (kHash2Size)
++#define kFix4HashSize (kHash2Size + kHash3Size)
++#define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size)
++
++#define HASH2_CALC hashValue = cur[0] | ((UInt32)cur[1] << 8);
++
++#define HASH3_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hashValue = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; }
++
++#define HASH4_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
++ hashValue = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & p->hashMask; }
++
++#define HASH5_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
++ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)); \
++ hashValue = (hash4Value ^ (p->crc[cur[4]] << 3)) & p->hashMask; \
++ hash4Value &= (kHash4Size - 1); }
++
++/* #define HASH_ZIP_CALC hashValue = ((cur[0] | ((UInt32)cur[1] << 8)) ^ p->crc[cur[2]]) & 0xFFFF; */
++#define HASH_ZIP_CALC hashValue = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF;
++
++
++#define MT_HASH2_CALC \
++ hash2Value = (p->crc[cur[0]] ^ cur[1]) & (kHash2Size - 1);
++
++#define MT_HASH3_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }
++
++#define MT_HASH4_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
++ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & (kHash4Size - 1); }
++
++#endif
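
A minimal sketch of how the HASH3_CALC recipe above is consumed, assuming the byte-indexed CRC table that MatchFinder_Construct builds later in this patch (polynomial 0xEDB88320); the hash_ctx struct, the hashMask value and the include paths are illustrative, not part of the patch.

#include "Types.h"
#include "LzHash.h"

/* Any struct works as long as it exposes the fields the macros expect: crc[] and hashMask. */
struct hash_ctx {
  UInt32 hashMask;
  UInt32 crc[256];
};

static void hash_ctx_init(struct hash_ctx *p, UInt32 hashMask)
{
  UInt32 i;
  p->hashMask = hashMask;
  for (i = 0; i < 256; i++)   /* same table MatchFinder_Construct fills in LzFind.c */
  {
    UInt32 r = i;
    int j;
    for (j = 0; j < 8; j++)
      r = (r >> 1) ^ (0xEDB88320 & ~((r & 1) - 1));
    p->crc[i] = r;
  }
}

/* Returns the main hash for the 3 bytes at cur; hash2Value is the short 2-byte hash. */
static UInt32 hash3_at(const struct hash_ctx *p, const Byte *cur)
{
  UInt32 hash2Value, hashValue;
  HASH3_CALC;
  (void)hash2Value;
  return hashValue;
}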
+--- /dev/null
++++ b/include/linux/lzma/LzmaDec.h
+@@ -0,0 +1,231 @@
++/* LzmaDec.h -- LZMA Decoder
++2009-02-07 : Igor Pavlov : Public domain */
++
++#ifndef __LZMA_DEC_H
++#define __LZMA_DEC_H
++
++#include "Types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* #define _LZMA_PROB32 */
++/* _LZMA_PROB32 can increase the speed on some CPUs,
++ but memory usage for CLzmaDec::probs will be doubled in that case */
++
++#ifdef _LZMA_PROB32
++#define CLzmaProb UInt32
++#else
++#define CLzmaProb UInt16
++#endif
++
++
++/* ---------- LZMA Properties ---------- */
++
++#define LZMA_PROPS_SIZE 5
++
++typedef struct _CLzmaProps
++{
++ unsigned lc, lp, pb;
++ UInt32 dicSize;
++} CLzmaProps;
++
++/* LzmaProps_Decode - decodes properties
++Returns:
++ SZ_OK
++ SZ_ERROR_UNSUPPORTED - Unsupported properties
++*/
++
++SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size);
++
++
++/* ---------- LZMA Decoder state ---------- */
++
++/* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for worst case.
++ Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160; */
++
++#define LZMA_REQUIRED_INPUT_MAX 20
++
++typedef struct
++{
++ CLzmaProps prop;
++ CLzmaProb *probs;
++ Byte *dic;
++ const Byte *buf;
++ UInt32 range, code;
++ SizeT dicPos;
++ SizeT dicBufSize;
++ UInt32 processedPos;
++ UInt32 checkDicSize;
++ unsigned state;
++ UInt32 reps[4];
++ unsigned remainLen;
++ int needFlush;
++ int needInitState;
++ UInt32 numProbs;
++ unsigned tempBufSize;
++ Byte tempBuf[LZMA_REQUIRED_INPUT_MAX];
++} CLzmaDec;
++
++#define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; }
++
++void LzmaDec_Init(CLzmaDec *p);
++
++/* There are two types of LZMA streams:
++     0) Stream with end mark. That end mark adds about 6 bytes to the compressed size.
++     1) Stream without end mark. You must know the exact uncompressed size to decompress such a stream. */
++
++typedef enum
++{
++ LZMA_FINISH_ANY, /* finish at any point */
++ LZMA_FINISH_END /* block must be finished at the end */
++} ELzmaFinishMode;
++
++/* ELzmaFinishMode has meaning only if the decoding reaches the output limit!
++
++   You must use LZMA_FINISH_END when you know that the current output buffer
++   covers the last bytes of the block. In all other cases use LZMA_FINISH_ANY.
++
++   If the LZMA decoder sees the end marker before reaching the output limit, it returns SZ_OK,
++   and the output value of destLen will be less than the output buffer size limit.
++   You can also check the status result.
++
++ You can use multiple checks to test data integrity after full decompression:
++ 1) Check Result and "status" variable.
++ 2) Check that output(destLen) = uncompressedSize, if you know real uncompressedSize.
++ 3) Check that output(srcLen) = compressedSize, if you know real compressedSize.
++ You must use correct finish mode in that case. */
++
++typedef enum
++{
++ LZMA_STATUS_NOT_SPECIFIED, /* use main error code instead */
++ LZMA_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. */
++ LZMA_STATUS_NOT_FINISHED, /* stream was not finished */
++ LZMA_STATUS_NEEDS_MORE_INPUT, /* you must provide more input bytes */
++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK /* there is probability that stream was finished without end mark */
++} ELzmaStatus;
++
++/* ELzmaStatus is used only as output value for function call */
++
++
++/* ---------- Interfaces ---------- */
++
++/* There are 3 levels of interfaces:
++ 1) Dictionary Interface
++ 2) Buffer Interface
++ 3) One Call Interface
++ You can select any of these interfaces, but don't mix functions from different
++ groups for same object. */
++
++
++/* There are two variants to allocate state for Dictionary Interface:
++ 1) LzmaDec_Allocate / LzmaDec_Free
++ 2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs
++ You can use variant 2, if you set dictionary buffer manually.
++ For Buffer Interface you must always use variant 1.
++
++LzmaDec_Allocate* can return:
++ SZ_OK
++ SZ_ERROR_MEM - Memory allocation error
++ SZ_ERROR_UNSUPPORTED - Unsupported properties
++*/
++
++SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc);
++void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc);
++
++SRes LzmaDec_Allocate(CLzmaDec *state, const Byte *prop, unsigned propsSize, ISzAlloc *alloc);
++void LzmaDec_Free(CLzmaDec *state, ISzAlloc *alloc);
++
++/* ---------- Dictionary Interface ---------- */
++
++/* You can use it, if you want to eliminate the overhead for data copying from
++ dictionary to some other external buffer.
++ You must work with CLzmaDec variables directly in this interface.
++
++ STEPS:
++ LzmaDec_Constr()
++ LzmaDec_Allocate()
++ for (each new stream)
++ {
++ LzmaDec_Init()
++ while (it needs more decompression)
++ {
++ LzmaDec_DecodeToDic()
++ use data from CLzmaDec::dic and update CLzmaDec::dicPos
++ }
++ }
++ LzmaDec_Free()
++*/
++
++/* LzmaDec_DecodeToDic
++
++ The decoding to internal dictionary buffer (CLzmaDec::dic).
++ You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!!
++
++finishMode:
++ It has meaning only if the decoding reaches output limit (dicLimit).
++ LZMA_FINISH_ANY - Decode just dicLimit bytes.
++ LZMA_FINISH_END - Stream must be finished after dicLimit.
++
++Returns:
++ SZ_OK
++ status:
++ LZMA_STATUS_FINISHED_WITH_MARK
++ LZMA_STATUS_NOT_FINISHED
++ LZMA_STATUS_NEEDS_MORE_INPUT
++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
++ SZ_ERROR_DATA - Data error
++*/
++
++SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit,
++ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
++
++
++/* ---------- Buffer Interface ---------- */
++
++/* It's a zlib-like interface.
++ See LzmaDec_DecodeToDic description for information about STEPS and return results,
++ but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need
++ to work with CLzmaDec variables manually.
++
++finishMode:
++ It has meaning only if the decoding reaches output limit (*destLen).
++ LZMA_FINISH_ANY - Decode just destLen bytes.
++ LZMA_FINISH_END - Stream must be finished after (*destLen).
++*/
++
++SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen,
++ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
++
++
++/* ---------- One Call Interface ---------- */
++
++/* LzmaDecode
++
++finishMode:
++ It has meaning only if the decoding reaches output limit (*destLen).
++ LZMA_FINISH_ANY - Decode just destLen bytes.
++ LZMA_FINISH_END - Stream must be finished after (*destLen).
++
++Returns:
++ SZ_OK
++ status:
++ LZMA_STATUS_FINISHED_WITH_MARK
++ LZMA_STATUS_NOT_FINISHED
++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
++ SZ_ERROR_DATA - Data error
++ SZ_ERROR_MEM - Memory allocation error
++ SZ_ERROR_UNSUPPORTED - Unsupported properties
++ SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
++*/
++
++SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
++ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
++ ELzmaStatus *status, ISzAlloc *alloc);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
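
A minimal user-space sketch of the One Call Interface declared above, not part of the patch itself. It assumes the input buffer carries the LZMA_PROPS_SIZE property bytes immediately followed by the compressed payload; the malloc-backed allocator and the helper name are illustrative.

#include <stdlib.h>
#include "LzmaDec.h"

static void *SzAlloc(void *p, size_t size) { (void)p; return malloc(size); }
static void SzFree(void *p, void *address) { (void)p; free(address); }
static ISzAlloc g_Alloc = { SzAlloc, SzFree };

/* Decompress src (property bytes followed by payload) into dest.
   On entry *destLen is the capacity of dest; on return it is the number of bytes produced. */
static SRes lzma_decode_buffer(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen)
{
  ELzmaStatus status;
  SizeT payloadLen;

  if (srcLen < LZMA_PROPS_SIZE)
    return SZ_ERROR_INPUT_EOF;
  payloadLen = srcLen - LZMA_PROPS_SIZE;

  /* LZMA_FINISH_ANY: stop once *destLen bytes are produced or the end mark is seen */
  return LzmaDecode(dest, destLen, src + LZMA_PROPS_SIZE, &payloadLen,
                    src, LZMA_PROPS_SIZE, LZMA_FINISH_ANY, &status, &g_Alloc);
}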
+--- /dev/null
++++ b/include/linux/lzma/LzmaEnc.h
+@@ -0,0 +1,80 @@
++/* LzmaEnc.h -- LZMA Encoder
++2009-02-07 : Igor Pavlov : Public domain */
++
++#ifndef __LZMA_ENC_H
++#define __LZMA_ENC_H
++
++#include "Types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#define LZMA_PROPS_SIZE 5
++
++typedef struct _CLzmaEncProps
++{
++ int level; /* 0 <= level <= 9 */
++ UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version
++ (1 << 12) <= dictSize <= (1 << 30) for 64-bit version
++ default = (1 << 24) */
++ int lc; /* 0 <= lc <= 8, default = 3 */
++ int lp; /* 0 <= lp <= 4, default = 0 */
++ int pb; /* 0 <= pb <= 4, default = 2 */
++ int algo; /* 0 - fast, 1 - normal, default = 1 */
++ int fb; /* 5 <= fb <= 273, default = 32 */
++ int btMode; /* 0 - hashChain Mode, 1 - binTree mode - normal, default = 1 */
++ int numHashBytes; /* 2, 3 or 4, default = 4 */
++ UInt32 mc; /* 1 <= mc <= (1 << 30), default = 32 */
++ unsigned writeEndMark; /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */
++ int numThreads; /* 1 or 2, default = 2 */
++} CLzmaEncProps;
++
++void LzmaEncProps_Init(CLzmaEncProps *p);
++void LzmaEncProps_Normalize(CLzmaEncProps *p);
++UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2);
++
++
++/* ---------- CLzmaEncHandle Interface ---------- */
++
++/* LzmaEnc_* functions can return the following exit codes:
++Returns:
++ SZ_OK - OK
++ SZ_ERROR_MEM - Memory allocation error
++  SZ_ERROR_PARAM - Incorrect parameter in props
++ SZ_ERROR_WRITE - Write callback error.
++ SZ_ERROR_PROGRESS - some break from progress callback
++ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
++*/
++
++typedef void * CLzmaEncHandle;
++
++CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc);
++void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig);
++SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props);
++SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size);
++SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream,
++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
++SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
++
++/* ---------- One Call Interface ---------- */
++
++/* LzmaEncode
++Return code:
++ SZ_OK - OK
++ SZ_ERROR_MEM - Memory allocation error
++  SZ_ERROR_PARAM - Incorrect parameter
++ SZ_ERROR_OUTPUT_EOF - output buffer overflow
++ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
++*/
++
++SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
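
A minimal user-space sketch of the One Call encoder declared above, not part of the patch itself; the level, dictionary size, malloc-backed allocator and helper name are illustrative, and a NULL progress callback is assumed to be accepted, as in the upstream SDK examples.

#include <stdlib.h>
#include "LzmaEnc.h"

static void *SzAlloc(void *p, size_t size) { (void)p; return malloc(size); }
static void SzFree(void *p, void *address) { (void)p; free(address); }
static ISzAlloc g_Alloc = { SzAlloc, SzFree };

/* Compress src into dest and store the 5 property bytes in props[].
   On entry *destLen is the capacity of dest; on return it is the compressed size. */
static SRes lzma_encode_buffer(Byte *dest, SizeT *destLen,
                               const Byte *src, SizeT srcLen,
                               Byte props[LZMA_PROPS_SIZE], SizeT *propsSize)
{
  CLzmaEncProps p;
  LzmaEncProps_Init(&p);
  p.level = 9;            /* 0..9, see the field comments above */
  p.dictSize = 1 << 20;   /* 1 MiB dictionary, illustrative */

  *propsSize = LZMA_PROPS_SIZE;
  return LzmaEncode(dest, destLen, src, srcLen, &p, props, propsSize,
                    0 /* writeEndMark */, NULL /* progress */, &g_Alloc, &g_Alloc);
}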
+--- /dev/null
++++ b/include/linux/lzma/Types.h
+@@ -0,0 +1,226 @@
++/* Types.h -- Basic types
++2009-11-23 : Igor Pavlov : Public domain */
++
++#ifndef __7Z_TYPES_H
++#define __7Z_TYPES_H
++
++#include <stddef.h>
++
++#ifdef _WIN32
++#include <windows.h>
++#endif
++
++#ifndef EXTERN_C_BEGIN
++#ifdef __cplusplus
++#define EXTERN_C_BEGIN extern "C" {
++#define EXTERN_C_END }
++#else
++#define EXTERN_C_BEGIN
++#define EXTERN_C_END
++#endif
++#endif
++
++EXTERN_C_BEGIN
++
++#define SZ_OK 0
++
++#define SZ_ERROR_DATA 1
++#define SZ_ERROR_MEM 2
++#define SZ_ERROR_CRC 3
++#define SZ_ERROR_UNSUPPORTED 4
++#define SZ_ERROR_PARAM 5
++#define SZ_ERROR_INPUT_EOF 6
++#define SZ_ERROR_OUTPUT_EOF 7
++#define SZ_ERROR_READ 8
++#define SZ_ERROR_WRITE 9
++#define SZ_ERROR_PROGRESS 10
++#define SZ_ERROR_FAIL 11
++#define SZ_ERROR_THREAD 12
++
++#define SZ_ERROR_ARCHIVE 16
++#define SZ_ERROR_NO_ARCHIVE 17
++
++typedef int SRes;
++
++#ifdef _WIN32
++typedef DWORD WRes;
++#else
++typedef int WRes;
++#endif
++
++#ifndef RINOK
++#define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; }
++#endif
++
++typedef unsigned char Byte;
++typedef short Int16;
++typedef unsigned short UInt16;
++
++#ifdef _LZMA_UINT32_IS_ULONG
++typedef long Int32;
++typedef unsigned long UInt32;
++#else
++typedef int Int32;
++typedef unsigned int UInt32;
++#endif
++
++#ifdef _SZ_NO_INT_64
++
++/* define _SZ_NO_INT_64, if your compiler doesn't support 64-bit integers.
++ NOTES: Some code will work incorrectly in that case! */
++
++typedef long Int64;
++typedef unsigned long UInt64;
++
++#else
++
++#if defined(_MSC_VER) || defined(__BORLANDC__)
++typedef __int64 Int64;
++typedef unsigned __int64 UInt64;
++#else
++typedef long long int Int64;
++typedef unsigned long long int UInt64;
++#endif
++
++#endif
++
++#ifdef _LZMA_NO_SYSTEM_SIZE_T
++typedef UInt32 SizeT;
++#else
++typedef size_t SizeT;
++#endif
++
++typedef int Bool;
++#define True 1
++#define False 0
++
++
++#ifdef _WIN32
++#define MY_STD_CALL __stdcall
++#else
++#define MY_STD_CALL
++#endif
++
++#ifdef _MSC_VER
++
++#if _MSC_VER >= 1300
++#define MY_NO_INLINE __declspec(noinline)
++#else
++#define MY_NO_INLINE
++#endif
++
++#define MY_CDECL __cdecl
++#define MY_FAST_CALL __fastcall
++
++#else
++
++#define MY_CDECL
++#define MY_FAST_CALL
++
++#endif
++
++
++/* The following interfaces use first parameter as pointer to structure */
++
++typedef struct
++{
++ SRes (*Read)(void *p, void *buf, size_t *size);
++ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
++ (output(*size) < input(*size)) is allowed */
++} ISeqInStream;
++
++/* it can return SZ_ERROR_INPUT_EOF */
++SRes SeqInStream_Read(ISeqInStream *stream, void *buf, size_t size);
++SRes SeqInStream_Read2(ISeqInStream *stream, void *buf, size_t size, SRes errorType);
++SRes SeqInStream_ReadByte(ISeqInStream *stream, Byte *buf);
++
++typedef struct
++{
++ size_t (*Write)(void *p, const void *buf, size_t size);
++ /* Returns: result - the number of actually written bytes.
++ (result < size) means error */
++} ISeqOutStream;
++
++typedef enum
++{
++ SZ_SEEK_SET = 0,
++ SZ_SEEK_CUR = 1,
++ SZ_SEEK_END = 2
++} ESzSeek;
++
++typedef struct
++{
++ SRes (*Read)(void *p, void *buf, size_t *size); /* same as ISeqInStream::Read */
++ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
++} ISeekInStream;
++
++typedef struct
++{
++ SRes (*Look)(void *p, void **buf, size_t *size);
++ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
++ (output(*size) > input(*size)) is not allowed
++ (output(*size) < input(*size)) is allowed */
++ SRes (*Skip)(void *p, size_t offset);
++ /* offset must be <= output(*size) of Look */
++
++ SRes (*Read)(void *p, void *buf, size_t *size);
++ /* reads directly (without buffer). It's same as ISeqInStream::Read */
++ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
++} ILookInStream;
++
++SRes LookInStream_LookRead(ILookInStream *stream, void *buf, size_t *size);
++SRes LookInStream_SeekTo(ILookInStream *stream, UInt64 offset);
++
++/* reads via ILookInStream::Read */
++SRes LookInStream_Read2(ILookInStream *stream, void *buf, size_t size, SRes errorType);
++SRes LookInStream_Read(ILookInStream *stream, void *buf, size_t size);
++
++#define LookToRead_BUF_SIZE (1 << 14)
++
++typedef struct
++{
++ ILookInStream s;
++ ISeekInStream *realStream;
++ size_t pos;
++ size_t size;
++ Byte buf[LookToRead_BUF_SIZE];
++} CLookToRead;
++
++void LookToRead_CreateVTable(CLookToRead *p, int lookahead);
++void LookToRead_Init(CLookToRead *p);
++
++typedef struct
++{
++ ISeqInStream s;
++ ILookInStream *realStream;
++} CSecToLook;
++
++void SecToLook_CreateVTable(CSecToLook *p);
++
++typedef struct
++{
++ ISeqInStream s;
++ ILookInStream *realStream;
++} CSecToRead;
++
++void SecToRead_CreateVTable(CSecToRead *p);
++
++typedef struct
++{
++ SRes (*Progress)(void *p, UInt64 inSize, UInt64 outSize);
++ /* Returns: result. (result != SZ_OK) means break.
++ Value (UInt64)(Int64)-1 for size means unknown value. */
++} ICompressProgress;
++
++typedef struct
++{
++ void *(*Alloc)(void *p, size_t size);
++ void (*Free)(void *p, void *address); /* address can be 0 */
++} ISzAlloc;
++
++#define IAlloc_Alloc(p, size) (p)->Alloc((p), size)
++#define IAlloc_Free(p, a) (p)->Free((p), a)
++
++EXTERN_C_END
++
++#endif
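
A minimal sketch, not part of the patch itself, of how the sequential stream interfaces above are typically implemented, here over plain memory buffers; the wrapper names are illustrative. The interface struct is placed first so the callback can cast its opaque pointer back to the wrapper.

#include <string.h>
#include "Types.h"

typedef struct {
  ISeqInStream s;     /* must be first: callbacks receive &s and cast back */
  const Byte *data;
  size_t rem;
} CMemInStream;

static SRes MemInStream_Read(void *pp, void *buf, size_t *size)
{
  CMemInStream *p = (CMemInStream *)pp;
  size_t n = *size;
  if (n > p->rem)
    n = p->rem;         /* returning *size == 0 when input *size != 0 signals end of stream */
  memcpy(buf, p->data, n);
  p->data += n;
  p->rem -= n;
  *size = n;
  return SZ_OK;
}

static void MemInStream_Init(CMemInStream *p, const Byte *data, size_t len)
{
  p->s.Read = MemInStream_Read;
  p->data = data;
  p->rem = len;
}

typedef struct {
  ISeqOutStream s;
  Byte *data;
  size_t rem;
} CMemOutStream;

static size_t MemOutStream_Write(void *pp, const void *buf, size_t size)
{
  CMemOutStream *p = (CMemOutStream *)pp;
  if (size > p->rem)
    size = p->rem;      /* a short write (result < size) is treated as an error by callers */
  memcpy(p->data, buf, size);
  p->data += size;
  p->rem -= size;
  return size;
}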
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -118,6 +118,12 @@ config LZO_DECOMPRESS
+
+ source "lib/xz/Kconfig"
+
++config LZMA_COMPRESS
++ tristate
++
++config LZMA_DECOMPRESS
++ tristate
++
+ #
+ # These all provide a common interface (hence the apparent duplication with
+ # ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -2,6 +2,16 @@
+ # Makefile for some libs needed in the kernel.
+ #
+
++ifdef CONFIG_JFFS2_ZLIB
++ CONFIG_ZLIB_INFLATE:=y
++ CONFIG_ZLIB_DEFLATE:=y
++endif
++
++ifdef CONFIG_JFFS2_LZMA
++ CONFIG_LZMA_DECOMPRESS:=y
++ CONFIG_LZMA_COMPRESS:=y
++endif
++
+ ifdef CONFIG_FUNCTION_TRACER
+ ORIG_CFLAGS := $(KBUILD_CFLAGS)
+ KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
+@@ -73,6 +83,8 @@ obj-$(CONFIG_LZO_COMPRESS) += lzo/
+ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
+ obj-$(CONFIG_XZ_DEC) += xz/
+ obj-$(CONFIG_RAID6_PQ) += raid6/
++obj-$(CONFIG_LZMA_COMPRESS) += lzma/
++obj-$(CONFIG_LZMA_DECOMPRESS) += lzma/
+
+ lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
+ lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
+--- /dev/null
++++ b/lib/lzma/LzFind.c
+@@ -0,0 +1,761 @@
++/* LzFind.c -- Match finder for LZ algorithms
++2009-04-22 : Igor Pavlov : Public domain */
++
++#include <string.h>
++
++#include "LzFind.h"
++#include "LzHash.h"
++
++#define kEmptyHashValue 0
++#define kMaxValForNormalize ((UInt32)0xFFFFFFFF)
++#define kNormalizeStepMin (1 << 10) /* it must be power of 2 */
++#define kNormalizeMask (~(kNormalizeStepMin - 1))
++#define kMaxHistorySize ((UInt32)3 << 30)
++
++#define kStartMaxLen 3
++
++static void LzInWindow_Free(CMatchFinder *p, ISzAlloc *alloc)
++{
++ if (!p->directInput)
++ {
++ alloc->Free(alloc, p->bufferBase);
++ p->bufferBase = 0;
++ }
++}
++
++/* (keepSizeBefore + keepSizeAfter + keepSizeReserv) must be < 4G */
++
++static int LzInWindow_Create(CMatchFinder *p, UInt32 keepSizeReserv, ISzAlloc *alloc)
++{
++ UInt32 blockSize = p->keepSizeBefore + p->keepSizeAfter + keepSizeReserv;
++ if (p->directInput)
++ {
++ p->blockSize = blockSize;
++ return 1;
++ }
++ if (p->bufferBase == 0 || p->blockSize != blockSize)
++ {
++ LzInWindow_Free(p, alloc);
++ p->blockSize = blockSize;
++ p->bufferBase = (Byte *)alloc->Alloc(alloc, (size_t)blockSize);
++ }
++ return (p->bufferBase != 0);
++}
++
++Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
++Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; }
++
++UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; }
++
++void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue)
++{
++ p->posLimit -= subValue;
++ p->pos -= subValue;
++ p->streamPos -= subValue;
++}
++
++static void MatchFinder_ReadBlock(CMatchFinder *p)
++{
++ if (p->streamEndWasReached || p->result != SZ_OK)
++ return;
++ if (p->directInput)
++ {
++ UInt32 curSize = 0xFFFFFFFF - p->streamPos;
++ if (curSize > p->directInputRem)
++ curSize = (UInt32)p->directInputRem;
++ p->directInputRem -= curSize;
++ p->streamPos += curSize;
++ if (p->directInputRem == 0)
++ p->streamEndWasReached = 1;
++ return;
++ }
++ for (;;)
++ {
++ Byte *dest = p->buffer + (p->streamPos - p->pos);
++ size_t size = (p->bufferBase + p->blockSize - dest);
++ if (size == 0)
++ return;
++ p->result = p->stream->Read(p->stream, dest, &size);
++ if (p->result != SZ_OK)
++ return;
++ if (size == 0)
++ {
++ p->streamEndWasReached = 1;
++ return;
++ }
++ p->streamPos += (UInt32)size;
++ if (p->streamPos - p->pos > p->keepSizeAfter)
++ return;
++ }
++}
++
++void MatchFinder_MoveBlock(CMatchFinder *p)
++{
++ memmove(p->bufferBase,
++ p->buffer - p->keepSizeBefore,
++ (size_t)(p->streamPos - p->pos + p->keepSizeBefore));
++ p->buffer = p->bufferBase + p->keepSizeBefore;
++}
++
++int MatchFinder_NeedMove(CMatchFinder *p)
++{
++ if (p->directInput)
++ return 0;
++ /* if (p->streamEndWasReached) return 0; */
++ return ((size_t)(p->bufferBase + p->blockSize - p->buffer) <= p->keepSizeAfter);
++}
++
++void MatchFinder_ReadIfRequired(CMatchFinder *p)
++{
++ if (p->streamEndWasReached)
++ return;
++ if (p->keepSizeAfter >= p->streamPos - p->pos)
++ MatchFinder_ReadBlock(p);
++}
++
++static void MatchFinder_CheckAndMoveAndRead(CMatchFinder *p)
++{
++ if (MatchFinder_NeedMove(p))
++ MatchFinder_MoveBlock(p);
++ MatchFinder_ReadBlock(p);
++}
++
++static void MatchFinder_SetDefaultSettings(CMatchFinder *p)
++{
++ p->cutValue = 32;
++ p->btMode = 1;
++ p->numHashBytes = 4;
++ p->bigHash = 0;
++}
++
++#define kCrcPoly 0xEDB88320
++
++void MatchFinder_Construct(CMatchFinder *p)
++{
++ UInt32 i;
++ p->bufferBase = 0;
++ p->directInput = 0;
++ p->hash = 0;
++ MatchFinder_SetDefaultSettings(p);
++
++ for (i = 0; i < 256; i++)
++ {
++ UInt32 r = i;
++ int j;
++ for (j = 0; j < 8; j++)
++ r = (r >> 1) ^ (kCrcPoly & ~((r & 1) - 1));
++ p->crc[i] = r;
++ }
++}
++
++static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->hash);
++ p->hash = 0;
++}
++
++void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc)
++{
++ MatchFinder_FreeThisClassMemory(p, alloc);
++ LzInWindow_Free(p, alloc);
++}
++
++static CLzRef* AllocRefs(UInt32 num, ISzAlloc *alloc)
++{
++ size_t sizeInBytes = (size_t)num * sizeof(CLzRef);
++ if (sizeInBytes / sizeof(CLzRef) != num)
++ return 0;
++ return (CLzRef *)alloc->Alloc(alloc, sizeInBytes);
++}
++
++int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
++ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
++ ISzAlloc *alloc)
++{
++ UInt32 sizeReserv;
++ if (historySize > kMaxHistorySize)
++ {
++ MatchFinder_Free(p, alloc);
++ return 0;
++ }
++ sizeReserv = historySize >> 1;
++ if (historySize > ((UInt32)2 << 30))
++ sizeReserv = historySize >> 2;
++ sizeReserv += (keepAddBufferBefore + matchMaxLen + keepAddBufferAfter) / 2 + (1 << 19);
++
++ p->keepSizeBefore = historySize + keepAddBufferBefore + 1;
++ p->keepSizeAfter = matchMaxLen + keepAddBufferAfter;
++  /* we need one additional byte, since we use MoveBlock after pos++ and before the dictionary is used */
++ if (LzInWindow_Create(p, sizeReserv, alloc))
++ {
++ UInt32 newCyclicBufferSize = historySize + 1;
++ UInt32 hs;
++ p->matchMaxLen = matchMaxLen;
++ {
++ p->fixedHashSize = 0;
++ if (p->numHashBytes == 2)
++ hs = (1 << 16) - 1;
++ else
++ {
++ hs = historySize - 1;
++ hs |= (hs >> 1);
++ hs |= (hs >> 2);
++ hs |= (hs >> 4);
++ hs |= (hs >> 8);
++ hs >>= 1;
++ hs |= 0xFFFF; /* don't change it! It's required for Deflate */
++ if (hs > (1 << 24))
++ {
++ if (p->numHashBytes == 3)
++ hs = (1 << 24) - 1;
++ else
++ hs >>= 1;
++ }
++ }
++ p->hashMask = hs;
++ hs++;
++ if (p->numHashBytes > 2) p->fixedHashSize += kHash2Size;
++ if (p->numHashBytes > 3) p->fixedHashSize += kHash3Size;
++ if (p->numHashBytes > 4) p->fixedHashSize += kHash4Size;
++ hs += p->fixedHashSize;
++ }
++
++ {
++ UInt32 prevSize = p->hashSizeSum + p->numSons;
++ UInt32 newSize;
++ p->historySize = historySize;
++ p->hashSizeSum = hs;
++ p->cyclicBufferSize = newCyclicBufferSize;
++ p->numSons = (p->btMode ? newCyclicBufferSize * 2 : newCyclicBufferSize);
++ newSize = p->hashSizeSum + p->numSons;
++ if (p->hash != 0 && prevSize == newSize)
++ return 1;
++ MatchFinder_FreeThisClassMemory(p, alloc);
++ p->hash = AllocRefs(newSize, alloc);
++ if (p->hash != 0)
++ {
++ p->son = p->hash + p->hashSizeSum;
++ return 1;
++ }
++ }
++ }
++ MatchFinder_Free(p, alloc);
++ return 0;
++}
++
++static void MatchFinder_SetLimits(CMatchFinder *p)
++{
++ UInt32 limit = kMaxValForNormalize - p->pos;
++ UInt32 limit2 = p->cyclicBufferSize - p->cyclicBufferPos;
++ if (limit2 < limit)
++ limit = limit2;
++ limit2 = p->streamPos - p->pos;
++ if (limit2 <= p->keepSizeAfter)
++ {
++ if (limit2 > 0)
++ limit2 = 1;
++ }
++ else
++ limit2 -= p->keepSizeAfter;
++ if (limit2 < limit)
++ limit = limit2;
++ {
++ UInt32 lenLimit = p->streamPos - p->pos;
++ if (lenLimit > p->matchMaxLen)
++ lenLimit = p->matchMaxLen;
++ p->lenLimit = lenLimit;
++ }
++ p->posLimit = p->pos + limit;
++}
++
++void MatchFinder_Init(CMatchFinder *p)
++{
++ UInt32 i;
++ for (i = 0; i < p->hashSizeSum; i++)
++ p->hash[i] = kEmptyHashValue;
++ p->cyclicBufferPos = 0;
++ p->buffer = p->bufferBase;
++ p->pos = p->streamPos = p->cyclicBufferSize;
++ p->result = SZ_OK;
++ p->streamEndWasReached = 0;
++ MatchFinder_ReadBlock(p);
++ MatchFinder_SetLimits(p);
++}
++
++static UInt32 MatchFinder_GetSubValue(CMatchFinder *p)
++{
++ return (p->pos - p->historySize - 1) & kNormalizeMask;
++}
++
++void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems)
++{
++ UInt32 i;
++ for (i = 0; i < numItems; i++)
++ {
++ UInt32 value = items[i];
++ if (value <= subValue)
++ value = kEmptyHashValue;
++ else
++ value -= subValue;
++ items[i] = value;
++ }
++}
++
++static void MatchFinder_Normalize(CMatchFinder *p)
++{
++ UInt32 subValue = MatchFinder_GetSubValue(p);
++ MatchFinder_Normalize3(subValue, p->hash, p->hashSizeSum + p->numSons);
++ MatchFinder_ReduceOffsets(p, subValue);
++}
++
++static void MatchFinder_CheckLimits(CMatchFinder *p)
++{
++ if (p->pos == kMaxValForNormalize)
++ MatchFinder_Normalize(p);
++ if (!p->streamEndWasReached && p->keepSizeAfter == p->streamPos - p->pos)
++ MatchFinder_CheckAndMoveAndRead(p);
++ if (p->cyclicBufferPos == p->cyclicBufferSize)
++ p->cyclicBufferPos = 0;
++ MatchFinder_SetLimits(p);
++}
++
++static UInt32 * Hc_GetMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
++ UInt32 *distances, UInt32 maxLen)
++{
++ son[_cyclicBufferPos] = curMatch;
++ for (;;)
++ {
++ UInt32 delta = pos - curMatch;
++ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
++ return distances;
++ {
++ const Byte *pb = cur - delta;
++ curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)];
++ if (pb[maxLen] == cur[maxLen] && *pb == *cur)
++ {
++ UInt32 len = 0;
++ while (++len != lenLimit)
++ if (pb[len] != cur[len])
++ break;
++ if (maxLen < len)
++ {
++ *distances++ = maxLen = len;
++ *distances++ = delta - 1;
++ if (len == lenLimit)
++ return distances;
++ }
++ }
++ }
++ }
++}
++
++UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
++ UInt32 *distances, UInt32 maxLen)
++{
++ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
++ CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
++ UInt32 len0 = 0, len1 = 0;
++ for (;;)
++ {
++ UInt32 delta = pos - curMatch;
++ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
++ {
++ *ptr0 = *ptr1 = kEmptyHashValue;
++ return distances;
++ }
++ {
++ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
++ const Byte *pb = cur - delta;
++ UInt32 len = (len0 < len1 ? len0 : len1);
++ if (pb[len] == cur[len])
++ {
++ if (++len != lenLimit && pb[len] == cur[len])
++ while (++len != lenLimit)
++ if (pb[len] != cur[len])
++ break;
++ if (maxLen < len)
++ {
++ *distances++ = maxLen = len;
++ *distances++ = delta - 1;
++ if (len == lenLimit)
++ {
++ *ptr1 = pair[0];
++ *ptr0 = pair[1];
++ return distances;
++ }
++ }
++ }
++ if (pb[len] < cur[len])
++ {
++ *ptr1 = curMatch;
++ ptr1 = pair + 1;
++ curMatch = *ptr1;
++ len1 = len;
++ }
++ else
++ {
++ *ptr0 = curMatch;
++ ptr0 = pair;
++ curMatch = *ptr0;
++ len0 = len;
++ }
++ }
++ }
++}
++
++static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue)
++{
++ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
++ CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
++ UInt32 len0 = 0, len1 = 0;
++ for (;;)
++ {
++ UInt32 delta = pos - curMatch;
++ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
++ {
++ *ptr0 = *ptr1 = kEmptyHashValue;
++ return;
++ }
++ {
++ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
++ const Byte *pb = cur - delta;
++ UInt32 len = (len0 < len1 ? len0 : len1);
++ if (pb[len] == cur[len])
++ {
++ while (++len != lenLimit)
++ if (pb[len] != cur[len])
++ break;
++ {
++ if (len == lenLimit)
++ {
++ *ptr1 = pair[0];
++ *ptr0 = pair[1];
++ return;
++ }
++ }
++ }
++ if (pb[len] < cur[len])
++ {
++ *ptr1 = curMatch;
++ ptr1 = pair + 1;
++ curMatch = *ptr1;
++ len1 = len;
++ }
++ else
++ {
++ *ptr0 = curMatch;
++ ptr0 = pair;
++ curMatch = *ptr0;
++ len0 = len;
++ }
++ }
++ }
++}
++
++#define MOVE_POS \
++ ++p->cyclicBufferPos; \
++ p->buffer++; \
++ if (++p->pos == p->posLimit) MatchFinder_CheckLimits(p);
++
++#define MOVE_POS_RET MOVE_POS return offset;
++
++static void MatchFinder_MovePos(CMatchFinder *p) { MOVE_POS; }
++
++#define GET_MATCHES_HEADER2(minLen, ret_op) \
++ UInt32 lenLimit; UInt32 hashValue; const Byte *cur; UInt32 curMatch; \
++ lenLimit = p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \
++ cur = p->buffer;
++
++#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return 0)
++#define SKIP_HEADER(minLen) GET_MATCHES_HEADER2(minLen, continue)
++
++#define MF_PARAMS(p) p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue
++
++#define GET_MATCHES_FOOTER(offset, maxLen) \
++ offset = (UInt32)(GetMatchesSpec1(lenLimit, curMatch, MF_PARAMS(p), \
++ distances + offset, maxLen) - distances); MOVE_POS_RET;
++
++#define SKIP_FOOTER \
++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); MOVE_POS;
++
++static UInt32 Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 offset;
++ GET_MATCHES_HEADER(2)
++ HASH2_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ offset = 0;
++ GET_MATCHES_FOOTER(offset, 1)
++}
++
++UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 offset;
++ GET_MATCHES_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ offset = 0;
++ GET_MATCHES_FOOTER(offset, 2)
++}
++
++static UInt32 Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 hash2Value, delta2, maxLen, offset;
++ GET_MATCHES_HEADER(3)
++
++ HASH3_CALC;
++
++ delta2 = p->pos - p->hash[hash2Value];
++ curMatch = p->hash[kFix3HashSize + hashValue];
++
++ p->hash[hash2Value] =
++ p->hash[kFix3HashSize + hashValue] = p->pos;
++
++
++ maxLen = 2;
++ offset = 0;
++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
++ {
++ for (; maxLen != lenLimit; maxLen++)
++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
++ break;
++ distances[0] = maxLen;
++ distances[1] = delta2 - 1;
++ offset = 2;
++ if (maxLen == lenLimit)
++ {
++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
++ MOVE_POS_RET;
++ }
++ }
++ GET_MATCHES_FOOTER(offset, maxLen)
++}
++
++static UInt32 Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset;
++ GET_MATCHES_HEADER(4)
++
++ HASH4_CALC;
++
++ delta2 = p->pos - p->hash[ hash2Value];
++ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value];
++ curMatch = p->hash[kFix4HashSize + hashValue];
++
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] =
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++
++ maxLen = 1;
++ offset = 0;
++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
++ {
++ distances[0] = maxLen = 2;
++ distances[1] = delta2 - 1;
++ offset = 2;
++ }
++ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur)
++ {
++ maxLen = 3;
++ distances[offset + 1] = delta3 - 1;
++ offset += 2;
++ delta2 = delta3;
++ }
++ if (offset != 0)
++ {
++ for (; maxLen != lenLimit; maxLen++)
++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
++ break;
++ distances[offset - 2] = maxLen;
++ if (maxLen == lenLimit)
++ {
++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
++ MOVE_POS_RET;
++ }
++ }
++ if (maxLen < 3)
++ maxLen = 3;
++ GET_MATCHES_FOOTER(offset, maxLen)
++}
++
++static UInt32 Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset;
++ GET_MATCHES_HEADER(4)
++
++ HASH4_CALC;
++
++ delta2 = p->pos - p->hash[ hash2Value];
++ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value];
++ curMatch = p->hash[kFix4HashSize + hashValue];
++
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] =
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++
++ maxLen = 1;
++ offset = 0;
++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
++ {
++ distances[0] = maxLen = 2;
++ distances[1] = delta2 - 1;
++ offset = 2;
++ }
++ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur)
++ {
++ maxLen = 3;
++ distances[offset + 1] = delta3 - 1;
++ offset += 2;
++ delta2 = delta3;
++ }
++ if (offset != 0)
++ {
++ for (; maxLen != lenLimit; maxLen++)
++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
++ break;
++ distances[offset - 2] = maxLen;
++ if (maxLen == lenLimit)
++ {
++ p->son[p->cyclicBufferPos] = curMatch;
++ MOVE_POS_RET;
++ }
++ }
++ if (maxLen < 3)
++ maxLen = 3;
++ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
++ distances + offset, maxLen) - (distances));
++ MOVE_POS_RET
++}
++
++UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 offset;
++ GET_MATCHES_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
++ distances, 2) - (distances));
++ MOVE_POS_RET
++}
++
++static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ SKIP_HEADER(2)
++ HASH2_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ SKIP_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ UInt32 hash2Value;
++ SKIP_HEADER(3)
++ HASH3_CALC;
++ curMatch = p->hash[kFix3HashSize + hashValue];
++ p->hash[hash2Value] =
++ p->hash[kFix3HashSize + hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ UInt32 hash2Value, hash3Value;
++ SKIP_HEADER(4)
++ HASH4_CALC;
++ curMatch = p->hash[kFix4HashSize + hashValue];
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] = p->pos;
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ UInt32 hash2Value, hash3Value;
++ SKIP_HEADER(4)
++ HASH4_CALC;
++ curMatch = p->hash[kFix4HashSize + hashValue];
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] =
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++ p->son[p->cyclicBufferPos] = curMatch;
++ MOVE_POS
++ }
++ while (--num != 0);
++}
++
++void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ SKIP_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ p->son[p->cyclicBufferPos] = curMatch;
++ MOVE_POS
++ }
++ while (--num != 0);
++}
++
++void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable)
++{
++ vTable->Init = (Mf_Init_Func)MatchFinder_Init;
++ vTable->GetIndexByte = (Mf_GetIndexByte_Func)MatchFinder_GetIndexByte;
++ vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes;
++ vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos;
++ if (!p->btMode)
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Hc4_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Hc4_MatchFinder_Skip;
++ }
++ else if (p->numHashBytes == 2)
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Bt2_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Bt2_MatchFinder_Skip;
++ }
++ else if (p->numHashBytes == 3)
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Bt3_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Bt3_MatchFinder_Skip;
++ }
++ else
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip;
++ }
++}
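
A minimal sketch, not part of the patch itself, of how an encoder wires up the dispatch performed by MatchFinder_CreateVTable above; the window and match-length values and the malloc-backed allocator are illustrative, and the caller must still attach an input stream (or direct input) before calling MatchFinder_Init through the vtable.

#include <stdlib.h>
#include "LzFind.h"

static void *SzAlloc(void *p, size_t size) { (void)p; return malloc(size); }
static void SzFree(void *p, void *address) { (void)p; free(address); }
static ISzAlloc g_Alloc = { SzAlloc, SzFree };

/* Returns 1 on success. btMode 0 selects the Hc4 hash-chain finder,
   btMode 1 selects Bt2/Bt3/Bt4 depending on numHashBytes. */
static int match_finder_setup(CMatchFinder *mf, IMatchFinder *vTable,
                              int btMode, int numHashBytes)
{
  MatchFinder_Construct(mf);        /* defaults: btMode = 1, numHashBytes = 4 */
  mf->btMode = btMode;
  mf->numHashBytes = numHashBytes;
  if (!MatchFinder_Create(mf, 1 << 16 /* historySize */, 0, 273 /* matchMaxLen */, 0, &g_Alloc))
    return 0;
  MatchFinder_CreateVTable(mf, vTable);
  return 1;                         /* release later with MatchFinder_Free(mf, &g_Alloc) */
}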
+--- /dev/null
++++ b/lib/lzma/LzmaDec.c
+@@ -0,0 +1,999 @@
++/* LzmaDec.c -- LZMA Decoder
++2009-09-20 : Igor Pavlov : Public domain */
++
++#include "LzmaDec.h"
++
++#include <string.h>
++
++#define kNumTopBits 24
++#define kTopValue ((UInt32)1 << kNumTopBits)
++
++#define kNumBitModelTotalBits 11
++#define kBitModelTotal (1 << kNumBitModelTotalBits)
++#define kNumMoveBits 5
++
++#define RC_INIT_SIZE 5
++
++#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); }
++
++#define IF_BIT_0(p) ttt = *(p); NORMALIZE; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
++#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
++#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits));
++#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \
++ { UPDATE_0(p); i = (i + i); A0; } else \
++ { UPDATE_1(p); i = (i + i) + 1; A1; }
++#define GET_BIT(p, i) GET_BIT2(p, i, ; , ;)
++
++#define TREE_GET_BIT(probs, i) { GET_BIT((probs + i), i); }
++#define TREE_DECODE(probs, limit, i) \
++ { i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; }
++
++/* #define _LZMA_SIZE_OPT */
++
++#ifdef _LZMA_SIZE_OPT
++#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i)
++#else
++#define TREE_6_DECODE(probs, i) \
++ { i = 1; \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ i -= 0x40; }
++#endif
++
++#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_ERROR; range <<= 8; code = (code << 8) | (*buf++); }
++
++#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
++#define UPDATE_0_CHECK range = bound;
++#define UPDATE_1_CHECK range -= bound; code -= bound;
++#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \
++ { UPDATE_0_CHECK; i = (i + i); A0; } else \
++ { UPDATE_1_CHECK; i = (i + i) + 1; A1; }
++#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;)
++#define TREE_DECODE_CHECK(probs, limit, i) \
++ { i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); i -= limit; }
++
++
++#define kNumPosBitsMax 4
++#define kNumPosStatesMax (1 << kNumPosBitsMax)
++
++#define kLenNumLowBits 3
++#define kLenNumLowSymbols (1 << kLenNumLowBits)
++#define kLenNumMidBits 3
++#define kLenNumMidSymbols (1 << kLenNumMidBits)
++#define kLenNumHighBits 8
++#define kLenNumHighSymbols (1 << kLenNumHighBits)
++
++#define LenChoice 0
++#define LenChoice2 (LenChoice + 1)
++#define LenLow (LenChoice2 + 1)
++#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
++#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
++#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
++
++
++#define kNumStates 12
++#define kNumLitStates 7
++
++#define kStartPosModelIndex 4
++#define kEndPosModelIndex 14
++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
++
++#define kNumPosSlotBits 6
++#define kNumLenToPosStates 4
++
++#define kNumAlignBits 4
++#define kAlignTableSize (1 << kNumAlignBits)
++
++#define kMatchMinLen 2
++#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
++
++#define IsMatch 0
++#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
++#define IsRepG0 (IsRep + kNumStates)
++#define IsRepG1 (IsRepG0 + kNumStates)
++#define IsRepG2 (IsRepG1 + kNumStates)
++#define IsRep0Long (IsRepG2 + kNumStates)
++#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
++#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
++#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
++#define LenCoder (Align + kAlignTableSize)
++#define RepLenCoder (LenCoder + kNumLenProbs)
++#define Literal (RepLenCoder + kNumLenProbs)
++
++#define LZMA_BASE_SIZE 1846
++#define LZMA_LIT_SIZE 768
++
++#define LzmaProps_GetNumProbs(p) ((UInt32)LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((p)->lc + (p)->lp)))
++
++#if Literal != LZMA_BASE_SIZE
++StopCompilingDueBUG
++#endif
++
++#define LZMA_DIC_MIN (1 << 12)
++
++/* First LZMA-symbol is always decoded.
++And it decodes new LZMA-symbols while (buf < bufLimit), but "buf" is without last normalization
++Out:
++ Result:
++ SZ_OK - OK
++ SZ_ERROR_DATA - Error
++ p->remainLen:
++ < kMatchSpecLenStart : normal remain
++ = kMatchSpecLenStart : finished
++ = kMatchSpecLenStart + 1 : Flush marker
++ = kMatchSpecLenStart + 2 : State Init Marker
++*/
++
++static int MY_FAST_CALL LzmaDec_DecodeReal(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
++{
++ CLzmaProb *probs = p->probs;
++
++ unsigned state = p->state;
++ UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3];
++ unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1;
++ unsigned lpMask = ((unsigned)1 << (p->prop.lp)) - 1;
++ unsigned lc = p->prop.lc;
++
++ Byte *dic = p->dic;
++ SizeT dicBufSize = p->dicBufSize;
++ SizeT dicPos = p->dicPos;
++
++ UInt32 processedPos = p->processedPos;
++ UInt32 checkDicSize = p->checkDicSize;
++ unsigned len = 0;
++
++ const Byte *buf = p->buf;
++ UInt32 range = p->range;
++ UInt32 code = p->code;
++
++ do
++ {
++ CLzmaProb *prob;
++ UInt32 bound;
++ unsigned ttt;
++ unsigned posState = processedPos & pbMask;
++
++ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0(prob)
++ {
++ unsigned symbol;
++ UPDATE_0(prob);
++ prob = probs + Literal;
++ if (checkDicSize != 0 || processedPos != 0)
++ prob += (LZMA_LIT_SIZE * (((processedPos & lpMask) << lc) +
++ (dic[(dicPos == 0 ? dicBufSize : dicPos) - 1] >> (8 - lc))));
++
++ if (state < kNumLitStates)
++ {
++ state -= (state < 4) ? state : 3;
++ symbol = 1;
++ do { GET_BIT(prob + symbol, symbol) } while (symbol < 0x100);
++ }
++ else
++ {
++ unsigned matchByte = p->dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
++ unsigned offs = 0x100;
++ state -= (state < 10) ? 3 : 6;
++ symbol = 1;
++ do
++ {
++ unsigned bit;
++ CLzmaProb *probLit;
++ matchByte <<= 1;
++ bit = (matchByte & offs);
++ probLit = prob + offs + bit + symbol;
++ GET_BIT2(probLit, symbol, offs &= ~bit, offs &= bit)
++ }
++ while (symbol < 0x100);
++ }
++ dic[dicPos++] = (Byte)symbol;
++ processedPos++;
++ continue;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ prob = probs + IsRep + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ state += kNumStates;
++ prob = probs + LenCoder;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ if (checkDicSize == 0 && processedPos == 0)
++ return SZ_ERROR_DATA;
++ prob = probs + IsRepG0 + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
++ dicPos++;
++ processedPos++;
++ state = state < kNumLitStates ? 9 : 11;
++ continue;
++ }
++ UPDATE_1(prob);
++ }
++ else
++ {
++ UInt32 distance;
++ UPDATE_1(prob);
++ prob = probs + IsRepG1 + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ distance = rep1;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ prob = probs + IsRepG2 + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ distance = rep2;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ distance = rep3;
++ rep3 = rep2;
++ }
++ rep2 = rep1;
++ }
++ rep1 = rep0;
++ rep0 = distance;
++ }
++ state = state < kNumLitStates ? 8 : 11;
++ prob = probs + RepLenCoder;
++ }
++ {
++ unsigned limit, offset;
++ CLzmaProb *probLen = prob + LenChoice;
++ IF_BIT_0(probLen)
++ {
++ UPDATE_0(probLen);
++ probLen = prob + LenLow + (posState << kLenNumLowBits);
++ offset = 0;
++ limit = (1 << kLenNumLowBits);
++ }
++ else
++ {
++ UPDATE_1(probLen);
++ probLen = prob + LenChoice2;
++ IF_BIT_0(probLen)
++ {
++ UPDATE_0(probLen);
++ probLen = prob + LenMid + (posState << kLenNumMidBits);
++ offset = kLenNumLowSymbols;
++ limit = (1 << kLenNumMidBits);
++ }
++ else
++ {
++ UPDATE_1(probLen);
++ probLen = prob + LenHigh;
++ offset = kLenNumLowSymbols + kLenNumMidSymbols;
++ limit = (1 << kLenNumHighBits);
++ }
++ }
++ TREE_DECODE(probLen, limit, len);
++ len += offset;
++ }
++
++ if (state >= kNumStates)
++ {
++ UInt32 distance;
++ prob = probs + PosSlot +
++ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << kNumPosSlotBits);
++ TREE_6_DECODE(prob, distance);
++ if (distance >= kStartPosModelIndex)
++ {
++ unsigned posSlot = (unsigned)distance;
++ int numDirectBits = (int)(((distance >> 1) - 1));
++ distance = (2 | (distance & 1));
++ if (posSlot < kEndPosModelIndex)
++ {
++ distance <<= numDirectBits;
++ prob = probs + SpecPos + distance - posSlot - 1;
++ {
++ UInt32 mask = 1;
++ unsigned i = 1;
++ do
++ {
++ GET_BIT2(prob + i, i, ; , distance |= mask);
++ mask <<= 1;
++ }
++ while (--numDirectBits != 0);
++ }
++ }
++ else
++ {
++ numDirectBits -= kNumAlignBits;
++ do
++ {
++ NORMALIZE
++ range >>= 1;
++
++ {
++ UInt32 t;
++ code -= range;
++ t = (0 - ((UInt32)code >> 31)); /* (UInt32)((Int32)code >> 31) */
++ distance = (distance << 1) + (t + 1);
++ code += range & t;
++ }
++ /*
++ distance <<= 1;
++ if (code >= range)
++ {
++ code -= range;
++ distance |= 1;
++ }
++ */
++ }
++ while (--numDirectBits != 0);
++ prob = probs + Align;
++ distance <<= kNumAlignBits;
++ {
++ unsigned i = 1;
++ GET_BIT2(prob + i, i, ; , distance |= 1);
++ GET_BIT2(prob + i, i, ; , distance |= 2);
++ GET_BIT2(prob + i, i, ; , distance |= 4);
++ GET_BIT2(prob + i, i, ; , distance |= 8);
++ }
++ if (distance == (UInt32)0xFFFFFFFF)
++ {
++ len += kMatchSpecLenStart;
++ state -= kNumStates;
++ break;
++ }
++ }
++ }
++ rep3 = rep2;
++ rep2 = rep1;
++ rep1 = rep0;
++ rep0 = distance + 1;
++ if (checkDicSize == 0)
++ {
++ if (distance >= processedPos)
++ return SZ_ERROR_DATA;
++ }
++ else if (distance >= checkDicSize)
++ return SZ_ERROR_DATA;
++ state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3;
++ }
++
++ len += kMatchMinLen;
++
++ if (limit == dicPos)
++ return SZ_ERROR_DATA;
++ {
++ SizeT rem = limit - dicPos;
++ unsigned curLen = ((rem < len) ? (unsigned)rem : len);
++ SizeT pos = (dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0);
++
++ processedPos += curLen;
++
++ len -= curLen;
++ if (pos + curLen <= dicBufSize)
++ {
++ Byte *dest = dic + dicPos;
++ ptrdiff_t src = (ptrdiff_t)pos - (ptrdiff_t)dicPos;
++ const Byte *lim = dest + curLen;
++ dicPos += curLen;
++ do
++ *(dest) = (Byte)*(dest + src);
++ while (++dest != lim);
++ }
++ else
++ {
++ do
++ {
++ dic[dicPos++] = dic[pos];
++ if (++pos == dicBufSize)
++ pos = 0;
++ }
++ while (--curLen != 0);
++ }
++ }
++ }
++ }
++ while (dicPos < limit && buf < bufLimit);
++ NORMALIZE;
++ p->buf = buf;
++ p->range = range;
++ p->code = code;
++ p->remainLen = len;
++ p->dicPos = dicPos;
++ p->processedPos = processedPos;
++ p->reps[0] = rep0;
++ p->reps[1] = rep1;
++ p->reps[2] = rep2;
++ p->reps[3] = rep3;
++ p->state = state;
++
++ return SZ_OK;
++}
++
++static void MY_FAST_CALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit)
++{
++ if (p->remainLen != 0 && p->remainLen < kMatchSpecLenStart)
++ {
++ Byte *dic = p->dic;
++ SizeT dicPos = p->dicPos;
++ SizeT dicBufSize = p->dicBufSize;
++ unsigned len = p->remainLen;
++ UInt32 rep0 = p->reps[0];
++ if (limit - dicPos < len)
++ len = (unsigned)(limit - dicPos);
++
++ if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= len)
++ p->checkDicSize = p->prop.dicSize;
++
++ p->processedPos += len;
++ p->remainLen -= len;
++ while (len-- != 0)
++ {
++ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
++ dicPos++;
++ }
++ p->dicPos = dicPos;
++ }
++}
++
++static int MY_FAST_CALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
++{
++ do
++ {
++ SizeT limit2 = limit;
++ if (p->checkDicSize == 0)
++ {
++ UInt32 rem = p->prop.dicSize - p->processedPos;
++ if (limit - p->dicPos > rem)
++ limit2 = p->dicPos + rem;
++ }
++ RINOK(LzmaDec_DecodeReal(p, limit2, bufLimit));
++ if (p->processedPos >= p->prop.dicSize)
++ p->checkDicSize = p->prop.dicSize;
++ LzmaDec_WriteRem(p, limit);
++ }
++ while (p->dicPos < limit && p->buf < bufLimit && p->remainLen < kMatchSpecLenStart);
++
++ if (p->remainLen > kMatchSpecLenStart)
++ {
++ p->remainLen = kMatchSpecLenStart;
++ }
++ return 0;
++}
++
++typedef enum
++{
++ DUMMY_ERROR, /* unexpected end of input stream */
++ DUMMY_LIT,
++ DUMMY_MATCH,
++ DUMMY_REP
++} ELzmaDummy;
++
++static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, SizeT inSize)
++{
++ UInt32 range = p->range;
++ UInt32 code = p->code;
++ const Byte *bufLimit = buf + inSize;
++ CLzmaProb *probs = p->probs;
++ unsigned state = p->state;
++ ELzmaDummy res;
++
++ {
++ CLzmaProb *prob;
++ UInt32 bound;
++ unsigned ttt;
++ unsigned posState = (p->processedPos) & ((1 << p->prop.pb) - 1);
++
++ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK
++
++ /* if (bufLimit - buf >= 7) return DUMMY_LIT; */
++
++ prob = probs + Literal;
++ if (p->checkDicSize != 0 || p->processedPos != 0)
++ prob += (LZMA_LIT_SIZE *
++ ((((p->processedPos) & ((1 << (p->prop.lp)) - 1)) << p->prop.lc) +
++ (p->dic[(p->dicPos == 0 ? p->dicBufSize : p->dicPos) - 1] >> (8 - p->prop.lc))));
++
++ if (state < kNumLitStates)
++ {
++ unsigned symbol = 1;
++ do { GET_BIT_CHECK(prob + symbol, symbol) } while (symbol < 0x100);
++ }
++ else
++ {
++ unsigned matchByte = p->dic[p->dicPos - p->reps[0] +
++ ((p->dicPos < p->reps[0]) ? p->dicBufSize : 0)];
++ unsigned offs = 0x100;
++ unsigned symbol = 1;
++ do
++ {
++ unsigned bit;
++ CLzmaProb *probLit;
++ matchByte <<= 1;
++ bit = (matchByte & offs);
++ probLit = prob + offs + bit + symbol;
++ GET_BIT2_CHECK(probLit, symbol, offs &= ~bit, offs &= bit)
++ }
++ while (symbol < 0x100);
++ }
++ res = DUMMY_LIT;
++ }
++ else
++ {
++ unsigned len;
++ UPDATE_1_CHECK;
++
++ prob = probs + IsRep + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ state = 0;
++ prob = probs + LenCoder;
++ res = DUMMY_MATCH;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ res = DUMMY_REP;
++ prob = probs + IsRepG0 + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ NORMALIZE_CHECK;
++ return DUMMY_REP;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ }
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ prob = probs + IsRepG1 + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ prob = probs + IsRepG2 + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ }
++ }
++ }
++ state = kNumStates;
++ prob = probs + RepLenCoder;
++ }
++ {
++ unsigned limit, offset;
++ CLzmaProb *probLen = prob + LenChoice;
++ IF_BIT_0_CHECK(probLen)
++ {
++ UPDATE_0_CHECK;
++ probLen = prob + LenLow + (posState << kLenNumLowBits);
++ offset = 0;
++ limit = 1 << kLenNumLowBits;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ probLen = prob + LenChoice2;
++ IF_BIT_0_CHECK(probLen)
++ {
++ UPDATE_0_CHECK;
++ probLen = prob + LenMid + (posState << kLenNumMidBits);
++ offset = kLenNumLowSymbols;
++ limit = 1 << kLenNumMidBits;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ probLen = prob + LenHigh;
++ offset = kLenNumLowSymbols + kLenNumMidSymbols;
++ limit = 1 << kLenNumHighBits;
++ }
++ }
++ TREE_DECODE_CHECK(probLen, limit, len);
++ len += offset;
++ }
++
++ if (state < 4)
++ {
++ unsigned posSlot;
++ prob = probs + PosSlot +
++ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) <<
++ kNumPosSlotBits);
++ TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot);
++ if (posSlot >= kStartPosModelIndex)
++ {
++ int numDirectBits = ((posSlot >> 1) - 1);
++
++ /* if (bufLimit - buf >= 8) return DUMMY_MATCH; */
++
++ if (posSlot < kEndPosModelIndex)
++ {
++ prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits) - posSlot - 1;
++ }
++ else
++ {
++ numDirectBits -= kNumAlignBits;
++ do
++ {
++ NORMALIZE_CHECK
++ range >>= 1;
++ code -= range & (((code - range) >> 31) - 1);
++ /* if (code >= range) code -= range; */
++ }
++ while (--numDirectBits != 0);
++ prob = probs + Align;
++ numDirectBits = kNumAlignBits;
++ }
++ {
++ unsigned i = 1;
++ do
++ {
++ GET_BIT_CHECK(prob + i, i);
++ }
++ while (--numDirectBits != 0);
++ }
++ }
++ }
++ }
++ }
++ NORMALIZE_CHECK;
++ return res;
++}
++
++
++static void LzmaDec_InitRc(CLzmaDec *p, const Byte *data)
++{
++ p->code = ((UInt32)data[1] << 24) | ((UInt32)data[2] << 16) | ((UInt32)data[3] << 8) | ((UInt32)data[4]);
++ p->range = 0xFFFFFFFF;
++ p->needFlush = 0;
++}
++
++void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState)
++{
++ p->needFlush = 1;
++ p->remainLen = 0;
++ p->tempBufSize = 0;
++
++ if (initDic)
++ {
++ p->processedPos = 0;
++ p->checkDicSize = 0;
++ p->needInitState = 1;
++ }
++ if (initState)
++ p->needInitState = 1;
++}
++
++void LzmaDec_Init(CLzmaDec *p)
++{
++ p->dicPos = 0;
++ LzmaDec_InitDicAndState(p, True, True);
++}
++
++static void LzmaDec_InitStateReal(CLzmaDec *p)
++{
++ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (p->prop.lc + p->prop.lp));
++ UInt32 i;
++ CLzmaProb *probs = p->probs;
++ for (i = 0; i < numProbs; i++)
++ probs[i] = kBitModelTotal >> 1;
++ p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1;
++ p->state = 0;
++ p->needInitState = 0;
++}
++
++SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,
++ ELzmaFinishMode finishMode, ELzmaStatus *status)
++{
++ SizeT inSize = *srcLen;
++ (*srcLen) = 0;
++ LzmaDec_WriteRem(p, dicLimit);
++
++ *status = LZMA_STATUS_NOT_SPECIFIED;
++
++ while (p->remainLen != kMatchSpecLenStart)
++ {
++ int checkEndMarkNow;
++
++ if (p->needFlush != 0)
++ {
++ for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--)
++ p->tempBuf[p->tempBufSize++] = *src++;
++ if (p->tempBufSize < RC_INIT_SIZE)
++ {
++ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
++ return SZ_OK;
++ }
++ if (p->tempBuf[0] != 0)
++ return SZ_ERROR_DATA;
++
++ LzmaDec_InitRc(p, p->tempBuf);
++ p->tempBufSize = 0;
++ }
++
++ checkEndMarkNow = 0;
++ if (p->dicPos >= dicLimit)
++ {
++ if (p->remainLen == 0 && p->code == 0)
++ {
++ *status = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK;
++ return SZ_OK;
++ }
++ if (finishMode == LZMA_FINISH_ANY)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_OK;
++ }
++ if (p->remainLen != 0)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_ERROR_DATA;
++ }
++ checkEndMarkNow = 1;
++ }
++
++ if (p->needInitState)
++ LzmaDec_InitStateReal(p);
++
++ if (p->tempBufSize == 0)
++ {
++ SizeT processed;
++ const Byte *bufLimit;
++ if (inSize < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
++ {
++ int dummyRes = LzmaDec_TryDummy(p, src, inSize);
++ if (dummyRes == DUMMY_ERROR)
++ {
++ memcpy(p->tempBuf, src, inSize);
++ p->tempBufSize = (unsigned)inSize;
++ (*srcLen) += inSize;
++ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
++ return SZ_OK;
++ }
++ if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_ERROR_DATA;
++ }
++ bufLimit = src;
++ }
++ else
++ bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX;
++ p->buf = src;
++ if (LzmaDec_DecodeReal2(p, dicLimit, bufLimit) != 0)
++ return SZ_ERROR_DATA;
++ processed = (SizeT)(p->buf - src);
++ (*srcLen) += processed;
++ src += processed;
++ inSize -= processed;
++ }
++ else
++ {
++ unsigned rem = p->tempBufSize, lookAhead = 0;
++ while (rem < LZMA_REQUIRED_INPUT_MAX && lookAhead < inSize)
++ p->tempBuf[rem++] = src[lookAhead++];
++ p->tempBufSize = rem;
++ if (rem < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
++ {
++ int dummyRes = LzmaDec_TryDummy(p, p->tempBuf, rem);
++ if (dummyRes == DUMMY_ERROR)
++ {
++ (*srcLen) += lookAhead;
++ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
++ return SZ_OK;
++ }
++ if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_ERROR_DATA;
++ }
++ }
++ p->buf = p->tempBuf;
++ if (LzmaDec_DecodeReal2(p, dicLimit, p->buf) != 0)
++ return SZ_ERROR_DATA;
++ lookAhead -= (rem - (unsigned)(p->buf - p->tempBuf));
++ (*srcLen) += lookAhead;
++ src += lookAhead;
++ inSize -= lookAhead;
++ p->tempBufSize = 0;
++ }
++ }
++ if (p->code == 0)
++ *status = LZMA_STATUS_FINISHED_WITH_MARK;
++ return (p->code == 0) ? SZ_OK : SZ_ERROR_DATA;
++}
++
++SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
++{
++ SizeT outSize = *destLen;
++ SizeT inSize = *srcLen;
++ *srcLen = *destLen = 0;
++ for (;;)
++ {
++ SizeT inSizeCur = inSize, outSizeCur, dicPos;
++ ELzmaFinishMode curFinishMode;
++ SRes res;
++ if (p->dicPos == p->dicBufSize)
++ p->dicPos = 0;
++ dicPos = p->dicPos;
++ if (outSize > p->dicBufSize - dicPos)
++ {
++ outSizeCur = p->dicBufSize;
++ curFinishMode = LZMA_FINISH_ANY;
++ }
++ else
++ {
++ outSizeCur = dicPos + outSize;
++ curFinishMode = finishMode;
++ }
++
++ res = LzmaDec_DecodeToDic(p, outSizeCur, src, &inSizeCur, curFinishMode, status);
++ src += inSizeCur;
++ inSize -= inSizeCur;
++ *srcLen += inSizeCur;
++ outSizeCur = p->dicPos - dicPos;
++ memcpy(dest, p->dic + dicPos, outSizeCur);
++ dest += outSizeCur;
++ outSize -= outSizeCur;
++ *destLen += outSizeCur;
++ if (res != 0)
++ return res;
++ if (outSizeCur == 0 || outSize == 0)
++ return SZ_OK;
++ }
++}
++
++void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->probs);
++ p->probs = 0;
++}
++
++static void LzmaDec_FreeDict(CLzmaDec *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->dic);
++ p->dic = 0;
++}
++
++void LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc)
++{
++ LzmaDec_FreeProbs(p, alloc);
++ LzmaDec_FreeDict(p, alloc);
++}
++
++SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size)
++{
++ UInt32 dicSize;
++ Byte d;
++
++ if (size < LZMA_PROPS_SIZE)
++ return SZ_ERROR_UNSUPPORTED;
++ else
++ dicSize = data[1] | ((UInt32)data[2] << 8) | ((UInt32)data[3] << 16) | ((UInt32)data[4] << 24);
++
++ if (dicSize < LZMA_DIC_MIN)
++ dicSize = LZMA_DIC_MIN;
++ p->dicSize = dicSize;
++
++ d = data[0];
++ if (d >= (9 * 5 * 5))
++ return SZ_ERROR_UNSUPPORTED;
++
++ p->lc = d % 9;
++ d /= 9;
++ p->pb = d / 5;
++ p->lp = d % 5;
++
++ return SZ_OK;
++}
++
++static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAlloc *alloc)
++{
++ UInt32 numProbs = LzmaProps_GetNumProbs(propNew);
++ if (p->probs == 0 || numProbs != p->numProbs)
++ {
++ LzmaDec_FreeProbs(p, alloc);
++ p->probs = (CLzmaProb *)alloc->Alloc(alloc, numProbs * sizeof(CLzmaProb));
++ p->numProbs = numProbs;
++ if (p->probs == 0)
++ return SZ_ERROR_MEM;
++ }
++ return SZ_OK;
++}
++
++SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
++{
++ CLzmaProps propNew;
++ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
++ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
++ p->prop = propNew;
++ return SZ_OK;
++}
++
++SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
++{
++ CLzmaProps propNew;
++ SizeT dicBufSize;
++ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
++ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
++ dicBufSize = propNew.dicSize;
++ if (p->dic == 0 || dicBufSize != p->dicBufSize)
++ {
++ LzmaDec_FreeDict(p, alloc);
++ p->dic = (Byte *)alloc->Alloc(alloc, dicBufSize);
++ if (p->dic == 0)
++ {
++ LzmaDec_FreeProbs(p, alloc);
++ return SZ_ERROR_MEM;
++ }
++ }
++ p->dicBufSize = dicBufSize;
++ p->prop = propNew;
++ return SZ_OK;
++}
++
++SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
++ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
++ ELzmaStatus *status, ISzAlloc *alloc)
++{
++ CLzmaDec p;
++ SRes res;
++ SizeT inSize = *srcLen;
++ SizeT outSize = *destLen;
++ *srcLen = *destLen = 0;
++ if (inSize < RC_INIT_SIZE)
++ return SZ_ERROR_INPUT_EOF;
++
++ LzmaDec_Construct(&p);
++ res = LzmaDec_AllocateProbs(&p, propData, propSize, alloc);
++ if (res != 0)
++ return res;
++ p.dic = dest;
++ p.dicBufSize = outSize;
++
++ LzmaDec_Init(&p);
++
++ *srcLen = inSize;
++ res = LzmaDec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status);
++
++ if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT)
++ res = SZ_ERROR_INPUT_EOF;
++
++ (*destLen) = p.dicPos;
++ LzmaDec_FreeProbs(&p, alloc);
++ return res;
++}
+--- /dev/null
++++ b/lib/lzma/LzmaEnc.c
+@@ -0,0 +1,2271 @@
++/* LzmaEnc.c -- LZMA Encoder
++2009-11-24 : Igor Pavlov : Public domain */
++
++#include <string.h>
++
++/* #define SHOW_STAT */
++/* #define SHOW_STAT2 */
++
++#if defined(SHOW_STAT) || defined(SHOW_STAT2)
++#include <stdio.h>
++#endif
++
++#include "LzmaEnc.h"
++
++/* disable MT */
++#define _7ZIP_ST
++
++#include "LzFind.h"
++#ifndef _7ZIP_ST
++#include "LzFindMt.h"
++#endif
++
++#ifdef SHOW_STAT
++static int ttt = 0;
++#endif
++
++#define kBlockSizeMax ((1 << LZMA_NUM_BLOCK_SIZE_BITS) - 1)
++
++#define kBlockSize (9 << 10)
++#define kUnpackBlockSize (1 << 18)
++#define kMatchArraySize (1 << 21)
++#define kMatchRecordMaxSize ((LZMA_MATCH_LEN_MAX * 2 + 3) * LZMA_MATCH_LEN_MAX)
++
++#define kNumMaxDirectBits (31)
++
++#define kNumTopBits 24
++#define kTopValue ((UInt32)1 << kNumTopBits)
++
++#define kNumBitModelTotalBits 11
++#define kBitModelTotal (1 << kNumBitModelTotalBits)
++#define kNumMoveBits 5
++#define kProbInitValue (kBitModelTotal >> 1)
++
++#define kNumMoveReducingBits 4
++#define kNumBitPriceShiftBits 4
++#define kBitPrice (1 << kNumBitPriceShiftBits)
++
++void LzmaEncProps_Init(CLzmaEncProps *p)
++{
++ p->level = 5;
++ p->dictSize = p->mc = 0;
++ p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1;
++ p->writeEndMark = 0;
++}
++
++void LzmaEncProps_Normalize(CLzmaEncProps *p)
++{
++ int level = p->level;
++ if (level < 0) level = 5;
++ p->level = level;
++ if (p->dictSize == 0) p->dictSize = (level <= 5 ? (1 << (level * 2 + 14)) : (level == 6 ? (1 << 25) : (1 << 26)));
++ if (p->lc < 0) p->lc = 3;
++ if (p->lp < 0) p->lp = 0;
++ if (p->pb < 0) p->pb = 2;
++ if (p->algo < 0) p->algo = (level < 5 ? 0 : 1);
++ if (p->fb < 0) p->fb = (level < 7 ? 32 : 64);
++ if (p->btMode < 0) p->btMode = (p->algo == 0 ? 0 : 1);
++ if (p->numHashBytes < 0) p->numHashBytes = 4;
++ if (p->mc == 0) p->mc = (16 + (p->fb >> 1)) >> (p->btMode ? 0 : 1);
++ if (p->numThreads < 0)
++ p->numThreads =
++ #ifndef _7ZIP_ST
++ ((p->btMode && p->algo) ? 2 : 1);
++ #else
++ 1;
++ #endif
++}
++
++UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
++{
++ CLzmaEncProps props = *props2;
++ LzmaEncProps_Normalize(&props);
++ return props.dictSize;
++}
++
++/* #define LZMA_LOG_BSR */
++/* Define it for Intel's CPU */
++
++
++#ifdef LZMA_LOG_BSR
++
++#define kDicLogSizeMaxCompress 30
++
++#define BSR2_RET(pos, res) { unsigned long i; _BitScanReverse(&i, (pos)); res = (i + i) + ((pos >> (i - 1)) & 1); }
++
++UInt32 GetPosSlot1(UInt32 pos)
++{
++ UInt32 res;
++ BSR2_RET(pos, res);
++ return res;
++}
++#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
++#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); }
++
++#else
++
++#define kNumLogBits (9 + (int)sizeof(size_t) / 2)
++#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7)
++
++void LzmaEnc_FastPosInit(Byte *g_FastPos)
++{
++ int c = 2, slotFast;
++ g_FastPos[0] = 0;
++ g_FastPos[1] = 1;
++
++ for (slotFast = 2; slotFast < kNumLogBits * 2; slotFast++)
++ {
++ UInt32 k = (1 << ((slotFast >> 1) - 1));
++ UInt32 j;
++ for (j = 0; j < k; j++, c++)
++ g_FastPos[c] = (Byte)slotFast;
++ }
++}
++
++#define BSR2_RET(pos, res) { UInt32 i = 6 + ((kNumLogBits - 1) & \
++ (0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \
++ res = p->g_FastPos[pos >> i] + (i * 2); }
++/*
++#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? \
++ p->g_FastPos[pos >> 6] + 12 : \
++ p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; }
++*/
++
++#define GetPosSlot1(pos) p->g_FastPos[pos]
++#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
++#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos]; else BSR2_RET(pos, res); }
++
++#endif
++
++
++#define LZMA_NUM_REPS 4
++
++typedef unsigned CState;
++
++typedef struct
++{
++ UInt32 price;
++
++ CState state;
++ int prev1IsChar;
++ int prev2;
++
++ UInt32 posPrev2;
++ UInt32 backPrev2;
++
++ UInt32 posPrev;
++ UInt32 backPrev;
++ UInt32 backs[LZMA_NUM_REPS];
++} COptimal;
++
++#define kNumOpts (1 << 12)
++
++#define kNumLenToPosStates 4
++#define kNumPosSlotBits 6
++#define kDicLogSizeMin 0
++#define kDicLogSizeMax 32
++#define kDistTableSizeMax (kDicLogSizeMax * 2)
++
++
++#define kNumAlignBits 4
++#define kAlignTableSize (1 << kNumAlignBits)
++#define kAlignMask (kAlignTableSize - 1)
++
++#define kStartPosModelIndex 4
++#define kEndPosModelIndex 14
++#define kNumPosModels (kEndPosModelIndex - kStartPosModelIndex)
++
++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
++
++#ifdef _LZMA_PROB32
++#define CLzmaProb UInt32
++#else
++#define CLzmaProb UInt16
++#endif
++
++#define LZMA_PB_MAX 4
++#define LZMA_LC_MAX 8
++#define LZMA_LP_MAX 4
++
++#define LZMA_NUM_PB_STATES_MAX (1 << LZMA_PB_MAX)
++
++
++#define kLenNumLowBits 3
++#define kLenNumLowSymbols (1 << kLenNumLowBits)
++#define kLenNumMidBits 3
++#define kLenNumMidSymbols (1 << kLenNumMidBits)
++#define kLenNumHighBits 8
++#define kLenNumHighSymbols (1 << kLenNumHighBits)
++
++#define kLenNumSymbolsTotal (kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
++
++#define LZMA_MATCH_LEN_MIN 2
++#define LZMA_MATCH_LEN_MAX (LZMA_MATCH_LEN_MIN + kLenNumSymbolsTotal - 1)
++
++#define kNumStates 12
++
++typedef struct
++{
++ CLzmaProb choice;
++ CLzmaProb choice2;
++ CLzmaProb low[LZMA_NUM_PB_STATES_MAX << kLenNumLowBits];
++ CLzmaProb mid[LZMA_NUM_PB_STATES_MAX << kLenNumMidBits];
++ CLzmaProb high[kLenNumHighSymbols];
++} CLenEnc;
++
++typedef struct
++{
++ CLenEnc p;
++ UInt32 prices[LZMA_NUM_PB_STATES_MAX][kLenNumSymbolsTotal];
++ UInt32 tableSize;
++ UInt32 counters[LZMA_NUM_PB_STATES_MAX];
++} CLenPriceEnc;
++
++typedef struct
++{
++ UInt32 range;
++ Byte cache;
++ UInt64 low;
++ UInt64 cacheSize;
++ Byte *buf;
++ Byte *bufLim;
++ Byte *bufBase;
++ ISeqOutStream *outStream;
++ UInt64 processed;
++ SRes res;
++} CRangeEnc;
++
++typedef struct
++{
++ CLzmaProb *litProbs;
++
++ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
++ CLzmaProb isRep[kNumStates];
++ CLzmaProb isRepG0[kNumStates];
++ CLzmaProb isRepG1[kNumStates];
++ CLzmaProb isRepG2[kNumStates];
++ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
++
++ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
++ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
++ CLzmaProb posAlignEncoder[1 << kNumAlignBits];
++
++ CLenPriceEnc lenEnc;
++ CLenPriceEnc repLenEnc;
++
++ UInt32 reps[LZMA_NUM_REPS];
++ UInt32 state;
++} CSaveState;
++
++typedef struct
++{
++ IMatchFinder matchFinder;
++ void *matchFinderObj;
++
++ #ifndef _7ZIP_ST
++ Bool mtMode;
++ CMatchFinderMt matchFinderMt;
++ #endif
++
++ CMatchFinder matchFinderBase;
++
++ #ifndef _7ZIP_ST
++ Byte pad[128];
++ #endif
++
++ UInt32 optimumEndIndex;
++ UInt32 optimumCurrentIndex;
++
++ UInt32 longestMatchLength;
++ UInt32 numPairs;
++ UInt32 numAvail;
++ COptimal opt[kNumOpts];
++
++ #ifndef LZMA_LOG_BSR
++ Byte g_FastPos[1 << kNumLogBits];
++ #endif
++
++ UInt32 ProbPrices[kBitModelTotal >> kNumMoveReducingBits];
++ UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2 + 1];
++ UInt32 numFastBytes;
++ UInt32 additionalOffset;
++ UInt32 reps[LZMA_NUM_REPS];
++ UInt32 state;
++
++ UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax];
++ UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances];
++ UInt32 alignPrices[kAlignTableSize];
++ UInt32 alignPriceCount;
++
++ UInt32 distTableSize;
++
++ unsigned lc, lp, pb;
++ unsigned lpMask, pbMask;
++
++ CLzmaProb *litProbs;
++
++ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
++ CLzmaProb isRep[kNumStates];
++ CLzmaProb isRepG0[kNumStates];
++ CLzmaProb isRepG1[kNumStates];
++ CLzmaProb isRepG2[kNumStates];
++ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
++
++ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
++ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
++ CLzmaProb posAlignEncoder[1 << kNumAlignBits];
++
++ CLenPriceEnc lenEnc;
++ CLenPriceEnc repLenEnc;
++
++ unsigned lclp;
++
++ Bool fastMode;
++
++ CRangeEnc rc;
++
++ Bool writeEndMark;
++ UInt64 nowPos64;
++ UInt32 matchPriceCount;
++ Bool finished;
++ Bool multiThread;
++
++ SRes result;
++ UInt32 dictSize;
++ UInt32 matchFinderCycles;
++
++ int needInit;
++
++ CSaveState saveState;
++} CLzmaEnc;
++
++void LzmaEnc_SaveState(CLzmaEncHandle pp)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ CSaveState *dest = &p->saveState;
++ int i;
++ dest->lenEnc = p->lenEnc;
++ dest->repLenEnc = p->repLenEnc;
++ dest->state = p->state;
++
++ for (i = 0; i < kNumStates; i++)
++ {
++ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i]));
++ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i]));
++ }
++ for (i = 0; i < kNumLenToPosStates; i++)
++ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i]));
++ memcpy(dest->isRep, p->isRep, sizeof(p->isRep));
++ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0));
++ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1));
++ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2));
++ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders));
++ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder));
++ memcpy(dest->reps, p->reps, sizeof(p->reps));
++ memcpy(dest->litProbs, p->litProbs, (0x300 << p->lclp) * sizeof(CLzmaProb));
++}
++
++void LzmaEnc_RestoreState(CLzmaEncHandle pp)
++{
++ CLzmaEnc *dest = (CLzmaEnc *)pp;
++ const CSaveState *p = &dest->saveState;
++ int i;
++ dest->lenEnc = p->lenEnc;
++ dest->repLenEnc = p->repLenEnc;
++ dest->state = p->state;
++
++ for (i = 0; i < kNumStates; i++)
++ {
++ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i]));
++ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i]));
++ }
++ for (i = 0; i < kNumLenToPosStates; i++)
++ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i]));
++ memcpy(dest->isRep, p->isRep, sizeof(p->isRep));
++ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0));
++ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1));
++ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2));
++ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders));
++ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder));
++ memcpy(dest->reps, p->reps, sizeof(p->reps));
++ memcpy(dest->litProbs, p->litProbs, (0x300 << dest->lclp) * sizeof(CLzmaProb));
++}
++
++SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ CLzmaEncProps props = *props2;
++ LzmaEncProps_Normalize(&props);
++
++ if (props.lc > LZMA_LC_MAX || props.lp > LZMA_LP_MAX || props.pb > LZMA_PB_MAX ||
++ props.dictSize > (1 << kDicLogSizeMaxCompress) || props.dictSize > (1 << 30))
++ return SZ_ERROR_PARAM;
++ p->dictSize = props.dictSize;
++ p->matchFinderCycles = props.mc;
++ {
++ unsigned fb = props.fb;
++ if (fb < 5)
++ fb = 5;
++ if (fb > LZMA_MATCH_LEN_MAX)
++ fb = LZMA_MATCH_LEN_MAX;
++ p->numFastBytes = fb;
++ }
++ p->lc = props.lc;
++ p->lp = props.lp;
++ p->pb = props.pb;
++ p->fastMode = (props.algo == 0);
++ p->matchFinderBase.btMode = props.btMode;
++ {
++ UInt32 numHashBytes = 4;
++ if (props.btMode)
++ {
++ if (props.numHashBytes < 2)
++ numHashBytes = 2;
++ else if (props.numHashBytes < 4)
++ numHashBytes = props.numHashBytes;
++ }
++ p->matchFinderBase.numHashBytes = numHashBytes;
++ }
++
++ p->matchFinderBase.cutValue = props.mc;
++
++ p->writeEndMark = props.writeEndMark;
++
++ #ifndef _7ZIP_ST
++ /*
++ if (newMultiThread != _multiThread)
++ {
++ ReleaseMatchFinder();
++ _multiThread = newMultiThread;
++ }
++ */
++ p->multiThread = (props.numThreads > 1);
++ #endif
++
++ return SZ_OK;
++}
++
++static const int kLiteralNextStates[kNumStates] = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5};
++static const int kMatchNextStates[kNumStates] = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10};
++static const int kRepNextStates[kNumStates] = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11};
++static const int kShortRepNextStates[kNumStates]= {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11};
++
++#define IsCharState(s) ((s) < 7)
++
++#define GetLenToPosState(len) (((len) < kNumLenToPosStates + 1) ? (len) - 2 : kNumLenToPosStates - 1)
++
++#define kInfinityPrice (1 << 30)
++
++static void RangeEnc_Construct(CRangeEnc *p)
++{
++ p->outStream = 0;
++ p->bufBase = 0;
++}
++
++#define RangeEnc_GetProcessed(p) ((p)->processed + ((p)->buf - (p)->bufBase) + (p)->cacheSize)
++
++#define RC_BUF_SIZE (1 << 16)
++static int RangeEnc_Alloc(CRangeEnc *p, ISzAlloc *alloc)
++{
++ if (p->bufBase == 0)
++ {
++ p->bufBase = (Byte *)alloc->Alloc(alloc, RC_BUF_SIZE);
++ if (p->bufBase == 0)
++ return 0;
++ p->bufLim = p->bufBase + RC_BUF_SIZE;
++ }
++ return 1;
++}
++
++static void RangeEnc_Free(CRangeEnc *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->bufBase);
++ p->bufBase = 0;
++}
++
++static void RangeEnc_Init(CRangeEnc *p)
++{
++ /* Stream.Init(); */
++ p->low = 0;
++ p->range = 0xFFFFFFFF;
++ p->cacheSize = 1;
++ p->cache = 0;
++
++ p->buf = p->bufBase;
++
++ p->processed = 0;
++ p->res = SZ_OK;
++}
++
++static void RangeEnc_FlushStream(CRangeEnc *p)
++{
++ size_t num;
++ if (p->res != SZ_OK)
++ return;
++ num = p->buf - p->bufBase;
++ if (num != p->outStream->Write(p->outStream, p->bufBase, num))
++ p->res = SZ_ERROR_WRITE;
++ p->processed += num;
++ p->buf = p->bufBase;
++}
++
++static void MY_FAST_CALL RangeEnc_ShiftLow(CRangeEnc *p)
++{
++ if ((UInt32)p->low < (UInt32)0xFF000000 || (int)(p->low >> 32) != 0)
++ {
++ Byte temp = p->cache;
++ do
++ {
++ Byte *buf = p->buf;
++ *buf++ = (Byte)(temp + (Byte)(p->low >> 32));
++ p->buf = buf;
++ if (buf == p->bufLim)
++ RangeEnc_FlushStream(p);
++ temp = 0xFF;
++ }
++ while (--p->cacheSize != 0);
++ p->cache = (Byte)((UInt32)p->low >> 24);
++ }
++ p->cacheSize++;
++ p->low = (UInt32)p->low << 8;
++}
++
++static void RangeEnc_FlushData(CRangeEnc *p)
++{
++ int i;
++ for (i = 0; i < 5; i++)
++ RangeEnc_ShiftLow(p);
++}
++
++static void RangeEnc_EncodeDirectBits(CRangeEnc *p, UInt32 value, int numBits)
++{
++ do
++ {
++ p->range >>= 1;
++ p->low += p->range & (0 - ((value >> --numBits) & 1));
++ if (p->range < kTopValue)
++ {
++ p->range <<= 8;
++ RangeEnc_ShiftLow(p);
++ }
++ }
++ while (numBits != 0);
++}
++
++static void RangeEnc_EncodeBit(CRangeEnc *p, CLzmaProb *prob, UInt32 symbol)
++{
++ UInt32 ttt = *prob;
++ UInt32 newBound = (p->range >> kNumBitModelTotalBits) * ttt;
++ if (symbol == 0)
++ {
++ p->range = newBound;
++ ttt += (kBitModelTotal - ttt) >> kNumMoveBits;
++ }
++ else
++ {
++ p->low += newBound;
++ p->range -= newBound;
++ ttt -= ttt >> kNumMoveBits;
++ }
++ *prob = (CLzmaProb)ttt;
++ if (p->range < kTopValue)
++ {
++ p->range <<= 8;
++ RangeEnc_ShiftLow(p);
++ }
++}
++
++static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol)
++{
++ symbol |= 0x100;
++ do
++ {
++ RangeEnc_EncodeBit(p, probs + (symbol >> 8), (symbol >> 7) & 1);
++ symbol <<= 1;
++ }
++ while (symbol < 0x10000);
++}
++
++static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol, UInt32 matchByte)
++{
++ UInt32 offs = 0x100;
++ symbol |= 0x100;
++ do
++ {
++ matchByte <<= 1;
++ RangeEnc_EncodeBit(p, probs + (offs + (matchByte & offs) + (symbol >> 8)), (symbol >> 7) & 1);
++ symbol <<= 1;
++ offs &= ~(matchByte ^ symbol);
++ }
++ while (symbol < 0x10000);
++}
++
++void LzmaEnc_InitPriceTables(UInt32 *ProbPrices)
++{
++ UInt32 i;
++ for (i = (1 << kNumMoveReducingBits) / 2; i < kBitModelTotal; i += (1 << kNumMoveReducingBits))
++ {
++ const int kCyclesBits = kNumBitPriceShiftBits;
++ UInt32 w = i;
++ UInt32 bitCount = 0;
++ int j;
++ for (j = 0; j < kCyclesBits; j++)
++ {
++ w = w * w;
++ bitCount <<= 1;
++ while (w >= ((UInt32)1 << 16))
++ {
++ w >>= 1;
++ bitCount++;
++ }
++ }
++ ProbPrices[i >> kNumMoveReducingBits] = ((kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount);
++ }
++}
++
++
++#define GET_PRICE(prob, symbol) \
++ p->ProbPrices[((prob) ^ (((-(int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
++
++#define GET_PRICEa(prob, symbol) \
++ ProbPrices[((prob) ^ ((-((int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
++
++#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits]
++#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
++
++#define GET_PRICE_0a(prob) ProbPrices[(prob) >> kNumMoveReducingBits]
++#define GET_PRICE_1a(prob) ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
++
++static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 symbol, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ symbol |= 0x100;
++ do
++ {
++ price += GET_PRICEa(probs[symbol >> 8], (symbol >> 7) & 1);
++ symbol <<= 1;
++ }
++ while (symbol < 0x10000);
++ return price;
++}
++
++static UInt32 LitEnc_GetPriceMatched(const CLzmaProb *probs, UInt32 symbol, UInt32 matchByte, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ UInt32 offs = 0x100;
++ symbol |= 0x100;
++ do
++ {
++ matchByte <<= 1;
++ price += GET_PRICEa(probs[offs + (matchByte & offs) + (symbol >> 8)], (symbol >> 7) & 1);
++ symbol <<= 1;
++ offs &= ~(matchByte ^ symbol);
++ }
++ while (symbol < 0x10000);
++ return price;
++}
++
++
++static void RcTree_Encode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
++{
++ UInt32 m = 1;
++ int i;
++ for (i = numBitLevels; i != 0;)
++ {
++ UInt32 bit;
++ i--;
++ bit = (symbol >> i) & 1;
++ RangeEnc_EncodeBit(rc, probs + m, bit);
++ m = (m << 1) | bit;
++ }
++}
++
++static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
++{
++ UInt32 m = 1;
++ int i;
++ for (i = 0; i < numBitLevels; i++)
++ {
++ UInt32 bit = symbol & 1;
++ RangeEnc_EncodeBit(rc, probs + m, bit);
++ m = (m << 1) | bit;
++ symbol >>= 1;
++ }
++}
++
++static UInt32 RcTree_GetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ symbol |= (1 << numBitLevels);
++ while (symbol != 1)
++ {
++ price += GET_PRICEa(probs[symbol >> 1], symbol & 1);
++ symbol >>= 1;
++ }
++ return price;
++}
++
++static UInt32 RcTree_ReverseGetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ UInt32 m = 1;
++ int i;
++ for (i = numBitLevels; i != 0; i--)
++ {
++ UInt32 bit = symbol & 1;
++ symbol >>= 1;
++ price += GET_PRICEa(probs[m], bit);
++ m = (m << 1) | bit;
++ }
++ return price;
++}
++
++
++static void LenEnc_Init(CLenEnc *p)
++{
++ unsigned i;
++ p->choice = p->choice2 = kProbInitValue;
++ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumLowBits); i++)
++ p->low[i] = kProbInitValue;
++ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumMidBits); i++)
++ p->mid[i] = kProbInitValue;
++ for (i = 0; i < kLenNumHighSymbols; i++)
++ p->high[i] = kProbInitValue;
++}
++
++static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState)
++{
++ if (symbol < kLenNumLowSymbols)
++ {
++ RangeEnc_EncodeBit(rc, &p->choice, 0);
++ RcTree_Encode(rc, p->low + (posState << kLenNumLowBits), kLenNumLowBits, symbol);
++ }
++ else
++ {
++ RangeEnc_EncodeBit(rc, &p->choice, 1);
++ if (symbol < kLenNumLowSymbols + kLenNumMidSymbols)
++ {
++ RangeEnc_EncodeBit(rc, &p->choice2, 0);
++ RcTree_Encode(rc, p->mid + (posState << kLenNumMidBits), kLenNumMidBits, symbol - kLenNumLowSymbols);
++ }
++ else
++ {
++ RangeEnc_EncodeBit(rc, &p->choice2, 1);
++ RcTree_Encode(rc, p->high, kLenNumHighBits, symbol - kLenNumLowSymbols - kLenNumMidSymbols);
++ }
++ }
++}
++
++static void LenEnc_SetPrices(CLenEnc *p, UInt32 posState, UInt32 numSymbols, UInt32 *prices, UInt32 *ProbPrices)
++{
++ UInt32 a0 = GET_PRICE_0a(p->choice);
++ UInt32 a1 = GET_PRICE_1a(p->choice);
++ UInt32 b0 = a1 + GET_PRICE_0a(p->choice2);
++ UInt32 b1 = a1 + GET_PRICE_1a(p->choice2);
++ UInt32 i = 0;
++ for (i = 0; i < kLenNumLowSymbols; i++)
++ {
++ if (i >= numSymbols)
++ return;
++ prices[i] = a0 + RcTree_GetPrice(p->low + (posState << kLenNumLowBits), kLenNumLowBits, i, ProbPrices);
++ }
++ for (; i < kLenNumLowSymbols + kLenNumMidSymbols; i++)
++ {
++ if (i >= numSymbols)
++ return;
++ prices[i] = b0 + RcTree_GetPrice(p->mid + (posState << kLenNumMidBits), kLenNumMidBits, i - kLenNumLowSymbols, ProbPrices);
++ }
++ for (; i < numSymbols; i++)
++ prices[i] = b1 + RcTree_GetPrice(p->high, kLenNumHighBits, i - kLenNumLowSymbols - kLenNumMidSymbols, ProbPrices);
++}
++
++static void MY_FAST_CALL LenPriceEnc_UpdateTable(CLenPriceEnc *p, UInt32 posState, UInt32 *ProbPrices)
++{
++ LenEnc_SetPrices(&p->p, posState, p->tableSize, p->prices[posState], ProbPrices);
++ p->counters[posState] = p->tableSize;
++}
++
++static void LenPriceEnc_UpdateTables(CLenPriceEnc *p, UInt32 numPosStates, UInt32 *ProbPrices)
++{
++ UInt32 posState;
++ for (posState = 0; posState < numPosStates; posState++)
++ LenPriceEnc_UpdateTable(p, posState, ProbPrices);
++}
++
++static void LenEnc_Encode2(CLenPriceEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState, Bool updatePrice, UInt32 *ProbPrices)
++{
++ LenEnc_Encode(&p->p, rc, symbol, posState);
++ if (updatePrice)
++ if (--p->counters[posState] == 0)
++ LenPriceEnc_UpdateTable(p, posState, ProbPrices);
++}
++
++
++
++
++static void MovePos(CLzmaEnc *p, UInt32 num)
++{
++ #ifdef SHOW_STAT
++ ttt += num;
++ printf("\n MovePos %d", num);
++ #endif
++ if (num != 0)
++ {
++ p->additionalOffset += num;
++ p->matchFinder.Skip(p->matchFinderObj, num);
++ }
++}
++
++static UInt32 ReadMatchDistances(CLzmaEnc *p, UInt32 *numDistancePairsRes)
++{
++ UInt32 lenRes = 0, numPairs;
++ p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
++ numPairs = p->matchFinder.GetMatches(p->matchFinderObj, p->matches);
++ #ifdef SHOW_STAT
++ printf("\n i = %d numPairs = %d ", ttt, numPairs / 2);
++ ttt++;
++ {
++ UInt32 i;
++ for (i = 0; i < numPairs; i += 2)
++ printf("%2d %6d | ", p->matches[i], p->matches[i + 1]);
++ }
++ #endif
++ if (numPairs > 0)
++ {
++ lenRes = p->matches[numPairs - 2];
++ if (lenRes == p->numFastBytes)
++ {
++ const Byte *pby = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ UInt32 distance = p->matches[numPairs - 1] + 1;
++ UInt32 numAvail = p->numAvail;
++ if (numAvail > LZMA_MATCH_LEN_MAX)
++ numAvail = LZMA_MATCH_LEN_MAX;
++ {
++ const Byte *pby2 = pby - distance;
++ for (; lenRes < numAvail && pby[lenRes] == pby2[lenRes]; lenRes++);
++ }
++ }
++ }
++ p->additionalOffset++;
++ *numDistancePairsRes = numPairs;
++ return lenRes;
++}
++
++
++#define MakeAsChar(p) (p)->backPrev = (UInt32)(-1); (p)->prev1IsChar = False;
++#define MakeAsShortRep(p) (p)->backPrev = 0; (p)->prev1IsChar = False;
++#define IsShortRep(p) ((p)->backPrev == 0)
++
++static UInt32 GetRepLen1Price(CLzmaEnc *p, UInt32 state, UInt32 posState)
++{
++ return
++ GET_PRICE_0(p->isRepG0[state]) +
++ GET_PRICE_0(p->isRep0Long[state][posState]);
++}
++
++static UInt32 GetPureRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 state, UInt32 posState)
++{
++ UInt32 price;
++ if (repIndex == 0)
++ {
++ price = GET_PRICE_0(p->isRepG0[state]);
++ price += GET_PRICE_1(p->isRep0Long[state][posState]);
++ }
++ else
++ {
++ price = GET_PRICE_1(p->isRepG0[state]);
++ if (repIndex == 1)
++ price += GET_PRICE_0(p->isRepG1[state]);
++ else
++ {
++ price += GET_PRICE_1(p->isRepG1[state]);
++ price += GET_PRICE(p->isRepG2[state], repIndex - 2);
++ }
++ }
++ return price;
++}
++
++static UInt32 GetRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 len, UInt32 state, UInt32 posState)
++{
++ return p->repLenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN] +
++ GetPureRepPrice(p, repIndex, state, posState);
++}
++
++static UInt32 Backward(CLzmaEnc *p, UInt32 *backRes, UInt32 cur)
++{
++ UInt32 posMem = p->opt[cur].posPrev;
++ UInt32 backMem = p->opt[cur].backPrev;
++ p->optimumEndIndex = cur;
++ do
++ {
++ if (p->opt[cur].prev1IsChar)
++ {
++ MakeAsChar(&p->opt[posMem])
++ p->opt[posMem].posPrev = posMem - 1;
++ if (p->opt[cur].prev2)
++ {
++ p->opt[posMem - 1].prev1IsChar = False;
++ p->opt[posMem - 1].posPrev = p->opt[cur].posPrev2;
++ p->opt[posMem - 1].backPrev = p->opt[cur].backPrev2;
++ }
++ }
++ {
++ UInt32 posPrev = posMem;
++ UInt32 backCur = backMem;
++
++ backMem = p->opt[posPrev].backPrev;
++ posMem = p->opt[posPrev].posPrev;
++
++ p->opt[posPrev].backPrev = backCur;
++ p->opt[posPrev].posPrev = cur;
++ cur = posPrev;
++ }
++ }
++ while (cur != 0);
++ *backRes = p->opt[0].backPrev;
++ p->optimumCurrentIndex = p->opt[0].posPrev;
++ return p->optimumCurrentIndex;
++}
++
++#define LIT_PROBS(pos, prevByte) (p->litProbs + ((((pos) & p->lpMask) << p->lc) + ((prevByte) >> (8 - p->lc))) * 0x300)
++
++static UInt32 GetOptimum(CLzmaEnc *p, UInt32 position, UInt32 *backRes)
++{
++ UInt32 numAvail, mainLen, numPairs, repMaxIndex, i, posState, lenEnd, len, cur;
++ UInt32 matchPrice, repMatchPrice, normalMatchPrice;
++ UInt32 reps[LZMA_NUM_REPS], repLens[LZMA_NUM_REPS];
++ UInt32 *matches;
++ const Byte *data;
++ Byte curByte, matchByte;
++ if (p->optimumEndIndex != p->optimumCurrentIndex)
++ {
++ const COptimal *opt = &p->opt[p->optimumCurrentIndex];
++ UInt32 lenRes = opt->posPrev - p->optimumCurrentIndex;
++ *backRes = opt->backPrev;
++ p->optimumCurrentIndex = opt->posPrev;
++ return lenRes;
++ }
++ p->optimumCurrentIndex = p->optimumEndIndex = 0;
++
++ if (p->additionalOffset == 0)
++ mainLen = ReadMatchDistances(p, &numPairs);
++ else
++ {
++ mainLen = p->longestMatchLength;
++ numPairs = p->numPairs;
++ }
++
++ numAvail = p->numAvail;
++ if (numAvail < 2)
++ {
++ *backRes = (UInt32)(-1);
++ return 1;
++ }
++ if (numAvail > LZMA_MATCH_LEN_MAX)
++ numAvail = LZMA_MATCH_LEN_MAX;
++
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ repMaxIndex = 0;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 lenTest;
++ const Byte *data2;
++ reps[i] = p->reps[i];
++ data2 = data - (reps[i] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ {
++ repLens[i] = 0;
++ continue;
++ }
++ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
++ repLens[i] = lenTest;
++ if (lenTest > repLens[repMaxIndex])
++ repMaxIndex = i;
++ }
++ if (repLens[repMaxIndex] >= p->numFastBytes)
++ {
++ UInt32 lenRes;
++ *backRes = repMaxIndex;
++ lenRes = repLens[repMaxIndex];
++ MovePos(p, lenRes - 1);
++ return lenRes;
++ }
++
++ matches = p->matches;
++ if (mainLen >= p->numFastBytes)
++ {
++ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
++ MovePos(p, mainLen - 1);
++ return mainLen;
++ }
++ curByte = *data;
++ matchByte = *(data - (reps[0] + 1));
++
++ if (mainLen < 2 && curByte != matchByte && repLens[repMaxIndex] < 2)
++ {
++ *backRes = (UInt32)-1;
++ return 1;
++ }
++
++ p->opt[0].state = (CState)p->state;
++
++ posState = (position & p->pbMask);
++
++ {
++ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
++ p->opt[1].price = GET_PRICE_0(p->isMatch[p->state][posState]) +
++ (!IsCharState(p->state) ?
++ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
++ LitEnc_GetPrice(probs, curByte, p->ProbPrices));
++ }
++
++ MakeAsChar(&p->opt[1]);
++
++ matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]);
++ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]);
++
++ if (matchByte == curByte)
++ {
++ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, p->state, posState);
++ if (shortRepPrice < p->opt[1].price)
++ {
++ p->opt[1].price = shortRepPrice;
++ MakeAsShortRep(&p->opt[1]);
++ }
++ }
++ lenEnd = ((mainLen >= repLens[repMaxIndex]) ? mainLen : repLens[repMaxIndex]);
++
++ if (lenEnd < 2)
++ {
++ *backRes = p->opt[1].backPrev;
++ return 1;
++ }
++
++ p->opt[1].posPrev = 0;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ p->opt[0].backs[i] = reps[i];
++
++ len = lenEnd;
++ do
++ p->opt[len--].price = kInfinityPrice;
++ while (len >= 2);
++
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 repLen = repLens[i];
++ UInt32 price;
++ if (repLen < 2)
++ continue;
++ price = repMatchPrice + GetPureRepPrice(p, i, p->state, posState);
++ do
++ {
++ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][repLen - 2];
++ COptimal *opt = &p->opt[repLen];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = 0;
++ opt->backPrev = i;
++ opt->prev1IsChar = False;
++ }
++ }
++ while (--repLen >= 2);
++ }
++
++ normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[p->state]);
++
++ len = ((repLens[0] >= 2) ? repLens[0] + 1 : 2);
++ if (len <= mainLen)
++ {
++ UInt32 offs = 0;
++ while (len > matches[offs])
++ offs += 2;
++ for (; ; len++)
++ {
++ COptimal *opt;
++ UInt32 distance = matches[offs + 1];
++
++ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN];
++ UInt32 lenToPosState = GetLenToPosState(len);
++ if (distance < kNumFullDistances)
++ curAndLenPrice += p->distancesPrices[lenToPosState][distance];
++ else
++ {
++ UInt32 slot;
++ GetPosSlot2(distance, slot);
++ curAndLenPrice += p->alignPrices[distance & kAlignMask] + p->posSlotPrices[lenToPosState][slot];
++ }
++ opt = &p->opt[len];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = 0;
++ opt->backPrev = distance + LZMA_NUM_REPS;
++ opt->prev1IsChar = False;
++ }
++ if (len == matches[offs])
++ {
++ offs += 2;
++ if (offs == numPairs)
++ break;
++ }
++ }
++ }
++
++ cur = 0;
++
++ #ifdef SHOW_STAT2
++ if (position >= 0)
++ {
++ unsigned i;
++ printf("\n pos = %4X", position);
++ for (i = cur; i <= lenEnd; i++)
++ printf("\nprice[%4X] = %d", position - cur + i, p->opt[i].price);
++ }
++ #endif
++
++ for (;;)
++ {
++ UInt32 numAvailFull, newLen, numPairs, posPrev, state, posState, startLen;
++ UInt32 curPrice, curAnd1Price, matchPrice, repMatchPrice;
++ Bool nextIsChar;
++ Byte curByte, matchByte;
++ const Byte *data;
++ COptimal *curOpt;
++ COptimal *nextOpt;
++
++ cur++;
++ if (cur == lenEnd)
++ return Backward(p, backRes, cur);
++
++ newLen = ReadMatchDistances(p, &numPairs);
++ if (newLen >= p->numFastBytes)
++ {
++ p->numPairs = numPairs;
++ p->longestMatchLength = newLen;
++ return Backward(p, backRes, cur);
++ }
++ position++;
++ curOpt = &p->opt[cur];
++ posPrev = curOpt->posPrev;
++ if (curOpt->prev1IsChar)
++ {
++ posPrev--;
++ if (curOpt->prev2)
++ {
++ state = p->opt[curOpt->posPrev2].state;
++ if (curOpt->backPrev2 < LZMA_NUM_REPS)
++ state = kRepNextStates[state];
++ else
++ state = kMatchNextStates[state];
++ }
++ else
++ state = p->opt[posPrev].state;
++ state = kLiteralNextStates[state];
++ }
++ else
++ state = p->opt[posPrev].state;
++ if (posPrev == cur - 1)
++ {
++ if (IsShortRep(curOpt))
++ state = kShortRepNextStates[state];
++ else
++ state = kLiteralNextStates[state];
++ }
++ else
++ {
++ UInt32 pos;
++ const COptimal *prevOpt;
++ if (curOpt->prev1IsChar && curOpt->prev2)
++ {
++ posPrev = curOpt->posPrev2;
++ pos = curOpt->backPrev2;
++ state = kRepNextStates[state];
++ }
++ else
++ {
++ pos = curOpt->backPrev;
++ if (pos < LZMA_NUM_REPS)
++ state = kRepNextStates[state];
++ else
++ state = kMatchNextStates[state];
++ }
++ prevOpt = &p->opt[posPrev];
++ if (pos < LZMA_NUM_REPS)
++ {
++ UInt32 i;
++ reps[0] = prevOpt->backs[pos];
++ for (i = 1; i <= pos; i++)
++ reps[i] = prevOpt->backs[i - 1];
++ for (; i < LZMA_NUM_REPS; i++)
++ reps[i] = prevOpt->backs[i];
++ }
++ else
++ {
++ UInt32 i;
++ reps[0] = (pos - LZMA_NUM_REPS);
++ for (i = 1; i < LZMA_NUM_REPS; i++)
++ reps[i] = prevOpt->backs[i - 1];
++ }
++ }
++ curOpt->state = (CState)state;
++
++ curOpt->backs[0] = reps[0];
++ curOpt->backs[1] = reps[1];
++ curOpt->backs[2] = reps[2];
++ curOpt->backs[3] = reps[3];
++
++ curPrice = curOpt->price;
++ nextIsChar = False;
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ curByte = *data;
++ matchByte = *(data - (reps[0] + 1));
++
++ posState = (position & p->pbMask);
++
++ curAnd1Price = curPrice + GET_PRICE_0(p->isMatch[state][posState]);
++ {
++ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
++ curAnd1Price +=
++ (!IsCharState(state) ?
++ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
++ LitEnc_GetPrice(probs, curByte, p->ProbPrices));
++ }
++
++ nextOpt = &p->opt[cur + 1];
++
++ if (curAnd1Price < nextOpt->price)
++ {
++ nextOpt->price = curAnd1Price;
++ nextOpt->posPrev = cur;
++ MakeAsChar(nextOpt);
++ nextIsChar = True;
++ }
++
++ matchPrice = curPrice + GET_PRICE_1(p->isMatch[state][posState]);
++ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[state]);
++
++ if (matchByte == curByte && !(nextOpt->posPrev < cur && nextOpt->backPrev == 0))
++ {
++ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, state, posState);
++ if (shortRepPrice <= nextOpt->price)
++ {
++ nextOpt->price = shortRepPrice;
++ nextOpt->posPrev = cur;
++ MakeAsShortRep(nextOpt);
++ nextIsChar = True;
++ }
++ }
++ numAvailFull = p->numAvail;
++ {
++ UInt32 temp = kNumOpts - 1 - cur;
++ if (temp < numAvailFull)
++ numAvailFull = temp;
++ }
++
++ if (numAvailFull < 2)
++ continue;
++ numAvail = (numAvailFull <= p->numFastBytes ? numAvailFull : p->numFastBytes);
++
++ if (!nextIsChar && matchByte != curByte) /* speed optimization */
++ {
++ /* try Literal + rep0 */
++ UInt32 temp;
++ UInt32 lenTest2;
++ const Byte *data2 = data - (reps[0] + 1);
++ UInt32 limit = p->numFastBytes + 1;
++ if (limit > numAvailFull)
++ limit = numAvailFull;
++
++ for (temp = 1; temp < limit && data[temp] == data2[temp]; temp++);
++ lenTest2 = temp - 1;
++ if (lenTest2 >= 2)
++ {
++ UInt32 state2 = kLiteralNextStates[state];
++ UInt32 posStateNext = (position + 1) & p->pbMask;
++ UInt32 nextRepMatchPrice = curAnd1Price +
++ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
++ GET_PRICE_1(p->isRep[state2]);
++ /* for (; lenTest2 >= 2; lenTest2--) */
++ {
++ UInt32 curAndLenPrice;
++ COptimal *opt;
++ UInt32 offset = cur + 1 + lenTest2;
++ while (lenEnd < offset)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
++ opt = &p->opt[offset];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur + 1;
++ opt->backPrev = 0;
++ opt->prev1IsChar = True;
++ opt->prev2 = False;
++ }
++ }
++ }
++ }
++
++ startLen = 2; /* speed optimization */
++ {
++ UInt32 repIndex;
++ for (repIndex = 0; repIndex < LZMA_NUM_REPS; repIndex++)
++ {
++ UInt32 lenTest;
++ UInt32 lenTestTemp;
++ UInt32 price;
++ const Byte *data2 = data - (reps[repIndex] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ continue;
++ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
++ while (lenEnd < cur + lenTest)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ lenTestTemp = lenTest;
++ price = repMatchPrice + GetPureRepPrice(p, repIndex, state, posState);
++ do
++ {
++ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][lenTest - 2];
++ COptimal *opt = &p->opt[cur + lenTest];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur;
++ opt->backPrev = repIndex;
++ opt->prev1IsChar = False;
++ }
++ }
++ while (--lenTest >= 2);
++ lenTest = lenTestTemp;
++
++ if (repIndex == 0)
++ startLen = lenTest + 1;
++
++ /* if (_maxMode) */
++ {
++ UInt32 lenTest2 = lenTest + 1;
++ UInt32 limit = lenTest2 + p->numFastBytes;
++ UInt32 nextRepMatchPrice;
++ if (limit > numAvailFull)
++ limit = numAvailFull;
++ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
++ lenTest2 -= lenTest + 1;
++ if (lenTest2 >= 2)
++ {
++ UInt32 state2 = kRepNextStates[state];
++ UInt32 posStateNext = (position + lenTest) & p->pbMask;
++ UInt32 curAndLenCharPrice =
++ price + p->repLenEnc.prices[posState][lenTest - 2] +
++ GET_PRICE_0(p->isMatch[state2][posStateNext]) +
++ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
++ data[lenTest], data2[lenTest], p->ProbPrices);
++ state2 = kLiteralNextStates[state2];
++ posStateNext = (position + lenTest + 1) & p->pbMask;
++ nextRepMatchPrice = curAndLenCharPrice +
++ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
++ GET_PRICE_1(p->isRep[state2]);
++
++ /* for (; lenTest2 >= 2; lenTest2--) */
++ {
++ UInt32 curAndLenPrice;
++ COptimal *opt;
++ UInt32 offset = cur + lenTest + 1 + lenTest2;
++ while (lenEnd < offset)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
++ opt = &p->opt[offset];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur + lenTest + 1;
++ opt->backPrev = 0;
++ opt->prev1IsChar = True;
++ opt->prev2 = True;
++ opt->posPrev2 = cur;
++ opt->backPrev2 = repIndex;
++ }
++ }
++ }
++ }
++ }
++ }
++ /* for (UInt32 lenTest = 2; lenTest <= newLen; lenTest++) */
++ if (newLen > numAvail)
++ {
++ newLen = numAvail;
++ for (numPairs = 0; newLen > matches[numPairs]; numPairs += 2);
++ matches[numPairs] = newLen;
++ numPairs += 2;
++ }
++ if (newLen >= startLen)
++ {
++ UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[state]);
++ UInt32 offs, curBack, posSlot;
++ UInt32 lenTest;
++ while (lenEnd < cur + newLen)
++ p->opt[++lenEnd].price = kInfinityPrice;
++
++ offs = 0;
++ while (startLen > matches[offs])
++ offs += 2;
++ curBack = matches[offs + 1];
++ GetPosSlot2(curBack, posSlot);
++ for (lenTest = /*2*/ startLen; ; lenTest++)
++ {
++ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][lenTest - LZMA_MATCH_LEN_MIN];
++ UInt32 lenToPosState = GetLenToPosState(lenTest);
++ COptimal *opt;
++ if (curBack < kNumFullDistances)
++ curAndLenPrice += p->distancesPrices[lenToPosState][curBack];
++ else
++ curAndLenPrice += p->posSlotPrices[lenToPosState][posSlot] + p->alignPrices[curBack & kAlignMask];
++
++ opt = &p->opt[cur + lenTest];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur;
++ opt->backPrev = curBack + LZMA_NUM_REPS;
++ opt->prev1IsChar = False;
++ }
++
++ if (/*_maxMode && */lenTest == matches[offs])
++ {
++ /* Try Match + Literal + Rep0 */
++ const Byte *data2 = data - (curBack + 1);
++ UInt32 lenTest2 = lenTest + 1;
++ UInt32 limit = lenTest2 + p->numFastBytes;
++ UInt32 nextRepMatchPrice;
++ if (limit > numAvailFull)
++ limit = numAvailFull;
++ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
++ lenTest2 -= lenTest + 1;
++ if (lenTest2 >= 2)
++ {
++ UInt32 state2 = kMatchNextStates[state];
++ UInt32 posStateNext = (position + lenTest) & p->pbMask;
++ UInt32 curAndLenCharPrice = curAndLenPrice +
++ GET_PRICE_0(p->isMatch[state2][posStateNext]) +
++ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
++ data[lenTest], data2[lenTest], p->ProbPrices);
++ state2 = kLiteralNextStates[state2];
++ posStateNext = (posStateNext + 1) & p->pbMask;
++ nextRepMatchPrice = curAndLenCharPrice +
++ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
++ GET_PRICE_1(p->isRep[state2]);
++
++ /* for (; lenTest2 >= 2; lenTest2--) */
++ {
++ UInt32 offset = cur + lenTest + 1 + lenTest2;
++ UInt32 curAndLenPrice;
++ COptimal *opt;
++ while (lenEnd < offset)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
++ opt = &p->opt[offset];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur + lenTest + 1;
++ opt->backPrev = 0;
++ opt->prev1IsChar = True;
++ opt->prev2 = True;
++ opt->posPrev2 = cur;
++ opt->backPrev2 = curBack + LZMA_NUM_REPS;
++ }
++ }
++ }
++ offs += 2;
++ if (offs == numPairs)
++ break;
++ curBack = matches[offs + 1];
++ if (curBack >= kNumFullDistances)
++ GetPosSlot2(curBack, posSlot);
++ }
++ }
++ }
++ }
++}
++
++#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist))
++
++static UInt32 GetOptimumFast(CLzmaEnc *p, UInt32 *backRes)
++{
++ UInt32 numAvail, mainLen, mainDist, numPairs, repIndex, repLen, i;
++ const Byte *data;
++ const UInt32 *matches;
++
++ if (p->additionalOffset == 0)
++ mainLen = ReadMatchDistances(p, &numPairs);
++ else
++ {
++ mainLen = p->longestMatchLength;
++ numPairs = p->numPairs;
++ }
++
++ numAvail = p->numAvail;
++ *backRes = (UInt32)-1;
++ if (numAvail < 2)
++ return 1;
++ if (numAvail > LZMA_MATCH_LEN_MAX)
++ numAvail = LZMA_MATCH_LEN_MAX;
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++
++ repLen = repIndex = 0;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 len;
++ const Byte *data2 = data - (p->reps[i] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ continue;
++ for (len = 2; len < numAvail && data[len] == data2[len]; len++);
++ if (len >= p->numFastBytes)
++ {
++ *backRes = i;
++ MovePos(p, len - 1);
++ return len;
++ }
++ if (len > repLen)
++ {
++ repIndex = i;
++ repLen = len;
++ }
++ }
++
++ matches = p->matches;
++ if (mainLen >= p->numFastBytes)
++ {
++ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
++ MovePos(p, mainLen - 1);
++ return mainLen;
++ }
++
++ mainDist = 0; /* for GCC */
++ if (mainLen >= 2)
++ {
++ mainDist = matches[numPairs - 1];
++ while (numPairs > 2 && mainLen == matches[numPairs - 4] + 1)
++ {
++ if (!ChangePair(matches[numPairs - 3], mainDist))
++ break;
++ numPairs -= 2;
++ mainLen = matches[numPairs - 2];
++ mainDist = matches[numPairs - 1];
++ }
++ if (mainLen == 2 && mainDist >= 0x80)
++ mainLen = 1;
++ }
++
++ if (repLen >= 2 && (
++ (repLen + 1 >= mainLen) ||
++ (repLen + 2 >= mainLen && mainDist >= (1 << 9)) ||
++ (repLen + 3 >= mainLen && mainDist >= (1 << 15))))
++ {
++ *backRes = repIndex;
++ MovePos(p, repLen - 1);
++ return repLen;
++ }
++
++ if (mainLen < 2 || numAvail <= 2)
++ return 1;
++
++ p->longestMatchLength = ReadMatchDistances(p, &p->numPairs);
++ if (p->longestMatchLength >= 2)
++ {
++ UInt32 newDistance = matches[p->numPairs - 1];
++ if ((p->longestMatchLength >= mainLen && newDistance < mainDist) ||
++ (p->longestMatchLength == mainLen + 1 && !ChangePair(mainDist, newDistance)) ||
++ (p->longestMatchLength > mainLen + 1) ||
++ (p->longestMatchLength + 1 >= mainLen && mainLen >= 3 && ChangePair(newDistance, mainDist)))
++ return 1;
++ }
++
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 len, limit;
++ const Byte *data2 = data - (p->reps[i] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ continue;
++ limit = mainLen - 1;
++ for (len = 2; len < limit && data[len] == data2[len]; len++);
++ if (len >= limit)
++ return 1;
++ }
++ *backRes = mainDist + LZMA_NUM_REPS;
++ MovePos(p, mainLen - 2);
++ return mainLen;
++}
++
++static void WriteEndMarker(CLzmaEnc *p, UInt32 posState)
++{
++ UInt32 len;
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
++ p->state = kMatchNextStates[p->state];
++ len = LZMA_MATCH_LEN_MIN;
++ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
++ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, (1 << kNumPosSlotBits) - 1);
++ RangeEnc_EncodeDirectBits(&p->rc, (((UInt32)1 << 30) - 1) >> kNumAlignBits, 30 - kNumAlignBits);
++ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, kAlignMask);
++}
++
++static SRes CheckErrors(CLzmaEnc *p)
++{
++ if (p->result != SZ_OK)
++ return p->result;
++ if (p->rc.res != SZ_OK)
++ p->result = SZ_ERROR_WRITE;
++ if (p->matchFinderBase.result != SZ_OK)
++ p->result = SZ_ERROR_READ;
++ if (p->result != SZ_OK)
++ p->finished = True;
++ return p->result;
++}
++
++static SRes Flush(CLzmaEnc *p, UInt32 nowPos)
++{
++ /* ReleaseMFStream(); */
++ p->finished = True;
++ if (p->writeEndMark)
++ WriteEndMarker(p, nowPos & p->pbMask);
++ RangeEnc_FlushData(&p->rc);
++ RangeEnc_FlushStream(&p->rc);
++ return CheckErrors(p);
++}
++
++static void FillAlignPrices(CLzmaEnc *p)
++{
++ UInt32 i;
++ for (i = 0; i < kAlignTableSize; i++)
++ p->alignPrices[i] = RcTree_ReverseGetPrice(p->posAlignEncoder, kNumAlignBits, i, p->ProbPrices);
++ p->alignPriceCount = 0;
++}
++
++static void FillDistancesPrices(CLzmaEnc *p)
++{
++ UInt32 tempPrices[kNumFullDistances];
++ UInt32 i, lenToPosState;
++ for (i = kStartPosModelIndex; i < kNumFullDistances; i++)
++ {
++ UInt32 posSlot = GetPosSlot1(i);
++ UInt32 footerBits = ((posSlot >> 1) - 1);
++ UInt32 base = ((2 | (posSlot & 1)) << footerBits);
++ tempPrices[i] = RcTree_ReverseGetPrice(p->posEncoders + base - posSlot - 1, footerBits, i - base, p->ProbPrices);
++ }
++
++ for (lenToPosState = 0; lenToPosState < kNumLenToPosStates; lenToPosState++)
++ {
++ UInt32 posSlot;
++ const CLzmaProb *encoder = p->posSlotEncoder[lenToPosState];
++ UInt32 *posSlotPrices = p->posSlotPrices[lenToPosState];
++ for (posSlot = 0; posSlot < p->distTableSize; posSlot++)
++ posSlotPrices[posSlot] = RcTree_GetPrice(encoder, kNumPosSlotBits, posSlot, p->ProbPrices);
++ for (posSlot = kEndPosModelIndex; posSlot < p->distTableSize; posSlot++)
++ posSlotPrices[posSlot] += ((((posSlot >> 1) - 1) - kNumAlignBits) << kNumBitPriceShiftBits);
++
++ {
++ UInt32 *distancesPrices = p->distancesPrices[lenToPosState];
++ UInt32 i;
++ for (i = 0; i < kStartPosModelIndex; i++)
++ distancesPrices[i] = posSlotPrices[i];
++ for (; i < kNumFullDistances; i++)
++ distancesPrices[i] = posSlotPrices[GetPosSlot1(i)] + tempPrices[i];
++ }
++ }
++ p->matchPriceCount = 0;
++}
++
++void LzmaEnc_Construct(CLzmaEnc *p)
++{
++ RangeEnc_Construct(&p->rc);
++ MatchFinder_Construct(&p->matchFinderBase);
++ #ifndef _7ZIP_ST
++ MatchFinderMt_Construct(&p->matchFinderMt);
++ p->matchFinderMt.MatchFinder = &p->matchFinderBase;
++ #endif
++
++ {
++ CLzmaEncProps props;
++ LzmaEncProps_Init(&props);
++ LzmaEnc_SetProps(p, &props);
++ }
++
++ #ifndef LZMA_LOG_BSR
++ LzmaEnc_FastPosInit(p->g_FastPos);
++ #endif
++
++ LzmaEnc_InitPriceTables(p->ProbPrices);
++ p->litProbs = 0;
++ p->saveState.litProbs = 0;
++}
++
++CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc)
++{
++ void *p;
++ p = alloc->Alloc(alloc, sizeof(CLzmaEnc));
++ if (p != 0)
++ LzmaEnc_Construct((CLzmaEnc *)p);
++ return p;
++}
++
++void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->litProbs);
++ alloc->Free(alloc, p->saveState.litProbs);
++ p->litProbs = 0;
++ p->saveState.litProbs = 0;
++}
++
++void LzmaEnc_Destruct(CLzmaEnc *p, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ #ifndef _7ZIP_ST
++ MatchFinderMt_Destruct(&p->matchFinderMt, allocBig);
++ #endif
++ MatchFinder_Free(&p->matchFinderBase, allocBig);
++ LzmaEnc_FreeLits(p, alloc);
++ RangeEnc_Free(&p->rc, alloc);
++}
++
++void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ LzmaEnc_Destruct((CLzmaEnc *)p, alloc, allocBig);
++ alloc->Free(alloc, p);
++}
++
++static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, Bool useLimits, UInt32 maxPackSize, UInt32 maxUnpackSize)
++{
++ UInt32 nowPos32, startPos32;
++ if (p->needInit)
++ {
++ p->matchFinder.Init(p->matchFinderObj);
++ p->needInit = 0;
++ }
++
++ if (p->finished)
++ return p->result;
++ RINOK(CheckErrors(p));
++
++ nowPos32 = (UInt32)p->nowPos64;
++ startPos32 = nowPos32;
++
++ if (p->nowPos64 == 0)
++ {
++ UInt32 numPairs;
++ Byte curByte;
++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
++ return Flush(p, nowPos32);
++ ReadMatchDistances(p, &numPairs);
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][0], 0);
++ p->state = kLiteralNextStates[p->state];
++ curByte = p->matchFinder.GetIndexByte(p->matchFinderObj, 0 - p->additionalOffset);
++ LitEnc_Encode(&p->rc, p->litProbs, curByte);
++ p->additionalOffset--;
++ nowPos32++;
++ }
++
++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) != 0)
++ for (;;)
++ {
++ UInt32 pos, len, posState;
++
++ if (p->fastMode)
++ len = GetOptimumFast(p, &pos);
++ else
++ len = GetOptimum(p, nowPos32, &pos);
++
++ #ifdef SHOW_STAT2
++ printf("\n pos = %4X, len = %d pos = %d", nowPos32, len, pos);
++ #endif
++
++ posState = nowPos32 & p->pbMask;
++ if (len == 1 && pos == (UInt32)-1)
++ {
++ Byte curByte;
++ CLzmaProb *probs;
++ const Byte *data;
++
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 0);
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
++ curByte = *data;
++ probs = LIT_PROBS(nowPos32, *(data - 1));
++ if (IsCharState(p->state))
++ LitEnc_Encode(&p->rc, probs, curByte);
++ else
++ LitEnc_EncodeMatched(&p->rc, probs, curByte, *(data - p->reps[0] - 1));
++ p->state = kLiteralNextStates[p->state];
++ }
++ else
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
++ if (pos < LZMA_NUM_REPS)
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 1);
++ if (pos == 0)
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 0);
++ RangeEnc_EncodeBit(&p->rc, &p->isRep0Long[p->state][posState], ((len == 1) ? 0 : 1));
++ }
++ else
++ {
++ UInt32 distance = p->reps[pos];
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 1);
++ if (pos == 1)
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 0);
++ else
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 1);
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG2[p->state], pos - 2);
++ if (pos == 3)
++ p->reps[3] = p->reps[2];
++ p->reps[2] = p->reps[1];
++ }
++ p->reps[1] = p->reps[0];
++ p->reps[0] = distance;
++ }
++ if (len == 1)
++ p->state = kShortRepNextStates[p->state];
++ else
++ {
++ LenEnc_Encode2(&p->repLenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
++ p->state = kRepNextStates[p->state];
++ }
++ }
++ else
++ {
++ UInt32 posSlot;
++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
++ p->state = kMatchNextStates[p->state];
++ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
++ pos -= LZMA_NUM_REPS;
++ GetPosSlot(pos, posSlot);
++ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, posSlot);
++
++ if (posSlot >= kStartPosModelIndex)
++ {
++ UInt32 footerBits = ((posSlot >> 1) - 1);
++ UInt32 base = ((2 | (posSlot & 1)) << footerBits);
++ UInt32 posReduced = pos - base;
++
++ if (posSlot < kEndPosModelIndex)
++ RcTree_ReverseEncode(&p->rc, p->posEncoders + base - posSlot - 1, footerBits, posReduced);
++ else
++ {
++ RangeEnc_EncodeDirectBits(&p->rc, posReduced >> kNumAlignBits, footerBits - kNumAlignBits);
++ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, posReduced & kAlignMask);
++ p->alignPriceCount++;
++ }
++ }
++ p->reps[3] = p->reps[2];
++ p->reps[2] = p->reps[1];
++ p->reps[1] = p->reps[0];
++ p->reps[0] = pos;
++ p->matchPriceCount++;
++ }
++ }
++ p->additionalOffset -= len;
++ nowPos32 += len;
++ if (p->additionalOffset == 0)
++ {
++ UInt32 processed;
++ if (!p->fastMode)
++ {
++ if (p->matchPriceCount >= (1 << 7))
++ FillDistancesPrices(p);
++ if (p->alignPriceCount >= kAlignTableSize)
++ FillAlignPrices(p);
++ }
++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
++ break;
++ processed = nowPos32 - startPos32;
++ if (useLimits)
++ {
++ if (processed + kNumOpts + 300 >= maxUnpackSize ||
++ RangeEnc_GetProcessed(&p->rc) + kNumOpts * 2 >= maxPackSize)
++ break;
++ }
++ else if (processed >= (1 << 15))
++ {
++ p->nowPos64 += nowPos32 - startPos32;
++ return CheckErrors(p);
++ }
++ }
++ }
++ p->nowPos64 += nowPos32 - startPos32;
++ return Flush(p, nowPos32);
++}
++
++#define kBigHashDicLimit ((UInt32)1 << 24)
++
++static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ UInt32 beforeSize = kNumOpts;
++ Bool btMode;
++ if (!RangeEnc_Alloc(&p->rc, alloc))
++ return SZ_ERROR_MEM;
++ btMode = (p->matchFinderBase.btMode != 0);
++ #ifndef _7ZIP_ST
++ p->mtMode = (p->multiThread && !p->fastMode && btMode);
++ #endif
++
++ {
++ unsigned lclp = p->lc + p->lp;
++ if (p->litProbs == 0 || p->saveState.litProbs == 0 || p->lclp != lclp)
++ {
++ LzmaEnc_FreeLits(p, alloc);
++ p->litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
++ p->saveState.litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
++ if (p->litProbs == 0 || p->saveState.litProbs == 0)
++ {
++ LzmaEnc_FreeLits(p, alloc);
++ return SZ_ERROR_MEM;
++ }
++ p->lclp = lclp;
++ }
++ }
++
++ p->matchFinderBase.bigHash = (p->dictSize > kBigHashDicLimit);
++
++ if (beforeSize + p->dictSize < keepWindowSize)
++ beforeSize = keepWindowSize - p->dictSize;
++
++ #ifndef _7ZIP_ST
++ if (p->mtMode)
++ {
++ RINOK(MatchFinderMt_Create(&p->matchFinderMt, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig));
++ p->matchFinderObj = &p->matchFinderMt;
++ MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder);
++ }
++ else
++ #endif
++ {
++ if (!MatchFinder_Create(&p->matchFinderBase, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig))
++ return SZ_ERROR_MEM;
++ p->matchFinderObj = &p->matchFinderBase;
++ MatchFinder_CreateVTable(&p->matchFinderBase, &p->matchFinder);
++ }
++ return SZ_OK;
++}
++
++void LzmaEnc_Init(CLzmaEnc *p)
++{
++ UInt32 i;
++ p->state = 0;
++ for (i = 0 ; i < LZMA_NUM_REPS; i++)
++ p->reps[i] = 0;
++
++ RangeEnc_Init(&p->rc);
++
++
++ for (i = 0; i < kNumStates; i++)
++ {
++ UInt32 j;
++ for (j = 0; j < LZMA_NUM_PB_STATES_MAX; j++)
++ {
++ p->isMatch[i][j] = kProbInitValue;
++ p->isRep0Long[i][j] = kProbInitValue;
++ }
++ p->isRep[i] = kProbInitValue;
++ p->isRepG0[i] = kProbInitValue;
++ p->isRepG1[i] = kProbInitValue;
++ p->isRepG2[i] = kProbInitValue;
++ }
++
++ {
++ UInt32 num = 0x300 << (p->lp + p->lc);
++ for (i = 0; i < num; i++)
++ p->litProbs[i] = kProbInitValue;
++ }
++
++ {
++ for (i = 0; i < kNumLenToPosStates; i++)
++ {
++ CLzmaProb *probs = p->posSlotEncoder[i];
++ UInt32 j;
++ for (j = 0; j < (1 << kNumPosSlotBits); j++)
++ probs[j] = kProbInitValue;
++ }
++ }
++ {
++ for (i = 0; i < kNumFullDistances - kEndPosModelIndex; i++)
++ p->posEncoders[i] = kProbInitValue;
++ }
++
++ LenEnc_Init(&p->lenEnc.p);
++ LenEnc_Init(&p->repLenEnc.p);
++
++ for (i = 0; i < (1 << kNumAlignBits); i++)
++ p->posAlignEncoder[i] = kProbInitValue;
++
++ p->optimumEndIndex = 0;
++ p->optimumCurrentIndex = 0;
++ p->additionalOffset = 0;
++
++ p->pbMask = (1 << p->pb) - 1;
++ p->lpMask = (1 << p->lp) - 1;
++}
++
++void LzmaEnc_InitPrices(CLzmaEnc *p)
++{
++ if (!p->fastMode)
++ {
++ FillDistancesPrices(p);
++ FillAlignPrices(p);
++ }
++
++ p->lenEnc.tableSize =
++ p->repLenEnc.tableSize =
++ p->numFastBytes + 1 - LZMA_MATCH_LEN_MIN;
++ LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, p->ProbPrices);
++ LenPriceEnc_UpdateTables(&p->repLenEnc, 1 << p->pb, p->ProbPrices);
++}
++
++static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ UInt32 i;
++ for (i = 0; i < (UInt32)kDicLogSizeMaxCompress; i++)
++ if (p->dictSize <= ((UInt32)1 << i))
++ break;
++ p->distTableSize = i * 2;
++
++ p->finished = False;
++ p->result = SZ_OK;
++ RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig));
++ LzmaEnc_Init(p);
++ LzmaEnc_InitPrices(p);
++ p->nowPos64 = 0;
++ return SZ_OK;
++}
++
++static SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream,
++ ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ p->matchFinderBase.stream = inStream;
++ p->needInit = 1;
++ p->rc.outStream = outStream;
++ return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig);
++}
++
++SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp,
++ ISeqInStream *inStream, UInt32 keepWindowSize,
++ ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ p->matchFinderBase.stream = inStream;
++ p->needInit = 1;
++ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
++}
++
++static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen)
++{
++ p->matchFinderBase.directInput = 1;
++ p->matchFinderBase.bufferBase = (Byte *)src;
++ p->matchFinderBase.directInputRem = srcLen;
++}
++
++SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
++ UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ LzmaEnc_SetInputBuf(p, src, srcLen);
++ p->needInit = 1;
++
++ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
++}
++
++void LzmaEnc_Finish(CLzmaEncHandle pp)
++{
++ #ifndef _7ZIP_ST
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ if (p->mtMode)
++ MatchFinderMt_ReleaseStream(&p->matchFinderMt);
++ #else
++ pp = pp;
++ #endif
++}
++
++typedef struct
++{
++ ISeqOutStream funcTable;
++ Byte *data;
++ SizeT rem;
++ Bool overflow;
++} CSeqOutStreamBuf;
++
++static size_t MyWrite(void *pp, const void *data, size_t size)
++{
++ CSeqOutStreamBuf *p = (CSeqOutStreamBuf *)pp;
++ if (p->rem < size)
++ {
++ size = p->rem;
++ p->overflow = True;
++ }
++ memcpy(p->data, data, size);
++ p->rem -= size;
++ p->data += size;
++ return size;
++}
++
++
++UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp)
++{
++ const CLzmaEnc *p = (CLzmaEnc *)pp;
++ return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
++}
++
++const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp)
++{
++ const CLzmaEnc *p = (CLzmaEnc *)pp;
++ return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
++}
++
++SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit,
++ Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ UInt64 nowPos64;
++ SRes res;
++ CSeqOutStreamBuf outStream;
++
++ outStream.funcTable.Write = MyWrite;
++ outStream.data = dest;
++ outStream.rem = *destLen;
++ outStream.overflow = False;
++
++ p->writeEndMark = False;
++ p->finished = False;
++ p->result = SZ_OK;
++
++ if (reInit)
++ LzmaEnc_Init(p);
++ LzmaEnc_InitPrices(p);
++ nowPos64 = p->nowPos64;
++ RangeEnc_Init(&p->rc);
++ p->rc.outStream = &outStream.funcTable;
++
++ res = LzmaEnc_CodeOneBlock(p, True, desiredPackSize, *unpackSize);
++
++ *unpackSize = (UInt32)(p->nowPos64 - nowPos64);
++ *destLen -= outStream.rem;
++ if (outStream.overflow)
++ return SZ_ERROR_OUTPUT_EOF;
++
++ return res;
++}
++
++static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress)
++{
++ SRes res = SZ_OK;
++
++ #ifndef _7ZIP_ST
++ Byte allocaDummy[0x300];
++ int i = 0;
++ for (i = 0; i < 16; i++)
++ allocaDummy[i] = (Byte)i;
++ #endif
++
++ for (;;)
++ {
++ res = LzmaEnc_CodeOneBlock(p, False, 0, 0);
++ if (res != SZ_OK || p->finished != 0)
++ break;
++ if (progress != 0)
++ {
++ res = progress->Progress(progress, p->nowPos64, RangeEnc_GetProcessed(&p->rc));
++ if (res != SZ_OK)
++ {
++ res = SZ_ERROR_PROGRESS;
++ break;
++ }
++ }
++ }
++ LzmaEnc_Finish(p);
++ return res;
++}
++
++SRes LzmaEnc_Encode(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress,
++ ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ RINOK(LzmaEnc_Prepare(pp, outStream, inStream, alloc, allocBig));
++ return LzmaEnc_Encode2((CLzmaEnc *)pp, progress);
++}
++
++SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ int i;
++ UInt32 dictSize = p->dictSize;
++ if (*size < LZMA_PROPS_SIZE)
++ return SZ_ERROR_PARAM;
++ *size = LZMA_PROPS_SIZE;
++ props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc);
++
++ for (i = 11; i <= 30; i++)
++ {
++ if (dictSize <= ((UInt32)2 << i))
++ {
++ dictSize = (2 << i);
++ break;
++ }
++ if (dictSize <= ((UInt32)3 << i))
++ {
++ dictSize = (3 << i);
++ break;
++ }
++ }
++
++ for (i = 0; i < 4; i++)
++ props[1 + i] = (Byte)(dictSize >> (8 * i));
++ return SZ_OK;
++}
++
++SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ SRes res;
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++
++ CSeqOutStreamBuf outStream;
++
++ LzmaEnc_SetInputBuf(p, src, srcLen);
++
++ outStream.funcTable.Write = MyWrite;
++ outStream.data = dest;
++ outStream.rem = *destLen;
++ outStream.overflow = False;
++
++ p->writeEndMark = writeEndMark;
++
++ p->rc.outStream = &outStream.funcTable;
++ res = LzmaEnc_MemPrepare(pp, src, srcLen, 0, alloc, allocBig);
++ if (res == SZ_OK)
++ res = LzmaEnc_Encode2(p, progress);
++
++ *destLen -= outStream.rem;
++ if (outStream.overflow)
++ return SZ_ERROR_OUTPUT_EOF;
++ return res;
++}
++
++SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)LzmaEnc_Create(alloc);
++ SRes res;
++ if (p == 0)
++ return SZ_ERROR_MEM;
++
++ res = LzmaEnc_SetProps(p, props);
++ if (res == SZ_OK)
++ {
++ res = LzmaEnc_WriteProperties(p, propsEncoded, propsSize);
++ if (res == SZ_OK)
++ res = LzmaEnc_MemEncode(p, dest, destLen, src, srcLen,
++ writeEndMark, progress, alloc, allocBig);
++ }
++
++ LzmaEnc_Destroy(p, alloc, allocBig);
++ return res;
++}
+--- /dev/null
++++ b/lib/lzma/Makefile
+@@ -0,0 +1,7 @@
++lzma_compress-objs := LzFind.o LzmaEnc.o
++lzma_decompress-objs := LzmaDec.o
++
++obj-$(CONFIG_LZMA_COMPRESS) += lzma_compress.o
++obj-$(CONFIG_LZMA_DECOMPRESS) += lzma_decompress.o
++
++EXTRA_CFLAGS += -Iinclude/linux -Iinclude/linux/lzma -include types.h
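For reference, LzmaEnc_WriteProperties() above packs lc/lp/pb into a single byte and rounds the dictionary size up to the nearest 2^n or 3*2^n before storing it little-endian. The following stand-alone user-space sketch (not part of the patch; function and variable names are made up for illustration) reproduces that 5-byte header encoding:

    #include <stdint.h>
    #include <stdio.h>

    /* Build the 5-byte LZMA properties header the same way
     * LzmaEnc_WriteProperties() does: one packed byte for lc/lp/pb,
     * then the rounded dictionary size in little-endian order. */
    static void lzma_write_props(uint8_t props[5],
                                 unsigned lc, unsigned lp, unsigned pb,
                                 uint32_t dict_size)
    {
        int i;

        props[0] = (uint8_t)((pb * 5 + lp) * 9 + lc);

        /* Round up to 2^i or 3*2^i, mirroring the encoder's loop. */
        for (i = 11; i <= 30; i++) {
            if (dict_size <= ((uint32_t)2 << i)) {
                dict_size = (uint32_t)2 << i;
                break;
            }
            if (dict_size <= ((uint32_t)3 << i)) {
                dict_size = (uint32_t)3 << i;
                break;
            }
        }

        for (i = 0; i < 4; i++)
            props[1 + i] = (uint8_t)(dict_size >> (8 * i));
    }

    int main(void)
    {
        uint8_t p[5];

        lzma_write_props(p, 3, 0, 2, (uint32_t)1 << 20); /* defaults, 1 MiB dict */
        printf("%02x %02x %02x %02x %02x\n", p[0], p[1], p[2], p[3], p[4]);
        return 0;
    }

With the default lc=3, lp=0, pb=2 and a 1 MiB dictionary this prints 5d 00 00 10 00, the usual LZMA header bytes.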
diff --git a/target/linux/generic/patches-3.3/511-debloat_lzma.patch b/target/linux/generic/patches-3.3/511-debloat_lzma.patch
new file mode 100644
index 000000000..1e41661eb
--- /dev/null
+++ b/target/linux/generic/patches-3.3/511-debloat_lzma.patch
@@ -0,0 +1,485 @@
+--- a/include/linux/lzma/LzmaDec.h
++++ b/include/linux/lzma/LzmaDec.h
+@@ -31,14 +31,6 @@ typedef struct _CLzmaProps
+ UInt32 dicSize;
+ } CLzmaProps;
+
+-/* LzmaProps_Decode - decodes properties
+-Returns:
+- SZ_OK
+- SZ_ERROR_UNSUPPORTED - Unsupported properties
+-*/
+-
+-SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size);
+-
+
+ /* ---------- LZMA Decoder state ---------- */
+
+@@ -70,8 +62,6 @@ typedef struct
+
+ #define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; }
+
+-void LzmaDec_Init(CLzmaDec *p);
+-
+ /* There are two types of LZMA streams:
+ 0) Stream with end mark. That end mark adds about 6 bytes to compressed size.
+ 1) Stream without end mark. You must know exact uncompressed size to decompress such stream. */
+@@ -108,97 +98,6 @@ typedef enum
+
+ /* ELzmaStatus is used only as output value for function call */
+
+-
+-/* ---------- Interfaces ---------- */
+-
+-/* There are 3 levels of interfaces:
+- 1) Dictionary Interface
+- 2) Buffer Interface
+- 3) One Call Interface
+- You can select any of these interfaces, but don't mix functions from different
+- groups for same object. */
+-
+-
+-/* There are two variants to allocate state for Dictionary Interface:
+- 1) LzmaDec_Allocate / LzmaDec_Free
+- 2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs
+- You can use variant 2, if you set dictionary buffer manually.
+- For Buffer Interface you must always use variant 1.
+-
+-LzmaDec_Allocate* can return:
+- SZ_OK
+- SZ_ERROR_MEM - Memory allocation error
+- SZ_ERROR_UNSUPPORTED - Unsupported properties
+-*/
+-
+-SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc);
+-void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc);
+-
+-SRes LzmaDec_Allocate(CLzmaDec *state, const Byte *prop, unsigned propsSize, ISzAlloc *alloc);
+-void LzmaDec_Free(CLzmaDec *state, ISzAlloc *alloc);
+-
+-/* ---------- Dictionary Interface ---------- */
+-
+-/* You can use it, if you want to eliminate the overhead for data copying from
+- dictionary to some other external buffer.
+- You must work with CLzmaDec variables directly in this interface.
+-
+- STEPS:
+- LzmaDec_Constr()
+- LzmaDec_Allocate()
+- for (each new stream)
+- {
+- LzmaDec_Init()
+- while (it needs more decompression)
+- {
+- LzmaDec_DecodeToDic()
+- use data from CLzmaDec::dic and update CLzmaDec::dicPos
+- }
+- }
+- LzmaDec_Free()
+-*/
+-
+-/* LzmaDec_DecodeToDic
+-
+- The decoding to internal dictionary buffer (CLzmaDec::dic).
+- You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!!
+-
+-finishMode:
+- It has meaning only if the decoding reaches output limit (dicLimit).
+- LZMA_FINISH_ANY - Decode just dicLimit bytes.
+- LZMA_FINISH_END - Stream must be finished after dicLimit.
+-
+-Returns:
+- SZ_OK
+- status:
+- LZMA_STATUS_FINISHED_WITH_MARK
+- LZMA_STATUS_NOT_FINISHED
+- LZMA_STATUS_NEEDS_MORE_INPUT
+- LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
+- SZ_ERROR_DATA - Data error
+-*/
+-
+-SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit,
+- const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
+-
+-
+-/* ---------- Buffer Interface ---------- */
+-
+-/* It's zlib-like interface.
+- See LzmaDec_DecodeToDic description for information about STEPS and return results,
+- but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need
+- to work with CLzmaDec variables manually.
+-
+-finishMode:
+- It has meaning only if the decoding reaches output limit (*destLen).
+- LZMA_FINISH_ANY - Decode just destLen bytes.
+- LZMA_FINISH_END - Stream must be finished after (*destLen).
+-*/
+-
+-SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen,
+- const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
+-
+-
+ /* ---------- One Call Interface ---------- */
+
+ /* LzmaDecode
+--- a/lib/lzma/LzmaDec.c
++++ b/lib/lzma/LzmaDec.c
+@@ -682,7 +682,7 @@ static void LzmaDec_InitRc(CLzmaDec *p,
+ p->needFlush = 0;
+ }
+
+-void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState)
++static void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState)
+ {
+ p->needFlush = 1;
+ p->remainLen = 0;
+@@ -698,7 +698,7 @@ void LzmaDec_InitDicAndState(CLzmaDec *p
+ p->needInitState = 1;
+ }
+
+-void LzmaDec_Init(CLzmaDec *p)
++static void LzmaDec_Init(CLzmaDec *p)
+ {
+ p->dicPos = 0;
+ LzmaDec_InitDicAndState(p, True, True);
+@@ -716,7 +716,7 @@ static void LzmaDec_InitStateReal(CLzmaD
+ p->needInitState = 0;
+ }
+
+-SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,
++static SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,
+ ELzmaFinishMode finishMode, ELzmaStatus *status)
+ {
+ SizeT inSize = *srcLen;
+@@ -837,7 +837,7 @@ SRes LzmaDec_DecodeToDic(CLzmaDec *p, Si
+ return (p->code == 0) ? SZ_OK : SZ_ERROR_DATA;
+ }
+
+-SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
++static __maybe_unused SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
+ {
+ SizeT outSize = *destLen;
+ SizeT inSize = *srcLen;
+@@ -877,7 +877,7 @@ SRes LzmaDec_DecodeToBuf(CLzmaDec *p, By
+ }
+ }
+
+-void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc)
++static void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc)
+ {
+ alloc->Free(alloc, p->probs);
+ p->probs = 0;
+@@ -889,13 +889,13 @@ static void LzmaDec_FreeDict(CLzmaDec *p
+ p->dic = 0;
+ }
+
+-void LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc)
++static void __maybe_unused LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc)
+ {
+ LzmaDec_FreeProbs(p, alloc);
+ LzmaDec_FreeDict(p, alloc);
+ }
+
+-SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size)
++static SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size)
+ {
+ UInt32 dicSize;
+ Byte d;
+@@ -935,7 +935,7 @@ static SRes LzmaDec_AllocateProbs2(CLzma
+ return SZ_OK;
+ }
+
+-SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
++static SRes __maybe_unused LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
+ {
+ CLzmaProps propNew;
+ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
+@@ -944,7 +944,7 @@ SRes LzmaDec_AllocateProbs(CLzmaDec *p,
+ return SZ_OK;
+ }
+
+-SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
++static SRes __maybe_unused LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
+ {
+ CLzmaProps propNew;
+ SizeT dicBufSize;
+--- a/include/linux/lzma/LzmaEnc.h
++++ b/include/linux/lzma/LzmaEnc.h
+@@ -31,9 +31,6 @@ typedef struct _CLzmaEncProps
+ } CLzmaEncProps;
+
+ void LzmaEncProps_Init(CLzmaEncProps *p);
+-void LzmaEncProps_Normalize(CLzmaEncProps *p);
+-UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2);
+-
+
+ /* ---------- CLzmaEncHandle Interface ---------- */
+
+@@ -53,26 +50,9 @@ CLzmaEncHandle LzmaEnc_Create(ISzAlloc *
+ void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig);
+ SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props);
+ SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size);
+-SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream,
+- ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
+ SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
+
+-/* ---------- One Call Interface ---------- */
+-
+-/* LzmaEncode
+-Return code:
+- SZ_OK - OK
+- SZ_ERROR_MEM - Memory allocation error
+- SZ_ERROR_PARAM - Incorrect paramater
+- SZ_ERROR_OUTPUT_EOF - output buffer overflow
+- SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
+-*/
+-
+-SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+- const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
+- ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
+-
+ #ifdef __cplusplus
+ }
+ #endif
+--- a/lib/lzma/LzmaEnc.c
++++ b/lib/lzma/LzmaEnc.c
+@@ -53,7 +53,7 @@ void LzmaEncProps_Init(CLzmaEncProps *p)
+ p->writeEndMark = 0;
+ }
+
+-void LzmaEncProps_Normalize(CLzmaEncProps *p)
++static void LzmaEncProps_Normalize(CLzmaEncProps *p)
+ {
+ int level = p->level;
+ if (level < 0) level = 5;
+@@ -76,7 +76,7 @@ void LzmaEncProps_Normalize(CLzmaEncProp
+ #endif
+ }
+
+-UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
++static UInt32 __maybe_unused LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
+ {
+ CLzmaEncProps props = *props2;
+ LzmaEncProps_Normalize(&props);
+@@ -93,7 +93,7 @@ UInt32 LzmaEncProps_GetDictSize(const CL
+
+ #define BSR2_RET(pos, res) { unsigned long i; _BitScanReverse(&i, (pos)); res = (i + i) + ((pos >> (i - 1)) & 1); }
+
+-UInt32 GetPosSlot1(UInt32 pos)
++static UInt32 GetPosSlot1(UInt32 pos)
+ {
+ UInt32 res;
+ BSR2_RET(pos, res);
+@@ -107,7 +107,7 @@ UInt32 GetPosSlot1(UInt32 pos)
+ #define kNumLogBits (9 + (int)sizeof(size_t) / 2)
+ #define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7)
+
+-void LzmaEnc_FastPosInit(Byte *g_FastPos)
++static void LzmaEnc_FastPosInit(Byte *g_FastPos)
+ {
+ int c = 2, slotFast;
+ g_FastPos[0] = 0;
+@@ -339,7 +339,7 @@ typedef struct
+ CSaveState saveState;
+ } CLzmaEnc;
+
+-void LzmaEnc_SaveState(CLzmaEncHandle pp)
++static void __maybe_unused LzmaEnc_SaveState(CLzmaEncHandle pp)
+ {
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ CSaveState *dest = &p->saveState;
+@@ -365,7 +365,7 @@ void LzmaEnc_SaveState(CLzmaEncHandle pp
+ memcpy(dest->litProbs, p->litProbs, (0x300 << p->lclp) * sizeof(CLzmaProb));
+ }
+
+-void LzmaEnc_RestoreState(CLzmaEncHandle pp)
++static void __maybe_unused LzmaEnc_RestoreState(CLzmaEncHandle pp)
+ {
+ CLzmaEnc *dest = (CLzmaEnc *)pp;
+ const CSaveState *p = &dest->saveState;
+@@ -600,7 +600,7 @@ static void LitEnc_EncodeMatched(CRangeE
+ while (symbol < 0x10000);
+ }
+
+-void LzmaEnc_InitPriceTables(UInt32 *ProbPrices)
++static void LzmaEnc_InitPriceTables(UInt32 *ProbPrices)
+ {
+ UInt32 i;
+ for (i = (1 << kNumMoveReducingBits) / 2; i < kBitModelTotal; i += (1 << kNumMoveReducingBits))
+@@ -1676,7 +1676,7 @@ static void FillDistancesPrices(CLzmaEnc
+ p->matchPriceCount = 0;
+ }
+
+-void LzmaEnc_Construct(CLzmaEnc *p)
++static void LzmaEnc_Construct(CLzmaEnc *p)
+ {
+ RangeEnc_Construct(&p->rc);
+ MatchFinder_Construct(&p->matchFinderBase);
+@@ -1709,7 +1709,7 @@ CLzmaEncHandle LzmaEnc_Create(ISzAlloc *
+ return p;
+ }
+
+-void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc)
++static void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc)
+ {
+ alloc->Free(alloc, p->litProbs);
+ alloc->Free(alloc, p->saveState.litProbs);
+@@ -2074,7 +2074,7 @@ SRes LzmaEnc_MemPrepare(CLzmaEncHandle p
+ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
+ }
+
+-void LzmaEnc_Finish(CLzmaEncHandle pp)
++static void LzmaEnc_Finish(CLzmaEncHandle pp)
+ {
+ #ifndef _7ZIP_ST
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+@@ -2108,7 +2108,7 @@ static size_t MyWrite(void *pp, const vo
+ }
+
+
+-UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp)
++static UInt32 __maybe_unused LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp)
+ {
+ const CLzmaEnc *p = (CLzmaEnc *)pp;
+ return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
+@@ -2120,7 +2120,7 @@ const Byte *LzmaEnc_GetCurBuf(CLzmaEncHa
+ return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
+ }
+
+-SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit,
++static SRes __maybe_unused LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit,
+ Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize)
+ {
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+@@ -2248,7 +2248,7 @@ SRes LzmaEnc_MemEncode(CLzmaEncHandle pp
+ return res;
+ }
+
+-SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++static __maybe_unused SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
+ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
+ {
+--- a/include/linux/lzma/LzFind.h
++++ b/include/linux/lzma/LzFind.h
+@@ -55,11 +55,6 @@ typedef struct _CMatchFinder
+
+ #define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos)
+
+-int MatchFinder_NeedMove(CMatchFinder *p);
+-Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p);
+-void MatchFinder_MoveBlock(CMatchFinder *p);
+-void MatchFinder_ReadIfRequired(CMatchFinder *p);
+-
+ void MatchFinder_Construct(CMatchFinder *p);
+
+ /* Conditions:
+@@ -70,12 +65,6 @@ int MatchFinder_Create(CMatchFinder *p,
+ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
+ ISzAlloc *alloc);
+ void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc);
+-void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems);
+-void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);
+-
+-UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son,
+- UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue,
+- UInt32 *distances, UInt32 maxLen);
+
+ /*
+ Conditions:
+@@ -102,12 +91,6 @@ typedef struct _IMatchFinder
+
+ void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable);
+
+-void MatchFinder_Init(CMatchFinder *p);
+-UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
+-UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
+-void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
+-void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
+-
+ #ifdef __cplusplus
+ }
+ #endif
+--- a/lib/lzma/LzFind.c
++++ b/lib/lzma/LzFind.c
+@@ -42,12 +42,12 @@ static int LzInWindow_Create(CMatchFinde
+ return (p->bufferBase != 0);
+ }
+
+-Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
+-Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; }
++static Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
++static Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; }
+
+-UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; }
++static UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; }
+
+-void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue)
++static void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue)
+ {
+ p->posLimit -= subValue;
+ p->pos -= subValue;
+@@ -268,7 +268,7 @@ static void MatchFinder_SetLimits(CMatch
+ p->posLimit = p->pos + limit;
+ }
+
+-void MatchFinder_Init(CMatchFinder *p)
++static void MatchFinder_Init(CMatchFinder *p)
+ {
+ UInt32 i;
+ for (i = 0; i < p->hashSizeSum; i++)
+@@ -287,7 +287,7 @@ static UInt32 MatchFinder_GetSubValue(CM
+ return (p->pos - p->historySize - 1) & kNormalizeMask;
+ }
+
+-void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems)
++static void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems)
+ {
+ UInt32 i;
+ for (i = 0; i < numItems; i++)
+@@ -350,7 +350,7 @@ static UInt32 * Hc_GetMatchesSpec(UInt32
+ }
+ }
+
+-UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++static UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
+ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
+ UInt32 *distances, UInt32 maxLen)
+ {
+@@ -492,7 +492,7 @@ static UInt32 Bt2_MatchFinder_GetMatches
+ GET_MATCHES_FOOTER(offset, 1)
+ }
+
+-UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++static __maybe_unused UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+ {
+ UInt32 offset;
+ GET_MATCHES_HEADER(3)
+@@ -632,7 +632,7 @@ static UInt32 Hc4_MatchFinder_GetMatches
+ MOVE_POS_RET
+ }
+
+-UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++static __maybe_unused UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+ {
+ UInt32 offset;
+ GET_MATCHES_HEADER(3)
+@@ -657,7 +657,7 @@ static void Bt2_MatchFinder_Skip(CMatchF
+ while (--num != 0);
+ }
+
+-void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++static __maybe_unused void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+ {
+ do
+ {
+@@ -718,7 +718,7 @@ static void Hc4_MatchFinder_Skip(CMatchF
+ while (--num != 0);
+ }
+
+-void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++static __maybe_unused void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+ {
+ do
+ {
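The pattern in 511-debloat_lzma.patch is the same in every hunk: helpers that were exported but only used within one file become static, and those that can end up unreferenced under some configurations are additionally tagged __maybe_unused so the build stays warning-free. A minimal user-space illustration of the idiom (the macro is defined locally here; in the kernel it comes from <linux/compiler.h>):

    #include <stdio.h>

    #define __maybe_unused __attribute__((unused)) /* <linux/compiler.h> in the kernel */

    static int used_helper(int x)
    {
        return x * 2;
    }

    /* Compiles cleanly even when nothing references it. */
    static __maybe_unused int rarely_used_helper(int x)
    {
        return x + 1;
    }

    int main(void)
    {
        printf("%d\n", used_helper(21));
        return 0;
    }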
diff --git a/target/linux/generic/patches-3.3/512-jffs2_eofdetect.patch b/target/linux/generic/patches-3.3/512-jffs2_eofdetect.patch
new file mode 100644
index 000000000..1fac6d1f1
--- /dev/null
+++ b/target/linux/generic/patches-3.3/512-jffs2_eofdetect.patch
@@ -0,0 +1,132 @@
+--- a/fs/jffs2/build.c
++++ b/fs/jffs2/build.c
+@@ -112,6 +112,17 @@ static int jffs2_build_filesystem(struct
+ dbg_fsbuild("scanned flash completely\n");
+ jffs2_dbg_dump_block_lists_nolock(c);
+
++ if (c->flags & (1 << 7)) {
++ printk("%s(): unlocking the mtd device... ", __func__);
++ if (c->mtd->unlock)
++ c->mtd->unlock(c->mtd, 0, c->mtd->size);
++ printk("done.\n");
++
++ printk("%s(): erasing all blocks after the end marker... ", __func__);
++ jffs2_erase_pending_blocks(c, -1);
++ printk("done.\n");
++ }
++
+ dbg_fsbuild("pass 1 starting\n");
+ c->flags |= JFFS2_SB_FLAG_BUILDING;
+ /* Now scan the directory tree, increasing nlink according to every dirent found. */
+--- a/fs/jffs2/scan.c
++++ b/fs/jffs2/scan.c
+@@ -72,7 +72,7 @@ static int file_dirty(struct jffs2_sb_in
+ return ret;
+ if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size)))
+ return ret;
+- /* Turned wasted size into dirty, since we apparently
++ /* Turned wasted size into dirty, since we apparently
+ think it's recoverable now. */
+ jeb->dirty_size += jeb->wasted_size;
+ c->dirty_size += jeb->wasted_size;
+@@ -147,8 +147,11 @@ int jffs2_scan_medium(struct jffs2_sb_in
+ /* reset summary info for next eraseblock scan */
+ jffs2_sum_reset_collected(s);
+
+- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
+- buf_size, s);
++ if (c->flags & (1 << 7))
++ ret = BLK_STATE_ALLFF;
++ else
++ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
++ buf_size, s);
+
+ if (ret < 0)
+ goto out;
+@@ -403,7 +406,7 @@ static int jffs2_scan_xref_node(struct j
+ if (!ref)
+ return -ENOMEM;
+
+- /* BEFORE jffs2_build_xattr_subsystem() called,
++ /* BEFORE jffs2_build_xattr_subsystem() called,
+ * and AFTER xattr_ref is marked as a dead xref,
+ * ref->xid is used to store 32bit xid, xd is not used
+ * ref->ino is used to store 32bit inode-number, ic is not used
+@@ -476,7 +479,7 @@ static int jffs2_scan_eraseblock (struct
+ struct jffs2_sum_marker *sm;
+ void *sumptr = NULL;
+ uint32_t sumlen;
+-
++
+ if (!buf_size) {
+ /* XIP case. Just look, point at the summary if it's there */
+ sm = (void *)buf + c->sector_size - sizeof(*sm);
+@@ -492,9 +495,9 @@ static int jffs2_scan_eraseblock (struct
+ buf_len = sizeof(*sm);
+
+ /* Read as much as we want into the _end_ of the preallocated buffer */
+- err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
++ err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
+ jeb->offset + c->sector_size - buf_len,
+- buf_len);
++ buf_len);
+ if (err)
+ return err;
+
+@@ -513,9 +516,9 @@ static int jffs2_scan_eraseblock (struct
+ }
+ if (buf_len < sumlen) {
+ /* Need to read more so that the entire summary node is present */
+- err = jffs2_fill_scan_buf(c, sumptr,
++ err = jffs2_fill_scan_buf(c, sumptr,
+ jeb->offset + c->sector_size - sumlen,
+- sumlen - buf_len);
++ sumlen - buf_len);
+ if (err)
+ return err;
+ }
+@@ -528,7 +531,7 @@ static int jffs2_scan_eraseblock (struct
+
+ if (buf_size && sumlen > buf_size)
+ kfree(sumptr);
+- /* If it returns with a real error, bail.
++ /* If it returns with a real error, bail.
+ If it returns positive, that's a block classification
+ (i.e. BLK_STATE_xxx) so return that too.
+ If it returns zero, fall through to full scan. */
+@@ -549,6 +552,17 @@ static int jffs2_scan_eraseblock (struct
+ return err;
+ }
+
++ if ((buf[0] == 0xde) &&
++ (buf[1] == 0xad) &&
++ (buf[2] == 0xc0) &&
++ (buf[3] == 0xde)) {
++ /* end of filesystem. erase everything after this point */
++ printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset);
++ c->flags |= (1 << 7);
++
++ return BLK_STATE_ALLFF;
++ }
++
+ /* We temporarily use 'ofs' as a pointer into the buffer/jeb */
+ ofs = 0;
+ max_ofs = EMPTY_SCAN_SIZE(c->sector_size);
+@@ -674,7 +688,7 @@ scan_more:
+ scan_end = buf_len;
+ goto more_empty;
+ }
+-
++
+ /* See how much more there is to read in this eraseblock... */
+ buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+ if (!buf_len) {
+@@ -910,7 +924,7 @@ scan_more:
+
+ D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
+ jeb->offset,jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size));
+-
++
+ /* mark_node_obsolete can add to wasted !! */
+ if (jeb->wasted_size) {
+ jeb->dirty_size += jeb->wasted_size;
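512-jffs2_eofdetect.patch short-circuits the medium scan once an eraseblock starts with the byte sequence de ad c0 de: the block is reported as all-0xFF, a flag (bit 7 of c->flags) is set, and the remaining blocks are unlocked and queued for erase during the build pass. A small host-side sketch of just the marker test (buffer contents are invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* The marker test from jffs2_scan_eraseblock(), pulled out on its own. */
    static int is_eof_marker(const uint8_t *buf)
    {
        static const uint8_t marker[4] = { 0xde, 0xad, 0xc0, 0xde };

        return memcmp(buf, marker, sizeof(marker)) == 0;
    }

    int main(void)
    {
        uint8_t block[16] = { 0xde, 0xad, 0xc0, 0xde }; /* invented data */

        printf("end-of-filesystem marker %s\n",
               is_eof_marker(block) ? "found" : "not found");
        return 0;
    }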
diff --git a/target/linux/generic/patches-3.3/520-squashfs_update_xz_comp_opts.patch b/target/linux/generic/patches-3.3/520-squashfs_update_xz_comp_opts.patch
new file mode 100644
index 000000000..523b89f9a
--- /dev/null
+++ b/target/linux/generic/patches-3.3/520-squashfs_update_xz_comp_opts.patch
@@ -0,0 +1,25 @@
+From f31b7c0efa255dd17a5f584022a319387f09b0d8 Mon Sep 17 00:00:00 2001
+From: Jonas Gorski <jonas.gorski@gmail.com>
+Date: Tue, 12 Apr 2011 19:55:41 +0200
+Subject: [PATCH] squashfs: update xz compressor options struct.
+
+Update the xz compressor options struct to match the squashfs userspace
+one.
+---
+ fs/squashfs/xz_wrapper.c | 4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
+
+--- a/fs/squashfs/xz_wrapper.c
++++ b/fs/squashfs/xz_wrapper.c
+@@ -39,8 +39,10 @@ struct squashfs_xz {
+ };
+
+ struct comp_opts {
+- __le32 dictionary_size;
+ __le32 flags;
++ __le16 bit_opts;
++ __le16 fb;
++ __le32 dictionary_size;
+ };
+
+ static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff,
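The squashfs change is purely about on-disk layout: the kernel-side comp_opts struct has to match what current squashfs userspace writes into the XZ compressor options block, so dictionary_size moves behind the new 16-bit fields. A throwaway host-side sketch (plain fixed-width types stand in for __le32/__le16) that prints the resulting field offsets:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mock of the updated layout; the real struct uses __le32/__le16. */
    struct comp_opts {
        uint32_t flags;
        uint16_t bit_opts;
        uint16_t fb;
        uint32_t dictionary_size;
    };

    int main(void)
    {
        printf("flags           @ %zu\n", offsetof(struct comp_opts, flags));
        printf("bit_opts        @ %zu\n", offsetof(struct comp_opts, bit_opts));
        printf("fb              @ %zu\n", offsetof(struct comp_opts, fb));
        printf("dictionary_size @ %zu\n", offsetof(struct comp_opts, dictionary_size));
        printf("size            = %zu bytes\n", sizeof(struct comp_opts));
        return 0;
    }

It prints offsets 0, 4, 6 and 8 and a total size of 12 bytes, the layout the updated struct describes.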
diff --git a/target/linux/generic/patches-3.3/540-crypto-xz-decompression-support.patch b/target/linux/generic/patches-3.3/540-crypto-xz-decompression-support.patch
new file mode 100644
index 000000000..3b3700730
--- /dev/null
+++ b/target/linux/generic/patches-3.3/540-crypto-xz-decompression-support.patch
@@ -0,0 +1,146 @@
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -924,6 +924,13 @@ config CRYPTO_LZO
+ help
+ This is the LZO algorithm.
+
++config CRYPTO_XZ
++ tristate "XZ compression algorithm"
++ select CRYPTO_ALGAPI
++ select XZ_DEC
++ help
++ This is the XZ algorithm. Only decompression is supported for now.
++
+ comment "Random Number Generation"
+
+ config CRYPTO_ANSI_CPRNG
+--- a/crypto/Makefile
++++ b/crypto/Makefile
+@@ -82,6 +82,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += mich
+ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
+ obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
+ obj-$(CONFIG_CRYPTO_LZO) += lzo.o
++obj-$(CONFIG_CRYPTO_XZ) += xz.o
+ obj-$(CONFIG_CRYPTO_RNG2) += rng.o
+ obj-$(CONFIG_CRYPTO_RNG2) += krng.o
+ obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
+--- /dev/null
++++ b/crypto/xz.c
+@@ -0,0 +1,117 @@
++/*
++ * Cryptographic API.
++ *
++ * XZ decompression support.
++ *
++ * Copyright (c) 2012 Gabor Juhos <juhosg@openwrt.org>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ *
++ */
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/crypto.h>
++#include <linux/xz.h>
++#include <linux/interrupt.h>
++#include <linux/mm.h>
++#include <linux/net.h>
++
++struct xz_comp_ctx {
++ struct xz_dec *decomp_state;
++ struct xz_buf decomp_buf;
++};
++
++static int crypto_xz_decomp_init(struct xz_comp_ctx *ctx)
++{
++ ctx->decomp_state = xz_dec_init(XZ_SINGLE, 0);
++ if (!ctx->decomp_state)
++ return -ENOMEM;
++
++ return 0;
++}
++
++static void crypto_xz_decomp_exit(struct xz_comp_ctx *ctx)
++{
++ xz_dec_end(ctx->decomp_state);
++}
++
++static int crypto_xz_init(struct crypto_tfm *tfm)
++{
++ struct xz_comp_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ return crypto_xz_decomp_init(ctx);
++}
++
++static void crypto_xz_exit(struct crypto_tfm *tfm)
++{
++ struct xz_comp_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ crypto_xz_decomp_exit(ctx);
++}
++
++static int crypto_xz_compress(struct crypto_tfm *tfm, const u8 *src,
++ unsigned int slen, u8 *dst, unsigned int *dlen)
++{
++ return -EOPNOTSUPP;
++}
++
++static int crypto_xz_decompress(struct crypto_tfm *tfm, const u8 *src,
++ unsigned int slen, u8 *dst, unsigned int *dlen)
++{
++ struct xz_comp_ctx *dctx = crypto_tfm_ctx(tfm);
++ struct xz_buf *xz_buf = &dctx->decomp_buf;
++ int ret;
++
++ memset(xz_buf, '\0', sizeof(struct xz_buf));
++
++ xz_buf->in = (u8 *) src;
++ xz_buf->in_pos = 0;
++ xz_buf->in_size = slen;
++ xz_buf->out = (u8 *) dst;
++ xz_buf->out_pos = 0;
++ xz_buf->out_size = *dlen;
++
++ ret = xz_dec_run(dctx->decomp_state, xz_buf);
++ if (ret != XZ_STREAM_END) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ *dlen = xz_buf->out_pos;
++ ret = 0;
++
++out:
++ return ret;
++}
++
++static struct crypto_alg crypto_xz_alg = {
++ .cra_name = "xz",
++ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
++ .cra_ctxsize = sizeof(struct xz_comp_ctx),
++ .cra_module = THIS_MODULE,
++ .cra_list = LIST_HEAD_INIT(crypto_xz_alg.cra_list),
++ .cra_init = crypto_xz_init,
++ .cra_exit = crypto_xz_exit,
++ .cra_u = { .compress = {
++ .coa_compress = crypto_xz_compress,
++ .coa_decompress = crypto_xz_decompress } }
++};
++
++static int __init crypto_xz_mod_init(void)
++{
++ return crypto_register_alg(&crypto_xz_alg);
++}
++
++static void __exit crypto_xz_mod_exit(void)
++{
++ crypto_unregister_alg(&crypto_xz_alg);
++}
++
++module_init(crypto_xz_mod_init);
++module_exit(crypto_xz_mod_exit);
++
++MODULE_LICENSE("GPL v2");
++MODULE_DESCRIPTION("Crypto XZ decompression support");
++MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
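Once crypto/xz.c is registered, any kernel consumer can reach it through the generic compression interface; UBIFS (next patch) does exactly that via its capi_name. A module-style sketch of such a consumer follows — it is not taken from the patch, the buffers are placeholders rather than a real XZ stream, and only decompression is expected to succeed:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/crypto.h>
    #include <linux/err.h>

    /* Hypothetical consumer of the "xz" compression alg registered above. */
    static int __init xz_demo_init(void)
    {
        struct crypto_comp *tfm;
        static u8 src[64], dst[256];       /* placeholder buffers */
        unsigned int dlen = sizeof(dst);
        int ret;

        tfm = crypto_alloc_comp("xz", 0, 0);
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        ret = crypto_comp_decompress(tfm, src, sizeof(src), dst, &dlen);
        pr_info("xz demo: decompress returned %d, %u bytes out\n", ret, dlen);

        crypto_free_comp(tfm);
        return 0;
    }

    static void __exit xz_demo_exit(void)
    {
    }

    module_init(xz_demo_init);
    module_exit(xz_demo_exit);
    MODULE_LICENSE("GPL");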
diff --git a/target/linux/generic/patches-3.3/541-ubifs-xz-decompression-support.patch b/target/linux/generic/patches-3.3/541-ubifs-xz-decompression-support.patch
new file mode 100644
index 000000000..3c917c164
--- /dev/null
+++ b/target/linux/generic/patches-3.3/541-ubifs-xz-decompression-support.patch
@@ -0,0 +1,94 @@
+--- a/fs/ubifs/Kconfig
++++ b/fs/ubifs/Kconfig
+@@ -5,8 +5,10 @@ config UBIFS_FS
+ select CRYPTO if UBIFS_FS_ADVANCED_COMPR
+ select CRYPTO if UBIFS_FS_LZO
+ select CRYPTO if UBIFS_FS_ZLIB
++ select CRYPTO if UBIFS_FS_XZ
+ select CRYPTO_LZO if UBIFS_FS_LZO
+ select CRYPTO_DEFLATE if UBIFS_FS_ZLIB
++ select CRYPTO_XZ if UBIFS_FS_XZ
+ depends on MTD_UBI
+ help
+ UBIFS is a file system for flash devices which works on top of UBI.
+@@ -42,6 +44,14 @@ config UBIFS_FS_ZLIB
+ help
+ Zlib compresses better than LZO but it is slower. Say 'Y' if unsure.
+
++config UBIFS_FS_XZ
++ bool "XZ decompression support" if UBIFS_FS_ADVANCED_COMPR
++ depends on UBIFS_FS
++ default y
++ help
++ XZ compresses better than ZLIB but it is slower.
++ Say 'Y' if unsure.
++
+ # Debugging-related stuff
+ config UBIFS_FS_DEBUG
+ bool "Enable debugging support"
+--- a/fs/ubifs/compress.c
++++ b/fs/ubifs/compress.c
+@@ -71,6 +71,24 @@ static struct ubifs_compressor zlib_comp
+ };
+ #endif
+
++#ifdef CONFIG_UBIFS_FS_XZ
++static DEFINE_MUTEX(xz_enc_mutex);
++static DEFINE_MUTEX(xz_dec_mutex);
++
++static struct ubifs_compressor xz_compr = {
++ .compr_type = UBIFS_COMPR_XZ,
++ .comp_mutex = &xz_enc_mutex,
++ .decomp_mutex = &xz_dec_mutex,
++ .name = "xz",
++ .capi_name = "xz",
++};
++#else
++static struct ubifs_compressor xz_compr = {
++ .compr_type = UBIFS_COMPR_XZ,
++ .name = "xz",
++};
++#endif
++
+ /* All UBIFS compressors */
+ struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
+
+@@ -233,9 +251,15 @@ int __init ubifs_compressors_init(void)
+ if (err)
+ goto out_lzo;
+
++ err = compr_init(&xz_compr);
++ if (err)
++ goto out_zlib;
++
+ ubifs_compressors[UBIFS_COMPR_NONE] = &none_compr;
+ return 0;
+
++out_zlib:
++ compr_exit(&zlib_compr);
+ out_lzo:
+ compr_exit(&lzo_compr);
+ return err;
+@@ -248,4 +272,5 @@ void ubifs_compressors_exit(void)
+ {
+ compr_exit(&lzo_compr);
+ compr_exit(&zlib_compr);
++ compr_exit(&xz_compr);
+ }
+--- a/fs/ubifs/ubifs-media.h
++++ b/fs/ubifs/ubifs-media.h
+@@ -332,12 +332,14 @@ enum {
+ * UBIFS_COMPR_NONE: no compression
+ * UBIFS_COMPR_LZO: LZO compression
+ * UBIFS_COMPR_ZLIB: ZLIB compression
++ * UBIFS_COMPR_XZ: XZ compression
+ * UBIFS_COMPR_TYPES_CNT: count of supported compression types
+ */
+ enum {
+ UBIFS_COMPR_NONE,
+ UBIFS_COMPR_LZO,
+ UBIFS_COMPR_ZLIB,
++ UBIFS_COMPR_XZ,
+ UBIFS_COMPR_TYPES_CNT,
+ };
+
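The ubifs_compressors_init() hunk extends the usual init/unwind ladder: each compressor is initialised in order and, when one fails, everything initialised so far is torn down in reverse. A stand-alone sketch of that ladder with stubbed-out compr_init/compr_exit (the real functions take struct ubifs_compressor pointers, not names):

    #include <stdio.h>

    /* Stubs standing in for the UBIFS compr_init()/compr_exit() calls. */
    static int compr_init(const char *name)
    {
        printf("init %s\n", name);
        return 0;
    }

    static void compr_exit(const char *name)
    {
        printf("exit %s\n", name);
    }

    static int compressors_init(void)
    {
        int err;

        err = compr_init("lzo");
        if (err)
            return err;

        err = compr_init("zlib");
        if (err)
            goto out_lzo;

        err = compr_init("xz");
        if (err)
            goto out_zlib;

        return 0;

    out_zlib:
        compr_exit("zlib");
    out_lzo:
        compr_exit("lzo");
        return err;
    }

    int main(void)
    {
        return compressors_init();
    }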
diff --git a/target/linux/generic/patches-3.3/550-ubifs-symlink-xattr-support.patch b/target/linux/generic/patches-3.3/550-ubifs-symlink-xattr-support.patch
new file mode 100644
index 000000000..b0d818e8d
--- /dev/null
+++ b/target/linux/generic/patches-3.3/550-ubifs-symlink-xattr-support.patch
@@ -0,0 +1,67 @@
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -1575,6 +1575,12 @@ const struct inode_operations ubifs_syml
+ .follow_link = ubifs_follow_link,
+ .setattr = ubifs_setattr,
+ .getattr = ubifs_getattr,
++#ifdef CONFIG_UBIFS_FS_XATTR
++ .setxattr = ubifs_setxattr,
++ .getxattr = ubifs_getxattr,
++ .listxattr = ubifs_listxattr,
++ .removexattr = ubifs_removexattr,
++#endif
+ };
+
+ const struct file_operations ubifs_file_operations = {
+--- a/fs/ubifs/journal.c
++++ b/fs/ubifs/journal.c
+@@ -553,7 +553,8 @@ int ubifs_jnl_update(struct ubifs_info *
+
+ dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu",
+ inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino);
+- ubifs_assert(dir_ui->data_len == 0);
++ if (!xent)
++ ubifs_assert(dir_ui->data_len == 0);
+ ubifs_assert(mutex_is_locked(&dir_ui->ui_mutex));
+
+ dlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
+@@ -573,6 +574,13 @@ int ubifs_jnl_update(struct ubifs_info *
+ aligned_dlen = ALIGN(dlen, 8);
+ aligned_ilen = ALIGN(ilen, 8);
+ len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
++ if (xent) {
++ /*
++ * Make sure to account for dir_ui->data_len in
++ * length calculation in case there is extended attribute.
++ */
++ len += dir_ui->data_len;
++ }
+ dent = kmalloc(len, GFP_NOFS);
+ if (!dent)
+ return -ENOMEM;
+@@ -649,7 +657,8 @@ int ubifs_jnl_update(struct ubifs_info *
+
+ ino_key_init(c, &ino_key, dir->i_ino);
+ ino_offs += aligned_ilen;
+- err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, UBIFS_INO_NODE_SZ);
++ err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
++ UBIFS_INO_NODE_SZ + dir_ui->data_len);
+ if (err)
+ goto out_ro;
+
+--- a/fs/ubifs/xattr.c
++++ b/fs/ubifs/xattr.c
+@@ -209,12 +209,12 @@ static int change_xattr(struct ubifs_inf
+ goto out_free;
+ }
+ inode->i_size = ui->ui_size = size;
+- ui->data_len = size;
+
+ mutex_lock(&host_ui->ui_mutex);
+ host->i_ctime = ubifs_current_time(host);
+ host_ui->xattr_size -= CALC_XATTR_BYTES(ui->data_len);
+ host_ui->xattr_size += CALC_XATTR_BYTES(size);
++ ui->data_len = size;
+
+ /*
+ * It is important to write the host inode after the xattr inode
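From user space, the effect of 550-ubifs-symlink-xattr-support.patch is simply that extended attributes can live on the symlink inode itself. A small example using the l*xattr calls, which operate on the link rather than its target (the path is made up, and the trusted.* namespace requires CAP_SYS_ADMIN):

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/xattr.h>

    int main(void)
    {
        const char *link = "/tmp/example-symlink";  /* made-up path */
        const char *name = "trusted.comment";       /* needs CAP_SYS_ADMIN */
        const char *val  = "stored on the symlink inode";
        char buf[64];
        ssize_t n;

        /* lsetxattr/lgetxattr act on the link itself, not its target. */
        if (lsetxattr(link, name, val, strlen(val), 0) != 0) {
            perror("lsetxattr");
            return 1;
        }

        n = lgetxattr(link, name, buf, sizeof(buf) - 1);
        if (n < 0) {
            perror("lgetxattr");
            return 1;
        }
        buf[n] = '\0';
        printf("%s = %s\n", name, buf);
        return 0;
    }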
diff --git a/target/linux/generic/patches-3.3/600-netfilter_layer7_2.22.patch b/target/linux/generic/patches-3.3/600-netfilter_layer7_2.22.patch
new file mode 100644
index 000000000..f3055590a
--- /dev/null
+++ b/target/linux/generic/patches-3.3/600-netfilter_layer7_2.22.patch
@@ -0,0 +1,2142 @@
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -1053,6 +1053,27 @@ config NETFILTER_XT_MATCH_STATE
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config NETFILTER_XT_MATCH_LAYER7
++ tristate '"layer7" match support'
++ depends on NETFILTER_XTABLES
++ depends on EXPERIMENTAL && (IP_NF_CONNTRACK || NF_CONNTRACK)
++ depends on NETFILTER_ADVANCED
++ help
++ Say Y if you want to be able to classify connections (and their
++ packets) based on regular expression matching of their application
++ layer data. This is one way to classify applications such as
++ peer-to-peer filesharing systems that do not always use the same
++ port.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config NETFILTER_XT_MATCH_LAYER7_DEBUG
++ bool 'Layer 7 debugging output'
++ depends on NETFILTER_XT_MATCH_LAYER7
++ help
++ Say Y to get lots of debugging output.
++
++
+ config NETFILTER_XT_MATCH_STATISTIC
+ tristate '"statistic" match support'
+ depends on NETFILTER_ADVANCED
+--- a/net/netfilter/Makefile
++++ b/net/netfilter/Makefile
+@@ -105,6 +105,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT)
+ obj-$(CONFIG_NETFILTER_XT_MATCH_SCTP) += xt_sctp.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_SOCKET) += xt_socket.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_STATE) += xt_state.o
++obj-$(CONFIG_NETFILTER_XT_MATCH_LAYER7) += xt_layer7.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_STATISTIC) += xt_statistic.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
+--- /dev/null
++++ b/net/netfilter/xt_layer7.c
+@@ -0,0 +1,666 @@
++/*
++ Kernel module to match application layer (OSI layer 7) data in connections.
++
++ http://l7-filter.sf.net
++
++ (C) 2003-2009 Matthew Strait and Ethan Sommer.
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License
++ as published by the Free Software Foundation; either version
++ 2 of the License, or (at your option) any later version.
++ http://www.gnu.org/licenses/gpl.txt
++
++ Based on ipt_string.c (C) 2000 Emmanuel Roger <winfield@freegates.be>,
++ xt_helper.c (C) 2002 Harald Welte and cls_layer7.c (C) 2003 Matthew Strait,
++ Ethan Sommer, Justin Levandoski.
++*/
++
++#include <linux/spinlock.h>
++#include <linux/version.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter.h>
++#include <net/netfilter/nf_conntrack.h>
++#include <net/netfilter/nf_conntrack_core.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++#include <net/netfilter/nf_conntrack_extend.h>
++#include <net/netfilter/nf_conntrack_acct.h>
++#endif
++#include <linux/netfilter/x_tables.h>
++#include <linux/netfilter/xt_layer7.h>
++#include <linux/ctype.h>
++#include <linux/proc_fs.h>
++
++#include "regexp/regexp.c"
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Matthew Strait <quadong@users.sf.net>, Ethan Sommer <sommere@users.sf.net>");
++MODULE_DESCRIPTION("iptables application layer match module");
++MODULE_ALIAS("ipt_layer7");
++MODULE_VERSION("2.21");
++
++static int maxdatalen = 2048; // this is the default
++module_param(maxdatalen, int, 0444);
++MODULE_PARM_DESC(maxdatalen, "maximum bytes of data looked at by l7-filter");
++#ifdef CONFIG_NETFILTER_XT_MATCH_LAYER7_DEBUG
++ #define DPRINTK(format,args...) printk(format,##args)
++#else
++ #define DPRINTK(format,args...)
++#endif
++
++/* Number of packets whose data we look at.
++This can be modified through /proc/net/layer7_numpackets */
++static int num_packets = 10;
++
++static struct pattern_cache {
++ char * regex_string;
++ regexp * pattern;
++ struct pattern_cache * next;
++} * first_pattern_cache = NULL;
++
++DEFINE_SPINLOCK(l7_lock);
++
++static int total_acct_packets(struct nf_conn *ct)
++{
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 26)
++ BUG_ON(ct == NULL);
++ return (ct->counters[IP_CT_DIR_ORIGINAL].packets + ct->counters[IP_CT_DIR_REPLY].packets);
++#else
++ struct nf_conn_counter *acct;
++
++ BUG_ON(ct == NULL);
++ acct = nf_conn_acct_find(ct);
++ if (!acct)
++ return 0;
++ return (atomic64_read(&acct[IP_CT_DIR_ORIGINAL].packets) + atomic64_read(&acct[IP_CT_DIR_REPLY].packets));
++#endif
++}
++
++#ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG
++/* Converts an unfriendly string into a friendly one by
++replacing unprintables with periods and all whitespace with " ". */
++static char * friendly_print(unsigned char * s)
++{
++ char * f = kmalloc(strlen(s) + 1, GFP_ATOMIC);
++ int i;
++
++ if(!f) {
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in "
++ "friendly_print, bailing.\n");
++ return NULL;
++ }
++
++ for(i = 0; i < strlen(s); i++){
++ if(isprint(s[i]) && s[i] < 128) f[i] = s[i];
++ else if(isspace(s[i])) f[i] = ' ';
++ else f[i] = '.';
++ }
++ f[i] = '\0';
++ return f;
++}
++
++static char dec2hex(int i)
++{
++ switch (i) {
++ case 0 ... 9:
++ return (i + '0');
++ break;
++ case 10 ... 15:
++ return (i - 10 + 'a');
++ break;
++ default:
++ if (net_ratelimit())
++ printk("layer7: Problem in dec2hex\n");
++ return '\0';
++ }
++}
++
++static char * hex_print(unsigned char * s)
++{
++ char * g = kmalloc(strlen(s)*3 + 1, GFP_ATOMIC);
++ int i;
++
++ if(!g) {
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in hex_print, "
++ "bailing.\n");
++ return NULL;
++ }
++
++ for(i = 0; i < strlen(s); i++) {
++ g[i*3 ] = dec2hex(s[i]/16);
++ g[i*3 + 1] = dec2hex(s[i]%16);
++ g[i*3 + 2] = ' ';
++ }
++ g[i*3] = '\0';
++
++ return g;
++}
++#endif // DEBUG
++
++/* Use instead of regcomp. As we expect to be seeing the same regexps over and
++over again, it makes sense to cache the results. */
++static regexp * compile_and_cache(const char * regex_string,
++ const char * protocol)
++{
++ struct pattern_cache * node = first_pattern_cache;
++ struct pattern_cache * last_pattern_cache = first_pattern_cache;
++ struct pattern_cache * tmp;
++ unsigned int len;
++
++ while (node != NULL) {
++ if (!strcmp(node->regex_string, regex_string))
++ return node->pattern;
++
++ last_pattern_cache = node;/* points at the last non-NULL node */
++ node = node->next;
++ }
++
++ /* If we reach the end of the list, then we have not yet cached
++ the pattern for this regex. Let's do that now.
++ Be paranoid about running out of memory to avoid list corruption. */
++ tmp = kmalloc(sizeof(struct pattern_cache), GFP_ATOMIC);
++
++ if(!tmp) {
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in "
++ "compile_and_cache, bailing.\n");
++ return NULL;
++ }
++
++ tmp->regex_string = kmalloc(strlen(regex_string) + 1, GFP_ATOMIC);
++ tmp->pattern = kmalloc(sizeof(struct regexp), GFP_ATOMIC);
++ tmp->next = NULL;
++
++ if(!tmp->regex_string || !tmp->pattern) {
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in "
++ "compile_and_cache, bailing.\n");
++ kfree(tmp->regex_string);
++ kfree(tmp->pattern);
++ kfree(tmp);
++ return NULL;
++ }
++
++ /* Ok. The new node is all ready now. */
++ node = tmp;
++
++ if(first_pattern_cache == NULL) /* list is empty */
++ first_pattern_cache = node; /* make node the beginning */
++ else
++ last_pattern_cache->next = node; /* attach node to the end */
++
++ /* copy the string and compile the regex */
++ len = strlen(regex_string);
++ DPRINTK("About to compile this: \"%s\"\n", regex_string);
++ node->pattern = regcomp((char *)regex_string, &len);
++ if ( !node->pattern ) {
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: Error compiling regexp "
++ "\"%s\" (%s)\n",
++ regex_string, protocol);
++ /* pattern is now cached as NULL, so we won't try again. */
++ }
++
++ strcpy(node->regex_string, regex_string);
++ return node->pattern;
++}
++
++static int can_handle(const struct sk_buff *skb)
++{
++ if(!ip_hdr(skb)) /* not IP */
++ return 0;
++ if(ip_hdr(skb)->protocol != IPPROTO_TCP &&
++ ip_hdr(skb)->protocol != IPPROTO_UDP &&
++ ip_hdr(skb)->protocol != IPPROTO_ICMP)
++ return 0;
++ return 1;
++}
++
++/* Returns the offset into skb->data at which the application data starts */
++static int app_data_offset(const struct sk_buff *skb)
++{
++ /* In case we are ported somewhere (ebtables?) where ip_hdr(skb)
++ isn't set, this can be gotten from 4*(skb->data[0] & 0x0f) as well. */
++ int ip_hl = 4*ip_hdr(skb)->ihl;
++
++ if( ip_hdr(skb)->protocol == IPPROTO_TCP ) {
++ /* 12 == offset into TCP header for the header length field.
++ Can't get this with skb->h.th->doff because the tcphdr
++ struct doesn't get set when routing (this is confirmed to be
++ true in Netfilter as well as QoS.) */
++ int tcp_hl = 4*(skb->data[ip_hl + 12] >> 4);
++
++ return ip_hl + tcp_hl;
++ } else if( ip_hdr(skb)->protocol == IPPROTO_UDP ) {
++ return ip_hl + 8; /* UDP header is always 8 bytes */
++ } else if( ip_hdr(skb)->protocol == IPPROTO_ICMP ) {
++ return ip_hl + 8; /* ICMP header is 8 bytes */
++ } else {
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: tried to handle unknown "
++ "protocol!\n");
++ return ip_hl + 8; /* something reasonable */
++ }
++}
++
++/* handles whether there's a match when we aren't appending data anymore */
++static int match_no_append(struct nf_conn * conntrack,
++ struct nf_conn * master_conntrack,
++ enum ip_conntrack_info ctinfo,
++ enum ip_conntrack_info master_ctinfo,
++ const struct xt_layer7_info * info)
++{
++ /* If we're in here, throw the app data away */
++ if(master_conntrack->layer7.app_data != NULL) {
++
++ #ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG
++ if(!master_conntrack->layer7.app_proto) {
++ char * f =
++ friendly_print(master_conntrack->layer7.app_data);
++ char * g =
++ hex_print(master_conntrack->layer7.app_data);
++ DPRINTK("\nl7-filter gave up after %d bytes "
++ "(%d packets):\n%s\n",
++ strlen(f), total_acct_packets(master_conntrack), f);
++ kfree(f);
++ DPRINTK("In hex: %s\n", g);
++ kfree(g);
++ }
++ #endif
++
++ kfree(master_conntrack->layer7.app_data);
++ master_conntrack->layer7.app_data = NULL; /* don't free again */
++ }
++
++ if(master_conntrack->layer7.app_proto){
++ /* Here child connections set their .app_proto (for /proc) */
++ if(!conntrack->layer7.app_proto) {
++ conntrack->layer7.app_proto =
++ kmalloc(strlen(master_conntrack->layer7.app_proto)+1,
++ GFP_ATOMIC);
++ if(!conntrack->layer7.app_proto){
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory "
++ "in match_no_append, "
++ "bailing.\n");
++ return 1;
++ }
++ strcpy(conntrack->layer7.app_proto,
++ master_conntrack->layer7.app_proto);
++ }
++
++ return (!strcmp(master_conntrack->layer7.app_proto,
++ info->protocol));
++ }
++ else {
++ /* If not classified, set to "unknown" to distinguish from
++ connections that are still being tested. */
++ master_conntrack->layer7.app_proto =
++ kmalloc(strlen("unknown")+1, GFP_ATOMIC);
++ if(!master_conntrack->layer7.app_proto){
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in "
++ "match_no_append, bailing.\n");
++ return 1;
++ }
++ strcpy(master_conntrack->layer7.app_proto, "unknown");
++ return 0;
++ }
++}
++
++/* add the new app data to the conntrack. Return number of bytes added. */
++static int add_data(struct nf_conn * master_conntrack,
++ char * app_data, int appdatalen)
++{
++ int length = 0, i;
++ int oldlength = master_conntrack->layer7.app_data_len;
++
++ /* This is a fix for a race condition by Deti Fliegl. However, I'm not
++ clear on whether the race condition exists or whether this really
++ fixes it. I might just be being dense... Anyway, if it's not really
++ a fix, all it does is waste a very small amount of time. */
++ if(!master_conntrack->layer7.app_data) return 0;
++
++ /* Strip nulls. Make everything lower case (our regex lib doesn't
++ do case insensitivity). Add it to the end of the current data. */
++ for(i = 0; i < maxdatalen-oldlength-1 &&
++ i < appdatalen; i++) {
++ if(app_data[i] != '\0') {
++ /* the kernel version of tolower mungs 'upper ascii' */
++ master_conntrack->layer7.app_data[length+oldlength] =
++ isascii(app_data[i])?
++ tolower(app_data[i]) : app_data[i];
++ length++;
++ }
++ }
++
++ master_conntrack->layer7.app_data[length+oldlength] = '\0';
++ master_conntrack->layer7.app_data_len = length + oldlength;
++
++ return length;
++}
++
++/* taken from drivers/video/modedb.c */
++static int my_atoi(const char *s)
++{
++ int val = 0;
++
++ for (;; s++) {
++ switch (*s) {
++ case '0'...'9':
++ val = 10*val+(*s-'0');
++ break;
++ default:
++ return val;
++ }
++ }
++}
++
++/* write out num_packets to userland. */
++static int layer7_read_proc(char* page, char ** start, off_t off, int count,
++ int* eof, void * data)
++{
++ if(num_packets > 99 && net_ratelimit())
++ printk(KERN_ERR "layer7: NOT REACHED. num_packets too big\n");
++
++ page[0] = num_packets/10 + '0';
++ page[1] = num_packets%10 + '0';
++ page[2] = '\n';
++ page[3] = '\0';
++
++ *eof=1;
++
++ return 3;
++}
++
++/* Read in num_packets from userland */
++static int layer7_write_proc(struct file* file, const char* buffer,
++ unsigned long count, void *data)
++{
++ char * foo = kmalloc(count, GFP_ATOMIC);
++
++ if(!foo){
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory, bailing. "
++ "num_packets unchanged.\n");
++ return count;
++ }
++
++ if(copy_from_user(foo, buffer, count)) {
++ return -EFAULT;
++ }
++
++
++ num_packets = my_atoi(foo);
++ kfree (foo);
++
++ /* This has an arbitrary limit to make the math easier. I'm lazy.
++ But anyway, 99 is a LOT! If you want more, you're doing it wrong! */
++ if(num_packets > 99) {
++ printk(KERN_WARNING "layer7: num_packets can't be > 99.\n");
++ num_packets = 99;
++ } else if(num_packets < 1) {
++ printk(KERN_WARNING "layer7: num_packets can't be < 1.\n");
++ num_packets = 1;
++ }
++
++ return count;
++}
++
++static bool
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++match(const struct sk_buff *skbin, const struct xt_match_param *par)
++#else
++match(const struct sk_buff *skbin,
++ const struct net_device *in,
++ const struct net_device *out,
++ const struct xt_match *match,
++ const void *matchinfo,
++ int offset,
++ unsigned int protoff,
++ bool *hotdrop)
++#endif
++{
++ /* sidestep const without getting a compiler warning... */
++ struct sk_buff * skb = (struct sk_buff *)skbin;
++
++ const struct xt_layer7_info * info =
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++ par->matchinfo;
++ #else
++ matchinfo;
++ #endif
++
++ enum ip_conntrack_info master_ctinfo, ctinfo;
++ struct nf_conn *master_conntrack, *conntrack;
++ unsigned char * app_data;
++ unsigned int pattern_result, appdatalen;
++ regexp * comppattern;
++
++ /* Be paranoid/incompetent - lock the entire match function. */
++ spin_lock_bh(&l7_lock);
++
++ if(!can_handle(skb)){
++ DPRINTK("layer7: This is some protocol I can't handle.\n");
++ spin_unlock_bh(&l7_lock);
++ return info->invert;
++ }
++
++ /* Treat parent & all its children together as one connection, except
++ for the purpose of setting conntrack->layer7.app_proto in the actual
++ connection. This makes /proc/net/ip_conntrack more satisfying. */
++ if(!(conntrack = nf_ct_get(skb, &ctinfo)) ||
++ !(master_conntrack=nf_ct_get(skb,&master_ctinfo))){
++ DPRINTK("layer7: couldn't get conntrack.\n");
++ spin_unlock_bh(&l7_lock);
++ return info->invert;
++ }
++
++ /* Try to get a master conntrack (and its master etc) for FTP, etc. */
++ while (master_ct(master_conntrack) != NULL)
++ master_conntrack = master_ct(master_conntrack);
++
++ /* if we've classified it or seen too many packets */
++ if(total_acct_packets(master_conntrack) > num_packets ||
++ master_conntrack->layer7.app_proto) {
++
++ pattern_result = match_no_append(conntrack, master_conntrack,
++ ctinfo, master_ctinfo, info);
++
++ /* skb->cb[0] == seen. Don't do things twice if there are
++ multiple l7 rules. I'm not sure that using cb for this purpose
++ is correct, even though it says "put your private variables
++ there". But it doesn't look like it is being used for anything
++ else in the skbs that make it here. */
++ skb->cb[0] = 1; /* marking it seen here's probably irrelevant */
++
++ spin_unlock_bh(&l7_lock);
++ return (pattern_result ^ info->invert);
++ }
++
++ if(skb_is_nonlinear(skb)){
++ if(skb_linearize(skb) != 0){
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: failed to linearize "
++ "packet, bailing.\n");
++ spin_unlock_bh(&l7_lock);
++ return info->invert;
++ }
++ }
++
++ /* now that the skb is linearized, it's safe to set these. */
++ app_data = skb->data + app_data_offset(skb);
++ appdatalen = skb_tail_pointer(skb) - app_data;
++
++ /* the return value gets checked later, when we're ready to use it */
++ comppattern = compile_and_cache(info->pattern, info->protocol);
++
++ /* On the first packet of a connection, allocate space for app data */
++ if(total_acct_packets(master_conntrack) == 1 && !skb->cb[0] &&
++ !master_conntrack->layer7.app_data){
++ master_conntrack->layer7.app_data =
++ kmalloc(maxdatalen, GFP_ATOMIC);
++ if(!master_conntrack->layer7.app_data){
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in "
++ "match, bailing.\n");
++ spin_unlock_bh(&l7_lock);
++ return info->invert;
++ }
++
++ master_conntrack->layer7.app_data[0] = '\0';
++ }
++
++ /* Can be here, but unallocated, if numpackets is increased near
++ the beginning of a connection */
++ if(master_conntrack->layer7.app_data == NULL){
++ spin_unlock_bh(&l7_lock);
++ return info->invert; /* unmatched */
++ }
++
++ if(!skb->cb[0]){
++ int newbytes;
++ newbytes = add_data(master_conntrack, app_data, appdatalen);
++
++ if(newbytes == 0) { /* didn't add any data */
++ skb->cb[0] = 1;
++ /* Didn't match before, not going to match now */
++ spin_unlock_bh(&l7_lock);
++ return info->invert;
++ }
++ }
++
++ /* If looking for "unknown", then never match. "Unknown" means that
++ we've given up; we're still trying with these packets. */
++ if(!strcmp(info->protocol, "unknown")) {
++ pattern_result = 0;
++ /* If looking for "unset", then always match. "Unset" means that we
++ haven't yet classified the connection. */
++ } else if(!strcmp(info->protocol, "unset")) {
++ pattern_result = 2;
++ DPRINTK("layer7: matched unset: not yet classified "
++ "(%d/%d packets)\n",
++ total_acct_packets(master_conntrack), num_packets);
++ /* If the regexp failed to compile, don't bother running it */
++ } else if(comppattern &&
++ regexec(comppattern, master_conntrack->layer7.app_data)){
++ DPRINTK("layer7: matched %s\n", info->protocol);
++ pattern_result = 1;
++ } else pattern_result = 0;
++
++ if(pattern_result == 1) {
++ master_conntrack->layer7.app_proto =
++ kmalloc(strlen(info->protocol)+1, GFP_ATOMIC);
++ if(!master_conntrack->layer7.app_proto){
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in "
++ "match, bailing.\n");
++ spin_unlock_bh(&l7_lock);
++ return (pattern_result ^ info->invert);
++ }
++ strcpy(master_conntrack->layer7.app_proto, info->protocol);
++ } else if(pattern_result > 1) { /* cleanup from "unset" */
++ pattern_result = 1;
++ }
++
++ /* mark the packet seen */
++ skb->cb[0] = 1;
++
++ spin_unlock_bh(&l7_lock);
++ return (pattern_result ^ info->invert);
++}
++
++// load nf_conntrack_ipv4
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++static bool check(const struct xt_mtchk_param *par)
++{
++ if (nf_ct_l3proto_try_module_get(par->match->family) < 0) {
++ printk(KERN_WARNING "can't load conntrack support for "
++ "proto=%d\n", par->match->family);
++#else
++static bool check(const char *tablename, const void *inf,
++ const struct xt_match *match, void *matchinfo,
++ unsigned int hook_mask)
++{
++ if (nf_ct_l3proto_try_module_get(match->family) < 0) {
++ printk(KERN_WARNING "can't load conntrack support for "
++ "proto=%d\n", match->family);
++#endif
++ return 0;
++ }
++ return 1;
++}
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++ static void destroy(const struct xt_mtdtor_param *par)
++ {
++ nf_ct_l3proto_module_put(par->match->family);
++ }
++#else
++ static void destroy(const struct xt_match *match, void *matchinfo)
++ {
++ nf_ct_l3proto_module_put(match->family);
++ }
++#endif
++
++static struct xt_match xt_layer7_match[] __read_mostly = {
++{
++ .name = "layer7",
++ .family = AF_INET,
++ .checkentry = check,
++ .match = match,
++ .destroy = destroy,
++ .matchsize = sizeof(struct xt_layer7_info),
++ .me = THIS_MODULE
++}
++};
++
++static void layer7_cleanup_proc(void)
++{
++ remove_proc_entry("layer7_numpackets", init_net.proc_net);
++}
++
++/* register the proc file */
++static void layer7_init_proc(void)
++{
++ struct proc_dir_entry* entry;
++ entry = create_proc_entry("layer7_numpackets", 0644, init_net.proc_net);
++ entry->read_proc = layer7_read_proc;
++ entry->write_proc = layer7_write_proc;
++}
++
++static int __init xt_layer7_init(void)
++{
++ need_conntrack();
++
++ layer7_init_proc();
++ if(maxdatalen < 1) {
++ printk(KERN_WARNING "layer7: maxdatalen can't be < 1, "
++ "using 1\n");
++ maxdatalen = 1;
++ }
++ /* This is not a hard limit. It's just here to prevent people from
++ bringing their slow machines to a grinding halt. */
++ else if(maxdatalen > 65536) {
++ printk(KERN_WARNING "layer7: maxdatalen can't be > 65536, "
++ "using 65536\n");
++ maxdatalen = 65536;
++ }
++ return xt_register_matches(xt_layer7_match,
++ ARRAY_SIZE(xt_layer7_match));
++}
++
++static void __exit xt_layer7_fini(void)
++{
++ layer7_cleanup_proc();
++ xt_unregister_matches(xt_layer7_match, ARRAY_SIZE(xt_layer7_match));
++}
++
++module_init(xt_layer7_init);
++module_exit(xt_layer7_fini);
+--- /dev/null
++++ b/net/netfilter/regexp/regexp.c
+@@ -0,0 +1,1197 @@
++/*
++ * regcomp and regexec -- regsub and regerror are elsewhere
++ * @(#)regexp.c 1.3 of 18 April 87
++ *
++ * Copyright (c) 1986 by University of Toronto.
++ * Written by Henry Spencer. Not derived from licensed software.
++ *
++ * Permission is granted to anyone to use this software for any
++ * purpose on any computer system, and to redistribute it freely,
++ * subject to the following restrictions:
++ *
++ * 1. The author is not responsible for the consequences of use of
++ * this software, no matter how awful, even if they arise
++ * from defects in it.
++ *
++ * 2. The origin of this software must not be misrepresented, either
++ * by explicit claim or by omission.
++ *
++ * 3. Altered versions must be plainly marked as such, and must not
++ * be misrepresented as being the original software.
++ *
++ * Beware that some of this code is subtly aware of the way operator
++ * precedence is structured in regular expressions. Serious changes in
++ * regular-expression syntax might require a total rethink.
++ *
++ * This code was modified by Ethan Sommer to work within the kernel
++ * (it now uses kmalloc etc..)
++ *
++ * Modified slightly by Matthew Strait to use more modern C.
++ */
++
++#include "regexp.h"
++#include "regmagic.h"
++
++/* added by ethan and matt. Lets it work in both kernel and user space.
++(So iptables can use it, for instance.) Yea, it goes both ways... */
++#if __KERNEL__
++ #define malloc(foo) kmalloc(foo,GFP_ATOMIC)
++#else
++ #define printk(format,args...) printf(format,##args)
++#endif
++
++void regerror(char * s)
++{
++ printk("<3>Regexp: %s\n", s);
++ /* NOTREACHED */
++}
++
++/*
++ * The "internal use only" fields in regexp.h are present to pass info from
++ * compile to execute that permits the execute phase to run lots faster on
++ * simple cases. They are:
++ *
++ * regstart char that must begin a match; '\0' if none obvious
++ * reganch is the match anchored (at beginning-of-line only)?
++ * regmust string (pointer into program) that match must include, or NULL
++ * regmlen length of regmust string
++ *
++ * Regstart and reganch permit very fast decisions on suitable starting points
++ * for a match, cutting down the work a lot. Regmust permits fast rejection
++ * of lines that cannot possibly match. The regmust tests are costly enough
++ * that regcomp() supplies a regmust only if the r.e. contains something
++ * potentially expensive (at present, the only such thing detected is * or +
++ * at the start of the r.e., which can involve a lot of backup). Regmlen is
++ * supplied because the test in regexec() needs it and regcomp() is computing
++ * it anyway.
++ */
++
++/*
++ * Structure for regexp "program". This is essentially a linear encoding
++ * of a nondeterministic finite-state machine (aka syntax charts or
++ * "railroad normal form" in parsing technology). Each node is an opcode
++ * plus a "next" pointer, possibly plus an operand. "Next" pointers of
++ * all nodes except BRANCH implement concatenation; a "next" pointer with
++ * a BRANCH on both ends of it is connecting two alternatives. (Here we
++ * have one of the subtle syntax dependencies: an individual BRANCH (as
++ * opposed to a collection of them) is never concatenated with anything
++ * because of operator precedence.) The operand of some types of node is
++ * a literal string; for others, it is a node leading into a sub-FSM. In
++ * particular, the operand of a BRANCH node is the first node of the branch.
++ * (NB this is *not* a tree structure: the tail of the branch connects
++ * to the thing following the set of BRANCHes.) The opcodes are:
++ */
++
++/* definition number opnd? meaning */
++#define END 0 /* no End of program. */
++#define BOL 1 /* no Match "" at beginning of line. */
++#define EOL 2 /* no Match "" at end of line. */
++#define ANY 3 /* no Match any one character. */
++#define ANYOF 4 /* str Match any character in this string. */
++#define ANYBUT 5 /* str Match any character not in this string. */
++#define BRANCH 6 /* node Match this alternative, or the next... */
++#define BACK 7 /* no Match "", "next" ptr points backward. */
++#define EXACTLY 8 /* str Match this string. */
++#define NOTHING 9 /* no Match empty string. */
++#define STAR 10 /* node Match this (simple) thing 0 or more times. */
++#define PLUS 11 /* node Match this (simple) thing 1 or more times. */
++#define OPEN 20 /* no Mark this point in input as start of #n. */
++ /* OPEN+1 is number 1, etc. */
++#define CLOSE 30 /* no Analogous to OPEN. */
++
++/*
++ * Opcode notes:
++ *
++ * BRANCH The set of branches constituting a single choice are hooked
++ * together with their "next" pointers, since precedence prevents
++ * anything being concatenated to any individual branch. The
++ * "next" pointer of the last BRANCH in a choice points to the
++ * thing following the whole choice. This is also where the
++ * final "next" pointer of each individual branch points; each
++ * branch starts with the operand node of a BRANCH node.
++ *
++ * BACK Normal "next" pointers all implicitly point forward; BACK
++ * exists to make loop structures possible.
++ *
++ * STAR,PLUS '?', and complex '*' and '+', are implemented as circular
++ * BRANCH structures using BACK. Simple cases (one character
++ * per match) are implemented with STAR and PLUS for speed
++ * and to minimize recursive plunges.
++ *
++ * OPEN,CLOSE ...are numbered at compile time.
++ */
++
++/*
++ * A node is one char of opcode followed by two chars of "next" pointer.
++ * "Next" pointers are stored as two 8-bit pieces, high order first. The
++ * value is a positive offset from the opcode of the node containing it.
++ * An operand, if any, simply follows the node. (Note that much of the
++ * code generation knows about this implicit relationship.)
++ *
++ * Using two bytes for the "next" pointer is vast overkill for most things,
++ * but allows patterns to get big without disasters.
++ */
++#define OP(p) (*(p))
++#define NEXT(p) (((*((p)+1)&0377)<<8) + (*((p)+2)&0377))
++#define OPERAND(p) ((p) + 3)
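
The three-byte node layout described above is easiest to see with concrete bytes. A standalone userspace sketch, not part of the diff, packing an offset the way regtail() does further down and reading it back with the NEXT() macro:

    #include <stdio.h>

    #define OP(p)   (*(p))
    #define NEXT(p) (((*((p)+1)&0377)<<8) + (*((p)+2)&0377))

    int main(void)
    {
        char node[3];
        int offset = 291;                 /* 0x0123: distance to the next node */

        node[0] = 8;                      /* opcode byte, e.g. EXACTLY */
        node[1] = (offset >> 8) & 0377;   /* high-order byte first */
        node[2] = offset & 0377;          /* low-order byte */

        printf("op=%d next=%d\n", OP(node), NEXT(node));  /* op=8 next=291 */
        return 0;
    }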
++
++/*
++ * See regmagic.h for one further detail of program structure.
++ */
++
++
++/*
++ * Utility definitions.
++ */
++#ifndef CHARBITS
++#define UCHARAT(p) ((int)*(unsigned char *)(p))
++#else
++#define UCHARAT(p) ((int)*(p)&CHARBITS)
++#endif
++
++#define FAIL(m) { regerror(m); return(NULL); }
++#define ISMULT(c) ((c) == '*' || (c) == '+' || (c) == '?')
++#define META "^$.[()|?+*\\"
++
++/*
++ * Flags to be passed up and down.
++ */
++#define HASWIDTH 01 /* Known never to match null string. */
++#define SIMPLE 02 /* Simple enough to be STAR/PLUS operand. */
++#define SPSTART 04 /* Starts with * or +. */
++#define WORST 0 /* Worst case. */
++
++/*
++ * Global work variables for regcomp().
++ */
++struct match_globals {
++char *reginput; /* String-input pointer. */
++char *regbol; /* Beginning of input, for ^ check. */
++char **regstartp; /* Pointer to startp array. */
++char **regendp; /* Ditto for endp. */
++char *regparse; /* Input-scan pointer. */
++int regnpar; /* () count. */
++char regdummy;
++char *regcode; /* Code-emit pointer; &regdummy = don't. */
++long regsize; /* Code size. */
++};
++
++/*
++ * Forward declarations for regcomp()'s friends.
++ */
++#ifndef STATIC
++#define STATIC static
++#endif
++STATIC char *reg(struct match_globals *g, int paren,int *flagp);
++STATIC char *regbranch(struct match_globals *g, int *flagp);
++STATIC char *regpiece(struct match_globals *g, int *flagp);
++STATIC char *regatom(struct match_globals *g, int *flagp);
++STATIC char *regnode(struct match_globals *g, char op);
++STATIC char *regnext(struct match_globals *g, char *p);
++STATIC void regc(struct match_globals *g, char b);
++STATIC void reginsert(struct match_globals *g, char op, char *opnd);
++STATIC void regtail(struct match_globals *g, char *p, char *val);
++STATIC void regoptail(struct match_globals *g, char *p, char *val);
++
++
++__kernel_size_t my_strcspn(const char *s1,const char *s2)
++{
++ char *scan1;
++ char *scan2;
++ int count;
++
++ count = 0;
++ for (scan1 = (char *)s1; *scan1 != '\0'; scan1++) {
++ for (scan2 = (char *)s2; *scan2 != '\0';) /* ++ moved down. */
++ if (*scan1 == *scan2++)
++ return(count);
++ count++;
++ }
++ return(count);
++}
++
++/*
++ - regcomp - compile a regular expression into internal code
++ *
++ * We can't allocate space until we know how big the compiled form will be,
++ * but we can't compile it (and thus know how big it is) until we've got a
++ * place to put the code. So we cheat: we compile it twice, once with code
++ * generation turned off and size counting turned on, and once "for real".
++ * This also means that we don't allocate space until we are sure that the
++ * thing really will compile successfully, and we never have to move the
++ * code and thus invalidate pointers into it. (Note that it has to be in
++ * one piece because free() must be able to free it all.)
++ *
++ * Beware that the optimization-preparation code in here knows about some
++ * of the structure of the compiled regexp.
++ */
++regexp *
++regcomp(char *exp,int *patternsize)
++{
++ register regexp *r;
++ register char *scan;
++ register char *longest;
++ register int len;
++ int flags;
++ struct match_globals g;
++
++ /* commented out by ethan
++ extern char *malloc();
++ */
++
++ if (exp == NULL)
++ FAIL("NULL argument");
++
++ /* First pass: determine size, legality. */
++ g.regparse = exp;
++ g.regnpar = 1;
++ g.regsize = 0L;
++ g.regcode = &g.regdummy;
++ regc(&g, MAGIC);
++ if (reg(&g, 0, &flags) == NULL)
++ return(NULL);
++
++ /* Small enough for pointer-storage convention? */
++ if (g.regsize >= 32767L) /* Probably could be 65535L. */
++ FAIL("regexp too big");
++
++ /* Allocate space. */
++ *patternsize=sizeof(regexp) + (unsigned)g.regsize;
++ r = (regexp *)malloc(sizeof(regexp) + (unsigned)g.regsize);
++ if (r == NULL)
++ FAIL("out of space");
++
++ /* Second pass: emit code. */
++ g.regparse = exp;
++ g.regnpar = 1;
++ g.regcode = r->program;
++ regc(&g, MAGIC);
++ if (reg(&g, 0, &flags) == NULL)
++ return(NULL);
++
++ /* Dig out information for optimizations. */
++ r->regstart = '\0'; /* Worst-case defaults. */
++ r->reganch = 0;
++ r->regmust = NULL;
++ r->regmlen = 0;
++ scan = r->program+1; /* First BRANCH. */
++ if (OP(regnext(&g, scan)) == END) { /* Only one top-level choice. */
++ scan = OPERAND(scan);
++
++ /* Starting-point info. */
++ if (OP(scan) == EXACTLY)
++ r->regstart = *OPERAND(scan);
++ else if (OP(scan) == BOL)
++ r->reganch++;
++
++ /*
++ * If there's something expensive in the r.e., find the
++ * longest literal string that must appear and make it the
++ * regmust. Resolve ties in favor of later strings, since
++ * the regstart check works with the beginning of the r.e.
++ * and avoiding duplication strengthens checking. Not a
++ * strong reason, but sufficient in the absence of others.
++ */
++ if (flags&SPSTART) {
++ longest = NULL;
++ len = 0;
++ for (; scan != NULL; scan = regnext(&g, scan))
++ if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) {
++ longest = OPERAND(scan);
++ len = strlen(OPERAND(scan));
++ }
++ r->regmust = longest;
++ r->regmlen = len;
++ }
++ }
++
++ return(r);
++}
++
++/*
++ - reg - regular expression, i.e. main body or parenthesized thing
++ *
++ * Caller must absorb opening parenthesis.
++ *
++ * Combining parenthesis handling with the base level of regular expression
++ * is a trifle forced, but the need to tie the tails of the branches to what
++ * follows makes it hard to avoid.
++ */
++static char *
++reg(struct match_globals *g, int paren, int *flagp /* Parenthesized? */ )
++{
++ register char *ret;
++ register char *br;
++ register char *ender;
++ register int parno = 0; /* 0 makes gcc happy */
++ int flags;
++
++ *flagp = HASWIDTH; /* Tentatively. */
++
++ /* Make an OPEN node, if parenthesized. */
++ if (paren) {
++ if (g->regnpar >= NSUBEXP)
++ FAIL("too many ()");
++ parno = g->regnpar;
++ g->regnpar++;
++ ret = regnode(g, OPEN+parno);
++ } else
++ ret = NULL;
++
++ /* Pick up the branches, linking them together. */
++ br = regbranch(g, &flags);
++ if (br == NULL)
++ return(NULL);
++ if (ret != NULL)
++ regtail(g, ret, br); /* OPEN -> first. */
++ else
++ ret = br;
++ if (!(flags&HASWIDTH))
++ *flagp &= ~HASWIDTH;
++ *flagp |= flags&SPSTART;
++ while (*g->regparse == '|') {
++ g->regparse++;
++ br = regbranch(g, &flags);
++ if (br == NULL)
++ return(NULL);
++ regtail(g, ret, br); /* BRANCH -> BRANCH. */
++ if (!(flags&HASWIDTH))
++ *flagp &= ~HASWIDTH;
++ *flagp |= flags&SPSTART;
++ }
++
++ /* Make a closing node, and hook it on the end. */
++ ender = regnode(g, (paren) ? CLOSE+parno : END);
++ regtail(g, ret, ender);
++
++ /* Hook the tails of the branches to the closing node. */
++ for (br = ret; br != NULL; br = regnext(g, br))
++ regoptail(g, br, ender);
++
++ /* Check for proper termination. */
++ if (paren && *g->regparse++ != ')') {
++ FAIL("unmatched ()");
++ } else if (!paren && *g->regparse != '\0') {
++ if (*g->regparse == ')') {
++ FAIL("unmatched ()");
++ } else
++ FAIL("junk on end"); /* "Can't happen". */
++ /* NOTREACHED */
++ }
++
++ return(ret);
++}
++
++/*
++ - regbranch - one alternative of an | operator
++ *
++ * Implements the concatenation operator.
++ */
++static char *
++regbranch(struct match_globals *g, int *flagp)
++{
++ register char *ret;
++ register char *chain;
++ register char *latest;
++ int flags;
++
++ *flagp = WORST; /* Tentatively. */
++
++ ret = regnode(g, BRANCH);
++ chain = NULL;
++ while (*g->regparse != '\0' && *g->regparse != '|' && *g->regparse != ')') {
++ latest = regpiece(g, &flags);
++ if (latest == NULL)
++ return(NULL);
++ *flagp |= flags&HASWIDTH;
++ if (chain == NULL) /* First piece. */
++ *flagp |= flags&SPSTART;
++ else
++ regtail(g, chain, latest);
++ chain = latest;
++ }
++ if (chain == NULL) /* Loop ran zero times. */
++ (void) regnode(g, NOTHING);
++
++ return(ret);
++}
++
++/*
++ - regpiece - something followed by possible [*+?]
++ *
++ * Note that the branching code sequences used for ? and the general cases
++ * of * and + are somewhat optimized: they use the same NOTHING node as
++ * both the endmarker for their branch list and the body of the last branch.
++ * It might seem that this node could be dispensed with entirely, but the
++ * endmarker role is not redundant.
++ */
++static char *
++regpiece(struct match_globals *g, int *flagp)
++{
++ register char *ret;
++ register char op;
++ register char *next;
++ int flags;
++
++ ret = regatom(g, &flags);
++ if (ret == NULL)
++ return(NULL);
++
++ op = *g->regparse;
++ if (!ISMULT(op)) {
++ *flagp = flags;
++ return(ret);
++ }
++
++ if (!(flags&HASWIDTH) && op != '?')
++ FAIL("*+ operand could be empty");
++ *flagp = (op != '+') ? (WORST|SPSTART) : (WORST|HASWIDTH);
++
++ if (op == '*' && (flags&SIMPLE))
++ reginsert(g, STAR, ret);
++ else if (op == '*') {
++ /* Emit x* as (x&|), where & means "self". */
++ reginsert(g, BRANCH, ret); /* Either x */
++ regoptail(g, ret, regnode(g, BACK)); /* and loop */
++ regoptail(g, ret, ret); /* back */
++ regtail(g, ret, regnode(g, BRANCH)); /* or */
++ regtail(g, ret, regnode(g, NOTHING)); /* null. */
++ } else if (op == '+' && (flags&SIMPLE))
++ reginsert(g, PLUS, ret);
++ else if (op == '+') {
++ /* Emit x+ as x(&|), where & means "self". */
++ next = regnode(g, BRANCH); /* Either */
++ regtail(g, ret, next);
++ regtail(g, regnode(g, BACK), ret); /* loop back */
++ regtail(g, next, regnode(g, BRANCH)); /* or */
++ regtail(g, ret, regnode(g, NOTHING)); /* null. */
++ } else if (op == '?') {
++ /* Emit x? as (x|) */
++ reginsert(g, BRANCH, ret); /* Either x */
++ regtail(g, ret, regnode(g, BRANCH)); /* or */
++ next = regnode(g, NOTHING); /* null. */
++ regtail(g, ret, next);
++ regoptail(g, ret, next);
++ }
++ g->regparse++;
++ if (ISMULT(*g->regparse))
++ FAIL("nested *?+");
++
++ return(ret);
++}
++
++/*
++ - regatom - the lowest level
++ *
++ * Optimization: gobbles an entire sequence of ordinary characters so that
++ * it can turn them into a single node, which is smaller to store and
++ * faster to run. Backslashed characters are exceptions, each becoming a
++ * separate node; the code is simpler that way and it's not worth fixing.
++ */
++static char *
++regatom(struct match_globals *g, int *flagp)
++{
++ register char *ret;
++ int flags;
++
++ *flagp = WORST; /* Tentatively. */
++
++ switch (*g->regparse++) {
++ case '^':
++ ret = regnode(g, BOL);
++ break;
++ case '$':
++ ret = regnode(g, EOL);
++ break;
++ case '.':
++ ret = regnode(g, ANY);
++ *flagp |= HASWIDTH|SIMPLE;
++ break;
++ case '[': {
++ register int class;
++ register int classend;
++
++ if (*g->regparse == '^') { /* Complement of range. */
++ ret = regnode(g, ANYBUT);
++ g->regparse++;
++ } else
++ ret = regnode(g, ANYOF);
++ if (*g->regparse == ']' || *g->regparse == '-')
++ regc(g, *g->regparse++);
++ while (*g->regparse != '\0' && *g->regparse != ']') {
++ if (*g->regparse == '-') {
++ g->regparse++;
++ if (*g->regparse == ']' || *g->regparse == '\0')
++ regc(g, '-');
++ else {
++ class = UCHARAT(g->regparse-2)+1;
++ classend = UCHARAT(g->regparse);
++ if (class > classend+1)
++ FAIL("invalid [] range");
++ for (; class <= classend; class++)
++ regc(g, class);
++ g->regparse++;
++ }
++ } else
++ regc(g, *g->regparse++);
++ }
++ regc(g, '\0');
++ if (*g->regparse != ']')
++ FAIL("unmatched []");
++ g->regparse++;
++ *flagp |= HASWIDTH|SIMPLE;
++ }
++ break;
++ case '(':
++ ret = reg(g, 1, &flags);
++ if (ret == NULL)
++ return(NULL);
++ *flagp |= flags&(HASWIDTH|SPSTART);
++ break;
++ case '\0':
++ case '|':
++ case ')':
++ FAIL("internal urp"); /* Supposed to be caught earlier. */
++ break;
++ case '?':
++ case '+':
++ case '*':
++ FAIL("?+* follows nothing");
++ break;
++ case '\\':
++ if (*g->regparse == '\0')
++ FAIL("trailing \\");
++ ret = regnode(g, EXACTLY);
++ regc(g, *g->regparse++);
++ regc(g, '\0');
++ *flagp |= HASWIDTH|SIMPLE;
++ break;
++ default: {
++ register int len;
++ register char ender;
++
++ g->regparse--;
++ len = my_strcspn((const char *)g->regparse, (const char *)META);
++ if (len <= 0)
++ FAIL("internal disaster");
++ ender = *(g->regparse+len);
++ if (len > 1 && ISMULT(ender))
++ len--; /* Back off clear of ?+* operand. */
++ *flagp |= HASWIDTH;
++ if (len == 1)
++ *flagp |= SIMPLE;
++ ret = regnode(g, EXACTLY);
++ while (len > 0) {
++ regc(g, *g->regparse++);
++ len--;
++ }
++ regc(g, '\0');
++ }
++ break;
++ }
++
++ return(ret);
++}
++
++/*
++ - regnode - emit a node
++ */
++static char * /* Location. */
++regnode(struct match_globals *g, char op)
++{
++ register char *ret;
++ register char *ptr;
++
++ ret = g->regcode;
++ if (ret == &g->regdummy) {
++ g->regsize += 3;
++ return(ret);
++ }
++
++ ptr = ret;
++ *ptr++ = op;
++ *ptr++ = '\0'; /* Null "next" pointer. */
++ *ptr++ = '\0';
++ g->regcode = ptr;
++
++ return(ret);
++}
++
++/*
++ - regc - emit (if appropriate) a byte of code
++ */
++static void
++regc(struct match_globals *g, char b)
++{
++ if (g->regcode != &g->regdummy)
++ *g->regcode++ = b;
++ else
++ g->regsize++;
++}
++
++/*
++ - reginsert - insert an operator in front of already-emitted operand
++ *
++ * Means relocating the operand.
++ */
++static void
++reginsert(struct match_globals *g, char op, char* opnd)
++{
++ register char *src;
++ register char *dst;
++ register char *place;
++
++ if (g->regcode == &g->regdummy) {
++ g->regsize += 3;
++ return;
++ }
++
++ src = g->regcode;
++ g->regcode += 3;
++ dst = g->regcode;
++ while (src > opnd)
++ *--dst = *--src;
++
++ place = opnd; /* Op node, where operand used to be. */
++ *place++ = op;
++ *place++ = '\0';
++ *place++ = '\0';
++}
++
++/*
++ - regtail - set the next-pointer at the end of a node chain
++ */
++static void
++regtail(struct match_globals *g, char *p, char *val)
++{
++ register char *scan;
++ register char *temp;
++ register int offset;
++
++ if (p == &g->regdummy)
++ return;
++
++ /* Find last node. */
++ scan = p;
++ for (;;) {
++ temp = regnext(g, scan);
++ if (temp == NULL)
++ break;
++ scan = temp;
++ }
++
++ if (OP(scan) == BACK)
++ offset = scan - val;
++ else
++ offset = val - scan;
++ *(scan+1) = (offset>>8)&0377;
++ *(scan+2) = offset&0377;
++}
++
++/*
++ - regoptail - regtail on operand of first argument; nop if operandless
++ */
++static void
++regoptail(struct match_globals *g, char *p, char *val)
++{
++ /* "Operandless" and "op != BRANCH" are synonymous in practice. */
++ if (p == NULL || p == &g->regdummy || OP(p) != BRANCH)
++ return;
++ regtail(g, OPERAND(p), val);
++}
++
++/*
++ * regexec and friends
++ */
++
++
++/*
++ * Forwards.
++ */
++STATIC int regtry(struct match_globals *g, regexp *prog, char *string);
++STATIC int regmatch(struct match_globals *g, char *prog);
++STATIC int regrepeat(struct match_globals *g, char *p);
++
++#ifdef DEBUG
++int regnarrate = 0;
++void regdump();
++STATIC char *regprop(char *op);
++#endif
++
++/*
++ - regexec - match a regexp against a string
++ */
++int
++regexec(regexp *prog, char *string)
++{
++ register char *s;
++ struct match_globals g;
++
++ /* Be paranoid... */
++ if (prog == NULL || string == NULL) {
++ printk("<3>Regexp: NULL parameter\n");
++ return(0);
++ }
++
++ /* Check validity of program. */
++ if (UCHARAT(prog->program) != MAGIC) {
++ printk("<3>Regexp: corrupted program\n");
++ return(0);
++ }
++
++ /* If there is a "must appear" string, look for it. */
++ if (prog->regmust != NULL) {
++ s = string;
++ while ((s = strchr(s, prog->regmust[0])) != NULL) {
++ if (strncmp(s, prog->regmust, prog->regmlen) == 0)
++ break; /* Found it. */
++ s++;
++ }
++ if (s == NULL) /* Not present. */
++ return(0);
++ }
++
++ /* Mark beginning of line for ^ . */
++ g.regbol = string;
++
++ /* Simplest case: anchored match need be tried only once. */
++ if (prog->reganch)
++ return(regtry(&g, prog, string));
++
++ /* Messy cases: unanchored match. */
++ s = string;
++ if (prog->regstart != '\0')
++ /* We know what char it must start with. */
++ while ((s = strchr(s, prog->regstart)) != NULL) {
++ if (regtry(&g, prog, s))
++ return(1);
++ s++;
++ }
++ else
++ /* We don't -- general case. */
++ do {
++ if (regtry(&g, prog, s))
++ return(1);
++ } while (*s++ != '\0');
++
++ /* Failure. */
++ return(0);
++}
++
++/*
++ - regtry - try match at specific point
++ */
++static int /* 0 failure, 1 success */
++regtry(struct match_globals *g, regexp *prog, char *string)
++{
++ register int i;
++ register char **sp;
++ register char **ep;
++
++ g->reginput = string;
++ g->regstartp = prog->startp;
++ g->regendp = prog->endp;
++
++ sp = prog->startp;
++ ep = prog->endp;
++ for (i = NSUBEXP; i > 0; i--) {
++ *sp++ = NULL;
++ *ep++ = NULL;
++ }
++ if (regmatch(g, prog->program + 1)) {
++ prog->startp[0] = string;
++ prog->endp[0] = g->reginput;
++ return(1);
++ } else
++ return(0);
++}
++
++/*
++ - regmatch - main matching routine
++ *
++ * Conceptually the strategy is simple: check to see whether the current
++ * node matches, call self recursively to see whether the rest matches,
++ * and then act accordingly. In practice we make some effort to avoid
++ * recursion, in particular by going through "ordinary" nodes (that don't
++ * need to know whether the rest of the match failed) by a loop instead of
++ * by recursion.
++ */
++static int /* 0 failure, 1 success */
++regmatch(struct match_globals *g, char *prog)
++{
++ register char *scan = prog; /* Current node. */
++ char *next; /* Next node. */
++
++#ifdef DEBUG
++ if (scan != NULL && regnarrate)
++ fprintf(stderr, "%s(\n", regprop(scan));
++#endif
++ while (scan != NULL) {
++#ifdef DEBUG
++ if (regnarrate)
++ fprintf(stderr, "%s...\n", regprop(scan));
++#endif
++ next = regnext(g, scan);
++
++ switch (OP(scan)) {
++ case BOL:
++ if (g->reginput != g->regbol)
++ return(0);
++ break;
++ case EOL:
++ if (*g->reginput != '\0')
++ return(0);
++ break;
++ case ANY:
++ if (*g->reginput == '\0')
++ return(0);
++ g->reginput++;
++ break;
++ case EXACTLY: {
++ register int len;
++ register char *opnd;
++
++ opnd = OPERAND(scan);
++ /* Inline the first character, for speed. */
++ if (*opnd != *g->reginput)
++ return(0);
++ len = strlen(opnd);
++ if (len > 1 && strncmp(opnd, g->reginput, len) != 0)
++ return(0);
++ g->reginput += len;
++ }
++ break;
++ case ANYOF:
++ if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) == NULL)
++ return(0);
++ g->reginput++;
++ break;
++ case ANYBUT:
++ if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) != NULL)
++ return(0);
++ g->reginput++;
++ break;
++ case NOTHING:
++ case BACK:
++ break;
++ case OPEN+1:
++ case OPEN+2:
++ case OPEN+3:
++ case OPEN+4:
++ case OPEN+5:
++ case OPEN+6:
++ case OPEN+7:
++ case OPEN+8:
++ case OPEN+9: {
++ register int no;
++ register char *save;
++
++ no = OP(scan) - OPEN;
++ save = g->reginput;
++
++ if (regmatch(g, next)) {
++ /*
++ * Don't set startp if some later
++ * invocation of the same parentheses
++ * already has.
++ */
++ if (g->regstartp[no] == NULL)
++ g->regstartp[no] = save;
++ return(1);
++ } else
++ return(0);
++ }
++ break;
++ case CLOSE+1:
++ case CLOSE+2:
++ case CLOSE+3:
++ case CLOSE+4:
++ case CLOSE+5:
++ case CLOSE+6:
++ case CLOSE+7:
++ case CLOSE+8:
++ case CLOSE+9:
++ {
++ register int no;
++ register char *save;
++
++ no = OP(scan) - CLOSE;
++ save = g->reginput;
++
++ if (regmatch(g, next)) {
++ /*
++ * Don't set endp if some later
++ * invocation of the same parentheses
++ * already has.
++ */
++ if (g->regendp[no] == NULL)
++ g->regendp[no] = save;
++ return(1);
++ } else
++ return(0);
++ }
++ break;
++ case BRANCH: {
++ register char *save;
++
++ if (OP(next) != BRANCH) /* No choice. */
++ next = OPERAND(scan); /* Avoid recursion. */
++ else {
++ do {
++ save = g->reginput;
++ if (regmatch(g, OPERAND(scan)))
++ return(1);
++ g->reginput = save;
++ scan = regnext(g, scan);
++ } while (scan != NULL && OP(scan) == BRANCH);
++ return(0);
++ /* NOTREACHED */
++ }
++ }
++ break;
++ case STAR:
++ case PLUS: {
++ register char nextch;
++ register int no;
++ register char *save;
++ register int min;
++
++ /*
++ * Lookahead to avoid useless match attempts
++ * when we know what character comes next.
++ */
++ nextch = '\0';
++ if (OP(next) == EXACTLY)
++ nextch = *OPERAND(next);
++ min = (OP(scan) == STAR) ? 0 : 1;
++ save = g->reginput;
++ no = regrepeat(g, OPERAND(scan));
++ while (no >= min) {
++ /* If it could work, try it. */
++ if (nextch == '\0' || *g->reginput == nextch)
++ if (regmatch(g, next))
++ return(1);
++ /* Couldn't or didn't -- back up. */
++ no--;
++ g->reginput = save + no;
++ }
++ return(0);
++ }
++ break;
++ case END:
++ return(1); /* Success! */
++ break;
++ default:
++ printk("<3>Regexp: memory corruption\n");
++ return(0);
++ break;
++ }
++
++ scan = next;
++ }
++
++ /*
++ * We get here only if there's trouble -- normally "case END" is
++ * the terminating point.
++ */
++ printk("<3>Regexp: corrupted pointers\n");
++ return(0);
++}
++
++/*
++ - regrepeat - repeatedly match something simple, report how many
++ */
++static int
++regrepeat(struct match_globals *g, char *p)
++{
++ register int count = 0;
++ register char *scan;
++ register char *opnd;
++
++ scan = g->reginput;
++ opnd = OPERAND(p);
++ switch (OP(p)) {
++ case ANY:
++ count = strlen(scan);
++ scan += count;
++ break;
++ case EXACTLY:
++ while (*opnd == *scan) {
++ count++;
++ scan++;
++ }
++ break;
++ case ANYOF:
++ while (*scan != '\0' && strchr(opnd, *scan) != NULL) {
++ count++;
++ scan++;
++ }
++ break;
++ case ANYBUT:
++ while (*scan != '\0' && strchr(opnd, *scan) == NULL) {
++ count++;
++ scan++;
++ }
++ break;
++ default: /* Oh dear. Called inappropriately. */
++ printk("<3>Regexp: internal foulup\n");
++ count = 0; /* Best compromise. */
++ break;
++ }
++ g->reginput = scan;
++
++ return(count);
++}
++
++/*
++ - regnext - dig the "next" pointer out of a node
++ */
++static char*
++regnext(struct match_globals *g, char *p)
++{
++ register int offset;
++
++ if (p == &g->regdummy)
++ return(NULL);
++
++ offset = NEXT(p);
++ if (offset == 0)
++ return(NULL);
++
++ if (OP(p) == BACK)
++ return(p-offset);
++ else
++ return(p+offset);
++}
++
++#ifdef DEBUG
++
++STATIC char *regprop();
++
++/*
++ - regdump - dump a regexp onto stdout in vaguely comprehensible form
++ */
++void
++regdump(regexp *r)
++{
++ register char *s;
++ register char op = EXACTLY; /* Arbitrary non-END op. */
++ register char *next;
++ /* extern char *strchr(); */
++
++
++ s = r->program + 1;
++ while (op != END) { /* While that wasn't END last time... */
++ op = OP(s);
++ printf("%2d%s", s-r->program, regprop(s)); /* Where, what. */
++ next = regnext(s);
++ if (next == NULL) /* Next ptr. */
++ printf("(0)");
++ else
++ printf("(%d)", (s-r->program)+(next-s));
++ s += 3;
++ if (op == ANYOF || op == ANYBUT || op == EXACTLY) {
++ /* Literal string, where present. */
++ while (*s != '\0') {
++ putchar(*s);
++ s++;
++ }
++ s++;
++ }
++ putchar('\n');
++ }
++
++ /* Header fields of interest. */
++ if (r->regstart != '\0')
++ printf("start `%c' ", r->regstart);
++ if (r->reganch)
++ printf("anchored ");
++ if (r->regmust != NULL)
++ printf("must have \"%s\"", r->regmust);
++ printf("\n");
++}
++
++/*
++ - regprop - printable representation of opcode
++ */
++static char *
++regprop(char *op)
++{
++#define BUFLEN 50
++ register char *p;
++ static char buf[BUFLEN];
++
++ strcpy(buf, ":");
++
++ switch (OP(op)) {
++ case BOL:
++ p = "BOL";
++ break;
++ case EOL:
++ p = "EOL";
++ break;
++ case ANY:
++ p = "ANY";
++ break;
++ case ANYOF:
++ p = "ANYOF";
++ break;
++ case ANYBUT:
++ p = "ANYBUT";
++ break;
++ case BRANCH:
++ p = "BRANCH";
++ break;
++ case EXACTLY:
++ p = "EXACTLY";
++ break;
++ case NOTHING:
++ p = "NOTHING";
++ break;
++ case BACK:
++ p = "BACK";
++ break;
++ case END:
++ p = "END";
++ break;
++ case OPEN+1:
++ case OPEN+2:
++ case OPEN+3:
++ case OPEN+4:
++ case OPEN+5:
++ case OPEN+6:
++ case OPEN+7:
++ case OPEN+8:
++ case OPEN+9:
++ snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "OPEN%d", OP(op)-OPEN);
++ p = NULL;
++ break;
++ case CLOSE+1:
++ case CLOSE+2:
++ case CLOSE+3:
++ case CLOSE+4:
++ case CLOSE+5:
++ case CLOSE+6:
++ case CLOSE+7:
++ case CLOSE+8:
++ case CLOSE+9:
++ snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "CLOSE%d", OP(op)-CLOSE);
++ p = NULL;
++ break;
++ case STAR:
++ p = "STAR";
++ break;
++ case PLUS:
++ p = "PLUS";
++ break;
++ default:
++ printk("<3>Regexp: corrupted opcode\n");
++ break;
++ }
++ if (p != NULL)
++ strncat(buf, p, BUFLEN-strlen(buf));
++ return(buf);
++}
++#endif
++
++
+--- /dev/null
++++ b/net/netfilter/regexp/regexp.h
+@@ -0,0 +1,41 @@
++/*
++ * Definitions etc. for regexp(3) routines.
++ *
++ * Caveat: this is V8 regexp(3) [actually, a reimplementation thereof],
++ * not the System V one.
++ */
++
++#ifndef REGEXP_H
++#define REGEXP_H
++
++
++/*
++http://www.opensource.apple.com/darwinsource/10.3/expect-1/expect/expect.h ,
++which contains a version of this library, says:
++
++ *
++ * NSUBEXP must be at least 10, and no greater than 117 or the parser
++ * will not work properly.
++ *
++
++However, it looks rather like this library is limited to 10. If you think
++otherwise, let us know.
++*/
++
++#define NSUBEXP 10
++typedef struct regexp {
++ char *startp[NSUBEXP];
++ char *endp[NSUBEXP];
++ char regstart; /* Internal use only. */
++ char reganch; /* Internal use only. */
++ char *regmust; /* Internal use only. */
++ int regmlen; /* Internal use only. */
++ char program[1]; /* Unwarranted chumminess with compiler. */
++} regexp;
++
++regexp * regcomp(char *exp, int *patternsize);
++int regexec(regexp *prog, char *string);
++void regsub(regexp *prog, char *source, char *dest);
++void regerror(char *s);
++
++#endif
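
This header is the whole public interface of the bundled engine, and the comment at the top of regexp.c says the same sources are meant to build in userspace as well (malloc/printf instead of kmalloc/printk). A minimal sketch of that usage, not part of the patch and assuming regexp.c is compiled alongside it outside the kernel; the pattern is only illustrative, not an official l7-filter signature:

    #include <stdio.h>
    #include <stdlib.h>
    #include "regexp.h"

    int main(void)
    {
        /* xt_layer7.c lowercases application data before matching, so the
         * sample payload here is already lower case. */
        char pattern[] = "^(get|post) [^ ]+ http/1\\.";
        char payload[] = "get /index.html http/1.1\r\nhost: example.org\r\n";
        int size = 0;
        regexp *re = regcomp(pattern, &size);

        if (re == NULL) {
            fprintf(stderr, "pattern failed to compile\n");
            return 1;
        }

        printf("compiled program: %d bytes\n", size);
        printf("match: %s\n", regexec(re, payload) ? "yes" : "no");

        free(re);
        return 0;
    }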
+--- /dev/null
++++ b/net/netfilter/regexp/regmagic.h
+@@ -0,0 +1,5 @@
++/*
++ * The first byte of the regexp internal "program" is actually this magic
++ * number; the start node begins in the second byte.
++ */
++#define MAGIC 0234
+--- /dev/null
++++ b/net/netfilter/regexp/regsub.c
+@@ -0,0 +1,95 @@
++/*
++ * regsub
++ * @(#)regsub.c 1.3 of 2 April 86
++ *
++ * Copyright (c) 1986 by University of Toronto.
++ * Written by Henry Spencer. Not derived from licensed software.
++ *
++ * Permission is granted to anyone to use this software for any
++ * purpose on any computer system, and to redistribute it freely,
++ * subject to the following restrictions:
++ *
++ * 1. The author is not responsible for the consequences of use of
++ * this software, no matter how awful, even if they arise
++ * from defects in it.
++ *
++ * 2. The origin of this software must not be misrepresented, either
++ * by explicit claim or by omission.
++ *
++ * 3. Altered versions must be plainly marked as such, and must not
++ * be misrepresented as being the original software.
++ *
++ *
++ * This code was modified by Ethan Sommer to work within the kernel
++ * (it now uses kmalloc etc..)
++ *
++ */
++#include "regexp.h"
++#include "regmagic.h"
++#include <linux/string.h>
++
++
++#ifndef CHARBITS
++#define UCHARAT(p) ((int)*(unsigned char *)(p))
++#else
++#define UCHARAT(p) ((int)*(p)&CHARBITS)
++#endif
++
++#if 0
++//void regerror(char * s)
++//{
++// printk("regexp(3): %s", s);
++// /* NOTREACHED */
++//}
++#endif
++
++/*
++ - regsub - perform substitutions after a regexp match
++ */
++void
++regsub(regexp * prog, char * source, char * dest)
++{
++ register char *src;
++ register char *dst;
++ register char c;
++ register int no;
++ register int len;
++
++ /* Not necessary and gcc doesn't like it -MLS */
++ /*extern char *strncpy();*/
++
++ if (prog == NULL || source == NULL || dest == NULL) {
++ regerror("NULL parm to regsub");
++ return;
++ }
++ if (UCHARAT(prog->program) != MAGIC) {
++ regerror("damaged regexp fed to regsub");
++ return;
++ }
++
++ src = source;
++ dst = dest;
++ while ((c = *src++) != '\0') {
++ if (c == '&')
++ no = 0;
++ else if (c == '\\' && '0' <= *src && *src <= '9')
++ no = *src++ - '0';
++ else
++ no = -1;
++
++ if (no < 0) { /* Ordinary character. */
++ if (c == '\\' && (*src == '\\' || *src == '&'))
++ c = *src++;
++ *dst++ = c;
++ } else if (prog->startp[no] != NULL && prog->endp[no] != NULL) {
++ len = prog->endp[no] - prog->startp[no];
++ (void) strncpy(dst, prog->startp[no], len);
++ dst += len;
++ if (len != 0 && *(dst-1) == '\0') { /* strncpy hit NUL. */
++ regerror("damaged match string");
++ return;
++ }
++ }
++ }
++ *dst++ = '\0';
++}
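
regsub() is not used by xt_layer7.c, but it completes the V8-style API: after a successful regexec(), "&" in the source template expands to the whole match and "\1".."\9" to the submatches recorded in startp[]/endp[]. A userspace sketch under the same build assumptions as above; note that regsub() does no bounds checking, so dest must be large enough:

    #include <stdio.h>
    #include <stdlib.h>
    #include "regexp.h"

    int main(void)
    {
        char pattern[] = "(get|post) ([^ ]+)";
        char msg[] = "get /index.html http/1.0";
        char dest[256];
        int size = 0;
        regexp *re = regcomp(pattern, &size);

        if (re == NULL || !regexec(re, msg))
            return 1;

        regsub(re, "method=\\1 uri=\\2", dest);
        printf("%s\n", dest);   /* prints: method=get uri=/index.html */

        free(re);
        return 0;
    }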
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -214,6 +214,14 @@ destroy_conntrack(struct nf_conntrack *n
+ * too. */
+ nf_ct_remove_expectations(ct);
+
++ #if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE)
++ if(ct->layer7.app_proto)
++ kfree(ct->layer7.app_proto);
++ if(ct->layer7.app_data)
++ kfree(ct->layer7.app_data);
++ #endif
++
++
+ /* We overload first tuple to link into unconfirmed list. */
+ if (!nf_ct_is_confirmed(ct)) {
+ BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -239,6 +239,12 @@ static int ct_seq_show(struct seq_file *
+ if (ct_show_delta_time(s, ct))
+ goto release;
+
++#if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE)
++ if(ct->layer7.app_proto &&
++ seq_printf(s, "l7proto=%s ", ct->layer7.app_proto))
++ return -ENOSPC;
++#endif
++
+ if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
+ goto release;
+
+--- a/include/net/netfilter/nf_conntrack.h
++++ b/include/net/netfilter/nf_conntrack.h
+@@ -134,6 +134,22 @@ struct nf_conn {
+ struct net *ct_net;
+ #endif
+
++#if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || \
++ defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE)
++ struct {
++ /*
++ * e.g. "http". NULL before decision. "unknown" after decision
++ * if no match.
++ */
++ char *app_proto;
++ /*
++ * application layer data so far. NULL after match decision.
++ */
++ char *app_data;
++ unsigned int app_data_len;
++ } layer7;
++#endif
++
+ /* Storage reserved for other modules, must be the last member */
+ union nf_conntrack_proto proto;
+ };
+--- /dev/null
++++ b/include/linux/netfilter/xt_layer7.h
+@@ -0,0 +1,13 @@
++#ifndef _XT_LAYER7_H
++#define _XT_LAYER7_H
++
++#define MAX_PATTERN_LEN 8192
++#define MAX_PROTOCOL_LEN 256
++
++struct xt_layer7_info {
++ char protocol[MAX_PROTOCOL_LEN];
++ char pattern[MAX_PATTERN_LEN];
++ u_int8_t invert;
++};
++
++#endif /* _XT_LAYER7_H */
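
The userspace half (an iptables libxt_layer7 extension) is not included in this patch; the kernel match() simply receives one of these structures per rule through matchinfo. A hypothetical sketch of filling it, with only the field names and size limits taken from the header above:

    /* Hypothetical helper, not part of the patch or of iptables itself. */
    #include <sys/types.h>
    #include <string.h>
    #include "xt_layer7.h"   /* i.e. include/linux/netfilter/xt_layer7.h */

    static void layer7_fill_info(struct xt_layer7_info *info,
                                 const char *proto, const char *pattern,
                                 int invert)
    {
        memset(info, 0, sizeof(*info));
        strncpy(info->protocol, proto, MAX_PROTOCOL_LEN - 1);
        strncpy(info->pattern, pattern, MAX_PATTERN_LEN - 1);
        info->invert = invert ? 1 : 0;
    }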
+--- a/include/linux/netfilter/Kbuild
++++ b/include/linux/netfilter/Kbuild
+@@ -49,6 +49,7 @@ header-y += xt_hashlimit.h
+ header-y += xt_helper.h
+ header-y += xt_iprange.h
+ header-y += xt_ipvs.h
++header-y += xt_layer7.h
+ header-y += xt_length.h
+ header-y += xt_limit.h
+ header-y += xt_mac.h
diff --git a/target/linux/generic/patches-3.3/601-netfilter_layer7_pktmatch.patch b/target/linux/generic/patches-3.3/601-netfilter_layer7_pktmatch.patch
new file mode 100644
index 000000000..f65e301fd
--- /dev/null
+++ b/target/linux/generic/patches-3.3/601-netfilter_layer7_pktmatch.patch
@@ -0,0 +1,108 @@
+--- a/include/linux/netfilter/xt_layer7.h
++++ b/include/linux/netfilter/xt_layer7.h
+@@ -8,6 +8,7 @@ struct xt_layer7_info {
+ char protocol[MAX_PROTOCOL_LEN];
+ char pattern[MAX_PATTERN_LEN];
+ u_int8_t invert;
++ u_int8_t pkt;
+ };
+
+ #endif /* _XT_LAYER7_H */
+--- a/net/netfilter/xt_layer7.c
++++ b/net/netfilter/xt_layer7.c
+@@ -314,33 +314,35 @@ static int match_no_append(struct nf_con
+ }
+
+ /* add the new app data to the conntrack. Return number of bytes added. */
+-static int add_data(struct nf_conn * master_conntrack,
+- char * app_data, int appdatalen)
++static int add_datastr(char *target, int offset, char *app_data, int len)
+ {
+ int length = 0, i;
+- int oldlength = master_conntrack->layer7.app_data_len;
+-
+- /* This is a fix for a race condition by Deti Fliegl. However, I'm not
+- clear on whether the race condition exists or whether this really
+- fixes it. I might just be being dense... Anyway, if it's not really
+- a fix, all it does is waste a very small amount of time. */
+- if(!master_conntrack->layer7.app_data) return 0;
++ if (!target) return 0;
+
+ /* Strip nulls. Make everything lower case (our regex lib doesn't
+ do case insensitivity). Add it to the end of the current data. */
+- for(i = 0; i < maxdatalen-oldlength-1 &&
+- i < appdatalen; i++) {
++ for(i = 0; i < maxdatalen-offset-1 && i < len; i++) {
+ if(app_data[i] != '\0') {
+ /* the kernel version of tolower mungs 'upper ascii' */
+- master_conntrack->layer7.app_data[length+oldlength] =
++ target[length+offset] =
+ isascii(app_data[i])?
+ tolower(app_data[i]) : app_data[i];
+ length++;
+ }
+ }
++ target[length+offset] = '\0';
++
++ return length;
++}
++
++/* add the new app data to the conntrack. Return number of bytes added. */
++static int add_data(struct nf_conn * master_conntrack,
++ char * app_data, int appdatalen)
++{
++ int length;
+
+- master_conntrack->layer7.app_data[length+oldlength] = '\0';
+- master_conntrack->layer7.app_data_len = length + oldlength;
++ length = add_datastr(master_conntrack->layer7.app_data, master_conntrack->layer7.app_data_len, app_data, appdatalen);
++ master_conntrack->layer7.app_data_len += length;
+
+ return length;
+ }
+@@ -438,7 +440,7 @@ match(const struct sk_buff *skbin,
+
+ enum ip_conntrack_info master_ctinfo, ctinfo;
+ struct nf_conn *master_conntrack, *conntrack;
+- unsigned char * app_data;
++ unsigned char *app_data, *tmp_data;
+ unsigned int pattern_result, appdatalen;
+ regexp * comppattern;
+
+@@ -466,8 +468,8 @@ match(const struct sk_buff *skbin,
+ master_conntrack = master_ct(master_conntrack);
+
+ /* if we've classified it or seen too many packets */
+- if(total_acct_packets(master_conntrack) > num_packets ||
+- master_conntrack->layer7.app_proto) {
++ if(!info->pkt && (total_acct_packets(master_conntrack) > num_packets ||
++ master_conntrack->layer7.app_proto)) {
+
+ pattern_result = match_no_append(conntrack, master_conntrack,
+ ctinfo, master_ctinfo, info);
+@@ -500,6 +502,25 @@ match(const struct sk_buff *skbin,
+ /* the return value gets checked later, when we're ready to use it */
+ comppattern = compile_and_cache(info->pattern, info->protocol);
+
++ if (info->pkt) {
++ tmp_data = kmalloc(maxdatalen, GFP_ATOMIC);
++ if(!tmp_data){
++ if (net_ratelimit())
++ printk(KERN_ERR "layer7: out of memory in match, bailing.\n");
++ return info->invert;
++ }
++
++ tmp_data[0] = '\0';
++ add_datastr(tmp_data, 0, app_data, appdatalen);
++ pattern_result = ((comppattern && regexec(comppattern, tmp_data)) ? 1 : 0);
++
++ kfree(tmp_data);
++ tmp_data = NULL;
++ spin_unlock_bh(&l7_lock);
++
++ return (pattern_result ^ info->invert);
++ }
++
+ /* On the first packet of a connection, allocate space for app data */
+ if(total_acct_packets(master_conntrack) == 1 && !skb->cb[0] &&
+ !master_conntrack->layer7.app_data){
diff --git a/target/linux/generic/patches-3.3/602-netfilter_layer7_match.patch b/target/linux/generic/patches-3.3/602-netfilter_layer7_match.patch
new file mode 100644
index 000000000..b2e48c824
--- /dev/null
+++ b/target/linux/generic/patches-3.3/602-netfilter_layer7_match.patch
@@ -0,0 +1,51 @@
+--- a/net/netfilter/xt_layer7.c
++++ b/net/netfilter/xt_layer7.c
+@@ -415,7 +415,9 @@ static int layer7_write_proc(struct file
+ }
+
+ static bool
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
++match(const struct sk_buff *skbin, struct xt_action_param *par)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+ match(const struct sk_buff *skbin, const struct xt_match_param *par)
+ #else
+ match(const struct sk_buff *skbin,
+@@ -597,14 +599,19 @@ match(const struct sk_buff *skbin,
+ }
+
+ // load nf_conntrack_ipv4
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
++static int
++#else
++static bool
++#endif
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+-static bool check(const struct xt_mtchk_param *par)
++check(const struct xt_mtchk_param *par)
+ {
+ if (nf_ct_l3proto_try_module_get(par->match->family) < 0) {
+ printk(KERN_WARNING "can't load conntrack support for "
+ "proto=%d\n", par->match->family);
+ #else
+-static bool check(const char *tablename, const void *inf,
++check(const char *tablename, const void *inf,
+ const struct xt_match *match, void *matchinfo,
+ unsigned int hook_mask)
+ {
+@@ -612,9 +619,15 @@ static bool check(const char *tablename,
+ printk(KERN_WARNING "can't load conntrack support for "
+ "proto=%d\n", match->family);
+ #endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
++ return -EINVAL;
++ }
++ return 0;
++#else
+ return 0;
+ }
+ return 1;
++#endif
+ }
+
+
diff --git a/target/linux/generic/patches-3.3/603-netfilter_layer7_2.6.36_fix.patch b/target/linux/generic/patches-3.3/603-netfilter_layer7_2.6.36_fix.patch
new file mode 100644
index 000000000..92a720060
--- /dev/null
+++ b/target/linux/generic/patches-3.3/603-netfilter_layer7_2.6.36_fix.patch
@@ -0,0 +1,61 @@
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -857,6 +857,27 @@ config NETFILTER_XT_MATCH_IPVS
+
+ If unsure, say N.
+
++config NETFILTER_XT_MATCH_LAYER7
++ tristate '"layer7" match support'
++ depends on EXPERIMENTAL
++ depends on NETFILTER_XTABLES
++ depends on NETFILTER_ADVANCED
++ depends on NF_CONNTRACK
++ help
++ Say Y if you want to be able to classify connections (and their
++ packets) based on regular expression matching of their application
++ layer data. This is one way to classify applications such as
++ peer-to-peer filesharing systems that do not always use the same
++ port.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config NETFILTER_XT_MATCH_LAYER7_DEBUG
++ bool 'Layer 7 debugging output'
++ depends on NETFILTER_XT_MATCH_LAYER7
++ help
++ Say Y to get lots of debugging output.
++
+ config NETFILTER_XT_MATCH_LENGTH
+ tristate '"length" match support'
+ depends on NETFILTER_ADVANCED
+@@ -1053,26 +1074,11 @@ config NETFILTER_XT_MATCH_STATE
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+-config NETFILTER_XT_MATCH_LAYER7
+- tristate '"layer7" match support'
+- depends on NETFILTER_XTABLES
+- depends on EXPERIMENTAL && (IP_NF_CONNTRACK || NF_CONNTRACK)
+- depends on NETFILTER_ADVANCED
+- help
+- Say Y if you want to be able to classify connections (and their
+- packets) based on regular expression matching of their application
+- layer data. This is one way to classify applications such as
+- peer-to-peer filesharing systems that do not always use the same
+- port.
+-
+- To compile it as a module, choose M here. If unsure, say N.
+-
+ config NETFILTER_XT_MATCH_LAYER7_DEBUG
+- bool 'Layer 7 debugging output'
+- depends on NETFILTER_XT_MATCH_LAYER7
+- help
+- Say Y to get lots of debugging output.
+-
++ bool 'Layer 7 debugging output'
++ depends on NETFILTER_XT_MATCH_LAYER7
++ help
++ Say Y to get lots of debugging output.
+
+ config NETFILTER_XT_MATCH_STATISTIC
+ tristate '"statistic" match support'
diff --git a/target/linux/generic/patches-3.3/604-netfilter_cisco_794x_iphone.patch b/target/linux/generic/patches-3.3/604-netfilter_cisco_794x_iphone.patch
new file mode 100644
index 000000000..662a499d1
--- /dev/null
+++ b/target/linux/generic/patches-3.3/604-netfilter_cisco_794x_iphone.patch
@@ -0,0 +1,118 @@
+--- a/include/linux/netfilter/nf_conntrack_sip.h
++++ b/include/linux/netfilter/nf_conntrack_sip.h
+@@ -2,12 +2,15 @@
+ #define __NF_CONNTRACK_SIP_H__
+ #ifdef __KERNEL__
+
++#include <linux/types.h>
++
+ #define SIP_PORT 5060
+ #define SIP_TIMEOUT 3600
+
+ struct nf_ct_sip_master {
+ unsigned int register_cseq;
+ unsigned int invite_cseq;
++ __be16 forced_dport;
+ };
+
+ enum sip_expectation_classes {
+--- a/net/ipv4/netfilter/nf_nat_sip.c
++++ b/net/ipv4/netfilter/nf_nat_sip.c
+@@ -73,6 +73,7 @@ static int map_addr(struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
++ struct nf_conn_help *help = nfct_help(ct);
+ char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
+ unsigned int buflen;
+ __be32 newaddr;
+@@ -85,7 +86,8 @@ static int map_addr(struct sk_buff *skb,
+ } else if (ct->tuplehash[dir].tuple.dst.u3.ip == addr->ip &&
+ ct->tuplehash[dir].tuple.dst.u.udp.port == port) {
+ newaddr = ct->tuplehash[!dir].tuple.src.u3.ip;
+- newport = ct->tuplehash[!dir].tuple.src.u.udp.port;
++ newport = help->help.ct_sip_info.forced_dport ? :
++ ct->tuplehash[!dir].tuple.src.u.udp.port;
+ } else
+ return 1;
+
+@@ -121,6 +123,7 @@ static unsigned int ip_nat_sip(struct sk
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
++ struct nf_conn_help *help = nfct_help(ct);
+ unsigned int coff, matchoff, matchlen;
+ enum sip_header_types hdr;
+ union nf_inet_addr addr;
+@@ -229,6 +232,20 @@ next:
+ !map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_TO))
+ return NF_DROP;
+
++ /* Mangle destination port for Cisco phones, then fix up checksums */
++ if (dir == IP_CT_DIR_REPLY && help->help.ct_sip_info.forced_dport) {
++ struct udphdr *uh;
++
++ if (!skb_make_writable(skb, skb->len))
++ return NF_DROP;
++
++ uh = (struct udphdr *)(skb->data + ip_hdrlen(skb));
++ uh->dest = help->help.ct_sip_info.forced_dport;
++
++ if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, 0, 0, NULL, 0))
++ return NF_DROP;
++ }
++
+ return NF_ACCEPT;
+ }
+
+@@ -280,8 +297,10 @@ static unsigned int ip_nat_sip_expect(st
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
++ struct nf_conn_help *help = nfct_help(ct);
+ __be32 newip;
+ u_int16_t port;
++ __be16 srcport;
+ char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
+ unsigned buflen;
+
+@@ -294,8 +313,9 @@ static unsigned int ip_nat_sip_expect(st
+ /* If the signalling port matches the connection's source port in the
+ * original direction, try to use the destination port in the opposite
+ * direction. */
+- if (exp->tuple.dst.u.udp.port ==
+- ct->tuplehash[dir].tuple.src.u.udp.port)
++ srcport = help->help.ct_sip_info.forced_dport ? :
++ ct->tuplehash[dir].tuple.src.u.udp.port;
++ if (exp->tuple.dst.u.udp.port == srcport)
+ port = ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port);
+ else
+ port = ntohs(exp->tuple.dst.u.udp.port);
+--- a/net/netfilter/nf_conntrack_sip.c
++++ b/net/netfilter/nf_conntrack_sip.c
+@@ -1363,8 +1363,25 @@ static int process_sip_request(struct sk
+ {
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
++ struct nf_conn_help *help = nfct_help(ct);
++ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+ unsigned int matchoff, matchlen;
+ unsigned int cseq, i;
++ union nf_inet_addr addr;
++ __be16 port;
++
++ /* Many Cisco IP phones use a high source port for SIP requests, but
++ * listen for the response on port 5060. If we are the local
++ * router for one of these phones, save the port number from the
++ * Via: header so that nf_nat_sip can redirect the responses to
++ * the correct port.
++ */
++ if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
++ SIP_HDR_VIA_UDP, NULL, &matchoff,
++ &matchlen, &addr, &port) > 0 &&
++ port != ct->tuplehash[dir].tuple.src.u.udp.port &&
++ nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3))
++ help->help.ct_sip_info.forced_dport = port;
+
+ for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
+ const struct sip_handler *handler;
diff --git a/target/linux/generic/patches-3.3/610-netfilter_match_bypass_default_checks.patch b/target/linux/generic/patches-3.3/610-netfilter_match_bypass_default_checks.patch
new file mode 100644
index 000000000..51c9e0999
--- /dev/null
+++ b/target/linux/generic/patches-3.3/610-netfilter_match_bypass_default_checks.patch
@@ -0,0 +1,93 @@
+--- a/include/linux/netfilter_ipv4/ip_tables.h
++++ b/include/linux/netfilter_ipv4/ip_tables.h
+@@ -93,6 +93,7 @@ struct ipt_ip {
+ #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */
+ #define IPT_F_GOTO 0x02 /* Set if jump is a goto */
+ #define IPT_F_MASK 0x03 /* All possible flag bits mask. */
++#define IPT_F_NO_DEF_MATCH 0x80 /* Internal: no default match rules present */
+
+ /* Values for "inv" field in struct ipt_ip. */
+ #define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -81,6 +81,9 @@ ip_packet_match(const struct iphdr *ip,
+
+ #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
+
++ if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
++ return true;
++
+ if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
+ IPT_INV_SRCIP) ||
+ FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
+@@ -134,6 +137,29 @@ ip_packet_match(const struct iphdr *ip,
+ return true;
+ }
+
++static void
++ip_checkdefault(struct ipt_ip *ip)
++{
++ static const char iface_mask[IFNAMSIZ] = {};
++
++ if (ip->invflags || ip->flags & IPT_F_FRAG)
++ return;
++
++ if (memcmp(ip->iniface_mask, iface_mask, IFNAMSIZ) != 0)
++ return;
++
++ if (memcmp(ip->outiface_mask, iface_mask, IFNAMSIZ) != 0)
++ return;
++
++ if (ip->smsk.s_addr || ip->dmsk.s_addr)
++ return;
++
++ if (ip->proto)
++ return;
++
++ ip->flags |= IPT_F_NO_DEF_MATCH;
++}
++
+ static bool
+ ip_checkentry(const struct ipt_ip *ip)
+ {
+@@ -561,7 +587,7 @@ static void cleanup_match(struct xt_entr
+ }
+
+ static int
+-check_entry(const struct ipt_entry *e, const char *name)
++check_entry(struct ipt_entry *e, const char *name)
+ {
+ const struct xt_entry_target *t;
+
+@@ -570,6 +596,8 @@ check_entry(const struct ipt_entry *e, c
+ return -EINVAL;
+ }
+
++ ip_checkdefault(&e->ip);
++
+ if (e->target_offset + sizeof(struct xt_entry_target) >
+ e->next_offset)
+ return -EINVAL;
+@@ -931,6 +959,7 @@ copy_entries_to_user(unsigned int total_
+ const struct xt_table_info *private = table->private;
+ int ret = 0;
+ const void *loc_cpu_entry;
++ u8 flags;
+
+ counters = alloc_counters(table);
+ if (IS_ERR(counters))
+@@ -961,6 +990,14 @@ copy_entries_to_user(unsigned int total_
+ ret = -EFAULT;
+ goto free_counters;
+ }
++
++ flags = e->ip.flags & IPT_F_MASK;
++ if (copy_to_user(userptr + off
++ + offsetof(struct ipt_entry, ip.flags),
++ &flags, sizeof(flags)) != 0) {
++ ret = -EFAULT;
++ goto free_counters;
++ }
+
+ for (i = sizeof(struct ipt_entry);
+ i < e->target_offset;
diff --git a/target/linux/generic/patches-3.3/611-netfilter_match_bypass_default_table.patch b/target/linux/generic/patches-3.3/611-netfilter_match_bypass_default_table.patch
new file mode 100644
index 000000000..3cf0e5a32
--- /dev/null
+++ b/target/linux/generic/patches-3.3/611-netfilter_match_bypass_default_table.patch
@@ -0,0 +1,81 @@
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -310,6 +310,33 @@ struct ipt_entry *ipt_next_entry(const s
+ return (void *)entry + entry->next_offset;
+ }
+
++static bool
++ipt_handle_default_rule(struct ipt_entry *e, unsigned int *verdict)
++{
++ struct xt_entry_target *t;
++ struct xt_standard_target *st;
++
++ if (e->target_offset != sizeof(struct ipt_entry))
++ return false;
++
++ if (!(e->ip.flags & IPT_F_NO_DEF_MATCH))
++ return false;
++
++ t = ipt_get_target(e);
++ if (t->u.kernel.target->target)
++ return false;
++
++ st = (struct xt_standard_target *) t;
++ if (st->verdict == XT_RETURN)
++ return false;
++
++ if (st->verdict >= 0)
++ return false;
++
++ *verdict = (unsigned)(-st->verdict) - 1;
++ return true;
++}
++
+ /* Returns one of the generic firewall policies, like NF_ACCEPT. */
+ unsigned int
+ ipt_do_table(struct sk_buff *skb,
+@@ -334,6 +361,25 @@ ipt_do_table(struct sk_buff *skb,
+ ip = ip_hdr(skb);
+ indev = in ? in->name : nulldevname;
+ outdev = out ? out->name : nulldevname;
++
++ IP_NF_ASSERT(table->valid_hooks & (1 << hook));
++ local_bh_disable();
++ addend = xt_write_recseq_begin();
++ private = table->private;
++ cpu = smp_processor_id();
++ table_base = private->entries[cpu];
++ jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
++ stackptr = per_cpu_ptr(private->stackptr, cpu);
++ origptr = *stackptr;
++
++ e = get_entry(table_base, private->hook_entry[hook]);
++ if (ipt_handle_default_rule(e, &verdict)) {
++ ADD_COUNTER(e->counters, skb->len, 1);
++ xt_write_recseq_end(addend);
++ local_bh_enable();
++ return verdict;
++ }
++
+ /* We handle fragments by dealing with the first fragment as
+ * if it was a normal packet. All other fragments are treated
+ * normally, except that they will NEVER match rules that ask
+@@ -348,18 +394,6 @@ ipt_do_table(struct sk_buff *skb,
+ acpar.family = NFPROTO_IPV4;
+ acpar.hooknum = hook;
+
+- IP_NF_ASSERT(table->valid_hooks & (1 << hook));
+- local_bh_disable();
+- addend = xt_write_recseq_begin();
+- private = table->private;
+- cpu = smp_processor_id();
+- table_base = private->entries[cpu];
+- jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
+- stackptr = per_cpu_ptr(private->stackptr, cpu);
+- origptr = *stackptr;
+-
+- e = get_entry(table_base, private->hook_entry[hook]);
+-
+ pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
+ table->name, hook, origptr,
+ get_entry(table_base, private->underflow[hook]));
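
The bypass above leans on how iptables encodes built-in verdicts in the standard target: userspace stores a verdict v as -v - 1, so only negative values denote built-in policies and ipt_handle_default_rule recovers the NF_* value with (unsigned)(-st->verdict) - 1. A minimal userspace sketch of that encoding, using the verdict values from <linux/netfilter.h> (illustrative only, not part of the patch):

    #include <stdio.h>

    /* Verdict values as defined in <linux/netfilter.h>. */
    #define NF_DROP   0
    #define NF_ACCEPT 1

    /* iptables userspace stores a built-in verdict v as -v - 1 in the
     * standard target; the kernel decodes it with (unsigned)(-stored) - 1. */
    static int encode_verdict(int v) { return -v - 1; }
    static unsigned int decode_verdict(int stored) { return (unsigned int)(-stored) - 1; }

    int main(void)
    {
        int stored = encode_verdict(NF_ACCEPT);  /* -2 */
        printf("stored=%d decoded=%u (expect %d)\n",
               stored, decode_verdict(stored), NF_ACCEPT);
        return 0;
    }
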
diff --git a/target/linux/generic/patches-3.3/612-netfilter_match_reduce_memory_access.patch b/target/linux/generic/patches-3.3/612-netfilter_match_reduce_memory_access.patch
new file mode 100644
index 000000000..f506165e1
--- /dev/null
+++ b/target/linux/generic/patches-3.3/612-netfilter_match_reduce_memory_access.patch
@@ -0,0 +1,16 @@
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -84,9 +84,11 @@ ip_packet_match(const struct iphdr *ip,
+ if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
+ return true;
+
+- if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
++ if (FWINV(ipinfo->smsk.s_addr &&
++ (ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
+ IPT_INV_SRCIP) ||
+- FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
++ FWINV(ipinfo->dmsk.s_addr &&
++ (ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
+ IPT_INV_DSTIP)) {
+ dprintf("Source or dest mismatch.\n");
+
diff --git a/target/linux/generic/patches-3.3/613-netfilter_optional_tcp_window_check.patch b/target/linux/generic/patches-3.3/613-netfilter_optional_tcp_window_check.patch
new file mode 100644
index 000000000..1c259d4f0
--- /dev/null
+++ b/target/linux/generic/patches-3.3/613-netfilter_optional_tcp_window_check.patch
@@ -0,0 +1,36 @@
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -29,6 +29,9 @@
+ #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+ #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+
++/* Do not check the TCP window for incoming packets */
++static int nf_ct_tcp_no_window_check __read_mostly = 1;
++
+ /* "Be conservative in what you do,
+ be liberal in what you accept from others."
+ If it's non-zero, we mark only out of window RST segments as INVALID. */
+@@ -524,6 +527,9 @@ static bool tcp_in_window(const struct n
+ s16 receiver_offset;
+ bool res;
+
++ if (nf_ct_tcp_no_window_check)
++ return true;
++
+ /*
+ * Get the required data from the packet.
+ */
+@@ -1321,6 +1327,13 @@ static struct ctl_table tcp_sysctl_table
+ .proc_handler = proc_dointvec,
+ },
+ {
++ .procname = "nf_conntrack_tcp_no_window_check",
++ .data = &nf_ct_tcp_no_window_check,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++ {
+ .procname = "nf_conntrack_tcp_be_liberal",
+ .data = &nf_ct_tcp_be_liberal,
+ .maxlen = sizeof(unsigned int),
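
The new knob defaults to 1, i.e. window checking is skipped unless it is explicitly re-enabled. A small sketch for toggling it at runtime, assuming the sysctl is registered alongside the other nf_conntrack_tcp_* entries under /proc/sys/net/netfilter/ (the path is not shown in the patch, so treat it as an assumption):

    #include <stdio.h>

    /* Assumed location; the other nf_conntrack_tcp_* sysctls live here. */
    #define KNOB "/proc/sys/net/netfilter/nf_conntrack_tcp_no_window_check"

    static int write_knob(int value)
    {
        FILE *f = fopen(KNOB, "w");
        if (!f) {
            perror("fopen");
            return -1;
        }
        fprintf(f, "%d\n", value);
        return fclose(f);
    }

    int main(void)
    {
        /* Re-enable strict TCP window tracking (the patch defaults it to off). */
        return write_knob(0) ? 1 : 0;
    }
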
diff --git a/target/linux/generic/patches-3.3/620-sched_esfq.patch b/target/linux/generic/patches-3.3/620-sched_esfq.patch
new file mode 100644
index 000000000..1fdf09d09
--- /dev/null
+++ b/target/linux/generic/patches-3.3/620-sched_esfq.patch
@@ -0,0 +1,791 @@
+--- a/include/linux/pkt_sched.h
++++ b/include/linux/pkt_sched.h
+@@ -193,6 +193,33 @@ struct tc_sfq_xstats {
+ __s32 allot;
+ };
+
++/* ESFQ section */
++
++enum
++{
++ /* traditional */
++ TCA_SFQ_HASH_CLASSIC,
++ TCA_SFQ_HASH_DST,
++ TCA_SFQ_HASH_SRC,
++ TCA_SFQ_HASH_FWMARK,
++ /* conntrack */
++ TCA_SFQ_HASH_CTORIGDST,
++ TCA_SFQ_HASH_CTORIGSRC,
++ TCA_SFQ_HASH_CTREPLDST,
++ TCA_SFQ_HASH_CTREPLSRC,
++ TCA_SFQ_HASH_CTNATCHG,
++};
++
++struct tc_esfq_qopt
++{
++ unsigned quantum; /* Bytes per round allocated to flow */
++ int perturb_period; /* Period of hash perturbation */
++ __u32 limit; /* Maximal packets in queue */
++ unsigned divisor; /* Hash divisor */
++ unsigned flows; /* Maximal number of flows */
++ unsigned hash_kind; /* Hash function to use for flow identification */
++};
++
+ /* RED section */
+
+ enum {
+--- a/net/sched/Kconfig
++++ b/net/sched/Kconfig
+@@ -148,6 +148,37 @@ config NET_SCH_SFQ
+ To compile this code as a module, choose M here: the
+ module will be called sch_sfq.
+
++config NET_SCH_ESFQ
++ tristate "Enhanced Stochastic Fairness Queueing (ESFQ)"
++ ---help---
++ Say Y here if you want to use the Enhanced Stochastic Fairness
++ Queueing (ESFQ) packet scheduling algorithm for some of your network
++ devices or as a leaf discipline for a classful qdisc such as HTB or
++ CBQ (see the top of <file:net/sched/sch_esfq.c> for details and
++ references to the SFQ algorithm).
++
++ This is an enhanced SFQ version which allows you to control some
++ hardcoded values in the SFQ scheduler.
++
++ ESFQ also adds control of the hash function used to identify packet
++ flows. The original SFQ discipline hashes by connection; ESFQ adds
++ several other hashing methods, such as by src IP or by dst IP, which
++ can be more fair to users in some networking situations.
++
++ To compile this code as a module, choose M here: the
++ module will be called sch_esfq.
++
++config NET_SCH_ESFQ_NFCT
++ bool "Connection Tracking Hash Types"
++ depends on NET_SCH_ESFQ && NF_CONNTRACK
++ ---help---
++ Say Y here to enable support for hashing based on netfilter connection
++ tracking information. This is useful for a router that is also using
++ NAT to connect privately-addressed hosts to the Internet. If you want
++ to provide fair distribution of upstream bandwidth, ESFQ must use
++ connection tracking information, since all outgoing packets will share
++ the same source address.
++
+ config NET_SCH_TEQL
+ tristate "True Link Equalizer (TEQL)"
+ ---help---
+--- a/net/sched/Makefile
++++ b/net/sched/Makefile
+@@ -26,6 +26,7 @@ obj-$(CONFIG_NET_SCH_INGRESS) += sch_ing
+ obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o
+ obj-$(CONFIG_NET_SCH_SFB) += sch_sfb.o
+ obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
++obj-$(CONFIG_NET_SCH_ESFQ) += sch_esfq.o
+ obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
+ obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
+ obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
+--- /dev/null
++++ b/net/sched/sch_esfq.c
+@@ -0,0 +1,702 @@
++/*
++ * net/sched/sch_esfq.c Extended Stochastic Fairness Queueing discipline.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
++ *
++ * Changes: Alexander Atanasov, <alex@ssi.bg>
++ * Added dynamic depth,limit,divisor,hash_kind options.
++ * Added dst and src hashes.
++ *
++ * Alexander Clouter, <alex@digriz.org.uk>
++ * Ported ESFQ to Linux 2.6.
++ *
++ * Corey Hickey, <bugfood-c@fatooh.org>
++ * Maintenance of the Linux 2.6 port.
++ * Added fwmark hash (thanks to Robert Kurjata).
++ * Added usage of jhash.
++ * Added conntrack support.
++ * Added ctnatchg hash (thanks to Ben Pfountz).
++ */
++
++#include <linux/module.h>
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <linux/bitops.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/jiffies.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/socket.h>
++#include <linux/sockios.h>
++#include <linux/in.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++#include <linux/if_ether.h>
++#include <linux/inet.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/notifier.h>
++#include <linux/init.h>
++#include <net/ip.h>
++#include <net/netlink.h>
++#include <linux/ipv6.h>
++#include <net/route.h>
++#include <linux/skbuff.h>
++#include <net/sock.h>
++#include <net/pkt_sched.h>
++#include <linux/jhash.h>
++#ifdef CONFIG_NET_SCH_ESFQ_NFCT
++#include <net/netfilter/nf_conntrack.h>
++#endif
++
++/* Stochastic Fairness Queuing algorithm.
++ For more comments look at sch_sfq.c.
++ The difference is that you can change limit, depth,
++ hash table size and choose alternate hash types.
++
++ classic: same as in sch_sfq.c
++ dst: destination IP address
++ src: source IP address
++ fwmark: netfilter mark value
++ ctorigdst: original destination IP address
++ ctorigsrc: original source IP address
++ ctrepldst: reply destination IP address
++ ctreplsrc: reply source IP
++
++*/
++
++#define ESFQ_HEAD 0
++#define ESFQ_TAIL 1
++
++/* This type should contain at least SFQ_DEPTH*2 values */
++typedef unsigned int esfq_index;
++
++struct esfq_head
++{
++ esfq_index next;
++ esfq_index prev;
++};
++
++struct esfq_sched_data
++{
++/* Parameters */
++ int perturb_period;
++ unsigned quantum; /* Allotment per round: MUST BE >= MTU */
++ int limit;
++ unsigned depth;
++ unsigned hash_divisor;
++ unsigned hash_kind;
++/* Variables */
++ struct timer_list perturb_timer;
++ int perturbation;
++ esfq_index tail; /* Index of current slot in round */
++ esfq_index max_depth; /* Maximal depth */
++
++ esfq_index *ht; /* Hash table */
++ esfq_index *next; /* Active slots link */
++ short *allot; /* Current allotment per slot */
++ unsigned short *hash; /* Hash value indexed by slots */
++ struct sk_buff_head *qs; /* Slot queue */
++ struct esfq_head *dep; /* Linked list of slots, indexed by depth */
++};
++
++/* This contains the info we will hash. */
++struct esfq_packet_info
++{
++ u32 proto; /* protocol or port */
++ u32 src; /* source from packet header */
++ u32 dst; /* destination from packet header */
++ u32 ctorigsrc; /* original source from conntrack */
++ u32 ctorigdst; /* original destination from conntrack */
++ u32 ctreplsrc; /* reply source from conntrack */
++ u32 ctrepldst; /* reply destination from conntrack */
++ u32 mark; /* netfilter mark (fwmark) */
++};
++
++static __inline__ unsigned esfq_jhash_1word(struct esfq_sched_data *q,u32 a)
++{
++ return jhash_1word(a, q->perturbation) & (q->hash_divisor-1);
++}
++
++static __inline__ unsigned esfq_jhash_2words(struct esfq_sched_data *q, u32 a, u32 b)
++{
++ return jhash_2words(a, b, q->perturbation) & (q->hash_divisor-1);
++}
++
++static __inline__ unsigned esfq_jhash_3words(struct esfq_sched_data *q, u32 a, u32 b, u32 c)
++{
++ return jhash_3words(a, b, c, q->perturbation) & (q->hash_divisor-1);
++}
++
++static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb)
++{
++ struct esfq_packet_info info;
++#ifdef CONFIG_NET_SCH_ESFQ_NFCT
++ enum ip_conntrack_info ctinfo;
++ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
++#endif
++
++ switch (skb->protocol) {
++ case __constant_htons(ETH_P_IP):
++ {
++ struct iphdr *iph = ip_hdr(skb);
++ info.dst = iph->daddr;
++ info.src = iph->saddr;
++ if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
++ (iph->protocol == IPPROTO_TCP ||
++ iph->protocol == IPPROTO_UDP ||
++ iph->protocol == IPPROTO_SCTP ||
++ iph->protocol == IPPROTO_DCCP ||
++ iph->protocol == IPPROTO_ESP))
++ info.proto = *(((u32*)iph) + iph->ihl);
++ else
++ info.proto = iph->protocol;
++ break;
++ }
++ case __constant_htons(ETH_P_IPV6):
++ {
++ struct ipv6hdr *iph = ipv6_hdr(skb);
++ /* Hash ipv6 addresses into a u32. This isn't ideal,
++ * but the code is simple. */
++ info.dst = jhash2(iph->daddr.s6_addr32, 4, q->perturbation);
++ info.src = jhash2(iph->saddr.s6_addr32, 4, q->perturbation);
++ if (iph->nexthdr == IPPROTO_TCP ||
++ iph->nexthdr == IPPROTO_UDP ||
++ iph->nexthdr == IPPROTO_SCTP ||
++ iph->nexthdr == IPPROTO_DCCP ||
++ iph->nexthdr == IPPROTO_ESP)
++ info.proto = *(u32*)&iph[1];
++ else
++ info.proto = iph->nexthdr;
++ break;
++ }
++ default:
++ info.dst = (u32)(unsigned long)skb_dst(skb);
++ info.src = (u32)(unsigned long)skb->sk;
++ info.proto = skb->protocol;
++ }
++
++ info.mark = skb->mark;
++
++#ifdef CONFIG_NET_SCH_ESFQ_NFCT
++ /* defaults if there is no conntrack info */
++ info.ctorigsrc = info.src;
++ info.ctorigdst = info.dst;
++ info.ctreplsrc = info.dst;
++ info.ctrepldst = info.src;
++ /* collect conntrack info */
++ if (ct && ct != &nf_conntrack_untracked) {
++ if (skb->protocol == __constant_htons(ETH_P_IP)) {
++ info.ctorigsrc = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
++ info.ctorigdst = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip;
++ info.ctreplsrc = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip;
++ info.ctrepldst = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip;
++ }
++ else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
++ /* Again, hash ipv6 addresses into a single u32. */
++ info.ctorigsrc = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6, 4, q->perturbation);
++ info.ctorigdst = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip6, 4, q->perturbation);
++ info.ctreplsrc = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6, 4, q->perturbation);
++ info.ctrepldst = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6, 4, q->perturbation);
++ }
++
++ }
++#endif
++
++ switch(q->hash_kind) {
++ case TCA_SFQ_HASH_CLASSIC:
++ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
++ case TCA_SFQ_HASH_DST:
++ return esfq_jhash_1word(q, info.dst);
++ case TCA_SFQ_HASH_SRC:
++ return esfq_jhash_1word(q, info.src);
++ case TCA_SFQ_HASH_FWMARK:
++ return esfq_jhash_1word(q, info.mark);
++#ifdef CONFIG_NET_SCH_ESFQ_NFCT
++ case TCA_SFQ_HASH_CTORIGDST:
++ return esfq_jhash_1word(q, info.ctorigdst);
++ case TCA_SFQ_HASH_CTORIGSRC:
++ return esfq_jhash_1word(q, info.ctorigsrc);
++ case TCA_SFQ_HASH_CTREPLDST:
++ return esfq_jhash_1word(q, info.ctrepldst);
++ case TCA_SFQ_HASH_CTREPLSRC:
++ return esfq_jhash_1word(q, info.ctreplsrc);
++ case TCA_SFQ_HASH_CTNATCHG:
++ {
++ if (info.ctorigdst == info.ctreplsrc)
++ return esfq_jhash_1word(q, info.ctorigsrc);
++ return esfq_jhash_1word(q, info.ctreplsrc);
++ }
++#endif
++ default:
++ if (net_ratelimit())
++ printk(KERN_WARNING "ESFQ: Unknown hash method. Falling back to classic.\n");
++ }
++ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
++}
++
++static inline void esfq_link(struct esfq_sched_data *q, esfq_index x)
++{
++ esfq_index p, n;
++ int d = q->qs[x].qlen + q->depth;
++
++ p = d;
++ n = q->dep[d].next;
++ q->dep[x].next = n;
++ q->dep[x].prev = p;
++ q->dep[p].next = q->dep[n].prev = x;
++}
++
++static inline void esfq_dec(struct esfq_sched_data *q, esfq_index x)
++{
++ esfq_index p, n;
++
++ n = q->dep[x].next;
++ p = q->dep[x].prev;
++ q->dep[p].next = n;
++ q->dep[n].prev = p;
++
++ if (n == p && q->max_depth == q->qs[x].qlen + 1)
++ q->max_depth--;
++
++ esfq_link(q, x);
++}
++
++static inline void esfq_inc(struct esfq_sched_data *q, esfq_index x)
++{
++ esfq_index p, n;
++ int d;
++
++ n = q->dep[x].next;
++ p = q->dep[x].prev;
++ q->dep[p].next = n;
++ q->dep[n].prev = p;
++ d = q->qs[x].qlen;
++ if (q->max_depth < d)
++ q->max_depth = d;
++
++ esfq_link(q, x);
++}
++
++static unsigned int esfq_drop(struct Qdisc *sch)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ esfq_index d = q->max_depth;
++ struct sk_buff *skb;
++ unsigned int len;
++
++ /* Queue is full! Find the longest slot and
++ drop a packet from it */
++
++ if (d > 1) {
++ esfq_index x = q->dep[d+q->depth].next;
++ skb = q->qs[x].prev;
++ len = skb->len;
++ __skb_unlink(skb, &q->qs[x]);
++ kfree_skb(skb);
++ esfq_dec(q, x);
++ sch->q.qlen--;
++ sch->qstats.drops++;
++ sch->qstats.backlog -= len;
++ return len;
++ }
++
++ if (d == 1) {
++ /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
++ d = q->next[q->tail];
++ q->next[q->tail] = q->next[d];
++ q->allot[q->next[d]] += q->quantum;
++ skb = q->qs[d].prev;
++ len = skb->len;
++ __skb_unlink(skb, &q->qs[d]);
++ kfree_skb(skb);
++ esfq_dec(q, d);
++ sch->q.qlen--;
++ q->ht[q->hash[d]] = q->depth;
++ sch->qstats.drops++;
++ sch->qstats.backlog -= len;
++ return len;
++ }
++
++ return 0;
++}
++
++static void esfq_q_enqueue(struct sk_buff *skb, struct esfq_sched_data *q, unsigned int end)
++{
++ unsigned hash = esfq_hash(q, skb);
++ unsigned depth = q->depth;
++ esfq_index x;
++
++ x = q->ht[hash];
++ if (x == depth) {
++ q->ht[hash] = x = q->dep[depth].next;
++ q->hash[x] = hash;
++ }
++
++ if (end == ESFQ_TAIL)
++ __skb_queue_tail(&q->qs[x], skb);
++ else
++ __skb_queue_head(&q->qs[x], skb);
++
++ esfq_inc(q, x);
++ if (q->qs[x].qlen == 1) { /* The flow is new */
++ if (q->tail == depth) { /* It is the first flow */
++ q->tail = x;
++ q->next[x] = x;
++ q->allot[x] = q->quantum;
++ } else {
++ q->next[x] = q->next[q->tail];
++ q->next[q->tail] = x;
++ q->tail = x;
++ }
++ }
++}
++
++static int esfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ esfq_q_enqueue(skb, q, ESFQ_TAIL);
++ sch->qstats.backlog += skb->len;
++ if (++sch->q.qlen < q->limit-1) {
++ sch->bstats.bytes += skb->len;
++ sch->bstats.packets++;
++ return 0;
++ }
++
++ sch->qstats.drops++;
++ esfq_drop(sch);
++ return NET_XMIT_CN;
++}
++
++static struct sk_buff *esfq_peek(struct Qdisc* sch)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ esfq_index a;
++
++ /* No active slots */
++ if (q->tail == q->depth)
++ return NULL;
++
++ a = q->next[q->tail];
++ return skb_peek(&q->qs[a]);
++}
++
++static struct sk_buff *esfq_q_dequeue(struct esfq_sched_data *q)
++{
++ struct sk_buff *skb;
++ unsigned depth = q->depth;
++ esfq_index a, old_a;
++
++ /* No active slots */
++ if (q->tail == depth)
++ return NULL;
++
++ a = old_a = q->next[q->tail];
++
++ /* Grab packet */
++ skb = __skb_dequeue(&q->qs[a]);
++ esfq_dec(q, a);
++
++ /* Is the slot empty? */
++ if (q->qs[a].qlen == 0) {
++ q->ht[q->hash[a]] = depth;
++ a = q->next[a];
++ if (a == old_a) {
++ q->tail = depth;
++ return skb;
++ }
++ q->next[q->tail] = a;
++ q->allot[a] += q->quantum;
++ } else if ((q->allot[a] -= skb->len) <= 0) {
++ q->tail = a;
++ a = q->next[a];
++ q->allot[a] += q->quantum;
++ }
++
++ return skb;
++}
++
++static struct sk_buff *esfq_dequeue(struct Qdisc* sch)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ struct sk_buff *skb;
++
++ skb = esfq_q_dequeue(q);
++ if (skb == NULL)
++ return NULL;
++ sch->q.qlen--;
++ sch->qstats.backlog -= skb->len;
++ return skb;
++}
++
++static void esfq_q_destroy(struct esfq_sched_data *q)
++{
++ del_timer(&q->perturb_timer);
++ if(q->ht)
++ kfree(q->ht);
++ if(q->dep)
++ kfree(q->dep);
++ if(q->next)
++ kfree(q->next);
++ if(q->allot)
++ kfree(q->allot);
++ if(q->hash)
++ kfree(q->hash);
++ if(q->qs)
++ kfree(q->qs);
++}
++
++static void esfq_destroy(struct Qdisc *sch)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ esfq_q_destroy(q);
++}
++
++
++static void esfq_reset(struct Qdisc* sch)
++{
++ struct sk_buff *skb;
++
++ while ((skb = esfq_dequeue(sch)) != NULL)
++ kfree_skb(skb);
++}
++
++static void esfq_perturbation(unsigned long arg)
++{
++ struct Qdisc *sch = (struct Qdisc*)arg;
++ struct esfq_sched_data *q = qdisc_priv(sch);
++
++ q->perturbation = net_random()&0x1F;
++
++ if (q->perturb_period) {
++ q->perturb_timer.expires = jiffies + q->perturb_period;
++ add_timer(&q->perturb_timer);
++ }
++}
++
++static unsigned int esfq_check_hash(unsigned int kind)
++{
++ switch (kind) {
++ case TCA_SFQ_HASH_CTORIGDST:
++ case TCA_SFQ_HASH_CTORIGSRC:
++ case TCA_SFQ_HASH_CTREPLDST:
++ case TCA_SFQ_HASH_CTREPLSRC:
++ case TCA_SFQ_HASH_CTNATCHG:
++#ifndef CONFIG_NET_SCH_ESFQ_NFCT
++ {
++ if (net_ratelimit())
++ printk(KERN_WARNING "ESFQ: Conntrack hash types disabled in kernel config. Falling back to classic.\n");
++ return TCA_SFQ_HASH_CLASSIC;
++ }
++#endif
++ case TCA_SFQ_HASH_CLASSIC:
++ case TCA_SFQ_HASH_DST:
++ case TCA_SFQ_HASH_SRC:
++ case TCA_SFQ_HASH_FWMARK:
++ return kind;
++ default:
++ {
++ if (net_ratelimit())
++ printk(KERN_WARNING "ESFQ: Unknown hash type. Falling back to classic.\n");
++ return TCA_SFQ_HASH_CLASSIC;
++ }
++ }
++}
++
++static int esfq_q_init(struct esfq_sched_data *q, struct nlattr *opt)
++{
++ struct tc_esfq_qopt *ctl = nla_data(opt);
++ esfq_index p = ~0U/2;
++ int i;
++
++ if (opt && opt->nla_len < nla_attr_size(sizeof(*ctl)))
++ return -EINVAL;
++
++ q->perturbation = 0;
++ q->hash_kind = TCA_SFQ_HASH_CLASSIC;
++ q->max_depth = 0;
++ if (opt == NULL) {
++ q->perturb_period = 0;
++ q->hash_divisor = 1024;
++ q->tail = q->limit = q->depth = 128;
++
++ } else {
++ struct tc_esfq_qopt *ctl = nla_data(opt);
++ if (ctl->quantum)
++ q->quantum = ctl->quantum;
++ q->perturb_period = ctl->perturb_period*HZ;
++ q->hash_divisor = ctl->divisor ? : 1024;
++ q->tail = q->limit = q->depth = ctl->flows ? : 128;
++
++ if ( q->depth > p - 1 )
++ return -EINVAL;
++
++ if (ctl->limit)
++ q->limit = min_t(u32, ctl->limit, q->depth);
++
++ if (ctl->hash_kind) {
++ q->hash_kind = esfq_check_hash(ctl->hash_kind);
++ }
++ }
++
++ q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL);
++ if (!q->ht)
++ goto err_case;
++ q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL);
++ if (!q->dep)
++ goto err_case;
++ q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL);
++ if (!q->next)
++ goto err_case;
++ q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL);
++ if (!q->allot)
++ goto err_case;
++ q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL);
++ if (!q->hash)
++ goto err_case;
++ q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL);
++ if (!q->qs)
++ goto err_case;
++
++ for (i=0; i< q->hash_divisor; i++)
++ q->ht[i] = q->depth;
++ for (i=0; i<q->depth; i++) {
++ skb_queue_head_init(&q->qs[i]);
++ q->dep[i+q->depth].next = i+q->depth;
++ q->dep[i+q->depth].prev = i+q->depth;
++ }
++
++ for (i=0; i<q->depth; i++)
++ esfq_link(q, i);
++ return 0;
++err_case:
++ esfq_q_destroy(q);
++ return -ENOBUFS;
++}
++
++static int esfq_init(struct Qdisc *sch, struct nlattr *opt)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ int err;
++
++ q->quantum = psched_mtu(qdisc_dev(sch)); /* default */
++ if ((err = esfq_q_init(q, opt)))
++ return err;
++
++ init_timer(&q->perturb_timer);
++ q->perturb_timer.data = (unsigned long)sch;
++ q->perturb_timer.function = esfq_perturbation;
++ if (q->perturb_period) {
++ q->perturb_timer.expires = jiffies + q->perturb_period;
++ add_timer(&q->perturb_timer);
++ }
++
++ return 0;
++}
++
++static int esfq_change(struct Qdisc *sch, struct nlattr *opt)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ struct esfq_sched_data new;
++ struct sk_buff *skb;
++ int err;
++
++ /* set up new queue */
++ memset(&new, 0, sizeof(struct esfq_sched_data));
++ new.quantum = psched_mtu(qdisc_dev(sch)); /* default */
++ if ((err = esfq_q_init(&new, opt)))
++ return err;
++
++ /* copy all packets from the old queue to the new queue */
++ sch_tree_lock(sch);
++ while ((skb = esfq_q_dequeue(q)) != NULL)
++ esfq_q_enqueue(skb, &new, ESFQ_TAIL);
++
++ /* clean up the old queue */
++ esfq_q_destroy(q);
++
++ /* copy elements of the new queue into the old queue */
++ q->perturb_period = new.perturb_period;
++ q->quantum = new.quantum;
++ q->limit = new.limit;
++ q->depth = new.depth;
++ q->hash_divisor = new.hash_divisor;
++ q->hash_kind = new.hash_kind;
++ q->tail = new.tail;
++ q->max_depth = new.max_depth;
++ q->ht = new.ht;
++ q->dep = new.dep;
++ q->next = new.next;
++ q->allot = new.allot;
++ q->hash = new.hash;
++ q->qs = new.qs;
++
++ /* finish up */
++ if (q->perturb_period) {
++ q->perturb_timer.expires = jiffies + q->perturb_period;
++ add_timer(&q->perturb_timer);
++ } else {
++ q->perturbation = 0;
++ }
++ sch_tree_unlock(sch);
++ return 0;
++}
++
++static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb)
++{
++ struct esfq_sched_data *q = qdisc_priv(sch);
++ unsigned char *b = skb_tail_pointer(skb);
++ struct tc_esfq_qopt opt;
++
++ opt.quantum = q->quantum;
++ opt.perturb_period = q->perturb_period/HZ;
++
++ opt.limit = q->limit;
++ opt.divisor = q->hash_divisor;
++ opt.flows = q->depth;
++ opt.hash_kind = q->hash_kind;
++
++ NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
++
++ return skb->len;
++
++nla_put_failure:
++ nlmsg_trim(skb, b);
++ return -1;
++}
++
++static struct Qdisc_ops esfq_qdisc_ops =
++{
++ .next = NULL,
++ .cl_ops = NULL,
++ .id = "esfq",
++ .priv_size = sizeof(struct esfq_sched_data),
++ .enqueue = esfq_enqueue,
++ .dequeue = esfq_dequeue,
++ .peek = esfq_peek,
++ .drop = esfq_drop,
++ .init = esfq_init,
++ .reset = esfq_reset,
++ .destroy = esfq_destroy,
++ .change = esfq_change,
++ .dump = esfq_dump,
++ .owner = THIS_MODULE,
++};
++
++static int __init esfq_module_init(void)
++{
++ return register_qdisc(&esfq_qdisc_ops);
++}
++static void __exit esfq_module_exit(void)
++{
++ unregister_qdisc(&esfq_qdisc_ops);
++}
++module_init(esfq_module_init)
++module_exit(esfq_module_exit)
++MODULE_LICENSE("GPL");
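
Configuring esfq from userspace needs a correspondingly patched tc, which passes a struct tc_esfq_qopt as TCA_OPTIONS. The sketch below only shows how that payload would be filled for NAT-friendly hashing; the struct and enum mirror the pkt_sched.h hunk above, the netlink plumbing is omitted, and the surrounding program is purely illustrative:

    #include <stdio.h>
    #include <linux/types.h>

    /* Mirrors the additions to include/linux/pkt_sched.h above. */
    enum {
        TCA_SFQ_HASH_CLASSIC, TCA_SFQ_HASH_DST, TCA_SFQ_HASH_SRC,
        TCA_SFQ_HASH_FWMARK, TCA_SFQ_HASH_CTORIGDST, TCA_SFQ_HASH_CTORIGSRC,
        TCA_SFQ_HASH_CTREPLDST, TCA_SFQ_HASH_CTREPLSRC, TCA_SFQ_HASH_CTNATCHG,
    };

    struct tc_esfq_qopt {
        unsigned quantum;        /* bytes per round per flow; 0 = use device MTU */
        int      perturb_period; /* seconds between hash perturbations */
        __u32    limit;          /* maximal packets in queue */
        unsigned divisor;        /* hash table size (power of two) */
        unsigned flows;          /* maximal number of flows */
        unsigned hash_kind;      /* one of TCA_SFQ_HASH_* */
    };

    int main(void)
    {
        /* Fair upstream sharing behind NAT: hash on the conntrack original
         * source address instead of the (rewritten) packet source address. */
        struct tc_esfq_qopt opt = {
            .quantum = 0, .perturb_period = 10, .limit = 128,
            .divisor = 1024, .flows = 128, .hash_kind = TCA_SFQ_HASH_CTORIGSRC,
        };
        printf("hash_kind=%u divisor=%u flows=%u\n",
               opt.hash_kind, opt.divisor, opt.flows);
        return 0;
    }
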
diff --git a/target/linux/generic/patches-3.3/621-sched_act_connmark.patch b/target/linux/generic/patches-3.3/621-sched_act_connmark.patch
new file mode 100644
index 000000000..b6adce1fe
--- /dev/null
+++ b/target/linux/generic/patches-3.3/621-sched_act_connmark.patch
@@ -0,0 +1,172 @@
+--- /dev/null
++++ b/net/sched/act_connmark.c
+@@ -0,0 +1,137 @@
++/*
++ * Copyright (c) 2011 Felix Fietkau <nbd@openwrt.org>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
++ * Place - Suite 330, Boston, MA 02111-1307 USA.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/skbuff.h>
++#include <linux/rtnetlink.h>
++#include <linux/pkt_cls.h>
++#include <linux/ip.h>
++#include <linux/ipv6.h>
++#include <net/netlink.h>
++#include <net/pkt_sched.h>
++#include <net/act_api.h>
++
++#include <net/netfilter/nf_conntrack.h>
++#include <net/netfilter/nf_conntrack_core.h>
++
++#define TCA_ACT_CONNMARK 20
++
++#define CONNMARK_TAB_MASK 3
++static struct tcf_common *tcf_connmark_ht[CONNMARK_TAB_MASK + 1];
++static u32 connmark_idx_gen;
++static DEFINE_RWLOCK(connmark_lock);
++
++static struct tcf_hashinfo connmark_hash_info = {
++ .htab = tcf_connmark_ht,
++ .hmask = CONNMARK_TAB_MASK,
++ .lock = &connmark_lock,
++};
++
++static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
++ struct tcf_result *res)
++{
++ struct nf_conn *c;
++ enum ip_conntrack_info ctinfo;
++ int proto;
++ int r;
++
++ if (skb->protocol == htons(ETH_P_IP)) {
++ if (skb->len < sizeof(struct iphdr))
++ goto out;
++ proto = PF_INET;
++ } else if (skb->protocol == htons(ETH_P_IPV6)) {
++ if (skb->len < sizeof(struct ipv6hdr))
++ goto out;
++ proto = PF_INET6;
++ } else
++ goto out;
++
++ r = nf_conntrack_in(dev_net(skb->dev), proto, NF_INET_PRE_ROUTING, skb);
++ if (r != NF_ACCEPT)
++ goto out;
++
++ c = nf_ct_get(skb, &ctinfo);
++ if (!c)
++ goto out;
++
++ skb->mark = c->mark;
++ nf_conntrack_put(skb->nfct);
++ skb->nfct = NULL;
++
++out:
++ return TC_ACT_PIPE;
++}
++
++static int tcf_connmark_init(struct nlattr *nla, struct nlattr *est,
++ struct tc_action *a, int ovr, int bind)
++{
++ struct tcf_common *pc;
++
++ pc = tcf_hash_create(0, est, a, sizeof(*pc), bind,
++ &connmark_idx_gen, &connmark_hash_info);
++ if (IS_ERR(pc))
++ return PTR_ERR(pc);
++
++ tcf_hash_insert(pc, &connmark_hash_info);
++
++ return ACT_P_CREATED;
++}
++
++static inline int tcf_connmark_cleanup(struct tc_action *a, int bind)
++{
++ if (a->priv)
++ return tcf_hash_release(a->priv, bind, &connmark_hash_info);
++ return 0;
++}
++
++static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
++ int bind, int ref)
++{
++ return skb->len;
++}
++
++static struct tc_action_ops act_connmark_ops = {
++ .kind = "connmark",
++ .hinfo = &connmark_hash_info,
++ .type = TCA_ACT_CONNMARK,
++ .capab = TCA_CAP_NONE,
++ .owner = THIS_MODULE,
++ .act = tcf_connmark,
++ .dump = tcf_connmark_dump,
++ .cleanup = tcf_connmark_cleanup,
++ .init = tcf_connmark_init,
++ .walk = tcf_generic_walker,
++};
++
++MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
++MODULE_DESCRIPTION("Connection tracking mark restoring");
++MODULE_LICENSE("GPL");
++
++static int __init connmark_init_module(void)
++{
++ return tcf_register_action(&act_connmark_ops);
++}
++
++static void __exit connmark_cleanup_module(void)
++{
++ tcf_unregister_action(&act_connmark_ops);
++}
++
++module_init(connmark_init_module);
++module_exit(connmark_cleanup_module);
+--- a/net/sched/Kconfig
++++ b/net/sched/Kconfig
+@@ -624,6 +624,19 @@ config NET_ACT_CSUM
+ To compile this code as a module, choose M here: the
+ module will be called act_csum.
+
++config NET_ACT_CONNMARK
++ tristate "Connection Tracking Marking"
++ depends on NET_CLS_ACT
++ depends on NF_CONNTRACK
++ depends on NF_CONNTRACK_MARK
++ ---help---
++ Say Y here to restore the connmark from a scheduler action
++
++ If unsure, say N.
++
++ To compile this code as a module, choose M here: the
++ module will be called act_connmark.
++
+ config NET_CLS_IND
+ bool "Incoming device classification"
+ depends on NET_CLS_U32 || NET_CLS_FW
+--- a/net/sched/Makefile
++++ b/net/sched/Makefile
+@@ -16,6 +16,7 @@ obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit
+ obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o
+ obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o
+ obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o
++obj-$(CONFIG_NET_ACT_CONNMARK) += act_connmark.o
+ obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
+ obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
+ obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
diff --git a/target/linux/generic/patches-3.3/630-packet_socket_type.patch b/target/linux/generic/patches-3.3/630-packet_socket_type.patch
new file mode 100644
index 000000000..231b745c6
--- /dev/null
+++ b/target/linux/generic/patches-3.3/630-packet_socket_type.patch
@@ -0,0 +1,132 @@
+This patch allows the user to specify desired packet types (outgoing,
+broadcast, unicast, etc.) on packet sockets via setsockopt.
+This can reduce the load in situations where only a limited number
+of packet types are necessary
+
+Signed-off-by: Felix Fietkau <nbd@openwrt.org>
+
+--- a/include/linux/if_packet.h
++++ b/include/linux/if_packet.h
+@@ -29,6 +29,8 @@ struct sockaddr_ll {
+ /* These ones are invisible by user level */
+ #define PACKET_LOOPBACK 5 /* MC/BRD frame looped back */
+ #define PACKET_FASTROUTE 6 /* Fastrouted frame */
++#define PACKET_MASK_ANY 0xffffffff /* mask for packet type bits */
++
+
+ /* Packet socket options */
+
+@@ -50,6 +52,7 @@ struct sockaddr_ll {
+ #define PACKET_TX_TIMESTAMP 16
+ #define PACKET_TIMESTAMP 17
+ #define PACKET_FANOUT 18
++#define PACKET_RECV_TYPE 19
+
+ #define PACKET_FANOUT_HASH 0
+ #define PACKET_FANOUT_LB 1
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -296,6 +296,7 @@ struct packet_sock {
+ unsigned int tp_loss:1;
+ unsigned int tp_tstamp;
+ struct packet_type prot_hook ____cacheline_aligned_in_smp;
++ unsigned int pkt_type;
+ };
+
+ #define PACKET_FANOUT_MAX 256
+@@ -1383,6 +1384,7 @@ static int packet_rcv_spkt(struct sk_buf
+ {
+ struct sock *sk;
+ struct sockaddr_pkt *spkt;
++ struct packet_sock *po;
+
+ /*
+ * When we registered the protocol we saved the socket in the data
+@@ -1390,6 +1392,7 @@ static int packet_rcv_spkt(struct sk_buf
+ */
+
+ sk = pt->af_packet_priv;
++ po = pkt_sk(sk);
+
+ /*
+ * Yank back the headers [hope the device set this
+@@ -1402,7 +1405,7 @@ static int packet_rcv_spkt(struct sk_buf
+ * so that this procedure is noop.
+ */
+
+- if (skb->pkt_type == PACKET_LOOPBACK)
++ if (!(po->pkt_type & (1 << skb->pkt_type)))
+ goto out;
+
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+@@ -1596,12 +1599,12 @@ static int packet_rcv(struct sk_buff *sk
+ int skb_len = skb->len;
+ unsigned int snaplen, res;
+
+- if (skb->pkt_type == PACKET_LOOPBACK)
+- goto drop;
+-
+ sk = pt->af_packet_priv;
+ po = pkt_sk(sk);
+
++ if (!(po->pkt_type & (1 << skb->pkt_type)))
++ goto drop;
++
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+ goto drop;
+
+@@ -1720,12 +1723,12 @@ static int tpacket_rcv(struct sk_buff *s
+ struct timespec ts;
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+
+- if (skb->pkt_type == PACKET_LOOPBACK)
+- goto drop;
+-
+ sk = pt->af_packet_priv;
+ po = pkt_sk(sk);
+
++ if (!(po->pkt_type & (1 << skb->pkt_type)))
++ goto drop;
++
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+ goto drop;
+
+@@ -2595,6 +2598,7 @@ static int packet_create(struct net *net
+ spin_lock_init(&po->bind_lock);
+ mutex_init(&po->pg_vec_lock);
+ po->prot_hook.func = packet_rcv;
++ po->pkt_type = PACKET_MASK_ANY & ~(1 << PACKET_LOOPBACK);
+
+ if (sock->type == SOCK_PACKET)
+ po->prot_hook.func = packet_rcv_spkt;
+@@ -3192,6 +3196,16 @@ packet_setsockopt(struct socket *sock, i
+
+ return fanout_add(sk, val & 0xffff, val >> 16);
+ }
++ case PACKET_RECV_TYPE:
++ {
++ unsigned int val;
++ if (optlen != sizeof(val))
++ return -EINVAL;
++ if (copy_from_user(&val, optval, sizeof(val)))
++ return -EFAULT;
++ po->pkt_type = val & ~PACKET_LOOPBACK;
++ return 0;
++ }
+ default:
+ return -ENOPROTOOPT;
+ }
+@@ -3262,6 +3276,13 @@ static int packet_getsockopt(struct sock
+
+ data = &val;
+ break;
++ case PACKET_RECV_TYPE:
++ if (len > sizeof(unsigned int))
++ len = sizeof(unsigned int);
++ val = po->pkt_type;
++
++ data = &val;
++ break;
+ case PACKET_VERSION:
+ if (len > sizeof(int))
+ len = sizeof(int);
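
From userspace the new option takes a bitmask of (1 << PACKET_*) packet types on SOL_PACKET sockets. A minimal sketch follows; PACKET_RECV_TYPE is defined locally in case the patched if_packet.h is not installed, and running it requires CAP_NET_RAW:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>        /* htons */
    #include <linux/if_packet.h>  /* PACKET_HOST, PACKET_BROADCAST, ... */
    #include <linux/if_ether.h>   /* ETH_P_ALL */

    #ifndef PACKET_RECV_TYPE
    #define PACKET_RECV_TYPE 19   /* value added by the hunk above */
    #endif

    int main(void)
    {
        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        if (fd < 0) {
            perror("socket");
            return 1;
        }

        /* Deliver only frames addressed to this host or broadcast;
         * multicast, other-host and outgoing frames are dropped in the kernel. */
        unsigned int mask = (1 << PACKET_HOST) | (1 << PACKET_BROADCAST);
        if (setsockopt(fd, SOL_PACKET, PACKET_RECV_TYPE, &mask, sizeof(mask)) < 0)
            perror("setsockopt(PACKET_RECV_TYPE)");

        return 0;
    }
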
diff --git a/target/linux/generic/patches-3.3/640-bridge_no_eap_forward.patch b/target/linux/generic/patches-3.3/640-bridge_no_eap_forward.patch
new file mode 100644
index 000000000..bbdb3bf1d
--- /dev/null
+++ b/target/linux/generic/patches-3.3/640-bridge_no_eap_forward.patch
@@ -0,0 +1,15 @@
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -78,7 +78,11 @@ int br_handle_frame_finish(struct sk_buf
+
+ dst = NULL;
+
+- if (is_broadcast_ether_addr(dest))
++ if (skb->protocol == htons(ETH_P_PAE)) {
++ skb2 = skb;
++ /* Do not forward 802.1x/EAP frames */
++ skb = NULL;
++ } else if (is_broadcast_ether_addr(dest))
+ skb2 = skb;
+ else if (is_multicast_ether_addr(dest)) {
+ mdst = br_mdb_get(br, skb);
diff --git a/target/linux/generic/patches-3.3/641-bridge_always_accept_eap.patch b/target/linux/generic/patches-3.3/641-bridge_always_accept_eap.patch
new file mode 100644
index 000000000..e04199b30
--- /dev/null
+++ b/target/linux/generic/patches-3.3/641-bridge_always_accept_eap.patch
@@ -0,0 +1,11 @@
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -65,7 +65,7 @@ int br_handle_frame_finish(struct sk_buf
+ br_multicast_rcv(br, p, skb))
+ goto drop;
+
+- if (p->state == BR_STATE_LEARNING)
++ if ((p->state == BR_STATE_LEARNING) && skb->protocol != htons(ETH_P_PAE))
+ goto drop;
+
+ BR_INPUT_SKB_CB(skb)->brdev = br->dev;
diff --git a/target/linux/generic/patches-3.3/642-bridge_port_isolate.patch b/target/linux/generic/patches-3.3/642-bridge_port_isolate.patch
new file mode 100644
index 000000000..68434bfc2
--- /dev/null
+++ b/target/linux/generic/patches-3.3/642-bridge_port_isolate.patch
@@ -0,0 +1,103 @@
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -135,6 +135,7 @@ struct net_bridge_port
+
+ unsigned long flags;
+ #define BR_HAIRPIN_MODE 0x00000001
++#define BR_ISOLATE_MODE 0x00000002
+
+ #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ u32 multicast_startup_queries_sent;
+--- a/net/bridge/br_sysfs_if.c
++++ b/net/bridge/br_sysfs_if.c
+@@ -149,6 +149,22 @@ static int store_hairpin_mode(struct net
+ static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR,
+ show_hairpin_mode, store_hairpin_mode);
+
++static ssize_t show_isolate_mode(struct net_bridge_port *p, char *buf)
++{
++ int isolate_mode = (p->flags & BR_ISOLATE_MODE) ? 1 : 0;
++ return sprintf(buf, "%d\n", isolate_mode);
++}
++static ssize_t store_isolate_mode(struct net_bridge_port *p, unsigned long v)
++{
++ if (v)
++ p->flags |= BR_ISOLATE_MODE;
++ else
++ p->flags &= ~BR_ISOLATE_MODE;
++ return 0;
++}
++static BRPORT_ATTR(isolate_mode, S_IRUGO | S_IWUSR,
++ show_isolate_mode, store_isolate_mode);
++
+ #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
+ {
+@@ -181,6 +197,7 @@ static struct brport_attribute *brport_a
+ &brport_attr_hold_timer,
+ &brport_attr_flush,
+ &brport_attr_hairpin_mode,
++ &brport_attr_isolate_mode,
+ #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ &brport_attr_multicast_router,
+ #endif
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -98,7 +98,8 @@ int br_handle_frame_finish(struct sk_buf
+ skb2 = skb;
+
+ br->dev->stats.multicast++;
+- } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
++ } else if ((p->flags & BR_ISOLATE_MODE) ||
++ ((dst = __br_fdb_get(br, dest)) && dst->is_local)) {
+ skb2 = skb;
+ /* Do not forward the packet since it's local. */
+ skb = NULL;
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -109,7 +109,7 @@ void br_deliver(const struct net_bridge_
+ /* called with rcu_read_lock */
+ void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
+ {
+- if (should_deliver(to, skb)) {
++ if (should_deliver(to, skb) && !(to->flags & BR_ISOLATE_MODE)) {
+ if (skb0)
+ deliver_clone(to, skb, __br_forward);
+ else
+@@ -164,7 +164,8 @@ out:
+ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
+ struct sk_buff *skb0,
+ void (*__packet_hook)(const struct net_bridge_port *p,
+- struct sk_buff *skb))
++ struct sk_buff *skb),
++ bool forward)
+ {
+ struct net_bridge_port *p;
+ struct net_bridge_port *prev;
+@@ -172,6 +173,9 @@ static void br_flood(struct net_bridge *
+ prev = NULL;
+
+ list_for_each_entry_rcu(p, &br->port_list, list) {
++ if (forward && (p->flags & BR_ISOLATE_MODE))
++ continue;
++
+ prev = maybe_deliver(prev, p, skb, __packet_hook);
+ if (IS_ERR(prev))
+ goto out;
+@@ -195,14 +199,14 @@ out:
+ /* called with rcu_read_lock */
+ void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
+ {
+- br_flood(br, skb, NULL, __br_deliver);
++ br_flood(br, skb, NULL, __br_deliver, false);
+ }
+
+ /* called under bridge lock */
+ void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
+ struct sk_buff *skb2)
+ {
+- br_flood(br, skb, skb2, __br_forward);
++ br_flood(br, skb, skb2, __br_forward, true);
+ }
+
+ #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
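
The flag is controlled per port through the new brport sysfs attribute. A small sketch for enabling it, assuming the usual layout where BRPORT_ATTR() files appear as /sys/class/net/<port>/brport/<name>; the path and the "wlan0" port name are placeholders, not taken from the patch:

    #include <stdio.h>

    /* Assumed sysfs layout for bridge port attributes. */
    static int set_isolate_mode(const char *port, int on)
    {
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/class/net/%s/brport/isolate_mode", port);
        f = fopen(path, "w");
        if (!f) {
            perror(path);
            return -1;
        }
        fprintf(f, "%d\n", on);
        return fclose(f);
    }

    int main(void)
    {
        /* Traffic is never forwarded to an isolated port, so two isolated
         * ports cannot reach each other through the bridge, while the bridge
         * interface itself stays reachable. */
        return set_isolate_mode("wlan0", 1) ? 1 : 0;
    }
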
diff --git a/target/linux/generic/patches-3.3/643-bridge_remove_ipv6_dependency.patch b/target/linux/generic/patches-3.3/643-bridge_remove_ipv6_dependency.patch
new file mode 100644
index 000000000..301b6be44
--- /dev/null
+++ b/target/linux/generic/patches-3.3/643-bridge_remove_ipv6_dependency.patch
@@ -0,0 +1,107 @@
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -91,6 +91,12 @@ extern void addrconf_join_solict(struc
+ extern void addrconf_leave_solict(struct inet6_dev *idev,
+ const struct in6_addr *addr);
+
++extern int (*ipv6_dev_get_saddr_hook)(struct net *net,
++ struct net_device *dev,
++ const struct in6_addr *daddr,
++ unsigned int srcprefs,
++ struct in6_addr *saddr);
++
+ static inline unsigned long addrconf_timeout_fixup(u32 timeout,
+ unsigned unit)
+ {
+--- a/net/bridge/Kconfig
++++ b/net/bridge/Kconfig
+@@ -6,7 +6,6 @@ config BRIDGE
+ tristate "802.1d Ethernet Bridging"
+ select LLC
+ select STP
+- depends on IPV6 || IPV6=n
+ ---help---
+ If you say Y here, then your Linux box will be able to act as an
+ Ethernet bridge, which means that the different Ethernet segments it
+--- a/net/ipv6/Makefile
++++ b/net/ipv6/Makefile
+@@ -40,3 +40,4 @@ obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.
+ obj-y += addrconf_core.o exthdrs_core.o
+
+ obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
++obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_stubs.o
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1103,7 +1103,7 @@ out:
+ return ret;
+ }
+
+-int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
++static int __ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
+ const struct in6_addr *daddr, unsigned int prefs,
+ struct in6_addr *saddr)
+ {
+@@ -1228,7 +1228,6 @@ try_nextdev:
+ in6_ifa_put(hiscore->ifa);
+ return 0;
+ }
+-EXPORT_SYMBOL(ipv6_dev_get_saddr);
+
+ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
+ unsigned char banned_flags)
+@@ -4840,6 +4839,9 @@ int __init addrconf_init(void)
+
+ ipv6_addr_label_rtnl_register();
+
++ BUG_ON(ipv6_dev_get_saddr_hook != NULL);
++ rcu_assign_pointer(ipv6_dev_get_saddr_hook, __ipv6_dev_get_saddr);
++
+ return 0;
+ errout:
+ rtnl_af_unregister(&inet6_ops);
+@@ -4858,6 +4860,9 @@ void addrconf_cleanup(void)
+ struct net_device *dev;
+ int i;
+
++ rcu_assign_pointer(ipv6_dev_get_saddr_hook, NULL);
++ synchronize_rcu();
++
+ unregister_netdevice_notifier(&ipv6_dev_notf);
+ unregister_pernet_subsys(&addrconf_ops);
+ ipv6_addr_label_cleanup();
+--- /dev/null
++++ b/net/ipv6/inet6_stubs.c
+@@ -0,0 +1,33 @@
++/*
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++#include <linux/export.h>
++#include <net/ipv6.h>
++
++int (*ipv6_dev_get_saddr_hook)(struct net *net, struct net_device *dev,
++ const struct in6_addr *daddr, unsigned int srcprefs,
++ struct in6_addr *saddr);
++
++EXPORT_SYMBOL(ipv6_dev_get_saddr_hook);
++
++int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
++ const struct in6_addr *daddr, unsigned int prefs,
++ struct in6_addr *saddr)
++{
++ int ret = -EADDRNOTAVAIL;
++ typeof(ipv6_dev_get_saddr_hook) dev_get_saddr;
++
++ rcu_read_lock();
++ dev_get_saddr = rcu_dereference(ipv6_dev_get_saddr_hook);
++
++ if (dev_get_saddr)
++ ret = dev_get_saddr(net, dst_dev, daddr, prefs, saddr);
++
++ rcu_read_unlock();
++ return ret;
++}
++EXPORT_SYMBOL(ipv6_dev_get_saddr);
++
diff --git a/target/linux/generic/patches-3.3/644-bridge_optimize_netfilter_hooks.patch b/target/linux/generic/patches-3.3/644-bridge_optimize_netfilter_hooks.patch
new file mode 100644
index 000000000..dafe42081
--- /dev/null
+++ b/target/linux/generic/patches-3.3/644-bridge_optimize_netfilter_hooks.patch
@@ -0,0 +1,146 @@
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -55,7 +55,7 @@ int br_dev_queue_push_xmit(struct sk_buf
+
+ int br_forward_finish(struct sk_buff *skb)
+ {
+- return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
++ return BR_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
+ br_dev_queue_push_xmit);
+
+ }
+@@ -74,7 +74,7 @@ static void __br_deliver(const struct ne
+ return;
+ }
+
+- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
++ BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ br_forward_finish);
+ }
+
+@@ -91,7 +91,7 @@ static void __br_forward(const struct ne
+ skb->dev = to->dev;
+ skb_forward_csum(skb);
+
+- NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
++ BR_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
+ br_forward_finish);
+ }
+
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -40,7 +40,7 @@ static int br_pass_frame_up(struct sk_bu
+ indev = skb->dev;
+ skb->dev = brdev;
+
+- return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
++ return BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
+ netif_receive_skb);
+ }
+
+@@ -199,7 +199,7 @@ rx_handler_result_t br_handle_frame(stru
+ }
+
+ /* Deliver packet to local host only */
+- if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
++ if (BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
+ NULL, br_handle_local_finish)) {
+ return RX_HANDLER_CONSUMED; /* consumed by filter */
+ } else {
+@@ -224,7 +224,7 @@ forward:
+ if (!compare_ether_addr(p->br->dev->dev_addr, dest))
+ skb->pkt_type = PACKET_HOST;
+
+- NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
++ BR_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ br_handle_frame_finish);
+ break;
+ default:
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -753,7 +753,7 @@ static void __br_multicast_send_query(st
+ if (port) {
+ __skb_push(skb, sizeof(struct ethhdr));
+ skb->dev = port->dev;
+- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
++ BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ dev_queue_xmit);
+ } else
+ netif_rx(skb);
+--- a/net/bridge/br_netfilter.c
++++ b/net/bridge/br_netfilter.c
+@@ -71,6 +71,15 @@ static int brnf_filter_pppoe_tagged __re
+ #define IS_ARP(skb) \
+ (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
+
++int brnf_call_ebtables __read_mostly = 0;
++EXPORT_SYMBOL_GPL(brnf_call_ebtables);
++
++bool br_netfilter_run_hooks(void)
++{
++ return brnf_call_iptables | brnf_call_ip6tables | brnf_call_arptables |
++ brnf_call_ebtables;
++}
++
+ static inline __be16 vlan_proto(const struct sk_buff *skb)
+ {
+ if (vlan_tx_tag_present(skb))
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -486,15 +486,29 @@ static inline bool br_multicast_is_route
+
+ /* br_netfilter.c */
+ #ifdef CONFIG_BRIDGE_NETFILTER
++extern int brnf_call_ebtables;
+ extern int br_netfilter_init(void);
+ extern void br_netfilter_fini(void);
+ extern void br_netfilter_rtable_init(struct net_bridge *);
++extern bool br_netfilter_run_hooks(void);
+ #else
+ #define br_netfilter_init() (0)
+ #define br_netfilter_fini() do { } while(0)
+ #define br_netfilter_rtable_init(x)
++#define br_netfilter_run_hooks() false
+ #endif
+
++static inline int
++BR_HOOK(uint8_t pf, unsigned int hook, struct sk_buff *skb,
++ struct net_device *in, struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ if (!br_netfilter_run_hooks())
++ return okfn(skb);
++
++ return NF_HOOK(pf, hook, skb, in, out, okfn);
++}
++
+ /* br_stp.c */
+ extern void br_log_state(const struct net_bridge_port *p);
+ extern struct net_bridge_port *br_get_port(struct net_bridge *br,
+--- a/net/bridge/br_stp_bpdu.c
++++ b/net/bridge/br_stp_bpdu.c
+@@ -52,7 +52,7 @@ static void br_send_bpdu(struct net_brid
+
+ skb_reset_mac_header(skb);
+
+- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
++ BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ dev_queue_xmit);
+ }
+
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -2403,11 +2403,13 @@ static int __init ebtables_init(void)
+ }
+
+ printk(KERN_INFO "Ebtables v2.0 registered\n");
++ brnf_call_ebtables = 1;
+ return 0;
+ }
+
+ static void __exit ebtables_fini(void)
+ {
++ brnf_call_ebtables = 0;
+ nf_unregister_sockopt(&ebt_sockopts);
+ xt_unregister_target(&ebt_standard_target);
+ printk(KERN_INFO "Ebtables v2.0 unregistered\n");
diff --git a/target/linux/generic/patches-3.3/650-pppoe_header_pad.patch b/target/linux/generic/patches-3.3/650-pppoe_header_pad.patch
new file mode 100644
index 000000000..5862dc113
--- /dev/null
+++ b/target/linux/generic/patches-3.3/650-pppoe_header_pad.patch
@@ -0,0 +1,20 @@
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -856,7 +856,7 @@ static int pppoe_sendmsg(struct kiocb *i
+ goto end;
+
+
+- skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
++ skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32 + NET_SKB_PAD,
+ 0, GFP_KERNEL);
+ if (!skb) {
+ error = -ENOMEM;
+@@ -864,7 +864,7 @@ static int pppoe_sendmsg(struct kiocb *i
+ }
+
+ /* Reserve space for headers. */
+- skb_reserve(skb, dev->hard_header_len);
++ skb_reserve(skb, dev->hard_header_len + NET_SKB_PAD);
+ skb_reset_network_header(skb);
+
+ skb->dev = dev;
diff --git a/target/linux/generic/patches-3.3/651-wireless_mesh_header.patch b/target/linux/generic/patches-3.3/651-wireless_mesh_header.patch
new file mode 100644
index 000000000..5c83d1948
--- /dev/null
+++ b/target/linux/generic/patches-3.3/651-wireless_mesh_header.patch
@@ -0,0 +1,11 @@
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -145,7 +145,7 @@ static inline bool dev_xmit_complete(int
+ */
+
+ #if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
+-# if defined(CONFIG_MAC80211_MESH)
++# if 1 || defined(CONFIG_MAC80211_MESH)
+ # define LL_MAX_HEADER 128
+ # else
+ # define LL_MAX_HEADER 96
diff --git a/target/linux/generic/patches-3.3/652-atm_header_changes.patch b/target/linux/generic/patches-3.3/652-atm_header_changes.patch
new file mode 100644
index 000000000..238d6f81c
--- /dev/null
+++ b/target/linux/generic/patches-3.3/652-atm_header_changes.patch
@@ -0,0 +1,12 @@
+--- a/include/linux/atm.h
++++ b/include/linux/atm.h
+@@ -139,6 +139,9 @@ struct atm_trafprm {
+ int min_pcr; /* minimum PCR in cells per second */
+ int max_cdv; /* maximum CDV in microseconds */
+ int max_sdu; /* maximum SDU in bytes */
++ int scr; /* sustained rate in cells per second */
++ int mbs; /* maximum burst size (MBS) in cells */
++ int cdv; /* Cell delay variation */
+ /* extra params for ABR */
+ unsigned int icr; /* Initial Cell Rate (24-bit) */
+ unsigned int tbe; /* Transient Buffer Exposure (24-bit) */
diff --git a/target/linux/generic/patches-3.3/653-disable_netlink_trim.patch b/target/linux/generic/patches-3.3/653-disable_netlink_trim.patch
new file mode 100644
index 000000000..ffa6502cd
--- /dev/null
+++ b/target/linux/generic/patches-3.3/653-disable_netlink_trim.patch
@@ -0,0 +1,28 @@
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -854,25 +854,7 @@ void netlink_detachskb(struct sock *sk,
+
+ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
+ {
+- int delta;
+-
+ skb_orphan(skb);
+-
+- delta = skb->end - skb->tail;
+- if (delta * 2 < skb->truesize)
+- return skb;
+-
+- if (skb_shared(skb)) {
+- struct sk_buff *nskb = skb_clone(skb, allocation);
+- if (!nskb)
+- return skb;
+- kfree_skb(skb);
+- skb = nskb;
+- }
+-
+- if (!pskb_expand_head(skb, 0, -delta, allocation))
+- skb->truesize -= delta;
+-
+ return skb;
+ }
+
diff --git a/target/linux/generic/patches-3.3/654-avoid_skb_cow_realloc.patch b/target/linux/generic/patches-3.3/654-avoid_skb_cow_realloc.patch
new file mode 100644
index 000000000..5b2537127
--- /dev/null
+++ b/target/linux/generic/patches-3.3/654-avoid_skb_cow_realloc.patch
@@ -0,0 +1,21 @@
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1850,12 +1850,15 @@ static inline int skb_clone_writable(con
+ static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
+ int cloned)
+ {
++ unsigned int alloc_headroom = headroom;
+ int delta = 0;
+
+ if (headroom < NET_SKB_PAD)
+- headroom = NET_SKB_PAD;
+- if (headroom > skb_headroom(skb))
+- delta = headroom - skb_headroom(skb);
++ alloc_headroom = NET_SKB_PAD;
++ if (headroom > skb_headroom(skb) ||
++ (cloned && alloc_headroom > skb_headroom(skb))) {
++ delta = alloc_headroom - skb_headroom(skb);
++ }
+
+ if (delta || cloned)
+ return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
diff --git a/target/linux/generic/patches-3.3/655-increase_skb_pad.patch b/target/linux/generic/patches-3.3/655-increase_skb_pad.patch
new file mode 100644
index 000000000..5d14daadf
--- /dev/null
+++ b/target/linux/generic/patches-3.3/655-increase_skb_pad.patch
@@ -0,0 +1,11 @@
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1558,7 +1558,7 @@ static inline int pskb_network_may_pull(
+ * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
+ */
+ #ifndef NET_SKB_PAD
+-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
++#define NET_SKB_PAD max(48, L1_CACHE_BYTES)
+ #endif
+
+ extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/target/linux/generic/patches-3.3/700-swconfig.patch b/target/linux/generic/patches-3.3/700-swconfig.patch
new file mode 100644
index 000000000..48cb64372
--- /dev/null
+++ b/target/linux/generic/patches-3.3/700-swconfig.patch
@@ -0,0 +1,29 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -13,6 +13,16 @@ menuconfig PHYLIB
+
+ if PHYLIB
+
++config SWCONFIG
++ tristate "Switch configuration API"
++ ---help---
++ Switch configuration API using netlink. This allows
++ you to configure the VLAN features of certain switches.
++
++config SWCONFIG_LEDS
++ bool "Switch LED trigger support"
++ depends on (SWCONFIG && LEDS_TRIGGERS)
++
+ comment "MII PHY device drivers"
+
+ config MARVELL_PHY
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -3,6 +3,7 @@
+ libphy-objs := phy.o phy_device.o mdio_bus.o
+
+ obj-$(CONFIG_PHYLIB) += libphy.o
++obj-$(CONFIG_SWCONFIG) += swconfig.o
+ obj-$(CONFIG_MARVELL_PHY) += marvell.o
+ obj-$(CONFIG_DAVICOM_PHY) += davicom.o
+ obj-$(CONFIG_CICADA_PHY) += cicada.o
diff --git a/target/linux/generic/patches-3.3/701-phy_extension.patch b/target/linux/generic/patches-3.3/701-phy_extension.patch
new file mode 100644
index 000000000..201c857d2
--- /dev/null
+++ b/target/linux/generic/patches-3.3/701-phy_extension.patch
@@ -0,0 +1,72 @@
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -299,6 +299,50 @@ int phy_ethtool_gset(struct phy_device *
+ }
+ EXPORT_SYMBOL(phy_ethtool_gset);
+
++int phy_ethtool_ioctl(struct phy_device *phydev, void *useraddr)
++{
++ u32 cmd;
++ int tmp;
++ struct ethtool_cmd ecmd = { ETHTOOL_GSET };
++ struct ethtool_value edata = { ETHTOOL_GLINK };
++
++ if (get_user(cmd, (u32 *) useraddr))
++ return -EFAULT;
++
++ switch (cmd) {
++ case ETHTOOL_GSET:
++ phy_ethtool_gset(phydev, &ecmd);
++ if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
++ return -EFAULT;
++ return 0;
++
++ case ETHTOOL_SSET:
++ if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
++ return -EFAULT;
++ return phy_ethtool_sset(phydev, &ecmd);
++
++ case ETHTOOL_NWAY_RST:
++ /* if autoneg is off, it's an error */
++ tmp = phy_read(phydev, MII_BMCR);
++ if (tmp & BMCR_ANENABLE) {
++ tmp |= (BMCR_ANRESTART);
++ phy_write(phydev, MII_BMCR, tmp);
++ return 0;
++ }
++ return -EINVAL;
++
++ case ETHTOOL_GLINK:
++ edata.data = (phy_read(phydev,
++ MII_BMSR) & BMSR_LSTATUS) ? 1 : 0;
++ if (copy_to_user(useraddr, &edata, sizeof(edata)))
++ return -EFAULT;
++ return 0;
++ }
++
++ return -EOPNOTSUPP;
++}
++EXPORT_SYMBOL(phy_ethtool_ioctl);
++
+ /**
+ * phy_mii_ioctl - generic PHY MII ioctl interface
+ * @phydev: the phy_device struct
+@@ -474,7 +518,7 @@ static void phy_force_reduction(struct p
+ int idx;
+
+ idx = phy_find_setting(phydev->speed, phydev->duplex);
+-
++
+ idx++;
+
+ idx = phy_find_valid(idx, phydev->supported);
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -515,6 +515,7 @@ void phy_start_machine(struct phy_device
+ void phy_stop_machine(struct phy_device *phydev);
+ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
++int phy_ethtool_ioctl(struct phy_device *phydev, void *useraddr);
+ int phy_mii_ioctl(struct phy_device *phydev,
+ struct ifreq *ifr, int cmd);
+ int phy_start_interrupts(struct phy_device *phydev);
diff --git a/target/linux/generic/patches-3.3/702-phy_add_aneg_done_function.patch b/target/linux/generic/patches-3.3/702-phy_add_aneg_done_function.patch
new file mode 100644
index 000000000..0649afb10
--- /dev/null
+++ b/target/linux/generic/patches-3.3/702-phy_add_aneg_done_function.patch
@@ -0,0 +1,45 @@
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -393,9 +393,18 @@ struct phy_driver {
+ */
+ int (*config_aneg)(struct phy_device *phydev);
+
++ /* Determine if autonegotiation is done */
++ int (*aneg_done)(struct phy_device *phydev);
++
+ /* Determines the negotiated speed and duplex */
+ int (*read_status)(struct phy_device *phydev);
+
++ /*
++ * Update the value in phydev->link to reflect the
++ * current link value
++ */
++ int (*update_link)(struct phy_device *phydev);
++
+ /* Clears any pending interrupts */
+ int (*ack_interrupt)(struct phy_device *phydev);
+
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -705,6 +705,9 @@ int genphy_update_link(struct phy_device
+ {
+ int status;
+
++ if (phydev->drv->update_link)
++ return phydev->drv->update_link(phydev);
++
+ /* Do a fake read */
+ status = phy_read(phydev, MII_BMSR);
+
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -106,6 +106,9 @@ static inline int phy_aneg_done(struct p
+ {
+ int retval;
+
++ if (phydev->drv->aneg_done)
++ return phydev->drv->aneg_done(phydev);
++
+ retval = phy_read(phydev, MII_BMSR);
+
+ return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
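
The two phy_driver callbacks added above let a driver short-circuit the generic MII register polling, which is useful for switch pseudo-PHYs that always report a fixed, fully negotiated link towards the CPU port. A minimal sketch under that assumption follows; the mysw_* names are illustrative and not part of the patch:

static int mysw_aneg_done(struct phy_device *phydev)
{
	return 1;	/* negotiation is never pending on the CPU-facing port */
}

static int mysw_update_link(struct phy_device *phydev)
{
	phydev->link = 1;	/* fixed link, no MII_BMSR read needed */
	return 0;
}

static struct phy_driver mysw_driver = {
	.name		= "example-switch",
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.aneg_done	= mysw_aneg_done,
	.update_link	= mysw_update_link,
	/* remaining phy_driver fields omitted */
};
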
diff --git a/target/linux/generic/patches-3.3/710-phy-add-mdio_register_board_info.patch b/target/linux/generic/patches-3.3/710-phy-add-mdio_register_board_info.patch
new file mode 100644
index 000000000..f4c00ca7f
--- /dev/null
+++ b/target/linux/generic/patches-3.3/710-phy-add-mdio_register_board_info.patch
@@ -0,0 +1,191 @@
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -36,6 +36,8 @@
+ #include <asm/irq.h>
+ #include <asm/uaccess.h>
+
++#include "mdio-boardinfo.h"
++
+ /**
+ * mdiobus_alloc_size - allocate a mii_bus structure
+ * @size: extra amount of memory to allocate for private storage.
+@@ -192,15 +194,33 @@ void mdiobus_free(struct mii_bus *bus)
+ }
+ EXPORT_SYMBOL(mdiobus_free);
+
++static void mdiobus_setup_phydev_from_boardinfo(struct mii_bus *bus,
++ struct phy_device *phydev,
++ struct mdio_board_info *bi)
++{
++ if (strcmp(bus->id, bi->bus_id) ||
++ bi->phy_addr != phydev->addr)
++ return;
++
++ phydev->dev.platform_data = (void *) bi->platform_data;
++}
++
+ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
+ {
+ struct phy_device *phydev;
++ struct mdio_board_entry *be;
+ int err;
+
+ phydev = get_phy_device(bus, addr);
+ if (IS_ERR(phydev) || phydev == NULL)
+ return phydev;
+
++ mutex_lock(&__mdio_board_lock);
++ list_for_each_entry(be, &__mdio_board_list, list)
++ mdiobus_setup_phydev_from_boardinfo(bus, phydev,
++ &be->board_info);
++ mutex_unlock(&__mdio_board_lock);
++
+ err = phy_device_register(phydev);
+ if (err) {
+ phy_device_free(phydev);
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -543,4 +543,22 @@ int __init mdio_bus_init(void);
+ void mdio_bus_exit(void);
+
+ extern struct bus_type mdio_bus_type;
++
++struct mdio_board_info {
++ const char *bus_id;
++ int phy_addr;
++
++ const void *platform_data;
++};
++
++#ifdef CONFIG_MDIO_BOARDINFO
++int mdiobus_register_board_info(const struct mdio_board_info *info, unsigned n);
++#else
++static inline int
++mdiobus_register_board_info(const struct mdio_board_info *info, unsigned n)
++{
++ return 0;
++}
++#endif
++
+ #endif /* __PHY_H */
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -13,6 +13,10 @@ menuconfig PHYLIB
+
+ if PHYLIB
+
++config MDIO_BOARDINFO
++ bool
++ default y
++
+ config SWCONFIG
+ tristate "Switch configuration API"
+ ---help---
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -2,6 +2,8 @@
+
+ libphy-objs := phy.o phy_device.o mdio_bus.o
+
++obj-$(CONFIG_MDIO_BOARDINFO) += mdio-boardinfo.o
++
+ obj-$(CONFIG_PHYLIB) += libphy.o
+ obj-$(CONFIG_SWCONFIG) += swconfig.o
+ obj-$(CONFIG_MARVELL_PHY) += marvell.o
+--- /dev/null
++++ b/drivers/net/phy/mdio-boardinfo.c
+@@ -0,0 +1,58 @@
++/*
++ * mdio-boardinfo.c - collect pre-declarations of PHY devices
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/phy.h>
++#include <linux/slab.h>
++#include <linux/export.h>
++#include <linux/mutex.h>
++#include <linux/phy.h>
++
++#include "mdio-boardinfo.h"
++
++/*
++ * These symbols are exported ONLY FOR the mdio_bus component.
++ * No other users will be supported.
++ */
++
++LIST_HEAD(__mdio_board_list);
++EXPORT_SYMBOL_GPL(__mdio_board_list);
++
++DEFINE_MUTEX(__mdio_board_lock);
++EXPORT_SYMBOL_GPL(__mdio_board_lock);
++
++/**
++ * mdiobus_register_board_info - register PHY devices for a given board
++ * @info: array of chip descriptors
++ * @n: how many descriptors are provided
++ * Context: can sleep
++ *
++ * The board info passed can safely be __initdata ... but be careful of
++ * any embedded pointers (platform_data, etc), they're copied as-is.
++ */
++int __init
++mdiobus_register_board_info(struct mdio_board_info const *info, unsigned n)
++{
++ struct mdio_board_entry *be;
++ int i;
++
++ be = kzalloc(n * sizeof(*be), GFP_KERNEL);
++ if (!be)
++ return -ENOMEM;
++
++ for (i = 0; i < n; i++, be++, info++) {
++ memcpy(&be->board_info, info, sizeof(*info));
++ mutex_lock(&__mdio_board_lock);
++ list_add_tail(&be->list, &__mdio_board_list);
++ mutex_unlock(&__mdio_board_lock);
++ }
++
++ return 0;
++}
+--- /dev/null
++++ b/drivers/net/phy/mdio-boardinfo.h
+@@ -0,0 +1,22 @@
++/*
++ * mdio-boardinfo.h - boardinfo interface internal to the mdio_bus component
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ */
++
++#include <linux/mutex.h>
++
++struct mdio_board_entry {
++ struct list_head list;
++ struct mdio_board_info board_info;
++};
++
++/* __mdio_board_lock protects __mdio_board_list
++ * only mdio_bus components are allowed to use these symbols.
++ */
++extern struct mutex __mdio_board_lock;
++extern struct list_head __mdio_board_list;
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -15,7 +15,7 @@ obj-$(CONFIG_MII) += mii.o
+ obj-$(CONFIG_MDIO) += mdio.o
+ obj-$(CONFIG_NET) += Space.o loopback.o
+ obj-$(CONFIG_NETCONSOLE) += netconsole.o
+-obj-$(CONFIG_PHYLIB) += phy/
++obj-y += phy/
+ obj-$(CONFIG_RIONET) += rionet.o
+ obj-$(CONFIG_NET_TEAM) += team/
+ obj-$(CONFIG_TUN) += tun.o
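
As a usage illustration of the helper introduced by this patch (the bus id, PHY address and init function below are placeholders, not defined by the patch), board code could pre-declare PHY platform data like this:

#include <linux/kernel.h>
#include <linux/phy.h>

static struct mdio_board_info example_mdio_info[] __initdata = {
	{
		.bus_id		= "example-mii-bus",	/* must match mii_bus->id */
		.phy_addr	= 4,
		.platform_data	= NULL,	/* driver-specific data, stored by reference */
	},
};

static int __init example_board_init(void)
{
	return mdiobus_register_board_info(example_mdio_info,
					   ARRAY_SIZE(example_mdio_info));
}

mdiobus_scan() then assigns the matching entry's platform_data to phydev->dev.platform_data before the PHY device is registered.
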
diff --git a/target/linux/generic/patches-3.3/720-phy_adm6996.patch b/target/linux/generic/patches-3.3/720-phy_adm6996.patch
new file mode 100644
index 000000000..c99824a6b
--- /dev/null
+++ b/target/linux/generic/patches-3.3/720-phy_adm6996.patch
@@ -0,0 +1,26 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -106,6 +106,13 @@ config MICREL_PHY
+ ---help---
+ Supports the KSZ9021, VSC8201, KS8001 PHYs.
+
++config ADM6996_PHY
++ tristate "Driver for ADM6996 switches"
++ select SWCONFIG
++ ---help---
++ Currently supports the ADM6996FC and ADM6996M switches.
++ Support for FC is very limited.
++
+ config FIXED_PHY
+ bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB=y
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -16,6 +16,7 @@ obj-$(CONFIG_VITESSE_PHY) += vitesse.o
+ obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
+ obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
+ obj-$(CONFIG_ICPLUS_PHY) += icplus.o
++obj-$(CONFIG_ADM6996_PHY) += adm6996.o
+ obj-$(CONFIG_REALTEK_PHY) += realtek.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
diff --git a/target/linux/generic/patches-3.3/721-phy_packets.patch b/target/linux/generic/patches-3.3/721-phy_packets.patch
new file mode 100644
index 000000000..575fbaeaa
--- /dev/null
+++ b/target/linux/generic/patches-3.3/721-phy_packets.patch
@@ -0,0 +1,175 @@
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1078,6 +1078,11 @@ struct net_device {
+ const struct net_device_ops *netdev_ops;
+ const struct ethtool_ops *ethtool_ops;
+
++#ifdef CONFIG_ETHERNET_PACKET_MANGLE
++ void (*eth_mangle_rx)(struct net_device *dev, struct sk_buff *skb);
++ struct sk_buff *(*eth_mangle_tx)(struct net_device *dev, struct sk_buff *skb);
++#endif
++
+ /* Hardware header description */
+ const struct header_ops *header_ops;
+
+@@ -1134,6 +1139,9 @@ struct net_device {
+ void *ax25_ptr; /* AX.25 specific data */
+ struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
+ assign before registering */
++#ifdef CONFIG_ETHERNET_PACKET_MANGLE
++ void *phy_ptr; /* PHY device specific data */
++#endif
+
+ /*
+ * Cache lines mostly used on receive path (including eth_type_trans())
+--- a/include/linux/if.h
++++ b/include/linux/if.h
+@@ -80,6 +80,7 @@
+ * skbs on transmit */
+ #define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */
+ #define IFF_TEAM_PORT 0x40000 /* device used as team port */
++#define IFF_NO_IP_ALIGN 0x80000 /* do not ip-align allocated rx pkts */
+
+ #define IF_GET_IFACE 0x0001 /* for querying only */
+ #define IF_GET_PROTO 0x0002
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1661,6 +1661,10 @@ extern struct sk_buff *dev_alloc_skb(uns
+ extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+ unsigned int length, gfp_t gfp_mask);
+
++extern struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
++ unsigned int length, gfp_t gfp);
++
++
+ /**
+ * netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ * @dev: network device to receive on
+@@ -1680,16 +1684,6 @@ static inline struct sk_buff *netdev_all
+ return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
+ }
+
+-static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
+- unsigned int length, gfp_t gfp)
+-{
+- struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
+-
+- if (NET_IP_ALIGN && skb)
+- skb_reserve(skb, NET_IP_ALIGN);
+- return skb;
+-}
+-
+ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
+ unsigned int length)
+ {
+--- a/net/Kconfig
++++ b/net/Kconfig
+@@ -23,6 +23,12 @@ menuconfig NET
+
+ if NET
+
++config ETHERNET_PACKET_MANGLE
++ bool
++ help
++ This option can be selected by phy drivers that need to mangle
++ packets going in or out of an ethernet device.
++
+ config WANT_COMPAT_NETLINK_MESSAGES
+ bool
+ help
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2267,9 +2267,19 @@ int dev_hard_start_xmit(struct sk_buff *
+ }
+ }
+
+- skb_len = skb->len;
+- rc = ops->ndo_start_xmit(skb, dev);
+- trace_net_dev_xmit(skb, rc, dev, skb_len);
++#ifdef CONFIG_ETHERNET_PACKET_MANGLE
++ if (!dev->eth_mangle_tx ||
++ (skb = dev->eth_mangle_tx(dev, skb)) != NULL)
++#else
++ if (1)
++#endif
++ {
++ skb_len = skb->len;
++ rc = ops->ndo_start_xmit(skb, dev);
++ trace_net_dev_xmit(skb, rc, dev, skb_len);
++ } else {
++ rc = NETDEV_TX_OK;
++ }
+ if (rc == NETDEV_TX_OK)
+ txq_trans_update(txq);
+ return rc;
+@@ -2289,9 +2299,19 @@ gso:
+ if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+ skb_dst_drop(nskb);
+
+- skb_len = nskb->len;
+- rc = ops->ndo_start_xmit(nskb, dev);
+- trace_net_dev_xmit(nskb, rc, dev, skb_len);
++#ifdef CONFIG_ETHERNET_PACKET_MANGLE
++ if (!dev->eth_mangle_tx ||
++ (nskb = dev->eth_mangle_tx(dev, nskb)) != NULL)
++#else
++ if (1)
++#endif
++ {
++ skb_len = nskb->len;
++ rc = ops->ndo_start_xmit(nskb, dev);
++ trace_net_dev_xmit(nskb, rc, dev, skb_len);
++ } else {
++ rc = NETDEV_TX_OK;
++ }
+ if (unlikely(rc != NETDEV_TX_OK)) {
+ if (rc & ~NETDEV_TX_MASK)
+ goto out_kfree_gso_skb;
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -58,6 +58,7 @@
+ #include <linux/scatterlist.h>
+ #include <linux/errqueue.h>
+ #include <linux/prefetch.h>
++#include <linux/if.h>
+
+ #include <net/protocol.h>
+ #include <net/dst.h>
+@@ -320,6 +321,22 @@ struct sk_buff *__netdev_alloc_skb(struc
+ }
+ EXPORT_SYMBOL(__netdev_alloc_skb);
+
++struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
++ unsigned int length, gfp_t gfp)
++{
++ struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
++
++#ifdef CONFIG_ETHERNET_PACKET_MANGLE
++ if (dev->priv_flags & IFF_NO_IP_ALIGN)
++ return skb;
++#endif
++
++ if (NET_IP_ALIGN && skb)
++ skb_reserve(skb, NET_IP_ALIGN);
++ return skb;
++}
++EXPORT_SYMBOL(__netdev_alloc_skb_ip_align);
++
+ void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+ int size)
+ {
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -160,6 +160,12 @@ __be16 eth_type_trans(struct sk_buff *sk
+ struct ethhdr *eth;
+
+ skb->dev = dev;
++
++#ifdef CONFIG_ETHERNET_PACKET_MANGLE
++ if (dev->eth_mangle_rx)
++ dev->eth_mangle_rx(dev, skb);
++#endif
++
+ skb_reset_mac_header(skb);
+ skb_pull_inline(skb, ETH_HLEN);
+ eth = eth_hdr(skb);
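
The ETHERNET_PACKET_MANGLE hooks above are intended to be filled in by a switch PHY driver that needs to add or strip per-port tags. A rough sketch of how a driver might attach to them (the mysw_* names are made up for illustration; the fields only exist when CONFIG_ETHERNET_PACKET_MANGLE is set):

static void mysw_mangle_rx(struct net_device *dev, struct sk_buff *skb)
{
	/* e.g. strip the trailing switch tag before the stack sees the frame */
}

static struct sk_buff *mysw_mangle_tx(struct net_device *dev,
				      struct sk_buff *skb)
{
	/* e.g. insert a port tag; returning NULL tells dev_hard_start_xmit()
	 * that the skb was consumed and must not be handed to the driver */
	return skb;
}

static void mysw_attach(struct net_device *dev)
{
	dev->eth_mangle_rx = mysw_mangle_rx;
	dev->eth_mangle_tx = mysw_mangle_tx;
}
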
diff --git a/target/linux/generic/patches-3.3/722-phy_mvswitch.patch b/target/linux/generic/patches-3.3/722-phy_mvswitch.patch
new file mode 100644
index 000000000..8a456117f
--- /dev/null
+++ b/target/linux/generic/patches-3.3/722-phy_mvswitch.patch
@@ -0,0 +1,23 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -113,6 +113,10 @@ config ADM6996_PHY
+ Currently supports the ADM6996FC and ADM6996M switches.
+ Support for FC is very limited.
+
++config MVSWITCH_PHY
++ tristate "Driver for Marvell 88E6060 switches"
++ select ETHERNET_PACKET_MANGLE
++
+ config FIXED_PHY
+ bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB=y
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -17,6 +17,7 @@ obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
+ obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
+ obj-$(CONFIG_ICPLUS_PHY) += icplus.o
+ obj-$(CONFIG_ADM6996_PHY) += adm6996.o
++obj-$(CONFIG_MVSWITCH_PHY) += mvswitch.o
+ obj-$(CONFIG_REALTEK_PHY) += realtek.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
diff --git a/target/linux/generic/patches-3.3/723-phy_ip175c.patch b/target/linux/generic/patches-3.3/723-phy_ip175c.patch
new file mode 100644
index 000000000..8eff10cf4
--- /dev/null
+++ b/target/linux/generic/patches-3.3/723-phy_ip175c.patch
@@ -0,0 +1,23 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -117,6 +117,10 @@ config MVSWITCH_PHY
+ tristate "Driver for Marvell 88E6060 switches"
+ select ETHERNET_PACKET_MANGLE
+
++config IP17XX_PHY
++ tristate "Driver for IC+ IP17xx switches"
++ select SWCONFIG
++
+ config FIXED_PHY
+ bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB=y
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -18,6 +18,7 @@ obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
+ obj-$(CONFIG_ICPLUS_PHY) += icplus.o
+ obj-$(CONFIG_ADM6996_PHY) += adm6996.o
+ obj-$(CONFIG_MVSWITCH_PHY) += mvswitch.o
++obj-$(CONFIG_IP17XX_PHY) += ip17xx.o
+ obj-$(CONFIG_REALTEK_PHY) += realtek.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
diff --git a/target/linux/generic/patches-3.3/724-phy_ar8216.patch b/target/linux/generic/patches-3.3/724-phy_ar8216.patch
new file mode 100644
index 000000000..eed6e2b92
--- /dev/null
+++ b/target/linux/generic/patches-3.3/724-phy_ar8216.patch
@@ -0,0 +1,24 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -121,6 +121,11 @@ config IP17XX_PHY
+ tristate "Driver for IC+ IP17xx switches"
+ select SWCONFIG
+
++config AR8216_PHY
++ tristate "Driver for Atheros AR8216 switches"
++ select ETHERNET_PACKET_MANGLE
++ select SWCONFIG
++
+ config FIXED_PHY
+ bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB=y
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -20,6 +20,7 @@ obj-$(CONFIG_ADM6996_PHY) += adm6996.o
+ obj-$(CONFIG_MVSWITCH_PHY) += mvswitch.o
+ obj-$(CONFIG_IP17XX_PHY) += ip17xx.o
+ obj-$(CONFIG_REALTEK_PHY) += realtek.o
++obj-$(CONFIG_AR8216_PHY) += ar8216.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
+ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
diff --git a/target/linux/generic/patches-3.3/725-phy_rtl8306.patch b/target/linux/generic/patches-3.3/725-phy_rtl8306.patch
new file mode 100644
index 000000000..db0ee6582
--- /dev/null
+++ b/target/linux/generic/patches-3.3/725-phy_rtl8306.patch
@@ -0,0 +1,23 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -126,6 +126,10 @@ config AR8216_PHY
+ select ETHERNET_PACKET_MANGLE
+ select SWCONFIG
+
++config RTL8306_PHY
++ tristate "Driver for Realtek RTL8306S switches"
++ select SWCONFIG
++
+ config FIXED_PHY
+ bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB=y
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -21,6 +21,7 @@ obj-$(CONFIG_MVSWITCH_PHY) += mvswitch.o
+ obj-$(CONFIG_IP17XX_PHY) += ip17xx.o
+ obj-$(CONFIG_REALTEK_PHY) += realtek.o
+ obj-$(CONFIG_AR8216_PHY) += ar8216.o
++obj-$(CONFIG_RTL8306_PHY) += rtl8306.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
+ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
diff --git a/target/linux/generic/patches-3.3/726-phy_rtl8366.patch b/target/linux/generic/patches-3.3/726-phy_rtl8366.patch
new file mode 100644
index 000000000..d258e4b56
--- /dev/null
+++ b/target/linux/generic/patches-3.3/726-phy_rtl8366.patch
@@ -0,0 +1,45 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -168,6 +168,30 @@ config MDIO_OCTEON
+
+ If in doubt, say Y.
+
++config RTL8366_SMI
++ tristate "Driver for the RTL8366 SMI interface"
++ depends on GENERIC_GPIO
++ ---help---
++ This module implements the SMI interface protocol which is used
++ by some RTL8366 ethernet switch devices via the generic GPIO API.
++
++if RTL8366_SMI
++
++config RTL8366_SMI_DEBUG_FS
++ bool "RTL8366 SMI interface debugfs support"
++ depends on DEBUG_FS
++ default n
++
++config RTL8366S_PHY
++ tristate "Driver for the Realtek RTL8366S switch"
++ select SWCONFIG
++
++config RTL8366RB_PHY
++ tristate "Driver for the Realtek RTL8366RB switch"
++ select SWCONFIG
++
++endif # RTL8366_SMI
++
+ endif # PHYLIB
+
+ config MICREL_KS8995MA
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -22,6 +22,9 @@ obj-$(CONFIG_IP17XX_PHY) += ip17xx.o
+ obj-$(CONFIG_REALTEK_PHY) += realtek.o
+ obj-$(CONFIG_AR8216_PHY) += ar8216.o
+ obj-$(CONFIG_RTL8306_PHY) += rtl8306.o
++obj-$(CONFIG_RTL8366_SMI) += rtl8366_smi.o
++obj-$(CONFIG_RTL8366S_PHY) += rtl8366s.o
++obj-$(CONFIG_RTL8366RB_PHY) += rtl8366rb.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
+ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
diff --git a/target/linux/generic/patches-3.3/727-phy-rtl8367.patch b/target/linux/generic/patches-3.3/727-phy-rtl8367.patch
new file mode 100644
index 000000000..a24f497c9
--- /dev/null
+++ b/target/linux/generic/patches-3.3/727-phy-rtl8367.patch
@@ -0,0 +1,23 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -190,6 +190,10 @@ config RTL8366RB_PHY
+ tristate "Driver for the Realtek RTL8366RB switch"
+ select SWCONFIG
+
++config RTL8367_PHY
++ tristate "Driver for the Realtek RTL8367R/M switches"
++ select SWCONFIG
++
+ endif # RTL8366_SMI
+
+ endif # PHYLIB
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -25,6 +25,7 @@ obj-$(CONFIG_RTL8306_PHY) += rtl8306.o
+ obj-$(CONFIG_RTL8366_SMI) += rtl8366_smi.o
+ obj-$(CONFIG_RTL8366S_PHY) += rtl8366s.o
+ obj-$(CONFIG_RTL8366RB_PHY) += rtl8366rb.o
++obj-$(CONFIG_RTL8367_PHY) += rtl8367.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
+ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
diff --git a/target/linux/generic/patches-3.3/728-phy-micrel.patch b/target/linux/generic/patches-3.3/728-phy-micrel.patch
new file mode 100644
index 000000000..c41d0309c
--- /dev/null
+++ b/target/linux/generic/patches-3.3/728-phy-micrel.patch
@@ -0,0 +1,24 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -130,6 +130,11 @@ config RTL8306_PHY
+ tristate "Driver for Realtek RTL8306S switches"
+ select SWCONFIG
+
++config MICREL_PHY
++ tristate "Drivers for Micrel/Kendin PHYs"
++ ---help---
++ Currently has a driver for the KSZ8041
++
+ config FIXED_PHY
+ bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB=y
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -27,6 +27,7 @@ obj-$(CONFIG_RTL8366S_PHY) += rtl8366s.o
+ obj-$(CONFIG_RTL8366RB_PHY) += rtl8366rb.o
+ obj-$(CONFIG_RTL8367_PHY) += rtl8367.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
++obj-$(CONFIG_MICREL_PHY) += micrel.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
+ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
+ obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
diff --git a/target/linux/generic/patches-3.3/729-phy-tantos.patch b/target/linux/generic/patches-3.3/729-phy-tantos.patch
new file mode 100644
index 000000000..0466d94b4
--- /dev/null
+++ b/target/linux/generic/patches-3.3/729-phy-tantos.patch
@@ -0,0 +1,21 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -206,3 +206,8 @@ endif # PHYLIB
+ config MICREL_KS8995MA
+ tristate "Micrel KS8995MA 5-ports 10/100 managed Ethernet switch"
+ depends on SPI
++
++config PSB6970_PHY
++ tristate "Lantiq XWAY Tantos (PSB6970) Ethernet switch"
++ select SWCONFIG
++ select ETHERNET_PACKET_MANGLE
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -28,6 +28,7 @@ obj-$(CONFIG_RTL8366RB_PHY) += rtl8366rb
+ obj-$(CONFIG_RTL8367_PHY) += rtl8367.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_MICREL_PHY) += micrel.o
++obj-$(CONFIG_PSB6970_PHY) += psb6970.o
+ obj-$(CONFIG_FIXED_PHY) += fixed.o
+ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
+ obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
diff --git a/target/linux/generic/patches-3.3/750-hostap_txpower.patch b/target/linux/generic/patches-3.3/750-hostap_txpower.patch
new file mode 100644
index 000000000..8e2ec9afc
--- /dev/null
+++ b/target/linux/generic/patches-3.3/750-hostap_txpower.patch
@@ -0,0 +1,154 @@
+--- a/drivers/net/wireless/hostap/hostap_ap.c
++++ b/drivers/net/wireless/hostap/hostap_ap.c
+@@ -2340,13 +2340,13 @@ int prism2_ap_get_sta_qual(local_info_t
+ addr[count].sa_family = ARPHRD_ETHER;
+ memcpy(addr[count].sa_data, sta->addr, ETH_ALEN);
+ if (sta->last_rx_silence == 0)
+- qual[count].qual = sta->last_rx_signal < 27 ?
+- 0 : (sta->last_rx_signal - 27) * 92 / 127;
++ qual[count].qual = (sta->last_rx_signal - 156) == 0 ?
++ 0 : (sta->last_rx_signal - 156) * 92 / 64;
+ else
+- qual[count].qual = sta->last_rx_signal -
+- sta->last_rx_silence - 35;
+- qual[count].level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal);
+- qual[count].noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
++ qual[count].qual = (sta->last_rx_signal -
++ sta->last_rx_silence) * 92 / 64;
++ qual[count].level = sta->last_rx_signal;
++ qual[count].noise = sta->last_rx_silence;
+ qual[count].updated = sta->last_rx_updated;
+
+ sta->last_rx_updated = IW_QUAL_DBM;
+@@ -2412,13 +2412,13 @@ int prism2_ap_translate_scan(struct net_
+ memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVQUAL;
+ if (sta->last_rx_silence == 0)
+- iwe.u.qual.qual = sta->last_rx_signal < 27 ?
+- 0 : (sta->last_rx_signal - 27) * 92 / 127;
++ iwe.u.qual.qual = (sta->last_rx_signal -156) == 0 ?
++ 0 : (sta->last_rx_signal - 156) * 92 / 64;
+ else
+- iwe.u.qual.qual = sta->last_rx_signal -
+- sta->last_rx_silence - 35;
+- iwe.u.qual.level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal);
+- iwe.u.qual.noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
++ iwe.u.qual.qual = (sta->last_rx_signal -
++ sta->last_rx_silence) * 92 / 64;
++ iwe.u.qual.level = sta->last_rx_signal;
++ iwe.u.qual.noise = sta->last_rx_silence;
+ iwe.u.qual.updated = sta->last_rx_updated;
+ iwe.len = IW_EV_QUAL_LEN;
+ current_ev = iwe_stream_add_event(info, current_ev, end_buf,
+--- a/drivers/net/wireless/hostap/hostap_config.h
++++ b/drivers/net/wireless/hostap/hostap_config.h
+@@ -45,4 +45,9 @@
+ */
+ /* #define PRISM2_NO_STATION_MODES */
+
++/* Enable TX power Setting functions
++ * (min att = -128 , max att = 127)
++ */
++#define RAW_TXPOWER_SETTING
++
+ #endif /* HOSTAP_CONFIG_H */
+--- a/drivers/net/wireless/hostap/hostap.h
++++ b/drivers/net/wireless/hostap/hostap.h
+@@ -90,6 +90,7 @@ extern const struct iw_handler_def hosta
+ extern const struct ethtool_ops prism2_ethtool_ops;
+
+ int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
++int hostap_restore_power(struct net_device *dev);
+
+
+ #endif /* HOSTAP_H */
+--- a/drivers/net/wireless/hostap/hostap_hw.c
++++ b/drivers/net/wireless/hostap/hostap_hw.c
+@@ -932,6 +932,7 @@ static int hfa384x_set_rid(struct net_de
+ prism2_hw_reset(dev);
+ }
+
++ hostap_restore_power(dev);
+ return res;
+ }
+
+--- a/drivers/net/wireless/hostap/hostap_info.c
++++ b/drivers/net/wireless/hostap/hostap_info.c
+@@ -434,6 +434,11 @@ static void handle_info_queue_linkstatus
+ }
+
+ /* Get BSSID if we have a valid AP address */
++
++ if ( val == HFA384X_LINKSTATUS_CONNECTED ||
++ val == HFA384X_LINKSTATUS_DISCONNECTED )
++ hostap_restore_power(local->dev);
++
+ if (connected) {
+ netif_carrier_on(local->dev);
+ netif_carrier_on(local->ddev);
+--- a/drivers/net/wireless/hostap/hostap_ioctl.c
++++ b/drivers/net/wireless/hostap/hostap_ioctl.c
+@@ -1478,23 +1478,20 @@ static int prism2_txpower_hfa386x_to_dBm
+ val = 255;
+
+ tmp = val;
+- tmp >>= 2;
+
+- return -12 - tmp;
++ return tmp;
+ }
+
+ static u16 prism2_txpower_dBm_to_hfa386x(int val)
+ {
+ signed char tmp;
+
+- if (val > 20)
+- return 128;
+- else if (val < -43)
++ if (val > 127)
+ return 127;
++ else if (val < -128)
++ return 128;
+
+ tmp = val;
+- tmp = -12 - tmp;
+- tmp <<= 2;
+
+ return (unsigned char) tmp;
+ }
+@@ -4057,3 +4054,35 @@ int hostap_ioctl(struct net_device *dev,
+
+ return ret;
+ }
++
++/* BUG FIX: Restore power setting value when lost due to F/W bug */
++
++int hostap_restore_power(struct net_device *dev)
++{
++ struct hostap_interface *iface = netdev_priv(dev);
++ local_info_t *local = iface->local;
++
++ u16 val;
++ int ret = 0;
++
++ if (local->txpower_type == PRISM2_TXPOWER_OFF) {
++ val = 0xff; /* use all standby and sleep modes */
++ ret = local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
++ HFA386X_CR_A_D_TEST_MODES2,
++ &val, NULL);
++ }
++
++#ifdef RAW_TXPOWER_SETTING
++ if (local->txpower_type == PRISM2_TXPOWER_FIXED) {
++ val = HFA384X_TEST_CFG_BIT_ALC;
++ local->func->cmd(dev, HFA384X_CMDCODE_TEST |
++ (HFA384X_TEST_CFG_BITS << 8), 0, &val, NULL);
++ val = prism2_txpower_dBm_to_hfa386x(local->txpower);
++ ret = (local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
++ HFA386X_CR_MANUAL_TX_POWER, &val, NULL));
++ }
++#endif /* RAW_TXPOWER_SETTING */
++ return (ret ? -EOPNOTSUPP : 0);
++}
++
++EXPORT_SYMBOL(hostap_restore_power);
diff --git a/target/linux/generic/patches-3.3/810-pci_disable_common_quirks.patch b/target/linux/generic/patches-3.3/810-pci_disable_common_quirks.patch
new file mode 100644
index 000000000..480215718
--- /dev/null
+++ b/target/linux/generic/patches-3.3/810-pci_disable_common_quirks.patch
@@ -0,0 +1,43 @@
+--- a/drivers/pci/Kconfig
++++ b/drivers/pci/Kconfig
+@@ -51,6 +51,12 @@ config XEN_PCIDEV_FRONTEND
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
+
++config PCI_DISABLE_COMMON_QUIRKS
++ bool "PCI disable common quirks"
++ depends on PCI
++ help
++ If you don't know what to do here, say N.
++
+ config HT_IRQ
+ bool "Interrupts on hypertransport devices"
+ default y
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -105,6 +105,7 @@ static void __devinit quirk_mmio_always_
+ }
+ DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, quirk_mmio_always_on);
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
+ /* The Mellanox Tavor device gives false positive parity errors
+ * Mark this device with a broken_parity_status, to allow
+ * PCI scanning code to "skip" this now blacklisted device.
+@@ -1990,7 +1991,9 @@ static void __devinit fixup_rev1_53c810(
+ }
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
++#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
+ /* Enable 1k I/O space granularity on the Intel P64H2 */
+ static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
+ {
+@@ -2666,6 +2669,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AT
+ quirk_msi_intx_disable_bug);
+
+ #endif /* CONFIG_PCI_MSI */
++#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */
+
+ /* Allow manual resource allocation for PCI hotplug bridges
+ * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
diff --git a/target/linux/generic/patches-3.3/811-pci_disable_usb_common_quirks.patch b/target/linux/generic/patches-3.3/811-pci_disable_usb_common_quirks.patch
new file mode 100644
index 000000000..7bf8eaf1e
--- /dev/null
+++ b/target/linux/generic/patches-3.3/811-pci_disable_usb_common_quirks.patch
@@ -0,0 +1,38 @@
+
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -432,6 +432,8 @@ reset_needed:
+ }
+ EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
++
+ static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
+ {
+ u16 cmd;
+@@ -928,3 +930,5 @@ static void __devinit quirk_usb_early_ha
+ pci_disable_device(pdev);
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
++
++#endif
+--- a/drivers/usb/host/pci-quirks.h
++++ b/drivers/usb/host/pci-quirks.h
+@@ -8,9 +8,17 @@ int usb_amd_find_chipset_info(void);
+ void usb_amd_dev_put(void);
+ void usb_amd_quirk_pll_disable(void);
+ void usb_amd_quirk_pll_enable(void);
++#if !defined(CONFIG_PCI_DISABLE_COMMON_QUIRKS)
+ bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
+ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
+ #else
++static inline bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
++{
++ return false;
++}
++static inline void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) {}
++#endif
++#else
+ static inline void usb_amd_quirk_pll_disable(void) {}
+ static inline void usb_amd_quirk_pll_enable(void) {}
+ static inline void usb_amd_dev_put(void) {}
diff --git a/target/linux/generic/patches-3.3/820-usb_add_usb_find_device_by_name.patch b/target/linux/generic/patches-3.3/820-usb_add_usb_find_device_by_name.patch
new file mode 100644
index 000000000..ee50ff90e
--- /dev/null
+++ b/target/linux/generic/patches-3.3/820-usb_add_usb_find_device_by_name.patch
@@ -0,0 +1,84 @@
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -652,6 +652,71 @@ int __usb_get_extra_descriptor(char *buf
+ }
+ EXPORT_SYMBOL_GPL(__usb_get_extra_descriptor);
+
++static struct usb_device *match_device_name(struct usb_device *dev,
++ const char *name)
++{
++ struct usb_device *ret_dev = NULL;
++ int child;
++
++ dev_dbg(&dev->dev, "check for name %s ...\n", name);
++
++ /* see if this device matches */
++ if (strcmp(dev_name(&dev->dev), name) == 0 ) {
++ dev_dbg(&dev->dev, "matched this device!\n");
++ ret_dev = usb_get_dev(dev);
++ goto exit;
++ }
++
++ /* look through all of the children of this device */
++ for (child = 0; child < dev->maxchild; ++child) {
++ if (dev->children[child]) {
++ usb_lock_device(dev->children[child]);
++ ret_dev = match_device_name(dev->children[child], name);
++ usb_unlock_device(dev->children[child]);
++ if (ret_dev)
++ goto exit;
++ }
++ }
++exit:
++ return ret_dev;
++}
++
++/**
++ * usb_find_device_by_name - find a specific usb device in the system
++ * @name: the name of the device to find
++ *
++ * Returns a pointer to a struct usb_device if such a specified usb
++ * device is present in the system currently. The usage count of the
++ * device will be incremented if a device is found. Make sure to call
++ * usb_put_dev() when the caller is finished with the device.
++ *
++ * If a device with the specified bus id is not found, NULL is returned.
++ */
++struct usb_device *usb_find_device_by_name(const char *name)
++{
++ struct list_head *buslist;
++ struct usb_bus *bus;
++ struct usb_device *dev = NULL;
++
++ mutex_lock(&usb_bus_list_lock);
++ for (buslist = usb_bus_list.next;
++ buslist != &usb_bus_list;
++ buslist = buslist->next) {
++ bus = container_of(buslist, struct usb_bus, bus_list);
++ if (!bus->root_hub)
++ continue;
++ usb_lock_device(bus->root_hub);
++ dev = match_device_name(bus->root_hub, name);
++ usb_unlock_device(bus->root_hub);
++ if (dev)
++ goto exit;
++ }
++exit:
++ mutex_unlock(&usb_bus_list_lock);
++ return dev;
++}
++EXPORT_SYMBOL_GPL(usb_find_device_by_name);
++
+ /**
+ * usb_alloc_coherent - allocate dma-consistent buffer for URB_NO_xxx_DMA_MAP
+ * @dev: device the buffer will be used with
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -531,6 +531,7 @@ extern int usb_lock_device_for_reset(str
+ extern int usb_reset_device(struct usb_device *dev);
+ extern void usb_queue_reset_device(struct usb_interface *dev);
+
++extern struct usb_device *usb_find_device_by_name(const char *name);
+
+ /* USB autosuspend and autoresume */
+ #ifdef CONFIG_USB_SUSPEND
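
A short usage sketch of the lookup helper exported above; the "1-1" device name is only an example bus id, and the reference taken by the lookup must be dropped with usb_put_dev():

#include <linux/usb.h>

static int example_lookup(void)
{
	struct usb_device *udev = usb_find_device_by_name("1-1");

	if (!udev)
		return -ENODEV;

	dev_info(&udev->dev, "found %04x:%04x\n",
		 le16_to_cpu(udev->descriptor.idVendor),
		 le16_to_cpu(udev->descriptor.idProduct));
	usb_put_dev(udev);	/* drop the reference taken by the lookup */
	return 0;
}
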
diff --git a/target/linux/generic/patches-3.3/830-ledtrig_morse.patch b/target/linux/generic/patches-3.3/830-ledtrig_morse.patch
new file mode 100644
index 000000000..3283807f2
--- /dev/null
+++ b/target/linux/generic/patches-3.3/830-ledtrig_morse.patch
@@ -0,0 +1,28 @@
+--- a/drivers/leds/Kconfig
++++ b/drivers/leds/Kconfig
+@@ -480,4 +480,8 @@ config LEDS_TRIGGER_DEFAULT_ON
+ comment "iptables trigger is under Netfilter config (LED target)"
+ depends on LEDS_TRIGGERS
+
++config LEDS_TRIGGER_MORSE
++ tristate "LED Morse Trigger"
++ depends on LEDS_TRIGGERS
++
+ endif # NEW_LEDS
+--- a/drivers/leds/Makefile
++++ b/drivers/leds/Makefile
+@@ -57,3 +57,4 @@ obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) +=
+ obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
+ obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
+ obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
++obj-$(CONFIG_LEDS_TRIGGER_MORSE) += ledtrig-morse.o
+--- a/drivers/leds/ledtrig-morse.c
++++ b/drivers/leds/ledtrig-morse.c
+@@ -26,7 +26,6 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/device.h>
+-#include <linux/sysdev.h>
+ #include <linux/timer.h>
+ #include <linux/ctype.h>
+ #include <linux/leds.h>
diff --git a/target/linux/generic/patches-3.3/831-ledtrig_netdev.patch b/target/linux/generic/patches-3.3/831-ledtrig_netdev.patch
new file mode 100644
index 000000000..7f94b1cd3
--- /dev/null
+++ b/target/linux/generic/patches-3.3/831-ledtrig_netdev.patch
@@ -0,0 +1,51 @@
+--- a/drivers/leds/Kconfig
++++ b/drivers/leds/Kconfig
+@@ -484,4 +484,11 @@ config LEDS_TRIGGER_MORSE
+ tristate "LED Morse Trigger"
+ depends on LEDS_TRIGGERS
+
++config LEDS_TRIGGER_NETDEV
++ tristate "LED Netdev Trigger"
++ depends on NET && LEDS_TRIGGERS
++ help
++ This allows LEDs to be controlled by network device activity.
++ If unsure, say Y.
++
+ endif # NEW_LEDS
+--- a/drivers/leds/Makefile
++++ b/drivers/leds/Makefile
+@@ -58,3 +58,4 @@ obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) +=
+ obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
+ obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
+ obj-$(CONFIG_LEDS_TRIGGER_MORSE) += ledtrig-morse.o
++obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o
+--- a/drivers/leds/ledtrig-netdev.c
++++ b/drivers/leds/ledtrig-netdev.c
+@@ -22,7 +22,6 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/device.h>
+-#include <linux/sysdev.h>
+ #include <linux/netdevice.h>
+ #include <linux/timer.h>
+ #include <linux/ctype.h>
+@@ -307,8 +306,9 @@ done:
+ static void netdev_trig_timer(unsigned long arg)
+ {
+ struct led_netdev_data *trigger_data = (struct led_netdev_data *)arg;
+- const struct net_device_stats *dev_stats;
++ struct rtnl_link_stats64 *dev_stats;
+ unsigned new_activity;
++ struct rtnl_link_stats64 temp;
+
+ write_lock(&trigger_data->lock);
+
+@@ -318,7 +318,7 @@ static void netdev_trig_timer(unsigned l
+ goto no_restart;
+ }
+
+- dev_stats = dev_get_stats(trigger_data->net_dev);
++ dev_stats = dev_get_stats(trigger_data->net_dev, &temp);
+ new_activity =
+ ((trigger_data->mode & MODE_TX) ? dev_stats->tx_packets : 0) +
+ ((trigger_data->mode & MODE_RX) ? dev_stats->rx_packets : 0);
diff --git a/target/linux/generic/patches-3.3/832-ledtrig_usbdev.patch b/target/linux/generic/patches-3.3/832-ledtrig_usbdev.patch
new file mode 100644
index 000000000..8933497e8
--- /dev/null
+++ b/target/linux/generic/patches-3.3/832-ledtrig_usbdev.patch
@@ -0,0 +1,31 @@
+--- a/drivers/leds/Kconfig
++++ b/drivers/leds/Kconfig
+@@ -491,4 +491,11 @@ config LEDS_TRIGGER_NETDEV
+ This allows LEDs to be controlled by network device activity.
+ If unsure, say Y.
+
++config LEDS_TRIGGER_USBDEV
++ tristate "LED USB device Trigger"
++ depends on USB && LEDS_TRIGGERS
++ help
++ This allows LEDs to be controlled by the presence/activity of
++ a USB device. If unsure, say N.
++
+ endif # NEW_LEDS
+--- a/drivers/leds/Makefile
++++ b/drivers/leds/Makefile
+@@ -59,3 +59,4 @@ obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledt
+ obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
+ obj-$(CONFIG_LEDS_TRIGGER_MORSE) += ledtrig-morse.o
+ obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o
++obj-$(CONFIG_LEDS_TRIGGER_USBDEV) += ledtrig-usbdev.o
+--- a/drivers/leds/ledtrig-usbdev.c
++++ b/drivers/leds/ledtrig-usbdev.c
+@@ -24,7 +24,6 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/device.h>
+-#include <linux/sysdev.h>
+ #include <linux/timer.h>
+ #include <linux/ctype.h>
+ #include <linux/slab.h>
diff --git a/target/linux/generic/patches-3.3/835-gpiodev.patch b/target/linux/generic/patches-3.3/835-gpiodev.patch
new file mode 100644
index 000000000..f41d5a685
--- /dev/null
+++ b/target/linux/generic/patches-3.3/835-gpiodev.patch
@@ -0,0 +1,27 @@
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -511,6 +511,14 @@ config NSC_GPIO
+ pc8736x_gpio drivers. If those drivers are built as
+ modules, this one will be too, named nsc_gpio
+
++config GPIO_DEVICE
++ tristate "GPIO device support"
++ depends on GENERIC_GPIO
++ help
++ Say Y to enable Linux GPIO device support. This allows control of
++ GPIO pins using a character device
++
++
+ config RAW_DRIVER
+ tristate "RAW driver (/dev/raw/rawN)"
+ depends on BLOCK
+--- a/drivers/char/Makefile
++++ b/drivers/char/Makefile
+@@ -47,6 +47,7 @@ obj-$(CONFIG_NWFLASH) += nwflash.o
+ obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
+ obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
+ obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
++obj-$(CONFIG_GPIO_DEVICE) += gpio_dev.o
+ obj-$(CONFIG_GPIO_TB0219) += tb0219.o
+ obj-$(CONFIG_TELCLOCK) += tlclk.o
+
diff --git a/target/linux/generic/patches-3.3/840-rtc7301.patch b/target/linux/generic/patches-3.3/840-rtc7301.patch
new file mode 100644
index 000000000..35dd3b8bd
--- /dev/null
+++ b/target/linux/generic/patches-3.3/840-rtc7301.patch
@@ -0,0 +1,250 @@
+--- a/drivers/rtc/Kconfig
++++ b/drivers/rtc/Kconfig
+@@ -719,6 +719,15 @@ config RTC_DRV_NUC900
+ If you say yes here you get support for the RTC subsystem of the
+ NUC910/NUC920 used in embedded systems.
+
++config RTC_DRV_RTC7301
++ tristate "Epson RTC-7301 SF/DG"
++ help
++ If you say Y here you will get support for the
++ Epson RTC-7301 SF/DG RTC chips.
++
++ This driver can also be built as a module. If so, the module
++ will be called rtc-rtc7301.
++
+ comment "on-CPU RTC drivers"
+
+ config RTC_DRV_DAVINCI
+--- a/drivers/rtc/Makefile
++++ b/drivers/rtc/Makefile
+@@ -86,6 +86,7 @@ obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c
+ obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
+ obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
+ obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
++obj-$(CONFIG_RTC_DRV_RTC7301) += rtc-rtc7301.o
+ obj-$(CONFIG_RTC_DRV_RV3029C2) += rtc-rv3029c2.o
+ obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o
+ obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o
+--- /dev/null
++++ b/drivers/rtc/rtc-rtc7301.c
+@@ -0,0 +1,219 @@
++/*
++ * Driver for Epson RTC-7301SF/DG
++ *
++ * Copyright (C) 2009 Jose Vasconcellos
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/rtc.h>
++#include <linux/platform_device.h>
++#include <linux/io.h>
++#include <linux/delay.h>
++#include <linux/bcd.h>
++
++#define RTC_NAME "rtc7301"
++#define RTC_VERSION "0.1"
++
++/* Epson RTC-7301 register addresses */
++#define RTC7301_SEC 0x00
++#define RTC7301_SEC10 0x01
++#define RTC7301_MIN 0x02
++#define RTC7301_MIN10 0x03
++#define RTC7301_HOUR 0x04
++#define RTC7301_HOUR10 0x05
++#define RTC7301_WEEKDAY 0x06
++#define RTC7301_DAY 0x07
++#define RTC7301_DAY10 0x08
++#define RTC7301_MON 0x09
++#define RTC7301_MON10 0x0A
++#define RTC7301_YEAR 0x0B
++#define RTC7301_YEAR10 0x0C
++#define RTC7301_YEAR100 0x0D
++#define RTC7301_YEAR1000 0x0E
++#define RTC7301_CTRLREG 0x0F
++
++static uint8_t __iomem *rtc7301_base;
++
++#define read_reg(offset) (readb(rtc7301_base + offset) & 0xf)
++#define write_reg(offset, data) writeb(data, rtc7301_base + (offset))
++
++#define rtc7301_isbusy() (read_reg(RTC7301_CTRLREG) & 1)
++
++static void rtc7301_init_settings(void)
++{
++ int i;
++
++ write_reg(RTC7301_CTRLREG, 2);
++ write_reg(RTC7301_YEAR1000, 2);
++ udelay(122);
++
++ /* bank 1 */
++ write_reg(RTC7301_CTRLREG, 6);
++ for (i=0; i<15; i++)
++ write_reg(i, 0);
++
++ /* bank 2 */
++ write_reg(RTC7301_CTRLREG, 14);
++ for (i=0; i<15; i++)
++ write_reg(i, 0);
++ write_reg(RTC7301_CTRLREG, 0);
++}
++
++static int rtc7301_get_datetime(struct device *dev, struct rtc_time *dt)
++{
++ int cnt;
++ uint8_t buf[16];
++
++ cnt = 0;
++ while (rtc7301_isbusy()) {
++ udelay(244);
++ if (cnt++ > 100) {
++ dev_err(dev, "%s: timeout error %x\n", __func__, rtc7301_base[RTC7301_CTRLREG]);
++ return -EIO;
++ }
++ }
++
++ for (cnt=0; cnt<16; cnt++)
++ buf[cnt] = read_reg(cnt);
++
++ if (buf[RTC7301_SEC10] & 8) {
++ dev_err(dev, "%s: RTC not set\n", __func__);
++ return -EINVAL;
++ }
++
++ memset(dt, 0, sizeof(*dt));
++
++ dt->tm_sec = buf[RTC7301_SEC] + buf[RTC7301_SEC10]*10;
++ dt->tm_min = buf[RTC7301_MIN] + buf[RTC7301_MIN10]*10;
++ dt->tm_hour = buf[RTC7301_HOUR] + buf[RTC7301_HOUR10]*10;
++
++ dt->tm_mday = buf[RTC7301_DAY] + buf[RTC7301_DAY10]*10;
++ dt->tm_mon = buf[RTC7301_MON] + buf[RTC7301_MON10]*10 - 1;
++ dt->tm_year = buf[RTC7301_YEAR] + buf[RTC7301_YEAR10]*10 +
++ buf[RTC7301_YEAR100]*100 +
++ ((buf[RTC7301_YEAR1000] & 3)*1000) - 1900;
++
++ /* the rtc device may contain illegal values on power up
++ * according to the data sheet. make sure they are valid.
++ */
++
++ return rtc_valid_tm(dt);
++}
++
++static int rtc7301_set_datetime(struct device *dev, struct rtc_time *dt)
++{
++ int data;
++
++ data = dt->tm_year + 1900;
++ if (data >= 2100 || data < 1900)
++ return -EINVAL;
++
++ write_reg(RTC7301_CTRLREG, 2);
++ udelay(122);
++
++ data = bin2bcd(dt->tm_sec);
++ write_reg(RTC7301_SEC, data);
++ write_reg(RTC7301_SEC10, (data >> 4));
++
++ data = bin2bcd(dt->tm_min);
++ write_reg(RTC7301_MIN, data );
++ write_reg(RTC7301_MIN10, (data >> 4));
++
++ data = bin2bcd(dt->tm_hour);
++ write_reg(RTC7301_HOUR, data);
++ write_reg(RTC7301_HOUR10, (data >> 4));
++
++ data = bin2bcd(dt->tm_mday);
++ write_reg(RTC7301_DAY, data);
++ write_reg(RTC7301_DAY10, (data>> 4));
++
++ data = bin2bcd(dt->tm_mon + 1);
++ write_reg(RTC7301_MON, data);
++ write_reg(RTC7301_MON10, (data >> 4));
++
++ data = bin2bcd(dt->tm_year % 100);
++ write_reg(RTC7301_YEAR, data);
++ write_reg(RTC7301_YEAR10, (data >> 4));
++ data = bin2bcd((1900 + dt->tm_year) / 100);
++ write_reg(RTC7301_YEAR100, data);
++
++ data = bin2bcd(dt->tm_wday);
++ write_reg(RTC7301_WEEKDAY, data);
++
++ write_reg(RTC7301_CTRLREG, 0);
++
++ return 0;
++}
++
++static const struct rtc_class_ops rtc7301_rtc_ops = {
++ .read_time = rtc7301_get_datetime,
++ .set_time = rtc7301_set_datetime,
++};
++
++static int __devinit rtc7301_probe(struct platform_device *pdev)
++{
++ struct rtc_device *rtc;
++ struct resource *res;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res)
++ return -ENOENT;
++
++ rtc7301_base = ioremap_nocache(res->start, 0x1000 /*res->end - res->start + 1*/);
++ if (!rtc7301_base)
++ return -EINVAL;
++
++ rtc = rtc_device_register(RTC_NAME, &pdev->dev,
++ &rtc7301_rtc_ops, THIS_MODULE);
++ if (IS_ERR(rtc)) {
++ iounmap(rtc7301_base);
++ return PTR_ERR(rtc);
++ }
++
++ platform_set_drvdata(pdev, rtc);
++
++ rtc7301_init_settings();
++ return 0;
++}
++
++static int __devexit rtc7301_remove(struct platform_device *pdev)
++{
++ struct rtc_device *rtc = platform_get_drvdata(pdev);
++
++ if (rtc)
++ rtc_device_unregister(rtc);
++ if (rtc7301_base)
++ iounmap(rtc7301_base);
++ return 0;
++}
++
++static struct platform_driver rtc7301_driver = {
++ .driver = {
++ .name = RTC_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = rtc7301_probe,
++ .remove = __devexit_p(rtc7301_remove),
++};
++
++static __init int rtc7301_init(void)
++{
++ return platform_driver_register(&rtc7301_driver);
++}
++module_init(rtc7301_init);
++
++static __exit void rtc7301_exit(void)
++{
++ platform_driver_unregister(&rtc7301_driver);
++}
++module_exit(rtc7301_exit);
++
++MODULE_DESCRIPTION("Epson 7301 RTC driver");
++MODULE_AUTHOR("Jose Vasconcellos <jvasco@verizon.net>");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:" RTC_NAME);
++MODULE_VERSION(RTC_VERSION);
diff --git a/target/linux/generic/patches-3.3/841-rtc_pt7c4338.patch b/target/linux/generic/patches-3.3/841-rtc_pt7c4338.patch
new file mode 100644
index 000000000..2c9e6035a
--- /dev/null
+++ b/target/linux/generic/patches-3.3/841-rtc_pt7c4338.patch
@@ -0,0 +1,247 @@
+--- a/drivers/rtc/Kconfig
++++ b/drivers/rtc/Kconfig
+@@ -379,6 +379,15 @@ config RTC_DRV_RV3029C2
+ This driver can also be built as a module. If so, the module
+ will be called rtc-rv3029c2.
+
++config RTC_DRV_PT7C4338
++ tristate "Pericom Technology Inc. PT7C4338 RTC"
++ help
++ If you say yes here you get support for the Pericom Technology
++ Inc. PT7C4338 RTC chip.
++
++ This driver can also be built as a module. If so, the module
++ will be called rtc-pt7c4338.
++
+ endif # I2C
+
+ comment "SPI RTC drivers"
+--- a/drivers/rtc/Makefile
++++ b/drivers/rtc/Makefile
+@@ -79,6 +79,7 @@ obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030
+ obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
+ obj-$(CONFIG_RTC_DRV_PM8XXX) += rtc-pm8xxx.o
+ obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
++obj-$(CONFIG_RTC_DRV_PT7C4338) += rtc-pt7c4338.o
+ obj-$(CONFIG_RTC_DRV_PUV3) += rtc-puv3.o
+ obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
+ obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
+--- /dev/null
++++ b/drivers/rtc/rtc-pt7c4338.c
+@@ -0,0 +1,216 @@
++/*
++ * Copyright 2010 Freescale Semiconductor, Inc.
++ *
++ * Author: Priyanka Jain <Priyanka.Jain@freescale.com>
++ *
++ * See file CREDITS for list of people who contributed to this
++ * project.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of
++ * the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
++ * MA 02111-1307 USA
++ */
++
++/*
++ * This file provides Date & Time support (no alarms) for PT7C4338 chip.
++ *
++ * This file is based on drivers/rtc/rtc-ds1307.c
++ *
++ * PT7C4338 chip is manufactured by Pericom Technology Inc.
++ * It is a serial real-time clock which provides
++ * 1)Low-power clock/calendar.
++ * 2)Programmable square-wave output.
++ * It has 56 bytes of nonvolatile RAM.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/rtc.h>
++#include <linux/bcd.h>
++
++/* RTC register addresses */
++#define PT7C4338_REG_SECONDS 0x00
++#define PT7C4338_REG_MINUTES 0x01
++#define PT7C4338_REG_HOURS 0x02
++#define PT7C4338_REG_AMPM 0x02
++#define PT7C4338_REG_DAY 0x03
++#define PT7C4338_REG_DATE 0x04
++#define PT7C4338_REG_MONTH 0x05
++#define PT7C4338_REG_YEAR 0x06
++#define PT7C4338_REG_CTRL_STAT 0x07
++
++/* RTC second register address bit */
++#define PT7C4338_SEC_BIT_CH 0x80 /*Clock Halt (in Register 0)*/
++
++/* RTC control and status register bits */
++#define PT7C4338_CTRL_STAT_BIT_RS0 0x1 /*Rate select 0*/
++#define PT7C4338_CTRL_STAT_BIT_RS1 0x2 /*Rate select 1*/
++#define PT7C4338_CTRL_STAT_BIT_SQWE 0x10 /*Square Wave Enable*/
++#define PT7C4338_CTRL_STAT_BIT_OSF 0x20 /*Oscillator Stop Flag*/
++#define PT7C4338_CTRL_STAT_BIT_OUT 0x80 /*Output Level Control*/
++
++static const struct i2c_device_id pt7c4338_id[] = {
++ { "pt7c4338", 0 },
++ { }
++};
++MODULE_DEVICE_TABLE(i2c, pt7c4338_id);
++
++struct pt7c4338{
++ struct i2c_client *client;
++ struct rtc_device *rtc;
++};
++
++static int pt7c4338_read_time(struct device *dev, struct rtc_time *time)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret;
++ u8 buf[7];
++ u8 year, month, day, hour, minute, second;
++ u8 week, twelve_hr, am_pm;
++
++ ret = i2c_smbus_read_i2c_block_data(client,
++ PT7C4338_REG_SECONDS, 7, buf);
++ if (ret < 0)
++ return ret;
++ if (ret < 7)
++ return -EIO;
++
++ second = buf[0];
++ minute = buf[1];
++ hour = buf[2];
++ week = buf[3];
++ day = buf[4];
++ month = buf[5];
++ year = buf[6];
++
++ /* Extract additional information for AM/PM */
++ twelve_hr = hour & 0x40;
++ am_pm = hour & 0x20;
++
++ /* Write to rtc_time structure */
++ time->tm_sec = bcd2bin(second & 0x7f);
++ time->tm_min = bcd2bin(minute & 0x7f);
++ if (twelve_hr) {
++ /* Convert to 24 hr */
++ if (am_pm)
++ time->tm_hour = bcd2bin(hour & 0x10) + 12;
++ else
++ time->tm_hour = bcd2bin(hour & 0xBF);
++ } else {
++ time->tm_hour = bcd2bin(hour);
++ }
++
++ time->tm_wday = bcd2bin(week & 0x07) - 1;
++ time->tm_mday = bcd2bin(day & 0x3f);
++ time->tm_mon = bcd2bin(month & 0x1F) - 1;
++ /* assume 20YY not 19YY */
++ time->tm_year = bcd2bin(year) + 100;
++
++ return 0;
++}
++
++static int pt7c4338_set_time(struct device *dev, struct rtc_time *time)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ u8 buf[7];
++
++ /* Extract time from rtc_time and load into pt7c4338*/
++ buf[0] = bin2bcd(time->tm_sec);
++ buf[1] = bin2bcd(time->tm_min);
++ buf[2] = bin2bcd(time->tm_hour);
++ buf[3] = bin2bcd(time->tm_wday + 1); /* Day of the week */
++ buf[4] = bin2bcd(time->tm_mday); /* Date */
++ buf[5] = bin2bcd(time->tm_mon + 1);
++
++ /* assume 20YY not 19YY */
++ if (time->tm_year >= 100)
++ buf[6] = bin2bcd(time->tm_year - 100);
++ else
++ buf[6] = bin2bcd(time->tm_year);
++
++ return i2c_smbus_write_i2c_block_data(client,
++ PT7C4338_REG_SECONDS, 7, buf);
++}
++
++static const struct rtc_class_ops pt7c4338_rtc_ops = {
++ .read_time = pt7c4338_read_time,
++ .set_time = pt7c4338_set_time,
++};
++
++static int pt7c4338_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct pt7c4338 *pt7c4338;
++ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
++ int ret;
++
++ pt7c4338 = kzalloc(sizeof(struct pt7c4338), GFP_KERNEL);
++ if (!pt7c4338)
++ return -ENOMEM;
++
++ pt7c4338->client = client;
++ i2c_set_clientdata(client, pt7c4338);
++ pt7c4338->rtc = rtc_device_register(client->name, &client->dev,
++ &pt7c4338_rtc_ops, THIS_MODULE);
++ if (IS_ERR(pt7c4338->rtc)) {
++ ret = PTR_ERR(pt7c4338->rtc);
++ dev_err(&client->dev, "unable to register the class device\n");
++ goto out_free;
++ }
++
++ return 0;
++out_free:
++ i2c_set_clientdata(client, NULL);
++ kfree(pt7c4338);
++ return ret;
++}
++
++static int __devexit pt7c4338_remove(struct i2c_client *client)
++{
++ struct pt7c4338 *pt7c4338 = i2c_get_clientdata(client);
++
++ rtc_device_unregister(pt7c4338->rtc);
++ i2c_set_clientdata(client, NULL);
++ kfree(pt7c4338);
++ return 0;
++}
++
++static struct i2c_driver pt7c4338_driver = {
++ .driver = {
++ .name = "rtc-pt7c4338",
++ .owner = THIS_MODULE,
++ },
++ .probe = pt7c4338_probe,
++ .remove = __devexit_p(pt7c4338_remove),
++ .id_table = pt7c4338_id,
++};
++
++static int __init pt7c4338_init(void)
++{
++ return i2c_add_driver(&pt7c4338_driver);
++}
++
++static void __exit pt7c4338_exit(void)
++{
++ i2c_del_driver(&pt7c4338_driver);
++}
++
++module_init(pt7c4338_init);
++module_exit(pt7c4338_exit);
++
++MODULE_AUTHOR("Priyanka Jain <Priyanka.Jain@freescale.com>");
++MODULE_DESCRIPTION("Pericom Technology Inc. PT7C4338 RTC Driver");
++MODULE_LICENSE("GPL");
diff --git a/target/linux/generic/patches-3.3/850-glamo_headers.patch b/target/linux/generic/patches-3.3/850-glamo_headers.patch
new file mode 100644
index 000000000..c75e1d62e
--- /dev/null
+++ b/target/linux/generic/patches-3.3/850-glamo_headers.patch
@@ -0,0 +1,21 @@
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -127,6 +127,7 @@
+ #define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */
+ #define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */
+ #define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */
++#define FB_ACCEL_GLAMO 50 /* SMedia Glamo */
+ #define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */
+ #define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */
+ #define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */
+--- a/include/linux/Kbuild
++++ b/include/linux/Kbuild
+@@ -144,6 +144,8 @@ header-y += generic_serial.h
+ header-y += genetlink.h
+ header-y += gfs2_ondisk.h
+ header-y += gigaset_dev.h
++header-y += glamofb.h
++header-y += glamo-engine.h
+ header-y += hdlc.h
+ header-y += hdlcdrv.h
+ header-y += hdreg.h
diff --git a/target/linux/generic/patches-3.3/861-04_spi_gpio_implement_spi_delay.patch b/target/linux/generic/patches-3.3/861-04_spi_gpio_implement_spi_delay.patch
new file mode 100644
index 000000000..7828869c5
--- /dev/null
+++ b/target/linux/generic/patches-3.3/861-04_spi_gpio_implement_spi_delay.patch
@@ -0,0 +1,58 @@
+Implement the SPI-GPIO delay function for busses that need speed limitation.
+
+--mb
+
+
+
+--- a/drivers/spi/spi-gpio.c
++++ b/drivers/spi/spi-gpio.c
+@@ -22,6 +22,7 @@
+ #include <linux/init.h>
+ #include <linux/platform_device.h>
+ #include <linux/gpio.h>
++#include <linux/delay.h>
+
+ #include <linux/spi/spi.h>
+ #include <linux/spi/spi_bitbang.h>
+@@ -70,6 +71,7 @@ struct spi_gpio {
+ * #define SPI_MOSI_GPIO 120
+ * #define SPI_SCK_GPIO 121
+ * #define SPI_N_CHIPSEL 4
++ * #undef NEED_SPIDELAY
+ * #include "spi-gpio.c"
+ */
+
+@@ -77,6 +79,7 @@ struct spi_gpio {
+ #define DRIVER_NAME "spi_gpio"
+
+ #define GENERIC_BITBANG /* vs tight inlines */
++#define NEED_SPIDELAY 1
+
+ /* all functions referencing these symbols must define pdata */
+ #define SPI_MISO_GPIO ((pdata)->miso)
+@@ -121,12 +124,20 @@ static inline int getmiso(const struct s
+ #undef pdata
+
+ /*
+- * NOTE: this clocks "as fast as we can". It "should" be a function of the
+- * requested device clock. Software overhead means we usually have trouble
+- * reaching even one Mbit/sec (except when we can inline bitops), so for now
+- * we'll just assume we never need additional per-bit slowdowns.
++ * NOTE: to clock "as fast as we can", set spi_device.max_speed_hz
++ * and spi_transfer.speed_hz to 0.
++ * Otherwise this is a function of the requested device clock.
++ * Software overhead means we usually have trouble
++ * reaching even one Mbit/sec (except when we can inline bitops). So on small
++ * embedded devices with fast SPI slaves you usually don't need a delay.
+ */
+-#define spidelay(nsecs) do {} while (0)
++static inline void spidelay(unsigned nsecs)
++{
++#ifdef NEED_SPIDELAY
++ if (unlikely(nsecs))
++ ndelay(nsecs);
++#endif /* NEED_SPIDELAY */
++}
+
+ #include "spi-bitbang-txrx.h"
+
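With the delay hooked in, a board caps the bit-banged clock simply by giving the slave a non-zero speed; spidelay() then adds the matching per-bit ndelay(). A minimal sketch of such a slave declaration, where the bus number, chip select, modalias and the 1 MHz cap are made-up values for illustration:

/* Hypothetical slow slave on a bit-banged spi-gpio bus.
 * All numbers and the modalias are illustrative only. */
#include <linux/init.h>
#include <linux/spi/spi.h>

static struct spi_board_info slow_slave __initdata = {
        .modalias     = "spidev",
        .bus_num      = 1,
        .chip_select  = 0,
        .max_speed_hz = 1000000,        /* 0 would mean "as fast as we can" */
        .mode         = SPI_MODE_0,
};

/* registered from board init code via spi_register_board_info(&slow_slave, 1) */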
diff --git a/target/linux/generic/patches-3.3/862-gpio_spi_driver.patch b/target/linux/generic/patches-3.3/862-gpio_spi_driver.patch
new file mode 100644
index 000000000..6cbea32a3
--- /dev/null
+++ b/target/linux/generic/patches-3.3/862-gpio_spi_driver.patch
@@ -0,0 +1,373 @@
+THIS CODE IS DEPRECATED.
+
+Please use the new mainline SPI-GPIO driver, as of 2.6.29.
+
+--mb
+
+
+
+---
+ drivers/spi/Kconfig | 9 +
+ drivers/spi/Makefile | 1
+ drivers/spi/spi_gpio_old.c | 251 +++++++++++++++++++++++++++++++++++++++
+ include/linux/spi/spi_gpio_old.h | 73 +++++++++++
+ 4 files changed, 334 insertions(+)
+
+--- /dev/null
++++ b/include/linux/spi/spi_gpio_old.h
+@@ -0,0 +1,73 @@
++/*
++ * spi_gpio interface to platform code
++ *
++ * Copyright (c) 2008 Piotr Skamruk
++ * Copyright (c) 2008 Michael Buesch
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#ifndef _LINUX_SPI_SPI_GPIO
++#define _LINUX_SPI_SPI_GPIO
++
++#include <linux/types.h>
++#include <linux/spi/spi.h>
++
++
++/**
++ * struct spi_gpio_platform_data - Data definitions for a SPI-GPIO device.
++ *
++ * This structure holds information about a GPIO-based SPI device.
++ *
++ * @pin_clk: The GPIO pin number of the CLOCK pin.
++ *
++ * @pin_miso: The GPIO pin number of the MISO pin.
++ *
++ * @pin_mosi: The GPIO pin number of the MOSI pin.
++ *
++ * @pin_cs: The GPIO pin number of the CHIPSELECT pin.
++ *
++ * @cs_activelow: If true, the chip is selected when the CS line is low.
++ *
++ * @no_spi_delay: If true, no delay is done in the lowlevel bitbanging.
++ * Note that doing no delay is not standards compliant,
++ * but it might be needed to speed up transfers on some
++ * slow embedded machines.
++ *
++ * @boardinfo_setup: This callback is called after the
++ * SPI master device has been registered, but before the
++ * SPI slave device is registered.
++ * @boardinfo_setup_data: Data argument passed to boardinfo_setup().
++ */
++struct spi_gpio_platform_data {
++ unsigned int pin_clk;
++ unsigned int pin_miso;
++ unsigned int pin_mosi;
++ unsigned int pin_cs;
++ bool cs_activelow;
++ bool no_spi_delay;
++ int (*boardinfo_setup)(struct spi_board_info *bi,
++ struct spi_master *master,
++ void *data);
++ void *boardinfo_setup_data;
++};
++
++/**
++ * SPI_GPIO_PLATDEV_NAME - The platform device name string.
++ *
++ * The name string that has to be used for platform_device_alloc
++ * when allocating a spi-gpio device.
++ */
++#define SPI_GPIO_PLATDEV_NAME "spi-gpio"
++
++/**
++ * spi_gpio_next_id - Get another platform device ID number.
++ *
++ * This returns the next platform device ID number that has to be used
++ * for platform_device_alloc. The ID is opaque and should not be used for
++ * anything else.
++ */
++int spi_gpio_next_id(void);
++
++#endif /* _LINUX_SPI_SPI_GPIO */
+--- /dev/null
++++ b/drivers/spi/spi_gpio_old.c
+@@ -0,0 +1,251 @@
++/*
++ * Bitbanging SPI bus driver using GPIO API
++ *
++ * Copyright (c) 2008 Piotr Skamruk
++ * Copyright (c) 2008 Michael Buesch
++ *
++ * based on spi_s3c2410_gpio.c
++ * Copyright (c) 2006 Ben Dooks
++ * Copyright (c) 2006 Simtec Electronics
++ * and on i2c-gpio.c
++ * Copyright (C) 2007 Atmel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/workqueue.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/spi_bitbang.h>
++#include <linux/spi/spi_gpio_old.h>
++#include <linux/gpio.h>
++#include <asm/atomic.h>
++
++
++struct spi_gpio {
++ struct spi_bitbang bitbang;
++ struct spi_gpio_platform_data *info;
++ struct platform_device *pdev;
++ struct spi_board_info bi;
++};
++
++
++static inline struct spi_gpio *spidev_to_sg(struct spi_device *dev)
++{
++ return dev->controller_data;
++}
++
++static inline void setsck(struct spi_device *dev, int val)
++{
++ struct spi_gpio *sp = spidev_to_sg(dev);
++ gpio_set_value(sp->info->pin_clk, val ? 1 : 0);
++}
++
++static inline void setmosi(struct spi_device *dev, int val)
++{
++ struct spi_gpio *sp = spidev_to_sg(dev);
++ gpio_set_value(sp->info->pin_mosi, val ? 1 : 0);
++}
++
++static inline u32 getmiso(struct spi_device *dev)
++{
++ struct spi_gpio *sp = spidev_to_sg(dev);
++ return gpio_get_value(sp->info->pin_miso) ? 1 : 0;
++}
++
++static inline void do_spidelay(struct spi_device *dev, unsigned nsecs)
++{
++ struct spi_gpio *sp = spidev_to_sg(dev);
++
++ if (!sp->info->no_spi_delay)
++ ndelay(nsecs);
++}
++
++#define spidelay(nsecs) do { \
++ /* Steal the spi_device pointer from our caller. \
++ * The bitbang-API should probably get fixed here... */ \
++ do_spidelay(spi, nsecs); \
++ } while (0)
++
++#define EXPAND_BITBANG_TXRX
++#include "spi-bitbang-txrx.h"
++
++static u32 spi_gpio_txrx_mode0(struct spi_device *spi,
++ unsigned nsecs, u32 word, u8 bits)
++{
++ return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
++}
++
++static u32 spi_gpio_txrx_mode1(struct spi_device *spi,
++ unsigned nsecs, u32 word, u8 bits)
++{
++ return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits);
++}
++
++static u32 spi_gpio_txrx_mode2(struct spi_device *spi,
++ unsigned nsecs, u32 word, u8 bits)
++{
++ return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits);
++}
++
++static u32 spi_gpio_txrx_mode3(struct spi_device *spi,
++ unsigned nsecs, u32 word, u8 bits)
++{
++ return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits);
++}
++
++static void spi_gpio_chipselect(struct spi_device *dev, int on)
++{
++ struct spi_gpio *sp = spidev_to_sg(dev);
++
++ if (sp->info->cs_activelow)
++ on = !on;
++ gpio_set_value(sp->info->pin_cs, on ? 1 : 0);
++}
++
++static int spi_gpio_probe(struct platform_device *pdev)
++{
++ struct spi_master *master;
++ struct spi_gpio_platform_data *pdata;
++ struct spi_gpio *sp;
++ struct spi_device *spidev;
++ int err;
++
++ pdata = pdev->dev.platform_data;
++ if (!pdata)
++ return -ENXIO;
++
++ err = -ENOMEM;
++ master = spi_alloc_master(&pdev->dev, sizeof(struct spi_gpio));
++ if (!master)
++ goto err_alloc_master;
++
++ sp = spi_master_get_devdata(master);
++ platform_set_drvdata(pdev, sp);
++ sp->info = pdata;
++
++ err = gpio_request(pdata->pin_clk, "spi_clock");
++ if (err)
++ goto err_request_clk;
++ err = gpio_request(pdata->pin_mosi, "spi_mosi");
++ if (err)
++ goto err_request_mosi;
++ err = gpio_request(pdata->pin_miso, "spi_miso");
++ if (err)
++ goto err_request_miso;
++ err = gpio_request(pdata->pin_cs, "spi_cs");
++ if (err)
++ goto err_request_cs;
++
++ sp->bitbang.master = spi_master_get(master);
++ sp->bitbang.master->bus_num = -1;
++ sp->bitbang.master->num_chipselect = 1;
++ sp->bitbang.chipselect = spi_gpio_chipselect;
++ sp->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_mode0;
++ sp->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_mode1;
++ sp->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_mode2;
++ sp->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_mode3;
++
++ gpio_direction_output(pdata->pin_clk, 0);
++ gpio_direction_output(pdata->pin_mosi, 0);
++ gpio_direction_output(pdata->pin_cs,
++ pdata->cs_activelow ? 1 : 0);
++ gpio_direction_input(pdata->pin_miso);
++
++ err = spi_bitbang_start(&sp->bitbang);
++ if (err)
++ goto err_no_bitbang;
++ err = pdata->boardinfo_setup(&sp->bi, master,
++ pdata->boardinfo_setup_data);
++ if (err)
++ goto err_bi_setup;
++ sp->bi.controller_data = sp;
++ spidev = spi_new_device(master, &sp->bi);
++ if (!spidev)
++ goto err_new_dev;
++
++ return 0;
++
++err_new_dev:
++err_bi_setup:
++ spi_bitbang_stop(&sp->bitbang);
++err_no_bitbang:
++ spi_master_put(sp->bitbang.master);
++ gpio_free(pdata->pin_cs);
++err_request_cs:
++ gpio_free(pdata->pin_miso);
++err_request_miso:
++ gpio_free(pdata->pin_mosi);
++err_request_mosi:
++ gpio_free(pdata->pin_clk);
++err_request_clk:
++ kfree(master);
++
++err_alloc_master:
++ return err;
++}
++
++static int __devexit spi_gpio_remove(struct platform_device *pdev)
++{
++ struct spi_gpio *sp;
++ struct spi_gpio_platform_data *pdata;
++
++ pdata = pdev->dev.platform_data;
++ sp = platform_get_drvdata(pdev);
++
++ gpio_free(pdata->pin_clk);
++ gpio_free(pdata->pin_mosi);
++ gpio_free(pdata->pin_miso);
++ gpio_free(pdata->pin_cs);
++ spi_bitbang_stop(&sp->bitbang);
++ spi_master_put(sp->bitbang.master);
++
++ return 0;
++}
++
++static struct platform_driver spi_gpio_driver = {
++ .driver = {
++ .name = SPI_GPIO_PLATDEV_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = spi_gpio_probe,
++ .remove = __devexit_p(spi_gpio_remove),
++};
++
++int spi_gpio_next_id(void)
++{
++ static atomic_t counter = ATOMIC_INIT(-1);
++
++ return atomic_inc_return(&counter);
++}
++EXPORT_SYMBOL(spi_gpio_next_id);
++
++static int __init spi_gpio_init(void)
++{
++ int err;
++
++ err = platform_driver_register(&spi_gpio_driver);
++ if (err)
++ printk(KERN_ERR "spi-gpio: register failed: %d\n", err);
++
++ return err;
++}
++module_init(spi_gpio_init);
++
++static void __exit spi_gpio_exit(void)
++{
++ platform_driver_unregister(&spi_gpio_driver);
++}
++module_exit(spi_gpio_exit);
++
++MODULE_AUTHOR("Piot Skamruk <piotr.skamruk at gmail.com>");
++MODULE_AUTHOR("Michael Buesch");
++MODULE_DESCRIPTION("Platform independent GPIO bitbanging SPI driver");
++MODULE_LICENSE("GPL v2");
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -154,6 +154,15 @@ config SPI_GPIO
+ GPIO operations, you should be able to leverage that for better
+ speed with a custom version of this driver; see the source code.
+
++config SPI_GPIO_OLD
++ tristate "Old GPIO API based bitbanging SPI controller (DEPRECATED)"
++ depends on SPI_MASTER && GENERIC_GPIO
++ select SPI_BITBANG
++ help
++ This code is deprecated. Please use the new mainline SPI-GPIO driver.
++
++ If unsure, say N.
++
+ config SPI_IMX
+ tristate "Freescale i.MX SPI controllers"
+ depends on ARCH_MXC
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -29,6 +29,7 @@ obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-li
+ obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
+ obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
+ obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
++obj-$(CONFIG_SPI_GPIO_OLD) += spi_gpio_old.o
+ obj-$(CONFIG_SPI_IMX) += spi-imx.o
+ obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
+ obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
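Boards use this deprecated driver by filling in struct spi_gpio_platform_data (documented in the header above) and attaching it to a freshly allocated "spi-gpio" platform device. A minimal, hypothetical sketch; the GPIO numbers, the "spidev" modalias and the 1 MHz speed are placeholders:

/* Hypothetical registration of an spi_gpio_old bus from board code.
 * GPIO numbers and the boardinfo callback body are illustrative only. */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/spi/spi_gpio_old.h>

static int board_spi_setup(struct spi_board_info *bi,
                           struct spi_master *master, void *data)
{
        strlcpy(bi->modalias, "spidev", sizeof(bi->modalias));
        bi->max_speed_hz = 1000000;
        bi->bus_num = master->bus_num;
        return 0;
}

static struct spi_gpio_platform_data board_spi_pdata = {
        .pin_clk         = 3,
        .pin_miso        = 4,
        .pin_mosi        = 5,
        .pin_cs          = 6,
        .cs_activelow    = true,
        .boardinfo_setup = board_spi_setup,
};

static int __init board_spi_init(void)
{
        struct platform_device *pdev;
        int err;

        pdev = platform_device_alloc(SPI_GPIO_PLATDEV_NAME, spi_gpio_next_id());
        if (!pdev)
                return -ENOMEM;
        err = platform_device_add_data(pdev, &board_spi_pdata,
                                       sizeof(board_spi_pdata));
        if (!err)
                err = platform_device_add(pdev);
        if (err)
                platform_device_put(pdev);
        return err;
}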
diff --git a/target/linux/generic/patches-3.3/863-gpiommc.patch b/target/linux/generic/patches-3.3/863-gpiommc.patch
new file mode 100644
index 000000000..32606711f
--- /dev/null
+++ b/target/linux/generic/patches-3.3/863-gpiommc.patch
@@ -0,0 +1,844 @@
+--- /dev/null
++++ b/drivers/mmc/host/gpiommc.c
+@@ -0,0 +1,609 @@
++/*
++ * Driver for an MMC/SD card on a bitbanging GPIO SPI bus.
++ * This module hooks up the mmc_spi and spi_gpio modules and also
++ * provides a configfs interface.
++ *
++ * Copyright 2008 Michael Buesch <mb@bu3sch.de>
++ *
++ * Licensed under the GNU/GPL. See COPYING for details.
++ */
++
++#include <linux/module.h>
++#include <linux/mmc/gpiommc.h>
++#include <linux/platform_device.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/spi/spi_gpio_old.h>
++#include <linux/configfs.h>
++#include <linux/gpio.h>
++#include <asm/atomic.h>
++
++
++#define PFX "gpio-mmc: "
++
++
++struct gpiommc_device {
++ struct platform_device *pdev;
++ struct platform_device *spi_pdev;
++ struct spi_board_info boardinfo;
++};
++
++
++MODULE_DESCRIPTION("GPIO based MMC driver");
++MODULE_AUTHOR("Michael Buesch");
++MODULE_LICENSE("GPL");
++
++
++static int gpiommc_boardinfo_setup(struct spi_board_info *bi,
++ struct spi_master *master,
++ void *data)
++{
++ struct gpiommc_device *d = data;
++ struct gpiommc_platform_data *pdata = d->pdev->dev.platform_data;
++
++ /* Bind the SPI master to the MMC-SPI host driver. */
++ strlcpy(bi->modalias, "mmc_spi", sizeof(bi->modalias));
++
++ bi->max_speed_hz = pdata->max_bus_speed;
++ bi->bus_num = master->bus_num;
++ bi->mode = pdata->mode;
++
++ return 0;
++}
++
++static int gpiommc_probe(struct platform_device *pdev)
++{
++ struct gpiommc_platform_data *mmc_pdata = pdev->dev.platform_data;
++ struct spi_gpio_platform_data spi_pdata;
++ struct gpiommc_device *d;
++ int err;
++
++ err = -ENXIO;
++ if (!mmc_pdata)
++ goto error;
++
++#ifdef CONFIG_MMC_SPI_MODULE
++ err = request_module("mmc_spi");
++ if (err) {
++ printk(KERN_WARNING PFX
++ "Failed to request mmc_spi module.\n");
++ }
++#endif /* CONFIG_MMC_SPI_MODULE */
++
++ /* Allocate the GPIO-MMC device */
++ err = -ENOMEM;
++ d = kzalloc(sizeof(*d), GFP_KERNEL);
++ if (!d)
++ goto error;
++ d->pdev = pdev;
++
++ /* Create the SPI-GPIO device */
++ d->spi_pdev = platform_device_alloc(SPI_GPIO_PLATDEV_NAME,
++ spi_gpio_next_id());
++ if (!d->spi_pdev)
++ goto err_free_d;
++
++ memset(&spi_pdata, 0, sizeof(spi_pdata));
++ spi_pdata.pin_clk = mmc_pdata->pins.gpio_clk;
++ spi_pdata.pin_miso = mmc_pdata->pins.gpio_do;
++ spi_pdata.pin_mosi = mmc_pdata->pins.gpio_di;
++ spi_pdata.pin_cs = mmc_pdata->pins.gpio_cs;
++ spi_pdata.cs_activelow = mmc_pdata->pins.cs_activelow;
++ spi_pdata.no_spi_delay = mmc_pdata->no_spi_delay;
++ spi_pdata.boardinfo_setup = gpiommc_boardinfo_setup;
++ spi_pdata.boardinfo_setup_data = d;
++
++ err = platform_device_add_data(d->spi_pdev, &spi_pdata,
++ sizeof(spi_pdata));
++ if (err)
++ goto err_free_pdev;
++ err = platform_device_add(d->spi_pdev);
++ if (err)
++ goto err_free_pdata;
++ platform_set_drvdata(pdev, d);
++
++ printk(KERN_INFO PFX "MMC-Card \"%s\" "
++ "attached to GPIO pins di=%u, do=%u, clk=%u, cs=%u\n",
++ mmc_pdata->name, mmc_pdata->pins.gpio_di,
++ mmc_pdata->pins.gpio_do,
++ mmc_pdata->pins.gpio_clk,
++ mmc_pdata->pins.gpio_cs);
++
++ return 0;
++
++err_free_pdata:
++ kfree(d->spi_pdev->dev.platform_data);
++ d->spi_pdev->dev.platform_data = NULL;
++err_free_pdev:
++ platform_device_put(d->spi_pdev);
++err_free_d:
++ kfree(d);
++error:
++ return err;
++}
++
++static int gpiommc_remove(struct platform_device *pdev)
++{
++ struct gpiommc_device *d = platform_get_drvdata(pdev);
++ struct gpiommc_platform_data *pdata = d->pdev->dev.platform_data;
++
++ platform_device_unregister(d->spi_pdev);
++ printk(KERN_INFO PFX "GPIO based MMC-Card \"%s\" removed\n",
++ pdata->name);
++ platform_device_put(d->spi_pdev);
++
++ return 0;
++}
++
++#ifdef CONFIG_GPIOMMC_CONFIGFS
++
++/* A device that was created through configfs */
++struct gpiommc_configfs_device {
++ struct config_item item;
++ /* The platform device, after registration. */
++ struct platform_device *pdev;
++ /* The configuration */
++ struct gpiommc_platform_data pdata;
++};
++
++#define GPIO_INVALID -1
++
++static inline bool gpiommc_is_registered(struct gpiommc_configfs_device *dev)
++{
++ return (dev->pdev != NULL);
++}
++
++static inline struct gpiommc_configfs_device *ci_to_gpiommc(struct config_item *item)
++{
++ return item ? container_of(item, struct gpiommc_configfs_device, item) : NULL;
++}
++
++static struct configfs_attribute gpiommc_attr_DI = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "gpio_data_in",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_DO = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "gpio_data_out",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_CLK = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "gpio_clock",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_CS = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "gpio_chipselect",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_CS_activelow = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "gpio_chipselect_activelow",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_spimode = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "spi_mode",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_spidelay = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "spi_delay",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_max_bus_speed = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "max_bus_speed",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute gpiommc_attr_register = {
++ .ca_owner = THIS_MODULE,
++ .ca_name = "register",
++ .ca_mode = S_IRUGO | S_IWUSR,
++};
++
++static struct configfs_attribute *gpiommc_config_attrs[] = {
++ &gpiommc_attr_DI,
++ &gpiommc_attr_DO,
++ &gpiommc_attr_CLK,
++ &gpiommc_attr_CS,
++ &gpiommc_attr_CS_activelow,
++ &gpiommc_attr_spimode,
++ &gpiommc_attr_spidelay,
++ &gpiommc_attr_max_bus_speed,
++ &gpiommc_attr_register,
++ NULL,
++};
++
++static ssize_t gpiommc_config_attr_show(struct config_item *item,
++ struct configfs_attribute *attr,
++ char *page)
++{
++ struct gpiommc_configfs_device *dev = ci_to_gpiommc(item);
++ ssize_t count = 0;
++ unsigned int gpio;
++ int err = 0;
++
++ if (attr == &gpiommc_attr_DI) {
++ gpio = dev->pdata.pins.gpio_di;
++ if (gpio == GPIO_INVALID)
++ count = snprintf(page, PAGE_SIZE, "not configured\n");
++ else
++ count = snprintf(page, PAGE_SIZE, "%u\n", gpio);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_DO) {
++ gpio = dev->pdata.pins.gpio_do;
++ if (gpio == GPIO_INVALID)
++ count = snprintf(page, PAGE_SIZE, "not configured\n");
++ else
++ count = snprintf(page, PAGE_SIZE, "%u\n", gpio);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_CLK) {
++ gpio = dev->pdata.pins.gpio_clk;
++ if (gpio == GPIO_INVALID)
++ count = snprintf(page, PAGE_SIZE, "not configured\n");
++ else
++ count = snprintf(page, PAGE_SIZE, "%u\n", gpio);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_CS) {
++ gpio = dev->pdata.pins.gpio_cs;
++ if (gpio == GPIO_INVALID)
++ count = snprintf(page, PAGE_SIZE, "not configured\n");
++ else
++ count = snprintf(page, PAGE_SIZE, "%u\n", gpio);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_CS_activelow) {
++ count = snprintf(page, PAGE_SIZE, "%u\n",
++ dev->pdata.pins.cs_activelow);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_spimode) {
++ count = snprintf(page, PAGE_SIZE, "%u\n",
++ dev->pdata.mode);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_spidelay) {
++ count = snprintf(page, PAGE_SIZE, "%u\n",
++ !dev->pdata.no_spi_delay);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_max_bus_speed) {
++ count = snprintf(page, PAGE_SIZE, "%u\n",
++ dev->pdata.max_bus_speed);
++ goto out;
++ }
++ if (attr == &gpiommc_attr_register) {
++ count = snprintf(page, PAGE_SIZE, "%u\n",
++ gpiommc_is_registered(dev));
++ goto out;
++ }
++ WARN_ON(1);
++ err = -ENOSYS;
++out:
++ return err ? err : count;
++}
++
++static int gpiommc_do_register(struct gpiommc_configfs_device *dev,
++ const char *name)
++{
++ int err;
++
++ if (gpiommc_is_registered(dev))
++ return 0;
++
++ if (!gpio_is_valid(dev->pdata.pins.gpio_di) ||
++ !gpio_is_valid(dev->pdata.pins.gpio_do) ||
++ !gpio_is_valid(dev->pdata.pins.gpio_clk) ||
++ !gpio_is_valid(dev->pdata.pins.gpio_cs)) {
++ printk(KERN_ERR PFX
++ "configfs: Invalid GPIO pin number(s)\n");
++ return -EINVAL;
++ }
++
++ strlcpy(dev->pdata.name, name,
++ sizeof(dev->pdata.name));
++
++ dev->pdev = platform_device_alloc(GPIOMMC_PLATDEV_NAME,
++ gpiommc_next_id());
++ if (!dev->pdev)
++ return -ENOMEM;
++ err = platform_device_add_data(dev->pdev, &dev->pdata,
++ sizeof(dev->pdata));
++ if (err) {
++ platform_device_put(dev->pdev);
++ return err;
++ }
++ err = platform_device_add(dev->pdev);
++ if (err) {
++ platform_device_put(dev->pdev);
++ return err;
++ }
++
++ return 0;
++}
++
++static void gpiommc_do_unregister(struct gpiommc_configfs_device *dev)
++{
++ if (!gpiommc_is_registered(dev))
++ return;
++
++ platform_device_unregister(dev->pdev);
++ dev->pdev = NULL;
++}
++
++static ssize_t gpiommc_config_attr_store(struct config_item *item,
++ struct configfs_attribute *attr,
++ const char *page, size_t count)
++{
++ struct gpiommc_configfs_device *dev = ci_to_gpiommc(item);
++ int err = -EINVAL;
++ unsigned long data;
++
++ if (attr == &gpiommc_attr_register) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (data == 1)
++ err = gpiommc_do_register(dev, item->ci_name);
++ if (data == 0) {
++ gpiommc_do_unregister(dev);
++ err = 0;
++ }
++ goto out;
++ }
++
++ if (gpiommc_is_registered(dev)) {
++ /* The rest of the config parameters can only be set
++ * as long as the device is not registered, yet. */
++ err = -EBUSY;
++ goto out;
++ }
++
++ if (attr == &gpiommc_attr_DI) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (!gpio_is_valid(data))
++ goto out;
++ dev->pdata.pins.gpio_di = data;
++ err = 0;
++ goto out;
++ }
++ if (attr == &gpiommc_attr_DO) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (!gpio_is_valid(data))
++ goto out;
++ dev->pdata.pins.gpio_do = data;
++ err = 0;
++ goto out;
++ }
++ if (attr == &gpiommc_attr_CLK) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (!gpio_is_valid(data))
++ goto out;
++ dev->pdata.pins.gpio_clk = data;
++ err = 0;
++ goto out;
++ }
++ if (attr == &gpiommc_attr_CS) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (!gpio_is_valid(data))
++ goto out;
++ dev->pdata.pins.gpio_cs = data;
++ err = 0;
++ goto out;
++ }
++ if (attr == &gpiommc_attr_CS_activelow) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (data != 0 && data != 1)
++ goto out;
++ dev->pdata.pins.cs_activelow = data;
++ err = 0;
++ goto out;
++ }
++ if (attr == &gpiommc_attr_spimode) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ switch (data) {
++ case 0:
++ dev->pdata.mode = SPI_MODE_0;
++ break;
++ case 1:
++ dev->pdata.mode = SPI_MODE_1;
++ break;
++ case 2:
++ dev->pdata.mode = SPI_MODE_2;
++ break;
++ case 3:
++ dev->pdata.mode = SPI_MODE_3;
++ break;
++ default:
++ goto out;
++ }
++ err = 0;
++ goto out;
++ }
++ if (attr == &gpiommc_attr_spidelay) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (data != 0 && data != 1)
++ goto out;
++ dev->pdata.no_spi_delay = !data;
++ err = 0;
++ goto out;
++ }
++ if (attr == &gpiommc_attr_max_bus_speed) {
++ err = strict_strtoul(page, 10, &data);
++ if (err)
++ goto out;
++ err = -EINVAL;
++ if (data > UINT_MAX)
++ goto out;
++ dev->pdata.max_bus_speed = data;
++ err = 0;
++ goto out;
++ }
++ WARN_ON(1);
++ err = -ENOSYS;
++out:
++ return err ? err : count;
++}
++
++static void gpiommc_config_item_release(struct config_item *item)
++{
++ struct gpiommc_configfs_device *dev = ci_to_gpiommc(item);
++
++ kfree(dev);
++}
++
++static struct configfs_item_operations gpiommc_config_item_ops = {
++ .release = gpiommc_config_item_release,
++ .show_attribute = gpiommc_config_attr_show,
++ .store_attribute = gpiommc_config_attr_store,
++};
++
++static struct config_item_type gpiommc_dev_ci_type = {
++ .ct_item_ops = &gpiommc_config_item_ops,
++ .ct_attrs = gpiommc_config_attrs,
++ .ct_owner = THIS_MODULE,
++};
++
++static struct config_item *gpiommc_make_item(struct config_group *group,
++ const char *name)
++{
++ struct gpiommc_configfs_device *dev;
++
++ if (strlen(name) > GPIOMMC_MAX_NAMELEN) {
++ printk(KERN_ERR PFX "configfs: device name too long\n");
++ return NULL;
++ }
++
++ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
++ if (!dev)
++ return NULL;
++
++ config_item_init_type_name(&dev->item, name,
++ &gpiommc_dev_ci_type);
++
++ /* Assign default configuration */
++ dev->pdata.pins.gpio_di = GPIO_INVALID;
++ dev->pdata.pins.gpio_do = GPIO_INVALID;
++ dev->pdata.pins.gpio_clk = GPIO_INVALID;
++ dev->pdata.pins.gpio_cs = GPIO_INVALID;
++ dev->pdata.pins.cs_activelow = 1;
++ dev->pdata.mode = SPI_MODE_0;
++ dev->pdata.no_spi_delay = 0;
++ dev->pdata.max_bus_speed = 5000000; /* 5 MHz */
++
++ return &(dev->item);
++}
++
++static void gpiommc_drop_item(struct config_group *group,
++ struct config_item *item)
++{
++ struct gpiommc_configfs_device *dev = ci_to_gpiommc(item);
++
++ gpiommc_do_unregister(dev);
++ kfree(dev);
++}
++
++static struct configfs_group_operations gpiommc_ct_group_ops = {
++ .make_item = gpiommc_make_item,
++ .drop_item = gpiommc_drop_item,
++};
++
++static struct config_item_type gpiommc_ci_type = {
++ .ct_group_ops = &gpiommc_ct_group_ops,
++ .ct_owner = THIS_MODULE,
++};
++
++static struct configfs_subsystem gpiommc_subsys = {
++ .su_group = {
++ .cg_item = {
++ .ci_namebuf = GPIOMMC_PLATDEV_NAME,
++ .ci_type = &gpiommc_ci_type,
++ },
++ },
++ .su_mutex = __MUTEX_INITIALIZER(gpiommc_subsys.su_mutex),
++};
++
++#endif /* CONFIG_GPIOMMC_CONFIGFS */
++
++static struct platform_driver gpiommc_plat_driver = {
++ .probe = gpiommc_probe,
++ .remove = gpiommc_remove,
++ .driver = {
++ .name = GPIOMMC_PLATDEV_NAME,
++ .owner = THIS_MODULE,
++ },
++};
++
++int gpiommc_next_id(void)
++{
++ static atomic_t counter = ATOMIC_INIT(-1);
++
++ return atomic_inc_return(&counter);
++}
++EXPORT_SYMBOL(gpiommc_next_id);
++
++static int __init gpiommc_modinit(void)
++{
++ int err;
++
++ err = platform_driver_register(&gpiommc_plat_driver);
++ if (err)
++ return err;
++
++#ifdef CONFIG_GPIOMMC_CONFIGFS
++ config_group_init(&gpiommc_subsys.su_group);
++ err = configfs_register_subsystem(&gpiommc_subsys);
++ if (err) {
++ platform_driver_unregister(&gpiommc_plat_driver);
++ return err;
++ }
++#endif /* CONFIG_GPIOMMC_CONFIGFS */
++
++ return 0;
++}
++module_init(gpiommc_modinit);
++
++static void __exit gpiommc_modexit(void)
++{
++#ifdef CONFIG_GPIOMMC_CONFIGFS
++ configfs_unregister_subsystem(&gpiommc_subsys);
++#endif
++ platform_driver_unregister(&gpiommc_plat_driver);
++}
++module_exit(gpiommc_modexit);
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -474,6 +474,31 @@ config MMC_SDHI
+ This provides support for the SDHI SD/SDIO controller found in
+ SuperH and ARM SH-Mobile SoCs
+
++config GPIOMMC
++ tristate "MMC/SD over GPIO-based SPI"
++ depends on MMC && MMC_SPI && SPI_GPIO_OLD
++ help
++ This driver hooks up the mmc_spi and spi_gpio modules so that
++ MMC/SD cards can be used on a GPIO based bus by bitbanging
++ the SPI protocol in software.
++
++ This driver provides a configfs interface to dynamically create
++ and destroy GPIO-based MMC/SD card devices. It also provides
++ a platform device interface API.
++ See Documentation/gpiommc.txt for details.
++
++ The module will be called gpiommc.
++
++ If unsure, say N.
++
++config GPIOMMC_CONFIGFS
++ bool
++ depends on GPIOMMC && CONFIGFS_FS
++ default y
++ help
++ This option automatically enables configfs support for gpiommc
++ if configfs is available.
++
+ config MMC_CB710
+ tristate "ENE CB710 MMC/SD Interface support"
+ depends on PCI
+--- a/drivers/mmc/host/Makefile
++++ b/drivers/mmc/host/Makefile
+@@ -37,6 +37,7 @@ tmio_mmc_core-$(subst m,y,$(CONFIG_MMC_S
+ obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o
+ obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
+ obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
++obj-$(CONFIG_GPIOMMC) += gpiommc.o
+ obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
+ obj-$(CONFIG_MMC_DW) += dw_mmc.o
+ obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
+--- /dev/null
++++ b/include/linux/mmc/gpiommc.h
+@@ -0,0 +1,71 @@
++/*
++ * Device driver for MMC/SD cards driven over a GPIO bus.
++ *
++ * Copyright (c) 2008 Michael Buesch
++ *
++ * Licensed under the GNU/GPL version 2.
++ */
++#ifndef LINUX_GPIOMMC_H_
++#define LINUX_GPIOMMC_H_
++
++#include <linux/types.h>
++
++
++#define GPIOMMC_MAX_NAMELEN 15
++#define GPIOMMC_MAX_NAMELEN_STR __stringify(GPIOMMC_MAX_NAMELEN)
++
++/**
++ * struct gpiommc_pins - Hardware pin assignments
++ *
++ * @gpio_di: The GPIO number of the DATA IN pin
++ * @gpio_do: The GPIO number of the DATA OUT pin
++ * @gpio_clk: The GPIO number of the CLOCK pin
++ * @gpio_cs: The GPIO number of the CHIPSELECT pin
++ * @cs_activelow: If true, the chip is considered selected if @gpio_cs is low.
++ */
++struct gpiommc_pins {
++ unsigned int gpio_di;
++ unsigned int gpio_do;
++ unsigned int gpio_clk;
++ unsigned int gpio_cs;
++ bool cs_activelow;
++};
++
++/**
++ * struct gpiommc_platform_data - Platform data for a MMC-over-SPI-GPIO device.
++ *
++ * @name: The unique name string of the device.
++ * @pins: The hardware pin assignments.
++ * @mode: The hardware mode. This is either SPI_MODE_0,
++ * SPI_MODE_1, SPI_MODE_2 or SPI_MODE_3. See the SPI documentation.
++ * @no_spi_delay: Do not use delays in the lowlevel SPI bitbanging code.
++ * This is not standards compliant, but may be required for some
++ * embedded machines to gain reasonable speed.
++ * @max_bus_speed: The maximum speed of the SPI bus, in Hertz.
++ */
++struct gpiommc_platform_data {
++ char name[GPIOMMC_MAX_NAMELEN + 1];
++ struct gpiommc_pins pins;
++ u8 mode;
++ bool no_spi_delay;
++ unsigned int max_bus_speed;
++};
++
++/**
++ * GPIOMMC_PLATDEV_NAME - The platform device name string.
++ *
++ * The name string that has to be used for platform_device_alloc
++ * when allocating a gpiommc device.
++ */
++#define GPIOMMC_PLATDEV_NAME "gpiommc"
++
++/**
++ * gpiommc_next_id - Get another platform device ID number.
++ *
++ * This returns the next platform device ID number that has to be used
++ * for platform_device_alloc. The ID is opaque and should not be used for
++ * anything else.
++ */
++int gpiommc_next_id(void);
++
++#endif /* LINUX_GPIOMMC_H_ */
+--- /dev/null
++++ b/Documentation/gpiommc.txt
+@@ -0,0 +1,97 @@
++GPIOMMC - Driver for an MMC/SD card on a bitbanging GPIO SPI bus
++================================================================
++
++The gpiommc module hooks up the mmc_spi and spi_gpio modules for running an
++MMC or SD card on GPIO pins.
++
++Two interfaces for registering a new MMC/SD card device are provided:
++A static platform-device based mechanism and a dynamic configfs based interface.
++
++
++Registering devices via platform-device
++=======================================
++
++The platform-device interface is used for registering MMC/SD devices that are
++part of the hardware platform. This is mostly useful for embedded machines
++with MMC/SD devices statically connected to the platform GPIO bus.
++
++The data structures are declared in <linux/mmc/gpiommc.h>.
++
++To register a new device, define an instance of struct gpiommc_platform_data.
++This structure holds any information about how the device is hooked up to the
++GPIO pins and what hardware modes the device supports. See the docbook-style
++documentation in the header file for more information on the struct fields.
++
++Then allocate a new instance of a platform device by doing:
++
++ pdev = platform_device_alloc(GPIOMMC_PLATDEV_NAME, gpiommc_next_id());
++
++This will allocate the platform device data structures and hook it up to the
++gpiommc driver.
++Then add the gpiommc_platform_data to the platform device.
++
++ err = platform_device_add_data(pdev, pdata, sizeof(struct gpiommc_platform_data));
++
++You may free the local instance of struct gpiommc_platform_data now. (So the
++struct may be allocated on the stack, too).
++Now simply register the platform device.
++
++ err = platform_device_add(pdev);
++
++Done. The gpiommc probe routine will be invoked now and you should see a kernel
++log message for the added device.
++
++
++Registering devices via configfs
++================================
++
++MMC/SD cards connected via GPIO are often a dynamic affair; for example,
++hand-soldered hacks that wire an MMC/SD card to spare GPIO pins on embedded
++hardware are common.
++So a dynamic interface is provided to conveniently add and remove
++devices from userspace, without the need to recompile the kernel.
++
++The "gpiommc" subdirectory at the configfs mountpoint is used for handling
++the dynamic configuration.
++
++To create a new device, it must first be allocated with mkdir.
++The following command will allocate a device named "my_mmc":
++ mkdir /config/gpiommc/my_mmc
++
++There are several configuration files available in the new
++/config/gpiommc/my_mmc/ directory:
++
++gpio_data_in = The SPI data-IN GPIO pin number.
++gpio_data_out = The SPI data-OUT GPIO pin number.
++gpio_clock = The SPI Clock GPIO pin number.
++gpio_chipselect = The SPI Chipselect GPIO pin number.
++gpio_chipselect_activelow = Boolean. If 0, Chipselect is active-HIGH.
++ If 1, Chipselect is active-LOW.
++spi_mode = The SPI data mode. Can be 0-3.
++spi_delay = Enable all delays in the lowlevel bitbanging.
++max_bus_speed = The maximum SPI bus speed. In Hertz.
++
++register = Not a configuration parameter.
++ Used to register the configured card
++ with the kernel.
++
++The device must first get configured and then registered by writing "1" to
++the "register" file.
++The configuration parameters "gpio_data_in", "gpio_data_out", "gpio_clock"
++and "gpio_chipselect" are essential and _must_ be configured before writing
++"1" to the "register" file. The registration will fail, otherwise.
++
++The default values for the other parameters are:
++gpio_chipselect_activelow = 1 (CS active-LOW)
++spi_mode = 0 (SPI_MODE_0)
++spi_delay = 1 (enabled)
++max_bus_speed = 5000000 (5 MHz)
++
++Configuration values cannot be changed after registration. To unregister
++the device, write a "0" to the "register" file. The configuration can be
++changed again after unregistering.
++
++To completely remove the device, simply rmdir the directory
++(/config/gpiommc/my_mmc in this example).
++There's no need to first unregister the device before removing it. That will
++be done automatically.
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -3045,6 +3045,11 @@ L: linuxppc-dev@lists.ozlabs.org
+ S: Odd Fixes
+ F: drivers/tty/hvc/
+
++GPIOMMC DRIVER
++P: Michael Buesch
++M: mb@bu3sch.de
++S: Maintained
++
+ HARDWARE MONITORING
+ M: Jean Delvare <khali@linux-fr.org>
+ M: Guenter Roeck <guenter.roeck@ericsson.com>
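The platform-device path described in Documentation/gpiommc.txt above boils down to filling in struct gpiommc_platform_data and handing it to a freshly allocated "gpiommc" platform device. A condensed, hypothetical sketch; the GPIO numbers and the 5 MHz limit are placeholders:

/* Hypothetical static registration of a GPIO-driven MMC/SD slot.
 * Pin numbers and bus speed are illustrative only. */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/mmc/gpiommc.h>

static const struct gpiommc_platform_data board_mmc_pdata = {
        .name          = "board-mmc",
        .pins = {
                .gpio_di      = 1,      /* data into the card (MOSI) */
                .gpio_do      = 2,      /* data out of the card (MISO) */
                .gpio_clk     = 3,
                .gpio_cs      = 4,
                .cs_activelow = true,
        },
        .mode          = SPI_MODE_0,
        .max_bus_speed = 5000000,       /* 5 MHz */
};

static int __init board_mmc_init(void)
{
        struct platform_device *pdev;
        int err;

        pdev = platform_device_alloc(GPIOMMC_PLATDEV_NAME, gpiommc_next_id());
        if (!pdev)
                return -ENOMEM;
        err = platform_device_add_data(pdev, &board_mmc_pdata,
                                       sizeof(board_mmc_pdata));
        if (!err)
                err = platform_device_add(pdev);
        if (err)
                platform_device_put(pdev);
        return err;
}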
diff --git a/target/linux/generic/patches-3.3/864-gpiommc_configfs_locking.patch b/target/linux/generic/patches-3.3/864-gpiommc_configfs_locking.patch
new file mode 100644
index 000000000..d4201eb50
--- /dev/null
+++ b/target/linux/generic/patches-3.3/864-gpiommc_configfs_locking.patch
@@ -0,0 +1,58 @@
+The gpiommc configfs context structure needs locking, as configfs
+does not lock access between files.
+
+--- a/drivers/mmc/host/gpiommc.c
++++ b/drivers/mmc/host/gpiommc.c
+@@ -144,6 +144,8 @@ struct gpiommc_configfs_device {
+ struct platform_device *pdev;
+ /* The configuration */
+ struct gpiommc_platform_data pdata;
++ /* Mutex to protect this structure */
++ struct mutex mutex;
+ };
+
+ #define GPIO_INVALID -1
+@@ -234,6 +236,8 @@ static ssize_t gpiommc_config_attr_show(
+ unsigned int gpio;
+ int err = 0;
+
++ mutex_lock(&dev->mutex);
++
+ if (attr == &gpiommc_attr_DI) {
+ gpio = dev->pdata.pins.gpio_di;
+ if (gpio == GPIO_INVALID)
+@@ -294,6 +298,8 @@ static ssize_t gpiommc_config_attr_show(
+ WARN_ON(1);
+ err = -ENOSYS;
+ out:
++ mutex_unlock(&dev->mutex);
++
+ return err ? err : count;
+ }
+
+@@ -353,6 +359,8 @@ static ssize_t gpiommc_config_attr_store
+ int err = -EINVAL;
+ unsigned long data;
+
++ mutex_lock(&dev->mutex);
++
+ if (attr == &gpiommc_attr_register) {
+ err = strict_strtoul(page, 10, &data);
+ if (err)
+@@ -478,6 +486,8 @@ static ssize_t gpiommc_config_attr_store
+ WARN_ON(1);
+ err = -ENOSYS;
+ out:
++ mutex_unlock(&dev->mutex);
++
+ return err ? err : count;
+ }
+
+@@ -514,6 +524,7 @@ static struct config_item *gpiommc_make_
+ if (!dev)
+ return NULL;
+
++ mutex_init(&dev->mutex);
+ config_item_init_type_name(&dev->item, name,
+ &gpiommc_dev_ci_type);
+
diff --git a/target/linux/generic/patches-3.3/865-gpiopwm.patch b/target/linux/generic/patches-3.3/865-gpiopwm.patch
new file mode 100644
index 000000000..015f14ab3
--- /dev/null
+++ b/target/linux/generic/patches-3.3/865-gpiopwm.patch
@@ -0,0 +1,21 @@
+--- a/drivers/Kconfig
++++ b/drivers/Kconfig
+@@ -60,6 +60,8 @@ source "drivers/pinctrl/Kconfig"
+
+ source "drivers/gpio/Kconfig"
+
++source "drivers/pwm/Kconfig"
++
+ source "drivers/w1/Kconfig"
+
+ source "drivers/power/Kconfig"
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -8,6 +8,7 @@
+ # GPIO must come after pinctrl as gpios may need to mux pins etc
+ obj-y += pinctrl/
+ obj-y += gpio/
++obj-$(CONFIG_GENERIC_PWM) += pwm/
+ obj-$(CONFIG_PCI) += pci/
+ obj-$(CONFIG_PARISC) += parisc/
+ obj-$(CONFIG_RAPIDIO) += rapidio/
diff --git a/target/linux/generic/patches-3.3/870-hifn795x_byteswap.patch b/target/linux/generic/patches-3.3/870-hifn795x_byteswap.patch
new file mode 100644
index 000000000..3a37c951e
--- /dev/null
+++ b/target/linux/generic/patches-3.3/870-hifn795x_byteswap.patch
@@ -0,0 +1,17 @@
+--- a/drivers/crypto/hifn_795x.c
++++ b/drivers/crypto/hifn_795x.c
+@@ -682,12 +682,12 @@ static inline u32 hifn_read_1(struct hif
+
+ static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
+ {
+- writel((__force u32)cpu_to_le32(val), dev->bar[0] + reg);
++ writel(val, dev->bar[0] + reg);
+ }
+
+ static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
+ {
+- writel((__force u32)cpu_to_le32(val), dev->bar[1] + reg);
++ writel(val, dev->bar[1] + reg);
+ }
+
+ static void hifn_wait_puc(struct hifn_device *dev)
diff --git a/target/linux/generic/patches-3.3/900-slab_maxsize.patch b/target/linux/generic/patches-3.3/900-slab_maxsize.patch
new file mode 100644
index 000000000..1c9569450
--- /dev/null
+++ b/target/linux/generic/patches-3.3/900-slab_maxsize.patch
@@ -0,0 +1,13 @@
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -127,8 +127,8 @@ unsigned int kmem_cache_size(struct kmem
+ * to do various tricks to work around compiler limitations in order to
+ * ensure proper constant folding.
+ */
+-#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
+- (MAX_ORDER + PAGE_SHIFT - 1) : 25)
++#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 17 ? \
++ (MAX_ORDER + PAGE_SHIFT - 1) : 17)
+
+ #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH)
+ #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
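To make the new clamp concrete: with 4 KiB pages (PAGE_SHIFT = 12) and the common MAX_ORDER of 11, the old expression evaluated to shift 22 and the new one to 17, so the largest kmalloc() allocation drops from 4 MiB to 128 KiB. The arithmetic under those assumed values:

/* Worked check, assuming PAGE_SHIFT = 12 and MAX_ORDER = 11 (common
 * defaults); other configurations scale accordingly. */
#define ASSUMED_PAGE_SHIFT      12
#define ASSUMED_MAX_ORDER       11

#define OLD_SHIFT_HIGH ((ASSUMED_MAX_ORDER + ASSUMED_PAGE_SHIFT - 1) <= 25 ? \
                        (ASSUMED_MAX_ORDER + ASSUMED_PAGE_SHIFT - 1) : 25)     /* = 22 */
#define NEW_SHIFT_HIGH ((ASSUMED_MAX_ORDER + ASSUMED_PAGE_SHIFT - 1) <= 17 ? \
                        (ASSUMED_MAX_ORDER + ASSUMED_PAGE_SHIFT - 1) : 17)     /* = 17 */

/* 1UL << OLD_SHIFT_HIGH = 4 MiB,  1UL << NEW_SHIFT_HIGH = 128 KiB */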
diff --git a/target/linux/generic/patches-3.3/910-kobject_uevent.patch b/target/linux/generic/patches-3.3/910-kobject_uevent.patch
new file mode 100644
index 000000000..aa9a40f09
--- /dev/null
+++ b/target/linux/generic/patches-3.3/910-kobject_uevent.patch
@@ -0,0 +1,21 @@
+--- a/lib/kobject_uevent.c
++++ b/lib/kobject_uevent.c
+@@ -50,6 +50,18 @@ static const char *kobject_actions[] = {
+ [KOBJ_OFFLINE] = "offline",
+ };
+
++u64 uevent_next_seqnum(void)
++{
++ u64 seq;
++
++ mutex_lock(&uevent_sock_mutex);
++ seq = ++uevent_seqnum;
++ mutex_unlock(&uevent_sock_mutex);
++
++ return seq;
++}
++EXPORT_SYMBOL_GPL(uevent_next_seqnum);
++
+ /**
+ * kobject_action_type - translate action string to numeric type
+ *
diff --git a/target/linux/generic/patches-3.3/911-kobject_add_broadcast_uevent.patch b/target/linux/generic/patches-3.3/911-kobject_add_broadcast_uevent.patch
new file mode 100644
index 000000000..104df13bf
--- /dev/null
+++ b/target/linux/generic/patches-3.3/911-kobject_add_broadcast_uevent.patch
@@ -0,0 +1,85 @@
+--- a/include/linux/kobject.h
++++ b/include/linux/kobject.h
+@@ -31,6 +31,8 @@
+ #define UEVENT_NUM_ENVP 32 /* number of env pointers */
+ #define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */
+
++struct sk_buff;
++
+ /* path to the userspace helper executed on an event */
+ extern char uevent_helper[];
+
+@@ -213,6 +215,10 @@ int add_uevent_var(struct kobj_uevent_en
+
+ int kobject_action_type(const char *buf, size_t count,
+ enum kobject_action *type);
++
++int broadcast_uevent(struct sk_buff *skb, __u32 pid, __u32 group,
++ gfp_t allocation);
++
+ #else
+ static inline int kobject_uevent(struct kobject *kobj,
+ enum kobject_action action)
+@@ -229,6 +235,16 @@ int add_uevent_var(struct kobj_uevent_en
+ static inline int kobject_action_type(const char *buf, size_t count,
+ enum kobject_action *type)
+ { return -EINVAL; }
++
++void kfree_skb(struct sk_buff *);
++
++static inline int broadcast_uevent(struct sk_buff *skb, __u32 pid, __u32 group,
++ gfp_t allocation)
++{
++ kfree_skb(skb);
++ return 0;
++}
++
+ #endif
+
+ #endif /* _KOBJECT_H_ */
+--- a/lib/kobject_uevent.c
++++ b/lib/kobject_uevent.c
+@@ -381,6 +381,43 @@ int add_uevent_var(struct kobj_uevent_en
+ EXPORT_SYMBOL_GPL(add_uevent_var);
+
+ #if defined(CONFIG_NET)
++int broadcast_uevent(struct sk_buff *skb, __u32 pid, __u32 group,
++ gfp_t allocation)
++{
++ struct uevent_sock *ue_sk;
++ int err = 0;
++
++ /* send netlink message */
++ mutex_lock(&uevent_sock_mutex);
++ list_for_each_entry(ue_sk, &uevent_sock_list, list) {
++ struct sock *uevent_sock = ue_sk->sk;
++ struct sk_buff *skb2;
++
++ skb2 = skb_clone(skb, allocation);
++ if (!skb2)
++ break;
++
++ err = netlink_broadcast(uevent_sock, skb2, pid, group,
++ allocation);
++ if (err)
++ break;
++ }
++ mutex_unlock(&uevent_sock_mutex);
++
++ kfree_skb(skb);
++ return err;
++}
++#else
++int broadcast_uevent(struct sk_buff *skb, __u32 pid, __u32 group,
++ gfp_t allocation)
++{
++ kfree_skb(skb);
++ return 0;
++}
++#endif
++EXPORT_SYMBOL_GPL(broadcast_uevent);
++
++#if defined(CONFIG_NET)
+ static int uevent_net_init(struct net *net)
+ {
+ struct uevent_sock *ue_sk;
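broadcast_uevent() takes ownership of the passed skb, clones it onto every registered uevent netlink socket, and simply frees it when CONFIG_NET is off. A minimal, hypothetical caller; the string payload layout and the use of multicast group 1 (the group udev/hotplug listeners join) are assumptions for illustration:

/* Hypothetical user of broadcast_uevent(): push a custom string event
 * to uevent listeners. Payload layout is illustrative only. */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kobject.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static int send_custom_event(const char *msg)
{
        struct sk_buff *skb;
        size_t len = strlen(msg) + 1;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        memcpy(skb_put(skb, len), msg, len);

        /* pid 0, multicast group 1; broadcast_uevent() consumes the skb */
        return broadcast_uevent(skb, 0, 1, GFP_KERNEL);
}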
diff --git a/target/linux/generic/patches-3.3/920-unable_to_open_console.patch b/target/linux/generic/patches-3.3/920-unable_to_open_console.patch
new file mode 100644
index 000000000..11f67dc2c
--- /dev/null
+++ b/target/linux/generic/patches-3.3/920-unable_to_open_console.patch
@@ -0,0 +1,11 @@
+--- a/init/main.c
++++ b/init/main.c
+@@ -816,7 +816,7 @@ static int __init kernel_init(void * unu
+
+ /* Open the /dev/console on the rootfs, this should never fail */
+ if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+- printk(KERN_WARNING "Warning: unable to open an initial console.\n");
++ printk(KERN_WARNING "Please be patient, while OpenWrt loads ...\n");
+
+ (void) sys_dup(0);
+ (void) sys_dup(0);
diff --git a/target/linux/generic/patches-3.3/921-use_preinit_as_init.patch b/target/linux/generic/patches-3.3/921-use_preinit_as_init.patch
new file mode 100644
index 000000000..a71251dcd
--- /dev/null
+++ b/target/linux/generic/patches-3.3/921-use_preinit_as_init.patch
@@ -0,0 +1,14 @@
+--- a/init/main.c
++++ b/init/main.c
+@@ -774,10 +774,7 @@ static noinline int init_post(void)
+ printk(KERN_WARNING "Failed to execute %s. Attempting "
+ "defaults...\n", execute_command);
+ }
+- run_init_process("/sbin/init");
+- run_init_process("/etc/init");
+- run_init_process("/bin/init");
+- run_init_process("/bin/sh");
++ run_init_process("/etc/preinit");
+
+ panic("No init found. Try passing init= option to kernel. "
+ "See Linux Documentation/init.txt for guidance.");
diff --git a/target/linux/generic/patches-3.3/930-crashlog.patch b/target/linux/generic/patches-3.3/930-crashlog.patch
new file mode 100644
index 000000000..bce0ea40c
--- /dev/null
+++ b/target/linux/generic/patches-3.3/930-crashlog.patch
@@ -0,0 +1,285 @@
+--- /dev/null
++++ b/include/linux/crashlog.h
+@@ -0,0 +1,17 @@
++#ifndef __CRASHLOG_H
++#define __CRASHLOG_H
++
++#ifdef CONFIG_CRASHLOG
++void crashlog_init_bootmem(struct bootmem_data *bdata);
++void crashlog_init_memblock(phys_addr_t addr, phys_addr_t size);
++#else
++static inline void crashlog_init_bootmem(struct bootmem_data *bdata)
++{
++}
++
++static inline void crashlog_init_memblock(phys_addr_t addr, phys_addr_t size)
++{
++}
++#endif
++
++#endif
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -926,6 +926,10 @@ config RELAY
+
+ If unsure, say N.
+
++config CRASHLOG
++ bool "Crash logging"
++ depends on (!NO_BOOTMEM || HAVE_MEMBLOCK) && !ARM
++
+ config BLK_DEV_INITRD
+ bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
+ depends on BROKEN || !FRV
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -107,6 +107,7 @@ obj-$(CONFIG_USER_RETURN_NOTIFIER) += us
+ obj-$(CONFIG_PADATA) += padata.o
+ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+ obj-$(CONFIG_JUMP_LABEL) += jump_label.o
++obj-$(CONFIG_CRASHLOG) += crashlog.o
+
+ $(obj)/configs.o: $(obj)/config_data.h
+
+--- /dev/null
++++ b/kernel/crashlog.c
+@@ -0,0 +1,190 @@
++/*
++ * Crash information logger
++ * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
++ *
++ * Based on ramoops.c
++ * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/bootmem.h>
++#include <linux/memblock.h>
++#include <linux/debugfs.h>
++#include <linux/crashlog.h>
++#include <linux/kmsg_dump.h>
++#include <linux/module.h>
++#include <linux/pfn.h>
++#include <asm/io.h>
++
++#define CRASHLOG_PAGES 4
++#define CRASHLOG_SIZE (CRASHLOG_PAGES * PAGE_SIZE)
++#define CRASHLOG_MAGIC 0xa1eedead
++
++/*
++ * Start the log at 1M before the end of RAM, as some boot loaders like
++ * to use the end of the RAM for stack usage and other things
++ * If this fails, fall back to using the last part.
++ */
++#define CRASHLOG_OFFSET (1024 * 1024)
++
++struct crashlog_data {
++ u32 magic;
++ u32 len;
++ u8 data[];
++};
++
++static struct debugfs_blob_wrapper crashlog_blob;
++static unsigned long crashlog_addr = 0;
++static struct crashlog_data *crashlog_buf;
++static struct kmsg_dumper dump;
++static bool first = true;
++
++extern struct list_head *crashlog_modules;
++
++#ifndef CONFIG_NO_BOOTMEM
++void __init crashlog_init_bootmem(bootmem_data_t *bdata)
++{
++ unsigned long addr;
++
++ if (crashlog_addr)
++ return;
++
++ addr = PFN_PHYS(bdata->node_low_pfn) - CRASHLOG_OFFSET;
++ if (reserve_bootmem(addr, CRASHLOG_SIZE, BOOTMEM_EXCLUSIVE) < 0) {
++ printk("Crashlog failed to allocate RAM at address 0x%lx\n", addr);
++ bdata->node_low_pfn -= CRASHLOG_PAGES;
++ addr = PFN_PHYS(bdata->node_low_pfn);
++ }
++ crashlog_addr = addr;
++}
++#endif
++
++#ifdef CONFIG_HAVE_MEMBLOCK
++void __meminit crashlog_init_memblock(phys_addr_t addr, phys_addr_t size)
++{
++ if (crashlog_addr)
++ return;
++
++ addr += size - CRASHLOG_OFFSET;
++ if (memblock_reserve(addr, CRASHLOG_SIZE)) {
++ printk("Crashlog failed to allocate RAM at address 0x%lx\n", (unsigned long) addr);
++ return;
++ }
++
++ crashlog_addr = addr;
++}
++#endif
++
++static void __init crashlog_copy(void)
++{
++ if (crashlog_buf->magic != CRASHLOG_MAGIC)
++ return;
++
++ if (!crashlog_buf->len || crashlog_buf->len >
++ CRASHLOG_SIZE - sizeof(*crashlog_buf))
++ return;
++
++ crashlog_blob.size = crashlog_buf->len;
++ crashlog_blob.data = kmemdup(crashlog_buf->data,
++ crashlog_buf->len, GFP_KERNEL);
++
++ debugfs_create_blob("crashlog", 0700, NULL, &crashlog_blob);
++}
++
++static int get_maxlen(void)
++{
++ return CRASHLOG_SIZE - sizeof(*crashlog_buf) - crashlog_buf->len;
++}
++
++static void crashlog_printf(const char *fmt, ...)
++{
++ va_list args;
++ int len = get_maxlen();
++
++ if (!len)
++ return;
++
++ va_start(args, fmt);
++ crashlog_buf->len += vsnprintf(
++ &crashlog_buf->data[crashlog_buf->len],
++ len, fmt, args);
++ va_end(args);
++}
++
++static void crashlog_do_dump(struct kmsg_dumper *dumper,
++ enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
++ const char *s2, unsigned long l2)
++{
++ unsigned long s1_start, s2_start;
++ unsigned long l1_cpy, l2_cpy;
++ struct timeval tv;
++ struct module *m;
++ char *buf;
++ int len;
++
++ if (!first)
++ crashlog_printf("\n===================================\n");
++
++ do_gettimeofday(&tv);
++ crashlog_printf("Time: %lu.%lu\n",
++ (long)tv.tv_sec, (long)tv.tv_usec);
++
++ if (first) {
++ crashlog_printf("Modules:");
++ list_for_each_entry(m, crashlog_modules, list) {
++ crashlog_printf("\t%s@%p+%x", m->name,
++ m->module_core, m->core_size,
++ m->module_init, m->init_size);
++ }
++ crashlog_printf("\n");
++ first = false;
++ }
++
++ buf = (char *)&crashlog_buf->data[crashlog_buf->len];
++ len = get_maxlen();
++
++ l2_cpy = min(l2, (unsigned long)len);
++ l1_cpy = min(l1, (unsigned long)len - l2_cpy);
++
++ s2_start = l2 - l2_cpy;
++ s1_start = l1 - l1_cpy;
++
++ memcpy(buf, s1 + s1_start, l1_cpy);
++ memcpy(buf + l1_cpy, s2 + s2_start, l2_cpy);
++ crashlog_buf->len += l1_cpy + l2_cpy;
++}
++
++
++int __init crashlog_init_fs(void)
++{
++ if (!crashlog_addr)
++ return -ENOMEM;
++
++ crashlog_buf = ioremap(crashlog_addr, CRASHLOG_SIZE);
++
++ crashlog_copy();
++
++ crashlog_buf->magic = CRASHLOG_MAGIC;
++ crashlog_buf->len = 0;
++
++ dump.dump = crashlog_do_dump;
++ kmsg_dump_register(&dump);
++
++ return 0;
++}
++module_init(crashlog_init_fs);
+--- a/mm/bootmem.c
++++ b/mm/bootmem.c
+@@ -15,6 +15,7 @@
+ #include <linux/export.h>
+ #include <linux/kmemleak.h>
+ #include <linux/range.h>
++#include <linux/crashlog.h>
+ #include <linux/memblock.h>
+
+ #include <asm/bug.h>
+@@ -177,6 +178,7 @@ static unsigned long __init free_all_boo
+ if (!bdata->node_bootmem_map)
+ return 0;
+
++ crashlog_init_bootmem(bdata);
+ start = bdata->node_min_pfn;
+ end = bdata->node_low_pfn;
+
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -101,6 +101,9 @@ static LIST_HEAD(modules);
+ #ifdef CONFIG_KGDB_KDB
+ struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
+ #endif /* CONFIG_KGDB_KDB */
++#ifdef CONFIG_CRASHLOG
++struct list_head *crashlog_modules = &modules;
++#endif
+
+
+ /* Block module loading/unloading? */
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -19,6 +19,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
+ #include <linux/memblock.h>
++#include <linux/crashlog.h>
+
+ static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
+ static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
+@@ -305,6 +306,8 @@ static void __init_memblock memblock_ins
+ memblock_set_region_node(rgn, nid);
+ type->cnt++;
+ type->total_size += size;
++ if (type == &memblock.memory && idx == 0)
++ crashlog_init_memblock(base, size);
+ }
+
+ /**
diff --git a/target/linux/generic/patches-3.3/940-ocf_kbuild_integration.patch b/target/linux/generic/patches-3.3/940-ocf_kbuild_integration.patch
new file mode 100644
index 000000000..b5cce90a1
--- /dev/null
+++ b/target/linux/generic/patches-3.3/940-ocf_kbuild_integration.patch
@@ -0,0 +1,20 @@
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -968,3 +968,6 @@ config CRYPTO_USER_API_SKCIPHER
+ source "drivers/crypto/Kconfig"
+
+ endif # if CRYPTO
++
++source "crypto/ocf/Kconfig"
++
+--- a/crypto/Makefile
++++ b/crypto/Makefile
+@@ -92,6 +92,8 @@ obj-$(CONFIG_CRYPTO_USER_API) += af_alg.
+ obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
+ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
+
++obj-$(CONFIG_OCF_OCF) += ocf/
++
+ #
+ # generic algorithms and the async_tx api
+ #
diff --git a/target/linux/generic/patches-3.3/941-ocf_20120127.patch b/target/linux/generic/patches-3.3/941-ocf_20120127.patch
new file mode 100644
index 000000000..794627a85
--- /dev/null
+++ b/target/linux/generic/patches-3.3/941-ocf_20120127.patch
@@ -0,0 +1,164 @@
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -131,6 +131,9 @@
+ * void add_interrupt_randomness(int irq, int irq_flags);
+ * void add_disk_randomness(struct gendisk *disk);
+ *
++ * void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
++ * int random_input_wait(void);
++ *
+ * add_input_randomness() uses the input layer interrupt timing, as well as
+ * the event type information from the hardware.
+ *
+@@ -152,6 +155,13 @@
+ * seek times do not make for good sources of entropy, as their seek
+ * times are usually fairly consistent.
+ *
++ * random_input_words() just provides a raw block of entropy to the input
++ * pool, such as from a hardware entropy generator.
++ *
++ * random_input_wait() suspends the caller until such time as the
++ * entropy pool falls below the write threshold, and returns a count of how
++ * much entropy (in bits) is needed to sustain the pool.
++ *
+ * All of these routines try to estimate how many bits of randomness a
+ * particular randomness source. They do this by keeping track of the
+ * first and second order deltas of the event timings.
+@@ -796,6 +806,63 @@ void add_disk_randomness(struct gendisk
+ }
+ #endif
+
++/*
++ * random_input_words - add bulk entropy to pool
++ *
++ * @buf: buffer to add
++ * @wordcount: number of __u32 words to add
++ * @ent_count: total amount of entropy (in bits) to credit
++ *
++ * this provides bulk input of entropy to the input pool
++ *
++ */
++void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
++{
++ mix_pool_bytes(&input_pool, buf, wordcount*4, NULL);
++
++ credit_entropy_bits(&input_pool, ent_count);
++
++ DEBUG_ENT("crediting %d bits => %d\n",
++ ent_count, input_pool.entropy_count);
++ /*
++ * Wake up waiting processes if we have enough
++ * entropy.
++ */
++ if (input_pool.entropy_count >= random_read_wakeup_thresh)
++ wake_up_interruptible(&random_read_wait);
++}
++EXPORT_SYMBOL(random_input_words);
++
++/*
++ * random_input_wait - wait until random needs entropy
++ *
++ * this function sleeps until the /dev/random subsystem actually
++ * needs more entropy, and then returns the amount of entropy
++ * that it would be nice to have added to the system.
++ */
++int random_input_wait(void)
++{
++ int count;
++
++ wait_event_interruptible(random_write_wait,
++ input_pool.entropy_count < random_write_wakeup_thresh);
++
++ count = random_write_wakeup_thresh - input_pool.entropy_count;
++
++ /* likely we got woken up due to a signal */
++ if (count <= 0) count = random_read_wakeup_thresh;
++
++ DEBUG_ENT("requesting %d bits from input_wait()er %d<%d\n",
++ count,
++ input_pool.entropy_count, random_write_wakeup_thresh);
++
++ return count;
++}
++EXPORT_SYMBOL(random_input_wait);
++
++
++#define EXTRACT_SIZE 10
++
+ /*********************************************************************
+ *
+ * Entropy extraction routines
+--- a/fs/fcntl.c
++++ b/fs/fcntl.c
+@@ -142,6 +142,7 @@ SYSCALL_DEFINE1(dup, unsigned int, filde
+ }
+ return ret;
+ }
++EXPORT_SYMBOL(sys_dup);
+
+ #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
+
+--- a/include/linux/miscdevice.h
++++ b/include/linux/miscdevice.h
+@@ -19,6 +19,7 @@
+ #define APOLLO_MOUSE_MINOR 7
+ #define PC110PAD_MINOR 9
+ /*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */
++#define CRYPTODEV_MINOR 70 /* /dev/crypto */
+ #define WATCHDOG_MINOR 130 /* Watchdog timer */
+ #define TEMP_MINOR 131 /* Temperature Sensor */
+ #define RTC_MINOR 135
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -34,6 +34,30 @@
+ /* Clear the entropy pool and associated counters. (Superuser only.) */
+ #define RNDCLEARPOOL _IO( 'R', 0x06 )
+
++#ifdef CONFIG_FIPS_RNG
++
++/* Size of seed value - equal to AES blocksize */
++#define AES_BLOCK_SIZE_BYTES 16
++#define SEED_SIZE_BYTES AES_BLOCK_SIZE_BYTES
++/* Size of AES key */
++#define KEY_SIZE_BYTES 16
++
++/* ioctl() structure used by FIPS 140-2 Tests */
++struct rand_fips_test {
++ unsigned char key[KEY_SIZE_BYTES]; /* Input */
++ unsigned char datetime[SEED_SIZE_BYTES]; /* Input */
++ unsigned char seed[SEED_SIZE_BYTES]; /* Input */
++ unsigned char result[SEED_SIZE_BYTES]; /* Output */
++};
++
++/* FIPS 140-2 RNG Variable Seed Test. (Superuser only.) */
++#define RNDFIPSVST _IOWR('R', 0x10, struct rand_fips_test)
++
++/* FIPS 140-2 RNG Monte Carlo Test. (Superuser only.) */
++#define RNDFIPSMCT _IOWR('R', 0x11, struct rand_fips_test)
++
++#endif /* #ifdef CONFIG_FIPS_RNG */
++
+ struct rand_pool_info {
+ int entropy_count;
+ int buf_size;
+@@ -53,6 +77,10 @@ extern void add_input_randomness(unsigne
+ unsigned int value);
+ extern void add_interrupt_randomness(int irq, int irq_flags);
+
++extern void random_input_words(__u32 *buf, size_t wordcount, int ent_count);
++extern int random_input_wait(void);
++#define HAS_RANDOM_INPUT_WAIT 1
++
+ extern void get_random_bytes(void *buf, int nbytes);
+ extern void get_random_bytes_arch(void *buf, int nbytes);
+ void generate_random_uuid(unsigned char uuid_out[16]);
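
The RNDFIPSVST/RNDFIPSMCT ioctls and struct rand_fips_test added above are compiled in only when the kernel is built with CONFIG_FIPS_RNG, and the comments mark them superuser-only. The following is a rough userspace sketch of driving the variable-seed test through /dev/random; it assumes the patched include/linux/random.h is the header installed for userspace (the program defines CONFIG_FIPS_RNG itself only to un-hide the guarded declarations), and the key/seed values are placeholders, not FIPS test vectors.

    /* Illustrative only: invoke the FIPS 140-2 variable-seed test ioctl. */
    #define CONFIG_FIPS_RNG 1   /* expose the #ifdef CONFIG_FIPS_RNG block */
    #include <fcntl.h>
    #include <linux/random.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
    	struct rand_fips_test test;
    	int fd = open("/dev/random", O_RDWR);   /* needs root */

    	if (fd < 0)
    		return 1;

    	memset(&test, 0, sizeof(test));             /* zero key/datetime */
    	memset(test.seed, 0xAA, sizeof(test.seed)); /* placeholder seed  */

    	if (ioctl(fd, RNDFIPSVST, &test) == 0) {
    		for (size_t i = 0; i < sizeof(test.result); i++)
    			printf("%02x", test.result[i]);
    		putchar('\n');
    	}
    	close(fd);
    	return 0;
    }
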
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -430,6 +430,7 @@ struct task_struct *find_task_by_vpid(pi
+ {
+ return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
+ }
++EXPORT_SYMBOL(find_task_by_vpid);
+
+ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
+ {
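
random_input_words() and random_input_wait(), exported above, form a push interface: a driver can block until /dev/random wants more entropy and then feed it a bulk buffer with an explicit credit. A minimal sketch of a feeder thread using that pair follows; fill_words_from_hw() is a hypothetical stand-in for a real hardware RNG read, not part of the patch.

    /*
     * Illustrative feeder thread built on the interface added above.
     * fill_words_from_hw() is hypothetical.
     */
    #include <linux/kernel.h>
    #include <linux/kthread.h>
    #include <linux/random.h>

    static int hwrng_feed_thread(void *unused)
    {
    	__u32 buf[32];

    	while (!kthread_should_stop()) {
    		/* Sleep until the input pool falls below the write wakeup
    		 * threshold; the return value is how many bits it wants. */
    		int wanted = random_input_wait();
    		size_t words = min_t(size_t, ARRAY_SIZE(buf),
    				     DIV_ROUND_UP(wanted, 32));

    		fill_words_from_hw(buf, words);	/* hypothetical device read */

    		/* Credit at most one bit of entropy per bit of data. */
    		random_input_words(buf, words, words * 32);
    	}
    	return 0;
    }
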
diff --git a/target/linux/generic/patches-3.3/950-vm_exports.patch b/target/linux/generic/patches-3.3/950-vm_exports.patch
new file mode 100644
index 000000000..1cd970c80
--- /dev/null
+++ b/target/linux/generic/patches-3.3/950-vm_exports.patch
@@ -0,0 +1,117 @@
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2490,6 +2490,16 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
+
+ /* common code */
+
++void shmem_set_file(struct vm_area_struct *vma, struct file *file)
++{
++ if (vma->vm_file)
++ fput(vma->vm_file);
++ vma->vm_file = file;
++ vma->vm_ops = &shmem_vm_ops;
++ vma->vm_flags |= VM_CAN_NONLINEAR;
++}
++EXPORT_SYMBOL_GPL(shmem_set_file);
++
+ /**
+ * shmem_file_setup - get an unlinked file living in tmpfs
+ * @name: name for dentry (to be seen in /proc/<pid>/maps)
+@@ -2567,11 +2577,8 @@ int shmem_zero_setup(struct vm_area_stru
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+- if (vma->vm_file)
+- fput(vma->vm_file);
+- vma->vm_file = file;
+- vma->vm_ops = &shmem_vm_ops;
+- vma->vm_flags |= VM_CAN_NONLINEAR;
++ shmem_set_file(vma, file);
++
+ return 0;
+ }
+
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -268,6 +268,7 @@ int expand_files(struct files_struct *fi
+ /* All good, so we try */
+ return expand_fdtable(files, nr);
+ }
++EXPORT_SYMBOL_GPL(expand_files);
+
+ static int count_open_files(struct fdtable *fdt)
+ {
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -500,6 +500,7 @@ struct files_struct *get_files_struct(st
+
+ return files;
+ }
++EXPORT_SYMBOL_GPL(get_files_struct);
+
+ void put_files_struct(struct files_struct *files)
+ {
+@@ -521,6 +522,7 @@ void put_files_struct(struct files_struc
+ rcu_read_unlock();
+ }
+ }
++EXPORT_SYMBOL_GPL(put_files_struct);
+
+ void reset_files_struct(struct files_struct *files)
+ {
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3854,6 +3854,7 @@ int can_nice(const struct task_struct *p
+ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
+ capable(CAP_SYS_NICE));
+ }
++EXPORT_SYMBOL_GPL(can_nice);
+
+ #ifdef __ARCH_WANT_SYS_NICE
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1412,6 +1412,7 @@ unsigned long zap_page_range(struct vm_a
+ tlb_finish_mmu(&tlb, address, end);
+ return end;
+ }
++EXPORT_SYMBOL_GPL(zap_page_range);
+
+ /**
+ * zap_vma_ptes - remove ptes mapping the vma
+@@ -3087,6 +3088,7 @@ static inline int check_stack_guard_page
+ }
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(vmtruncate_range);
+
+ /*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1257,6 +1257,7 @@ void unmap_kernel_range(unsigned long ad
+ vunmap_page_range(addr, end);
+ flush_tlb_kernel_range(addr, end);
+ }
++EXPORT_SYMBOL_GPL(unmap_kernel_range);
+
+ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
+ {
+@@ -1394,6 +1395,7 @@ struct vm_struct *get_vm_area(unsigned l
+ return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+ -1, GFP_KERNEL, __builtin_return_address(0));
+ }
++EXPORT_SYMBOL_GPL(get_vm_area);
+
+ struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
+ void *caller)
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -872,6 +872,7 @@ extern bool skip_free_areas_node(unsigne
+
+ int shmem_lock(struct file *file, int lock, struct user_struct *user);
+ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
++void shmem_set_file(struct vm_area_struct *vma, struct file *file);
+ int shmem_zero_setup(struct vm_area_struct *);
+
+ extern int can_do_mlock(void);
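
shmem_set_file() above factors the tail of shmem_zero_setup() into an exported (GPL-only) helper, so a module can back a VMA with a tmpfs file it created itself. A sketch of the intended use in a driver mmap handler; only shmem_file_setup() and shmem_set_file() come from the kernel, every other name is illustrative.

    #include <linux/err.h>
    #include <linux/fs.h>
    #include <linux/mm.h>

    static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
    {
    	loff_t size = vma->vm_end - vma->vm_start;
    	struct file *backing;

    	backing = shmem_file_setup("mydrv-buf", size, vma->vm_flags);
    	if (IS_ERR(backing))
    		return PTR_ERR(backing);

    	/* Drops any previous vma->vm_file and installs the tmpfs file
    	 * together with shmem_vm_ops, as shmem_zero_setup() now does. */
    	shmem_set_file(vma, backing);
    	return 0;
    }
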
diff --git a/target/linux/generic/patches-3.3/960-decompress_unlzo_fix.patch b/target/linux/generic/patches-3.3/960-decompress_unlzo_fix.patch
new file mode 100644
index 000000000..ee7c118ef
--- /dev/null
+++ b/target/linux/generic/patches-3.3/960-decompress_unlzo_fix.patch
@@ -0,0 +1,23 @@
+--- /dev/null
++++ b/include/linux/decompress/unlzo_mm.h
+@@ -0,0 +1,10 @@
++#ifndef UNLZO_MM_H
++#define UNLZO_MM_H
++
++#ifdef STATIC
++#define INIT
++#else
++#define INIT __init
++#endif
++
++#endif
+--- a/lib/decompress_unlzo.c
++++ b/lib/decompress_unlzo.c
+@@ -38,6 +38,7 @@
+
+ #include <linux/types.h>
+ #include <linux/lzo.h>
++#include <linux/decompress/unlzo_mm.h>
+ #include <linux/decompress/mm.h>
+
+ #include <linux/compiler.h>
diff --git a/target/linux/generic/patches-3.3/980-update_arm_machtypes.patch b/target/linux/generic/patches-3.3/980-update_arm_machtypes.patch
new file mode 100644
index 000000000..391511c8f
--- /dev/null
+++ b/target/linux/generic/patches-3.3/980-update_arm_machtypes.patch
@@ -0,0 +1,3618 @@
+--- a/arch/arm/tools/mach-types
++++ b/arch/arm/tools/mach-types
+@@ -12,531 +12,3179 @@
+ #
+ # http://www.arm.linux.org.uk/developer/machines/?action=new
+ #
+-# This is a cut-down version of the file; it contains only machines that
+-# are merged into mainline or have been edited in the machine database
+-# within the last 12 months. References to machine_is_NAME() do not count!
+-#
+-# Last update: Tue Dec 6 11:07:38 2011
++# Last update: Fri Apr 20 08:30:36 2012
+ #
+ # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
+ #
+ ebsa110 ARCH_EBSA110 EBSA110 0
+ riscpc ARCH_RPC RISCPC 1
++nexuspci ARCH_NEXUSPCI NEXUSPCI 3
+ ebsa285 ARCH_EBSA285 EBSA285 4
+ netwinder ARCH_NETWINDER NETWINDER 5
+ cats ARCH_CATS CATS 6
++tbox ARCH_TBOX TBOX 7
++co285 ARCH_CO285 CO285 8
++clps7110 ARCH_CLPS7110 CLPS7110 9
++archimedes ARCH_ARC ARCHIMEDES 10
++a5k ARCH_A5K A5K 11
++etoile ARCH_ETOILE ETOILE 12
++lacie_nas ARCH_LACIE_NAS LACIE_NAS 13
++clps7500 ARCH_CLPS7500 CLPS7500 14
+ shark ARCH_SHARK SHARK 15
+ brutus SA1100_BRUTUS BRUTUS 16
+ personal_server ARCH_PERSONAL_SERVER PERSONAL_SERVER 17
++itsy SA1100_ITSY ITSY 18
+ l7200 ARCH_L7200 L7200 19
+ pleb SA1100_PLEB PLEB 20
+ integrator ARCH_INTEGRATOR INTEGRATOR 21
+ h3600 SA1100_H3600 H3600 22
++ixp1200 ARCH_IXP1200 IXP1200 23
+ p720t ARCH_P720T P720T 24
+ assabet SA1100_ASSABET ASSABET 25
++victor SA1100_VICTOR VICTOR 26
+ lart SA1100_LART LART 27
++ranger SA1100_RANGER RANGER 28
+ graphicsclient SA1100_GRAPHICSCLIENT GRAPHICSCLIENT 29
+ xp860 SA1100_XP860 XP860 30
+ cerf SA1100_CERF CERF 31
+ nanoengine SA1100_NANOENGINE NANOENGINE 32
++fpic SA1100_FPIC FPIC 33
++extenex1 SA1100_EXTENEX1 EXTENEX1 34
++sherman SA1100_SHERMAN SHERMAN 35
++accelent_sa SA1100_ACCELENT ACCELENT_SA 36
++accelent_l7200 ARCH_L7200_ACCELENT ACCELENT_L7200 37
++netport SA1100_NETPORT NETPORT 38
++pangolin SA1100_PANGOLIN PANGOLIN 39
++yopy SA1100_YOPY YOPY 40
++coolidge SA1100_COOLIDGE COOLIDGE 41
++huw_webpanel SA1100_HUW_WEBPANEL HUW_WEBPANEL 42
++spotme ARCH_SPOTME SPOTME 43
++freebird ARCH_FREEBIRD FREEBIRD 44
++ti925 ARCH_TI925 TI925 45
++riscstation ARCH_RISCSTATION RISCSTATION 46
++cavy SA1100_CAVY CAVY 47
+ jornada720 SA1100_JORNADA720 JORNADA720 48
++omnimeter SA1100_OMNIMETER OMNIMETER 49
+ edb7211 ARCH_EDB7211 EDB7211 50
++citygo SA1100_CITYGO CITYGO 51
+ pfs168 SA1100_PFS168 PFS168 52
++spot SA1100_SPOT SPOT 53
+ flexanet SA1100_FLEXANET FLEXANET 54
++webpal ARCH_WEBPAL WEBPAL 55
++linpda SA1100_LINPDA LINPDA 56
++anakin ARCH_ANAKIN ANAKIN 57
++mvi SA1100_MVI MVI 58
++jupiter SA1100_JUPITER JUPITER 59
++psionw ARCH_PSIONW PSIONW 60
++aln SA1100_ALN ALN 61
++epxa ARCH_CAMELOT CAMELOT 62
++gds2200 SA1100_GDS2200 GDS2200 63
++netbook SA1100_PSION_SERIES7 PSION_SERIES7 64
++xfile SA1100_XFILE XFILE 65
++accelent_ep9312 ARCH_ACCELENT_EP9312 ACCELENT_EP9312 66
++ic200 ARCH_IC200 IC200 67
++creditlart SA1100_CREDITLART CREDITLART 68
++htm SA1100_HTM HTM 69
++iq80310 ARCH_IQ80310 IQ80310 70
++freebot SA1100_FREEBOT FREEBOT 71
++entel ARCH_ENTEL ENTEL 72
++enp3510 ARCH_ENP3510 ENP3510 73
++trizeps SA1100_TRIZEPS TRIZEPS 74
++nesa SA1100_NESA NESA 75
++venus ARCH_VENUS VENUS 76
++tardis ARCH_TARDIS TARDIS 77
++mercury ARCH_MERCURY MERCURY 78
++empeg SA1100_EMPEG EMPEG 79
++adi_evb ARCH_I80200FCC I80200FCC 80
++itt_cpb SA1100_ITT_CPB ITT_CPB 81
++svc SA1100_SVC SVC 82
++alpha2 SA1100_ALPHA2 ALPHA2 84
++alpha1 SA1100_ALPHA1 ALPHA1 85
++netarm ARCH_NETARM NETARM 86
+ simpad SA1100_SIMPAD SIMPAD 87
++pda1 ARCH_PDA1 PDA1 88
+ lubbock ARCH_LUBBOCK LUBBOCK 89
++aniko ARCH_ANIKO ANIKO 90
+ clep7212 ARCH_CLEP7212 CLEP7212 91
++cs89712 ARCH_CS89712 CS89712 92
++weararm SA1100_WEARARM WEARARM 93
++possio_px SA1100_POSSIO_PX POSSIO_PX 94
++sidearm SA1100_SIDEARM SIDEARM 95
++stork SA1100_STORK STORK 96
+ shannon SA1100_SHANNON SHANNON 97
++ace ARCH_ACE ACE 98
++ballyarm SA1100_BALLYARM BALLYARM 99
++simputer SA1100_SIMPUTER SIMPUTER 100
++nexterm SA1100_NEXTERM NEXTERM 101
++sa1100_elf SA1100_SA1100_ELF SA1100_ELF 102
++gator SA1100_GATOR GATOR 103
++granite ARCH_GRANITE GRANITE 104
+ consus SA1100_CONSUS CONSUS 105
+ aaed2000 ARCH_AAED2000 AAED2000 106
+ cdb89712 ARCH_CDB89712 CDB89712 107
+ graphicsmaster SA1100_GRAPHICSMASTER GRAPHICSMASTER 108
+ adsbitsy SA1100_ADSBITSY ADSBITSY 109
+ pxa_idp ARCH_PXA_IDP PXA_IDP 110
++plce ARCH_PLCE PLCE 111
+ pt_system3 SA1100_PT_SYSTEM3 PT_SYSTEM3 112
++murphy ARCH_MEDALB MEDALB 113
++eagle ARCH_EAGLE EAGLE 114
++dsc21 ARCH_DSC21 DSC21 115
++dsc24 ARCH_DSC24 DSC24 116
++ti5472 ARCH_TI5472 TI5472 117
+ autcpu12 ARCH_AUTCPU12 AUTCPU12 118
++uengine ARCH_UENGINE UENGINE 119
++bluestem SA1100_BLUESTEM BLUESTEM 120
++xingu8 ARCH_XINGU8 XINGU8 121
++bushstb ARCH_BUSHSTB BUSHSTB 122
++epsilon1 SA1100_EPSILON1 EPSILON1 123
++balloon SA1100_BALLOON BALLOON 124
++puppy ARCH_PUPPY PUPPY 125
++elroy SA1100_ELROY ELROY 126
++gms720 ARCH_GMS720 GMS720 127
++s24x ARCH_S24X S24X 128
++jtel_clep7312 ARCH_JTEL_CLEP7312 JTEL_CLEP7312 129
++cx821xx ARCH_CX821XX CX821XX 130
++edb7312 ARCH_EDB7312 EDB7312 131
++bsa1110 SA1100_BSA1110 BSA1110 132
++powerpin ARCH_POWERPIN POWERPIN 133
++openarm ARCH_OPENARM OPENARM 134
++whitechapel SA1100_WHITECHAPEL WHITECHAPEL 135
+ h3100 SA1100_H3100 H3100 136
++h3800 SA1100_H3800 H3800 137
++blue_v1 ARCH_BLUE_V1 BLUE_V1 138
++pxa_cerf ARCH_PXA_CERF PXA_CERF 139
++arm7tevb ARCH_ARM7TEVB ARM7TEVB 140
++d7400 SA1100_D7400 D7400 141
++piranha ARCH_PIRANHA PIRANHA 142
++sbcamelot SA1100_SBCAMELOT SBCAMELOT 143
++kings SA1100_KINGS KINGS 144
++smdk2400 ARCH_SMDK2400 SMDK2400 145
+ collie SA1100_COLLIE COLLIE 146
++idr ARCH_IDR IDR 147
+ badge4 SA1100_BADGE4 BADGE4 148
++webnet ARCH_WEBNET WEBNET 149
++d7300 SA1100_D7300 D7300 150
++cep SA1100_CEP CEP 151
+ fortunet ARCH_FORTUNET FORTUNET 152
++vc547x ARCH_VC547X VC547X 153
++filewalker SA1100_FILEWALKER FILEWALKER 154
++netgateway SA1100_NETGATEWAY NETGATEWAY 155
++symbol2800 SA1100_SYMBOL2800 SYMBOL2800 156
++suns SA1100_SUNS SUNS 157
++frodo SA1100_FRODO FRODO 158
++ms301 SA1100_MACH_TYTE_MS301 MACH_TYTE_MS301 159
+ mx1ads ARCH_MX1ADS MX1ADS 160
+ h7201 ARCH_H7201 H7201 161
+ h7202 ARCH_H7202 H7202 162
++amico ARCH_AMICO AMICO 163
++iam SA1100_IAM IAM 164
++tt530 SA1100_TT530 TT530 165
++sam2400 ARCH_SAM2400 SAM2400 166
++jornada56x SA1100_JORNADA56X JORNADA56X 167
++active SA1100_ACTIVE ACTIVE 168
+ iq80321 ARCH_IQ80321 IQ80321 169
++wid SA1100_WID WID 170
++sabinal ARCH_SABINAL SABINAL 171
++ixp425_matacumbe ARCH_IXP425_MATACUMBE IXP425_MATACUMBE 172
++miniprint SA1100_MINIPRINT MINIPRINT 173
++adm510x ARCH_ADM510X ADM510X 174
++svs200 SA1100_SVS200 SVS200 175
++atg_tcu ARCH_ATG_TCU ATG_TCU 176
++jornada820 SA1100_JORNADA820 JORNADA820 177
++s3c44b0 ARCH_S3C44B0 S3C44B0 178
++margis2 ARCH_MARGIS2 MARGIS2 179
+ ks8695 ARCH_KS8695 KS8695 180
++brh ARCH_BRH BRH 181
++s3c2410 ARCH_S3C2410 S3C2410 182
++possio_px30 ARCH_POSSIO_PX30 POSSIO_PX30 183
++s3c2800 ARCH_S3C2800 S3C2800 184
++fleetwood SA1100_FLEETWOOD FLEETWOOD 185
++omaha ARCH_OMAHA OMAHA 186
++ta7 ARCH_TA7 TA7 187
++nova SA1100_NOVA NOVA 188
++hmk ARCH_HMK HMK 189
+ karo ARCH_KARO KARO 190
++fester SA1100_FESTER FESTER 191
++gpi ARCH_GPI GPI 192
+ smdk2410 ARCH_SMDK2410 SMDK2410 193
++i519 ARCH_I519 I519 194
++nexio SA1100_NEXIO NEXIO 195
++bitbox SA1100_BITBOX BITBOX 196
++g200 SA1100_G200 G200 197
++gill SA1100_GILL GILL 198
++pxa_mercury ARCH_PXA_MERCURY PXA_MERCURY 199
+ ceiva ARCH_CEIVA CEIVA 200
++fret SA1100_FRET FRET 201
++emailphone SA1100_EMAILPHONE EMAILPHONE 202
++h3900 ARCH_H3900 H3900 203
++pxa1 ARCH_PXA1 PXA1 204
++koan369 SA1100_KOAN369 KOAN369 205
++cogent ARCH_COGENT COGENT 206
++esl_simputer ARCH_ESL_SIMPUTER ESL_SIMPUTER 207
++esl_simputer_clr ARCH_ESL_SIMPUTER_CLR ESL_SIMPUTER_CLR 208
++esl_simputer_bw ARCH_ESL_SIMPUTER_BW ESL_SIMPUTER_BW 209
++hhp_cradle ARCH_HHP_CRADLE HHP_CRADLE 210
++he500 ARCH_HE500 HE500 211
++inhandelf2 SA1100_INHANDELF2 INHANDELF2 212
++inhandftip SA1100_INHANDFTIP INHANDFTIP 213
++dnp1110 SA1100_DNP1110 DNP1110 214
++pnp1110 SA1100_PNP1110 PNP1110 215
++csb226 ARCH_CSB226 CSB226 216
++arnold SA1100_ARNOLD ARNOLD 217
+ voiceblue MACH_VOICEBLUE VOICEBLUE 218
++jz8028 ARCH_JZ8028 JZ8028 219
+ h5400 ARCH_H5400 H5400 220
++forte SA1100_FORTE FORTE 221
++acam SA1100_ACAM ACAM 222
++abox SA1100_ABOX ABOX 223
++atmel ARCH_ATMEL ATMEL 224
++sitsang ARCH_SITSANG SITSANG 225
++cpu1110lcdnet SA1100_CPU1110LCDNET CPU1110LCDNET 226
++mpl_vcma9 ARCH_MPL_VCMA9 MPL_VCMA9 227
++opus_a1 ARCH_OPUS_A1 OPUS_A1 228
++daytona ARCH_DAYTONA DAYTONA 229
++killbear SA1100_KILLBEAR KILLBEAR 230
++yoho ARCH_YOHO YOHO 231
++jasper ARCH_JASPER JASPER 232
++dsc25 ARCH_DSC25 DSC25 233
+ omap_innovator MACH_OMAP_INNOVATOR OMAP_INNOVATOR 234
++mnci ARCH_RAMSES RAMSES 235
++s28x ARCH_S28X S28X 236
++mport3 ARCH_MPORT3 MPORT3 237
++pxa_eagle250 ARCH_PXA_EAGLE250 PXA_EAGLE250 238
++pdb ARCH_PDB PDB 239
++blue_2g SA1100_BLUE_2G BLUE_2G 240
++bluearch SA1100_BLUEARCH BLUEARCH 241
+ ixdp2400 ARCH_IXDP2400 IXDP2400 242
+ ixdp2800 ARCH_IXDP2800 IXDP2800 243
++explorer SA1100_EXPLORER EXPLORER 244
+ ixdp425 ARCH_IXDP425 IXDP425 245
++chimp ARCH_CHIMP CHIMP 246
++stork_nest ARCH_STORK_NEST STORK_NEST 247
++stork_egg ARCH_STORK_EGG STORK_EGG 248
++wismo SA1100_WISMO WISMO 249
++ezlinx ARCH_EZLINX EZLINX 250
++at91rm9200 ARCH_AT91RM9200 AT91RM9200 251
++adtech_orion ARCH_ADTECH_ORION ADTECH_ORION 252
++neptune ARCH_NEPTUNE NEPTUNE 253
+ hackkit SA1100_HACKKIT HACKKIT 254
++pxa_wins30 ARCH_PXA_WINS30 PXA_WINS30 255
++lavinna SA1100_LAVINNA LAVINNA 256
++pxa_uengine ARCH_PXA_UENGINE PXA_UENGINE 257
++innokom ARCH_INNOKOM INNOKOM 258
++bms ARCH_BMS BMS 259
+ ixcdp1100 ARCH_IXCDP1100 IXCDP1100 260
++prpmc1100 ARCH_PRPMC1100 PRPMC1100 261
+ at91rm9200dk ARCH_AT91RM9200DK AT91RM9200DK 262
++armstick ARCH_ARMSTICK ARMSTICK 263
++armonie ARCH_ARMONIE ARMONIE 264
++mport1 ARCH_MPORT1 MPORT1 265
++s3c5410 ARCH_S3C5410 S3C5410 266
++zcp320a ARCH_ZCP320A ZCP320A 267
++i_box ARCH_I_BOX I_BOX 268
++stlc1502 ARCH_STLC1502 STLC1502 269
++siren ARCH_SIREN SIREN 270
++greenlake ARCH_GREENLAKE GREENLAKE 271
++argus ARCH_ARGUS ARGUS 272
++combadge SA1100_COMBADGE COMBADGE 273
++rokepxa ARCH_ROKEPXA ROKEPXA 274
+ cintegrator ARCH_CINTEGRATOR CINTEGRATOR 275
++guidea07 ARCH_GUIDEA07 GUIDEA07 276
++tat257 ARCH_TAT257 TAT257 277
++igp2425 ARCH_IGP2425 IGP2425 278
++bluegrama ARCH_BLUEGRAMMA BLUEGRAMMA 279
++ipod ARCH_IPOD IPOD 280
++adsbitsyx ARCH_ADSBITSYX ADSBITSYX 281
++trizeps2 ARCH_TRIZEPS2 TRIZEPS2 282
+ viper ARCH_VIPER VIPER 283
++adsbitsyplus SA1100_ADSBITSYPLUS ADSBITSYPLUS 284
++adsagc SA1100_ADSAGC ADSAGC 285
++stp7312 ARCH_STP7312 STP7312 286
++nx_phnx MACH_NX_PHNX NX_PHNX 287
++wep_ep250 ARCH_WEP_EP250 WEP_EP250 288
++inhandelf3 ARCH_INHANDELF3 INHANDELF3 289
+ adi_coyote ARCH_ADI_COYOTE ADI_COYOTE 290
++iyonix ARCH_IYONIX IYONIX 291
++damicam1 ARCH_DAMICAM_SA1110 DAMICAM_SA1110 292
++meg03 ARCH_MEG03 MEG03 293
++pxa_whitechapel ARCH_PXA_WHITECHAPEL PXA_WHITECHAPEL 294
++nwsc ARCH_NWSC NWSC 295
++nwlarm ARCH_NWLARM NWLARM 296
++ixp425_mguard ARCH_IXP425_MGUARD IXP425_MGUARD 297
++pxa_netdcu4 ARCH_PXA_NETDCU4 PXA_NETDCU4 298
+ ixdp2401 ARCH_IXDP2401 IXDP2401 299
+ ixdp2801 ARCH_IXDP2801 IXDP2801 300
++zodiac ARCH_ZODIAC ZODIAC 301
++armmodul ARCH_ARMMODUL ARMMODUL 302
++ketop SA1100_KETOP KETOP 303
++av7200 ARCH_AV7200 AV7200 304
++arch_ti925 ARCH_ARCH_TI925 ARCH_TI925 305
++acq200 ARCH_ACQ200 ACQ200 306
++pt_dafit SA1100_PT_DAFIT PT_DAFIT 307
++ihba ARCH_IHBA IHBA 308
++quinque ARCH_QUINQUE QUINQUE 309
++nimbraone ARCH_NIMBRAONE NIMBRAONE 310
++nimbra29x ARCH_NIMBRA29X NIMBRA29X 311
++nimbra210 ARCH_NIMBRA210 NIMBRA210 312
++hhp_d95xx ARCH_HHP_D95XX HHP_D95XX 313
++labarm ARCH_LABARM LABARM 314
++m825xx ARCH_M825XX M825XX 315
++m7100 SA1100_M7100 M7100 316
++nipc2 ARCH_NIPC2 NIPC2 317
++fu7202 ARCH_FU7202 FU7202 318
++adsagx ARCH_ADSAGX ADSAGX 319
++pxa_pooh ARCH_PXA_POOH PXA_POOH 320
++bandon ARCH_BANDON BANDON 321
++pcm7210 ARCH_PCM7210 PCM7210 322
++nms9200 ARCH_NMS9200 NMS9200 323
++logodl ARCH_LOGODL LOGODL 324
++m7140 SA1100_M7140 M7140 325
++korebot ARCH_KOREBOT KOREBOT 326
+ iq31244 ARCH_IQ31244 IQ31244 327
++koan393 SA1100_KOAN393 KOAN393 328
++inhandftip3 ARCH_INHANDFTIP3 INHANDFTIP3 329
++gonzo ARCH_GONZO GONZO 330
+ bast ARCH_BAST BAST 331
++scanpass ARCH_SCANPASS SCANPASS 332
++ep7312_pooh ARCH_EP7312_POOH EP7312_POOH 333
++ta7s ARCH_TA7S TA7S 334
++ta7v ARCH_TA7V TA7V 335
++icarus SA1100_ICARUS ICARUS 336
++h1900 ARCH_H1900 H1900 337
++gemini SA1100_GEMINI GEMINI 338
++axim ARCH_AXIM AXIM 339
++audiotron ARCH_AUDIOTRON AUDIOTRON 340
++h2200 ARCH_H2200 H2200 341
++loox600 ARCH_LOOX600 LOOX600 342
++niop ARCH_NIOP NIOP 343
++dm310 ARCH_DM310 DM310 344
++seedpxa_c2 ARCH_SEEDPXA_C2 SEEDPXA_C2 345
++ixp4xx_mguardpci ARCH_IXP4XX_MGUARD_PCI IXP4XX_MGUARD_PCI 346
+ h1940 ARCH_H1940 H1940 347
++scorpio ARCH_SCORPIO SCORPIO 348
++viva ARCH_VIVA VIVA 349
++pxa_xcard ARCH_PXA_XCARD PXA_XCARD 350
++csb335 ARCH_CSB335 CSB335 351
++ixrd425 ARCH_IXRD425 IXRD425 352
++iq80315 ARCH_IQ80315 IQ80315 353
++nmp7312 ARCH_NMP7312 NMP7312 354
++cx861xx ARCH_CX861XX CX861XX 355
+ enp2611 ARCH_ENP2611 ENP2611 356
++xda SA1100_XDA XDA 357
++csir_ims ARCH_CSIR_IMS CSIR_IMS 358
++ixp421_dnaeeth ARCH_IXP421_DNAEETH IXP421_DNAEETH 359
++pocketserv9200 ARCH_POCKETSERV9200 POCKETSERV9200 360
++toto ARCH_TOTO TOTO 361
+ s3c2440 ARCH_S3C2440 S3C2440 362
++ks8695p ARCH_KS8695P KS8695P 363
++se4000 ARCH_SE4000 SE4000 364
++quadriceps ARCH_QUADRICEPS QUADRICEPS 365
++bronco ARCH_BRONCO BRONCO 366
++esl_wireless_tab ARCH_ESL_WIRELESS_TAB ESL_WIRELESS_TAB 367
++esl_sofcomp ARCH_ESL_SOFCOMP ESL_SOFCOMP 368
++s5c7375 ARCH_S5C7375 S5C7375 369
++spearhead ARCH_SPEARHEAD SPEARHEAD 370
++pantera ARCH_PANTERA PANTERA 371
++prayoglite ARCH_PRAYOGLITE PRAYOGLITE 372
+ gumstix ARCH_GUMSTIX GUMSTIX 373
++rcube ARCH_RCUBE RCUBE 374
++rea_olv ARCH_REA_OLV REA_OLV 375
++pxa_iphone ARCH_PXA_IPHONE PXA_IPHONE 376
++s3c3410 ARCH_S3C3410 S3C3410 377
++espd_4510b ARCH_ESPD_4510B ESPD_4510B 378
++mp1x ARCH_MP1X MP1X 379
++at91rm9200tb ARCH_AT91RM9200TB AT91RM9200TB 380
++adsvgx ARCH_ADSVGX ADSVGX 381
+ omap_h2 MACH_OMAP_H2 OMAP_H2 382
++pelee ARCH_PELEE PELEE 383
+ e740 MACH_E740 E740 384
+ iq80331 ARCH_IQ80331 IQ80331 385
+ versatile_pb ARCH_VERSATILE_PB VERSATILE_PB 387
+ kev7a400 MACH_KEV7A400 KEV7A400 388
+ lpd7a400 MACH_LPD7A400 LPD7A400 389
+ lpd7a404 MACH_LPD7A404 LPD7A404 390
++fujitsu_camelot ARCH_FUJITSU_CAMELOT FUJITSU_CAMELOT 391
++janus2m ARCH_JANUS2M JANUS2M 392
++embtf MACH_EMBTF EMBTF 393
++hpm MACH_HPM HPM 394
++smdk2410tk MACH_SMDK2410TK SMDK2410TK 395
++smdk2410aj MACH_SMDK2410AJ SMDK2410AJ 396
++streetracer MACH_STREETRACER STREETRACER 397
++eframe MACH_EFRAME EFRAME 398
+ csb337 MACH_CSB337 CSB337 399
++pxa_lark MACH_PXA_LARK PXA_LARK 400
++pxa_pnp2110 MACH_PNP2110 PNP2110 401
++tcc72x MACH_TCC72X TCC72X 402
++altair MACH_ALTAIR ALTAIR 403
++kc3 MACH_KC3 KC3 404
++sinteftd MACH_SINTEFTD SINTEFTD 405
+ mainstone MACH_MAINSTONE MAINSTONE 406
++aday4x MACH_ADAY4X ADAY4X 407
++lite300 MACH_LITE300 LITE300 408
++s5c7376 MACH_S5C7376 S5C7376 409
++mt02 MACH_MT02 MT02 410
++mport3s MACH_MPORT3S MPORT3S 411
++ra_alpha MACH_RA_ALPHA RA_ALPHA 412
+ xcep MACH_XCEP XCEP 413
+ arcom_vulcan MACH_ARCOM_VULCAN ARCOM_VULCAN 414
++stargate MACH_STARGATE STARGATE 415
++armadilloj MACH_ARMADILLOJ ARMADILLOJ 416
++elroy_jack MACH_ELROY_JACK ELROY_JACK 417
++backend MACH_BACKEND BACKEND 418
++s5linbox MACH_S5LINBOX S5LINBOX 419
+ nomadik MACH_NOMADIK NOMADIK 420
++ia_cpu_9200 MACH_IA_CPU_9200 IA_CPU_9200 421
++at91_bja1 MACH_AT91_BJA1 AT91_BJA1 422
+ corgi MACH_CORGI CORGI 423
+ poodle MACH_POODLE POODLE 424
++ten MACH_TEN TEN 425
++roverp5p MACH_ROVERP5P ROVERP5P 426
++sc2700 MACH_SC2700 SC2700 427
++ex_eagle MACH_EX_EAGLE EX_EAGLE 428
++nx_pxa12 MACH_NX_PXA12 NX_PXA12 429
++nx_pxa5 MACH_NX_PXA5 NX_PXA5 430
++blackboard2 MACH_BLACKBOARD2 BLACKBOARD2 431
++i819 MACH_I819 I819 432
++ixmb995e MACH_IXMB995E IXMB995E 433
++skyrider MACH_SKYRIDER SKYRIDER 434
++skyhawk MACH_SKYHAWK SKYHAWK 435
++enterprise MACH_ENTERPRISE ENTERPRISE 436
++dep2410 MACH_DEP2410 DEP2410 437
+ armcore MACH_ARMCORE ARMCORE 438
++hobbit MACH_HOBBIT HOBBIT 439
++h7210 MACH_H7210 H7210 440
++pxa_netdcu5 MACH_PXA_NETDCU5 PXA_NETDCU5 441
++acc MACH_ACC ACC 442
++esl_sarva MACH_ESL_SARVA ESL_SARVA 443
++xm250 MACH_XM250 XM250 444
++t6tc1xb MACH_T6TC1XB T6TC1XB 445
++ess710 MACH_ESS710 ESS710 446
+ mx31ads MACH_MX31ADS MX31ADS 447
+ himalaya MACH_HIMALAYA HIMALAYA 448
++bolfenk MACH_BOLFENK BOLFENK 449
++at91rm9200kr MACH_AT91RM9200KR AT91RM9200KR 450
+ edb9312 MACH_EDB9312 EDB9312 451
+ omap_generic MACH_OMAP_GENERIC OMAP_GENERIC 452
++aximx3 MACH_AXIMX3 AXIMX3 453
++eb67xdip MACH_EB67XDIP EB67XDIP 454
++webtxs MACH_WEBTXS WEBTXS 455
++hawk MACH_HAWK HAWK 456
++ccat91sbc001 MACH_CCAT91SBC001 CCAT91SBC001 457
++expresso MACH_EXPRESSO EXPRESSO 458
++h4000 MACH_H4000 H4000 459
++dino MACH_DINO DINO 460
++ml675k MACH_ML675K ML675K 461
+ edb9301 MACH_EDB9301 EDB9301 462
+ edb9315 MACH_EDB9315 EDB9315 463
++reciva_tt MACH_RECIVA_TT RECIVA_TT 464
++cstcb01 MACH_CSTCB01 CSTCB01 465
++cstcb1 MACH_CSTCB1 CSTCB1 466
++shadwell MACH_SHADWELL SHADWELL 467
++goepel263 MACH_GOEPEL263 GOEPEL263 468
++acq100 MACH_ACQ100 ACQ100 469
++mx1fs2 MACH_MX1FS2 MX1FS2 470
++hiptop_g1 MACH_HIPTOP_G1 HIPTOP_G1 471
++sparky MACH_SPARKY SPARKY 472
++ns9750 MACH_NS9750 NS9750 473
++phoenix MACH_PHOENIX PHOENIX 474
+ vr1000 MACH_VR1000 VR1000 475
++deisterpxa MACH_DEISTERPXA DEISTERPXA 476
++bcm1160 MACH_BCM1160 BCM1160 477
++pcm022 MACH_PCM022 PCM022 478
++adsgcx MACH_ADSGCX ADSGCX 479
++dreadnaught MACH_DREADNAUGHT DREADNAUGHT 480
++dm320 MACH_DM320 DM320 481
++markov MACH_MARKOV MARKOV 482
++cos7a400 MACH_COS7A400 COS7A400 483
++milano MACH_MILANO MILANO 484
++ue9328 MACH_UE9328 UE9328 485
++uex255 MACH_UEX255 UEX255 486
++ue2410 MACH_UE2410 UE2410 487
++a620 MACH_A620 A620 488
++ocelot MACH_OCELOT OCELOT 489
++cheetah MACH_CHEETAH CHEETAH 490
+ omap_perseus2 MACH_OMAP_PERSEUS2 OMAP_PERSEUS2 491
++zvue MACH_ZVUE ZVUE 492
++roverp1 MACH_ROVERP1 ROVERP1 493
++asidial2 MACH_ASIDIAL2 ASIDIAL2 494
++s3c24a0 MACH_S3C24A0 S3C24A0 495
+ e800 MACH_E800 E800 496
+ e750 MACH_E750 E750 497
++s3c5500 MACH_S3C5500 S3C5500 498
++smdk5500 MACH_SMDK5500 SMDK5500 499
++signalsync MACH_SIGNALSYNC SIGNALSYNC 500
++nbc MACH_NBC NBC 501
++kodiak MACH_KODIAK KODIAK 502
++netbookpro MACH_NETBOOKPRO NETBOOKPRO 503
++hw90200 MACH_HW90200 HW90200 504
++condor MACH_CONDOR CONDOR 505
++cup MACH_CUP CUP 506
++kite MACH_KITE KITE 507
+ scb9328 MACH_SCB9328 SCB9328 508
+ omap_h3 MACH_OMAP_H3 OMAP_H3 509
+ omap_h4 MACH_OMAP_H4 OMAP_H4 510
++n10 MACH_N10 N10 511
++montejade MACH_MONTAJADE MONTAJADE 512
++sg560 MACH_SG560 SG560 513
++dp1000 MACH_DP1000 DP1000 514
+ omap_osk MACH_OMAP_OSK OMAP_OSK 515
++rg100v3 MACH_RG100V3 RG100V3 516
++mx2ads MACH_MX2ADS MX2ADS 517
++pxa_kilo MACH_PXA_KILO PXA_KILO 518
++ixp4xx_eagle MACH_IXP4XX_EAGLE IXP4XX_EAGLE 519
+ tosa MACH_TOSA TOSA 520
++mb2520f MACH_MB2520F MB2520F 521
++emc1000 MACH_EMC1000 EMC1000 522
++tidsc25 MACH_TIDSC25 TIDSC25 523
++akcpmxl MACH_AKCPMXL AKCPMXL 524
++av3xx MACH_AV3XX AV3XX 525
+ avila MACH_AVILA AVILA 526
++pxa_mpm10 MACH_PXA_MPM10 PXA_MPM10 527
++pxa_kyanite MACH_PXA_KYANITE PXA_KYANITE 528
++sgold MACH_SGOLD SGOLD 529
++oscar MACH_OSCAR OSCAR 530
++epxa4usb2 MACH_EPXA4USB2 EPXA4USB2 531
++xsengine MACH_XSENGINE XSENGINE 532
++ip600 MACH_IP600 IP600 533
++mcan2 MACH_MCAN2 MCAN2 534
++ddi_blueridge MACH_DDI_BLUERIDGE DDI_BLUERIDGE 535
++skyminder MACH_SKYMINDER SKYMINDER 536
++lpd79520 MACH_LPD79520 LPD79520 537
+ edb9302 MACH_EDB9302 EDB9302 538
++hw90340 MACH_HW90340 HW90340 539
++cip_box MACH_CIP_BOX CIP_BOX 540
++ivpn MACH_IVPN IVPN 541
++rsoc2 MACH_RSOC2 RSOC2 542
+ husky MACH_HUSKY HUSKY 543
++boxer MACH_BOXER BOXER 544
+ shepherd MACH_SHEPHERD SHEPHERD 545
++aml42800aa MACH_AML42800AA AML42800AA 546
++lpc2294 MACH_LPC2294 LPC2294 548
++switchgrass MACH_SWITCHGRASS SWITCHGRASS 549
++ens_cmu MACH_ENS_CMU ENS_CMU 550
++mm6_sdb MACH_MM6_SDB MM6_SDB 551
++saturn MACH_SATURN SATURN 552
++i30030evb MACH_I30030EVB I30030EVB 553
++mxc27530evb MACH_MXC27530EVB MXC27530EVB 554
++smdk2800 MACH_SMDK2800 SMDK2800 555
++mtwilson MACH_MTWILSON MTWILSON 556
++ziti MACH_ZITI ZITI 557
++grandfather MACH_GRANDFATHER GRANDFATHER 558
++tengine MACH_TENGINE TENGINE 559
++s3c2460 MACH_S3C2460 S3C2460 560
++pdm MACH_PDM PDM 561
+ h4700 MACH_H4700 H4700 562
++h6300 MACH_H6300 H6300 563
++rz1700 MACH_RZ1700 RZ1700 564
++a716 MACH_A716 A716 565
++estk2440a MACH_ESTK2440A ESTK2440A 566
++atwixp425 MACH_ATWIXP425 ATWIXP425 567
++csb336 MACH_CSB336 CSB336 568
++rirm2 MACH_RIRM2 RIRM2 569
++cx23518 MACH_CX23518 CX23518 570
++cx2351x MACH_CX2351X CX2351X 571
++computime MACH_COMPUTIME COMPUTIME 572
++izarus MACH_IZARUS IZARUS 573
++pxa_rts MACH_RTS RTS 574
++se5100 MACH_SE5100 SE5100 575
++s3c2510 MACH_S3C2510 S3C2510 576
++csb437tl MACH_CSB437TL CSB437TL 577
++slauson MACH_SLAUSON SLAUSON 578
++pearlriver MACH_PEARLRIVER PEARLRIVER 579
++tdc_p210 MACH_TDC_P210 TDC_P210 580
++sg580 MACH_SG580 SG580 581
++wrsbcarm7 MACH_WRSBCARM7 WRSBCARM7 582
++ipd MACH_IPD IPD 583
++pxa_dnp2110 MACH_PXA_DNP2110 PXA_DNP2110 584
++xaeniax MACH_XAENIAX XAENIAX 585
++somn4250 MACH_SOMN4250 SOMN4250 586
++pleb2 MACH_PLEB2 PLEB2 587
++cornwallis MACH_CORNWALLIS CORNWALLIS 588
++gurney_drv MACH_GURNEY_DRV GURNEY_DRV 589
++chaffee MACH_CHAFFEE CHAFFEE 590
++rms101 MACH_RMS101 RMS101 591
+ rx3715 MACH_RX3715 RX3715 592
++swift MACH_SWIFT SWIFT 593
++roverp7 MACH_ROVERP7 ROVERP7 594
++pr818s MACH_PR818S PR818S 595
++trxpro MACH_TRXPRO TRXPRO 596
+ nslu2 MACH_NSLU2 NSLU2 597
+ e400 MACH_E400 E400 598
++trab MACH_TRAB TRAB 599
++cmc_pu2 MACH_CMC_PU2 CMC_PU2 600
++fulcrum MACH_FULCRUM FULCRUM 601
++netgate42x MACH_NETGATE42X NETGATE42X 602
++str710 MACH_STR710 STR710 603
+ ixdpg425 MACH_IXDPG425 IXDPG425 604
++tomtomgo MACH_TOMTOMGO TOMTOMGO 605
+ versatile_ab MACH_VERSATILE_AB VERSATILE_AB 606
+ edb9307 MACH_EDB9307 EDB9307 607
++sg565 MACH_SG565 SG565 608
++lpd79524 MACH_LPD79524 LPD79524 609
++lpd79525 MACH_LPD79525 LPD79525 610
++rms100 MACH_RMS100 RMS100 611
+ kb9200 MACH_KB9200 KB9200 612
+ sx1 MACH_SX1 SX1 613
++hms39c7092 MACH_HMS39C7092 HMS39C7092 614
++armadillo MACH_ARMADILLO ARMADILLO 615
++ipcu MACH_IPCU IPCU 616
++loox720 MACH_LOOX720 LOOX720 617
+ ixdp465 MACH_IXDP465 IXDP465 618
+ ixdp2351 MACH_IXDP2351 IXDP2351 619
++adsvix MACH_ADSVIX ADSVIX 620
++dm270 MACH_DM270 DM270 621
++socltplus MACH_SOCLTPLUS SOCLTPLUS 622
++ecia MACH_ECIA ECIA 623
++cm4008 MACH_CM4008 CM4008 624
++p2001 MACH_P2001 P2001 625
++twister MACH_TWISTER TWISTER 626
++mudshark MACH_MUDSHARK MUDSHARK 627
++hb2 MACH_HB2 HB2 628
+ iq80332 MACH_IQ80332 IQ80332 629
++sendt MACH_SENDT SENDT 630
++mx2jazz MACH_MX2JAZZ MX2JAZZ 631
++multiio MACH_MULTIIO MULTIIO 632
++hrdisplay MACH_HRDISPLAY HRDISPLAY 633
++mxc27530ads MACH_MXC27530ADS MXC27530ADS 634
++trizeps3 MACH_TRIZEPS3 TRIZEPS3 635
++zefeerdza MACH_ZEFEERDZA ZEFEERDZA 636
++zefeerdzb MACH_ZEFEERDZB ZEFEERDZB 637
++zefeerdzg MACH_ZEFEERDZG ZEFEERDZG 638
++zefeerdzn MACH_ZEFEERDZN ZEFEERDZN 639
++zefeerdzq MACH_ZEFEERDZQ ZEFEERDZQ 640
+ gtwx5715 MACH_GTWX5715 GTWX5715 641
++astro_jack MACH_ASTRO_JACK ASTRO_JACK 643
++tip03 MACH_TIP03 TIP03 644
++a9200ec MACH_A9200EC A9200EC 645
++pnx0105 MACH_PNX0105 PNX0105 646
++adcpoecpu MACH_ADCPOECPU ADCPOECPU 647
+ csb637 MACH_CSB637 CSB637 648
++mb9200 MACH_MB9200 MB9200 650
++kulun MACH_KULUN KULUN 651
++snapper MACH_SNAPPER SNAPPER 652
++optima MACH_OPTIMA OPTIMA 653
++dlhsbc MACH_DLHSBC DLHSBC 654
++x30 MACH_X30 X30 655
+ n30 MACH_N30 N30 656
++manga_ks8695 MACH_MANGA_KS8695 MANGA_KS8695 657
++ajax MACH_AJAX AJAX 658
+ nec_mp900 MACH_NEC_MP900 NEC_MP900 659
++vvtk1000 MACH_VVTK1000 VVTK1000 661
+ kafa MACH_KAFA KAFA 662
++vvtk3000 MACH_VVTK3000 VVTK3000 663
++pimx1 MACH_PIMX1 PIMX1 664
++ollie MACH_OLLIE OLLIE 665
++skymax MACH_SKYMAX SKYMAX 666
++jazz MACH_JAZZ JAZZ 667
++tel_t3 MACH_TEL_T3 TEL_T3 668
++aisino_fcr255 MACH_AISINO_FCR255 AISINO_FCR255 669
++btweb MACH_BTWEB BTWEB 670
++dbg_lh79520 MACH_DBG_LH79520 DBG_LH79520 671
++cm41xx MACH_CM41XX CM41XX 672
+ ts72xx MACH_TS72XX TS72XX 673
++nggpxa MACH_NGGPXA NGGPXA 674
++csb535 MACH_CSB535 CSB535 675
++csb536 MACH_CSB536 CSB536 676
++pxa_trakpod MACH_PXA_TRAKPOD PXA_TRAKPOD 677
++praxis MACH_PRAXIS PRAXIS 678
++lh75411 MACH_LH75411 LH75411 679
+ otom MACH_OTOM OTOM 680
+ nexcoder_2440 MACH_NEXCODER_2440 NEXCODER_2440 681
++loox410 MACH_LOOX410 LOOX410 682
++westlake MACH_WESTLAKE WESTLAKE 683
++nsb MACH_NSB NSB 684
++esl_sarva_stn MACH_ESL_SARVA_STN ESL_SARVA_STN 685
++esl_sarva_tft MACH_ESL_SARVA_TFT ESL_SARVA_TFT 686
++esl_sarva_iad MACH_ESL_SARVA_IAD ESL_SARVA_IAD 687
++esl_sarva_acc MACH_ESL_SARVA_ACC ESL_SARVA_ACC 688
++typhoon MACH_TYPHOON TYPHOON 689
++cnav MACH_CNAV CNAV 690
++a730 MACH_A730 A730 691
++netstar MACH_NETSTAR NETSTAR 692
++supercon MACH_PHASEFALE_SUPERCON PHASEFALE_SUPERCON 693
++shiva1100 MACH_SHIVA1100 SHIVA1100 694
++etexsc MACH_ETEXSC ETEXSC 695
++ixdpg465 MACH_IXDPG465 IXDPG465 696
++a9m2410 MACH_A9M2410 A9M2410 697
++a9m2440 MACH_A9M2440 A9M2440 698
++a9m9750 MACH_A9M9750 A9M9750 699
++a9m9360 MACH_A9M9360 A9M9360 700
++unc90 MACH_UNC90 UNC90 701
+ eco920 MACH_ECO920 ECO920 702
++satview MACH_SATVIEW SATVIEW 703
+ roadrunner MACH_ROADRUNNER ROADRUNNER 704
+ at91rm9200ek MACH_AT91RM9200EK AT91RM9200EK 705
++gp32 MACH_GP32 GP32 706
++gem MACH_GEM GEM 707
++i858 MACH_I858 I858 708
++hx2750 MACH_HX2750 HX2750 709
++mxc91131evb MACH_MXC91131EVB MXC91131EVB 710
++p700 MACH_P700 P700 711
++cpe MACH_CPE CPE 712
+ spitz MACH_SPITZ SPITZ 713
++nimbra340 MACH_NIMBRA340 NIMBRA340 714
++lpc22xx MACH_LPC22XX LPC22XX 715
++omap_comet3 MACH_COMET3 COMET3 716
++omap_comet4 MACH_COMET4 COMET4 717
++csb625 MACH_CSB625 CSB625 718
++fortunet2 MACH_FORTUNET2 FORTUNET2 719
++s5h2200 MACH_S5H2200 S5H2200 720
++optorm920 MACH_OPTORM920 OPTORM920 721
++adsbitsyxb MACH_ADSBITSYXB ADSBITSYXB 722
+ adssphere MACH_ADSSPHERE ADSSPHERE 723
++adsportal MACH_ADSPORTAL ADSPORTAL 724
++ln2410sbc MACH_LN2410SBC LN2410SBC 725
++cb3rufc MACH_CB3RUFC CB3RUFC 726
++mp2usb MACH_MP2USB MP2USB 727
++ntnp425c MACH_NTNP425C NTNP425C 728
+ colibri MACH_COLIBRI COLIBRI 729
++pcm7220 MACH_PCM7220 PCM7220 730
+ gateway7001 MACH_GATEWAY7001 GATEWAY7001 731
+ pcm027 MACH_PCM027 PCM027 732
++cmpxa MACH_CMPXA CMPXA 733
+ anubis MACH_ANUBIS ANUBIS 734
++ite8152 MACH_ITE8152 ITE8152 735
++lpc3xxx MACH_LPC3XXX LPC3XXX 736
++puppeteer MACH_PUPPETEER PUPPETEER 737
++e570 MACH_E570 E570 739
++x50 MACH_X50 X50 740
++recon MACH_RECON RECON 741
++xboardgp8 MACH_XBOARDGP8 XBOARDGP8 742
++fpic2 MACH_FPIC2 FPIC2 743
+ akita MACH_AKITA AKITA 744
++a81 MACH_A81 A81 745
++svm_sc25x MACH_SVM_SC25X SVM_SC25X 746
++vt020 MACH_VADATECH020 VADATECH020 747
++tli MACH_TLI TLI 748
++edb9315lc MACH_EDB9315LC EDB9315LC 749
++passec MACH_PASSEC PASSEC 750
++ds_tiger MACH_DS_TIGER DS_TIGER 751
++e310 MACH_E310 E310 752
+ e330 MACH_E330 E330 753
++rt3000 MACH_RT3000 RT3000 754
+ nokia770 MACH_NOKIA770 NOKIA770 755
++pnx0106 MACH_PNX0106 PNX0106 756
++hx21xx MACH_HX21XX HX21XX 757
++faraday MACH_FARADAY FARADAY 758
++sbc9312 MACH_SBC9312 SBC9312 759
++batman MACH_BATMAN BATMAN 760
++jpd201 MACH_JPD201 JPD201 761
++mipsa MACH_MIPSA MIPSA 762
++kacom MACH_KACOM KACOM 763
++swarcocpu MACH_SWARCOCPU SWARCOCPU 764
++swarcodsl MACH_SWARCODSL SWARCODSL 765
++blueangel MACH_BLUEANGEL BLUEANGEL 766
++hairygrama MACH_HAIRYGRAMA HAIRYGRAMA 767
++banff MACH_BANFF BANFF 768
+ carmeva MACH_CARMEVA CARMEVA 769
++sam255 MACH_SAM255 SAM255 770
++ppm10 MACH_PPM10 PPM10 771
+ edb9315a MACH_EDB9315A EDB9315A 772
++sunset MACH_SUNSET SUNSET 773
+ stargate2 MACH_STARGATE2 STARGATE2 774
+ intelmote2 MACH_INTELMOTE2 INTELMOTE2 775
+ trizeps4 MACH_TRIZEPS4 TRIZEPS4 776
++mainstone2 MACH_MAINSTONE2 MAINSTONE2 777
++ez_ixp42x MACH_EZ_IXP42X EZ_IXP42X 778
++tapwave_zodiac MACH_TAPWAVE_ZODIAC TAPWAVE_ZODIAC 779
++universalmeter MACH_UNIVERSALMETER UNIVERSALMETER 780
++hicoarm9 MACH_HICOARM9 HICOARM9 781
+ pnx4008 MACH_PNX4008 PNX4008 782
++kws6000 MACH_KWS6000 KWS6000 783
++portux920t MACH_PORTUX920T PORTUX920T 784
++ez_x5 MACH_EZ_X5 EZ_X5 785
++omap_rudolph MACH_OMAP_RUDOLPH OMAP_RUDOLPH 786
+ cpuat91 MACH_CPUAT91 CPUAT91 787
++rea9200 MACH_REA9200 REA9200 788
++acts_pune_sa1110 MACH_ACTS_PUNE_SA1110 ACTS_PUNE_SA1110 789
++ixp425 MACH_IXP425 IXP425 790
++i30030ads MACH_I30030ADS I30030ADS 791
++perch MACH_PERCH PERCH 792
++eis05r1 MACH_EIS05R1 EIS05R1 793
++pepperpad MACH_PEPPERPAD PEPPERPAD 794
++sb3010 MACH_SB3010 SB3010 795
++rm9200 MACH_RM9200 RM9200 796
++dma03 MACH_DMA03 DMA03 797
++road_s101 MACH_ROAD_S101 ROAD_S101 798
+ iq81340sc MACH_IQ81340SC IQ81340SC 799
++iq_nextgen_b MACH_IQ_NEXTGEN_B IQ_NEXTGEN_B 800
+ iq81340mc MACH_IQ81340MC IQ81340MC 801
++iq_nextgen_d MACH_IQ_NEXTGEN_D IQ_NEXTGEN_D 802
++iq_nextgen_e MACH_IQ_NEXTGEN_E IQ_NEXTGEN_E 803
++mallow_at91 MACH_MALLOW_AT91 MALLOW_AT91 804
++cybertracker_i MACH_CYBERTRACKER_I CYBERTRACKER_I 805
++gesbc931x MACH_GESBC931X GESBC931X 806
++centipad MACH_CENTIPAD CENTIPAD 807
++armsoc MACH_ARMSOC ARMSOC 808
++se4200 MACH_SE4200 SE4200 809
++ems197a MACH_EMS197A EMS197A 810
+ micro9 MACH_MICRO9 MICRO9 811
+ micro9l MACH_MICRO9L MICRO9L 812
++uc5471dsp MACH_UC5471DSP UC5471DSP 813
++sj5471eng MACH_SJ5471ENG SJ5471ENG 814
++none MACH_CMPXA26X CMPXA26X 815
++nc1 MACH_NC NC 816
+ omap_palmte MACH_OMAP_PALMTE OMAP_PALMTE 817
++ajax52x MACH_AJAX52X AJAX52X 818
++siriustar MACH_SIRIUSTAR SIRIUSTAR 819
++iodata_hdlg MACH_IODATA_HDLG IODATA_HDLG 820
++at91rm9200utl MACH_AT91RM9200UTL AT91RM9200UTL 821
++biosafe MACH_BIOSAFE BIOSAFE 822
++mp1000 MACH_MP1000 MP1000 823
++parsy MACH_PARSY PARSY 824
++ccxp270 MACH_CCXP CCXP 825
++omap_gsample MACH_OMAP_GSAMPLE OMAP_GSAMPLE 826
+ realview_eb MACH_REALVIEW_EB REALVIEW_EB 827
++samoa MACH_SAMOA SAMOA 828
++palmt3 MACH_PALMT3 PALMT3 829
++i878 MACH_I878 I878 830
+ borzoi MACH_BORZOI BORZOI 831
++gecko MACH_GECKO GECKO 832
++ds101 MACH_DS101 DS101 833
++omap_palmtt2 MACH_OMAP_PALMTT2 OMAP_PALMTT2 834
+ palmld MACH_PALMLD PALMLD 835
++cc9c MACH_CC9C CC9C 836
++sbc1670 MACH_SBC1670 SBC1670 837
+ ixdp28x5 MACH_IXDP28X5 IXDP28X5 838
+ omap_palmtt MACH_OMAP_PALMTT OMAP_PALMTT 839
++ml696k MACH_ML696K ML696K 840
+ arcom_zeus MACH_ARCOM_ZEUS ARCOM_ZEUS 841
+ osiris MACH_OSIRIS OSIRIS 842
++maestro MACH_MAESTRO MAESTRO 843
+ palmte2 MACH_PALMTE2 PALMTE2 844
++ixbbm MACH_IXBBM IXBBM 845
+ mx27ads MACH_MX27ADS MX27ADS 846
++ax8004 MACH_AX8004 AX8004 847
+ at91sam9261ek MACH_AT91SAM9261EK AT91SAM9261EK 848
+ loft MACH_LOFT LOFT 849
++magpie MACH_MAGPIE MAGPIE 850
+ mx21ads MACH_MX21ADS MX21ADS 851
++mb87m3400 MACH_MB87M3400 MB87M3400 852
++mguard_delta MACH_MGUARD_DELTA MGUARD_DELTA 853
++davinci_dvdp MACH_DAVINCI_DVDP DAVINCI_DVDP 854
++htcuniversal MACH_HTCUNIVERSAL HTCUNIVERSAL 855
++tpad MACH_TPAD TPAD 856
++roverp3 MACH_ROVERP3 ROVERP3 857
++jornada928 MACH_JORNADA928 JORNADA928 858
++mv88fxx81 MACH_MV88FXX81 MV88FXX81 859
++stmp36xx MACH_STMP36XX STMP36XX 860
++sxni79524 MACH_SXNI79524 SXNI79524 861
+ ams_delta MACH_AMS_DELTA AMS_DELTA 862
++uranium MACH_URANIUM URANIUM 863
++ucon MACH_UCON UCON 864
+ nas100d MACH_NAS100D NAS100D 865
++l083 MACH_L083_1000 L083_1000 866
++ezx MACH_EZX EZX 867
++pnx5220 MACH_PNX5220 PNX5220 868
++butte MACH_BUTTE BUTTE 869
++srm2 MACH_SRM2 SRM2 870
++dsbr MACH_DSBR DSBR 871
++crystalball MACH_CRYSTALBALL CRYSTALBALL 872
++tinypxa27x MACH_TINYPXA27X TINYPXA27X 873
++herbie MACH_HERBIE HERBIE 874
+ magician MACH_MAGICIAN MAGICIAN 875
++cm4002 MACH_CM4002 CM4002 876
++b4 MACH_B4 B4 877
++maui MACH_MAUI MAUI 878
++cybertracker_g MACH_CYBERTRACKER_G CYBERTRACKER_G 879
+ nxdkn MACH_NXDKN NXDKN 880
++mio8390 MACH_MIO8390 MIO8390 881
++omi_board MACH_OMI_BOARD OMI_BOARD 882
++mx21civ MACH_MX21CIV MX21CIV 883
++mahi_cdac MACH_MAHI_CDAC MAHI_CDAC 884
+ palmtx MACH_PALMTX PALMTX 885
+ s3c2413 MACH_S3C2413 S3C2413 887
++samsys_ep0 MACH_SAMSYS_EP0 SAMSYS_EP0 888
++wg302v1 MACH_WG302V1 WG302V1 889
+ wg302v2 MACH_WG302V2 WG302V2 890
++eb42x MACH_EB42X EB42X 891
++iq331es MACH_IQ331ES IQ331ES 892
++cosydsp MACH_COSYDSP COSYDSP 893
++uplat7d_proto MACH_UPLAT7D UPLAT7D 894
++ptdavinci MACH_PTDAVINCI PTDAVINCI 895
++mbus MACH_MBUS MBUS 896
++nadia2vb MACH_NADIA2VB NADIA2VB 897
++r1000 MACH_R1000 R1000 898
++hw90250 MACH_HW90250 HW90250 899
+ omap_2430sdp MACH_OMAP_2430SDP OMAP_2430SDP 900
+ davinci_evm MACH_DAVINCI_EVM DAVINCI_EVM 901
++omap_tornado MACH_OMAP_TORNADO OMAP_TORNADO 902
++olocreek MACH_OLOCREEK OLOCREEK 903
+ palmz72 MACH_PALMZ72 PALMZ72 904
+ nxdb500 MACH_NXDB500 NXDB500 905
+ apf9328 MACH_APF9328 APF9328 906
++omap_wipoq MACH_OMAP_WIPOQ OMAP_WIPOQ 907
++omap_twip MACH_OMAP_TWIP OMAP_TWIP 908
++treo650 MACH_TREO650 TREO650 909
++acumen MACH_ACUMEN ACUMEN 910
++xp100 MACH_XP100 XP100 911
++fs2410 MACH_FS2410 FS2410 912
++pxa270_cerf MACH_PXA270_CERF PXA270_CERF 913
++sq2ftlpalm MACH_SQ2FTLPALM SQ2FTLPALM 914
++bsemserver MACH_BSEMSERVER BSEMSERVER 915
++netclient MACH_NETCLIENT NETCLIENT 916
+ palmt5 MACH_PALMT5 PALMT5 917
+ palmtc MACH_PALMTC PALMTC 918
+ omap_apollon MACH_OMAP_APOLLON OMAP_APOLLON 919
++mxc30030evb MACH_MXC30030EVB MXC30030EVB 920
++rea_cpu2 MACH_REA_2D REA_2D 921
++eti3e524 MACH_TI3E524 TI3E524 922
+ ateb9200 MACH_ATEB9200 ATEB9200 923
++auckland MACH_AUCKLAND AUCKLAND 924
++ak3220m MACH_AK3320M AK3320M 925
++duramax MACH_DURAMAX DURAMAX 926
+ n35 MACH_N35 N35 927
++pronghorn MACH_PRONGHORN PRONGHORN 928
++fundy MACH_FUNDY FUNDY 929
+ logicpd_pxa270 MACH_LOGICPD_PXA270 LOGICPD_PXA270 930
++cpu777 MACH_CPU777 CPU777 931
++simicon9201 MACH_SIMICON9201 SIMICON9201 932
++leap2_hpm MACH_LEAP2_HPM LEAP2_HPM 933
++cm922txa10 MACH_CM922TXA10 CM922TXA10 934
++sandgate MACH_PXA PXA 935
++sandgate2 MACH_SANDGATE2 SANDGATE2 936
++sandgate2g MACH_SANDGATE2G SANDGATE2G 937
++sandgate2p MACH_SANDGATE2P SANDGATE2P 938
++fred_jack MACH_FRED_JACK FRED_JACK 939
++ttg_color1 MACH_TTG_COLOR1 TTG_COLOR1 940
+ nxeb500hmi MACH_NXEB500HMI NXEB500HMI 941
++netdcu8 MACH_NETDCU8 NETDCU8 942
++ng_fvx538 MACH_NG_FVX538 NG_FVX538 944
++ng_fvs338 MACH_NG_FVS338 NG_FVS338 945
++pnx4103 MACH_PNX4103 PNX4103 946
++hesdb MACH_HESDB HESDB 947
++xsilo MACH_XSILO XSILO 948
+ espresso MACH_ESPRESSO ESPRESSO 949
++emlc MACH_EMLC EMLC 950
++sisteron MACH_SISTERON SISTERON 951
+ rx1950 MACH_RX1950 RX1950 952
++tsc_venus MACH_TSC_VENUS TSC_VENUS 953
++ds101j MACH_DS101J DS101J 954
++mxc30030ads MACH_MXC30030ADS MXC30030ADS 955
++fujitsu_wimaxsoc MACH_FUJITSU_WIMAXSOC FUJITSU_WIMAXSOC 956
++dualpcmodem MACH_DUALPCMODEM DUALPCMODEM 957
+ gesbc9312 MACH_GESBC9312 GESBC9312 958
++htcapache MACH_HTCAPACHE HTCAPACHE 959
++ixdp435 MACH_IXDP435 IXDP435 960
++catprovt100 MACH_CATPROVT100 CATPROVT100 961
++picotux1xx MACH_PICOTUX1XX PICOTUX1XX 962
+ picotux2xx MACH_PICOTUX2XX PICOTUX2XX 963
+ dsmg600 MACH_DSMG600 DSMG600 964
++empc2 MACH_EMPC2 EMPC2 965
++ventura MACH_VENTURA VENTURA 966
++phidget_sbc MACH_PHIDGET_SBC PHIDGET_SBC 967
++ij3k MACH_IJ3K IJ3K 968
++pisgah MACH_PISGAH PISGAH 969
+ omap_fsample MACH_OMAP_FSAMPLE OMAP_FSAMPLE 970
++sg720 MACH_SG720 SG720 971
++redfox MACH_REDFOX REDFOX 972
++mysh_ep9315_1 MACH_MYSH_EP9315_1 MYSH_EP9315_1 973
++tpf106 MACH_TPF106 TPF106 974
++at91rm9200kg MACH_AT91RM9200KG AT91RM9200KG 975
++rcmt2 MACH_SLEDB SLEDB 976
++ontrack MACH_ONTRACK ONTRACK 977
++pm1200 MACH_PM1200 PM1200 978
++ess24562 MACH_ESS24XXX ESS24XXX 979
++coremp7 MACH_COREMP7 COREMP7 980
++nexcoder_6446 MACH_NEXCODER_6446 NEXCODER_6446 981
++stvc8380 MACH_STVC8380 STVC8380 982
++teklynx MACH_TEKLYNX TEKLYNX 983
++carbonado MACH_CARBONADO CARBONADO 984
++sysmos_mp730 MACH_SYSMOS_MP730 SYSMOS_MP730 985
+ snapper_cl15 MACH_SNAPPER_CL15 SNAPPER_CL15 986
++pgigim MACH_PGIGIM PGIGIM 987
++ptx9160p2 MACH_PTX9160P2 PTX9160P2 988
++dcore1 MACH_DCORE1 DCORE1 989
++victorpxa MACH_VICTORPXA VICTORPXA 990
++mx2dtb MACH_MX2DTB MX2DTB 991
++pxa_irex_er0100 MACH_PXA_IREX_ER0100 PXA_IREX_ER0100 992
+ omap_palmz71 MACH_OMAP_PALMZ71 OMAP_PALMZ71 993
++bartec_deg MACH_BARTEC_DEG BARTEC_DEG 994
++hw50251 MACH_HW50251 HW50251 995
++ibox MACH_IBOX IBOX 996
++atlaslh7a404 MACH_ATLASLH7A404 ATLASLH7A404 997
++pt2026 MACH_PT2026 PT2026 998
++htcalpine MACH_HTCALPINE HTCALPINE 999
++bartec_vtu MACH_BARTEC_VTU BARTEC_VTU 1000
++vcoreii MACH_VCOREII VCOREII 1001
++pdnb3 MACH_PDNB3 PDNB3 1002
++htcbeetles MACH_HTCBEETLES HTCBEETLES 1003
++s3c6400 MACH_S3C6400 S3C6400 1004
++s3c2443 MACH_S3C2443 S3C2443 1005
++omap_ldk MACH_OMAP_LDK OMAP_LDK 1006
++smdk2460 MACH_SMDK2460 SMDK2460 1007
++smdk2440 MACH_SMDK2440 SMDK2440 1008
+ smdk2412 MACH_SMDK2412 SMDK2412 1009
++webbox MACH_WEBBOX WEBBOX 1010
++cwwndp MACH_CWWNDP CWWNDP 1011
++i839 MACH_DRAGON DRAGON 1012
++opendo_cpu_board MACH_OPENDO_CPU_BOARD OPENDO_CPU_BOARD 1013
++ccm2200 MACH_CCM2200 CCM2200 1014
++etwarm MACH_ETWARM ETWARM 1015
++m93030 MACH_M93030 M93030 1016
++cc7u MACH_CC7U CC7U 1017
++mtt_ranger MACH_MTT_RANGER MTT_RANGER 1018
++nexus MACH_NEXUS NEXUS 1019
++desman MACH_DESMAN DESMAN 1020
++bkde303 MACH_BKDE303 BKDE303 1021
+ smdk2413 MACH_SMDK2413 SMDK2413 1022
++aml_m7200 MACH_AML_M7200 AML_M7200 1023
+ aml_m5900 MACH_AML_M5900 AML_M5900 1024
++sg640 MACH_SG640 SG640 1025
++edg79524 MACH_EDG79524 EDG79524 1026
++ai2410 MACH_AI2410 AI2410 1027
++ixp465 MACH_IXP465 IXP465 1028
+ balloon3 MACH_BALLOON3 BALLOON3 1029
++heins MACH_HEINS HEINS 1030
++mpluseva MACH_MPLUSEVA MPLUSEVA 1031
++rt042 MACH_RT042 RT042 1032
++cwiem MACH_CWIEM CWIEM 1033
++cm_x270 MACH_CM_X270 CM_X270 1034
++cm_x255 MACH_CM_X255 CM_X255 1035
++esh_at91 MACH_ESH_AT91 ESH_AT91 1036
++sandgate3 MACH_SANDGATE3 SANDGATE3 1037
++primo MACH_PRIMO PRIMO 1038
++gemstone MACH_GEMSTONE GEMSTONE 1039
++pronghorn_metro MACH_PRONGHORNMETRO PRONGHORNMETRO 1040
++sidewinder MACH_SIDEWINDER SIDEWINDER 1041
++picomod1 MACH_PICOMOD1 PICOMOD1 1042
++sg590 MACH_SG590 SG590 1043
++akai9307 MACH_AKAI9307 AKAI9307 1044
++fontaine MACH_FONTAINE FONTAINE 1045
++wombat MACH_WOMBAT WOMBAT 1046
++acq300 MACH_ACQ300 ACQ300 1047
++mod272 MACH_MOD_270 MOD_270 1048
++vmc_vc0820 MACH_VC0820 VC0820 1049
++ani_aim MACH_ANI_AIM ANI_AIM 1050
++jellyfish MACH_JELLYFISH JELLYFISH 1051
++amanita MACH_AMANITA AMANITA 1052
++vlink MACH_VLINK VLINK 1053
++dexflex MACH_DEXFLEX DEXFLEX 1054
++eigen_ttq MACH_EIGEN_TTQ EIGEN_TTQ 1055
++arcom_titan MACH_ARCOM_TITAN ARCOM_TITAN 1056
++tabla MACH_TABLA TABLA 1057
++mdirac3 MACH_MDIRAC3 MDIRAC3 1058
++mrhfbp2 MACH_MRHFBP2 MRHFBP2 1059
++at91rm9200rb MACH_AT91RM9200RB AT91RM9200RB 1060
++ani_apm MACH_ANI_APM ANI_APM 1061
++ella1 MACH_ELLA1 ELLA1 1062
++inhand_pxa27x MACH_INHAND_PXA27X INHAND_PXA27X 1063
++inhand_pxa25x MACH_INHAND_PXA25X INHAND_PXA25X 1064
++empos_xm MACH_EMPOS_XM EMPOS_XM 1065
++empos MACH_EMPOS EMPOS 1066
++empos_tiny MACH_EMPOS_TINY EMPOS_TINY 1067
++empos_sm MACH_EMPOS_SM EMPOS_SM 1068
++egret MACH_EGRET EGRET 1069
++ostrich MACH_OSTRICH OSTRICH 1070
++n50 MACH_N50 N50 1071
+ ecbat91 MACH_ECBAT91 ECBAT91 1072
++stareast MACH_STAREAST STAREAST 1073
++dspg_dw MACH_DSPG_DW DSPG_DW 1074
+ onearm MACH_ONEARM ONEARM 1075
++mrg110_6 MACH_MRG110_6 MRG110_6 1076
++wrt300nv2 MACH_WRT300NV2 WRT300NV2 1077
++xm_bulverde MACH_XM_BULVERDE XM_BULVERDE 1078
++msm6100 MACH_MSM6100 MSM6100 1079
++eti_b1 MACH_ETI_B1 ETI_B1 1080
++za9l_series MACH_ZILOG_ZA9L ZILOG_ZA9L 1081
++bit2440 MACH_BIT2440 BIT2440 1082
++nbi MACH_NBI NBI 1083
+ smdk2443 MACH_SMDK2443 SMDK2443 1084
++vdavinci MACH_VDAVINCI VDAVINCI 1085
++atc6 MACH_ATC6 ATC6 1086
++multmdw MACH_MULTMDW MULTMDW 1087
++mba2440 MACH_MBA2440 MBA2440 1088
++ecsd MACH_ECSD ECSD 1089
++palmz31 MACH_PALMZ31 PALMZ31 1090
+ fsg MACH_FSG FSG 1091
++razor101 MACH_RAZOR101 RAZOR101 1092
++opera_tdm MACH_OPERA_TDM OPERA_TDM 1093
++comcerto MACH_COMCERTO COMCERTO 1094
++tb0319 MACH_TB0319 TB0319 1095
++kws8000 MACH_KWS8000 KWS8000 1096
++b2 MACH_B2 B2 1097
++lcl54 MACH_LCL54 LCL54 1098
+ at91sam9260ek MACH_AT91SAM9260EK AT91SAM9260EK 1099
+ glantank MACH_GLANTANK GLANTANK 1100
+ n2100 MACH_N2100 N2100 1101
++n4100 MACH_N4100 N4100 1102
++rsc4 MACH_VERTICAL_RSC4 VERTICAL_RSC4 1103
++sg8100 MACH_SG8100 SG8100 1104
++im42xx MACH_IM42XX IM42XX 1105
++ftxx MACH_FTXX FTXX 1106
++lwfusion MACH_LWFUSION LWFUSION 1107
+ qt2410 MACH_QT2410 QT2410 1108
+ kixrp435 MACH_KIXRP435 KIXRP435 1109
++ccw9c MACH_CCW9C CCW9C 1110
++dabhs MACH_DABHS DABHS 1111
++gzmx MACH_GZMX GZMX 1112
++ipnw100ap MACH_IPNW100AP IPNW100AP 1113
+ cc9p9360dev MACH_CC9P9360DEV CC9P9360DEV 1114
++cc9p9750dev MACH_CC9P9750DEV CC9P9750DEV 1115
++cc9p9360val MACH_CC9P9360VAL CC9P9360VAL 1116
++cc9p9750val MACH_CC9P9750VAL CC9P9750VAL 1117
++nx70v MACH_NX70V NX70V 1118
++at91rm9200df MACH_AT91RM9200DF AT91RM9200DF 1119
++se_pilot2 MACH_SE_PILOT2 SE_PILOT2 1120
++mtcn_t800 MACH_MTCN_T800 MTCN_T800 1121
++vcmx212 MACH_VCMX212 VCMX212 1122
++lynx MACH_LYNX LYNX 1123
++at91sam9260id MACH_AT91SAM9260ID AT91SAM9260ID 1124
++hw86052 MACH_HW86052 HW86052 1125
++pilz_pmi3 MACH_PILZ_PMI3 PILZ_PMI3 1126
+ edb9302a MACH_EDB9302A EDB9302A 1127
+ edb9307a MACH_EDB9307A EDB9307A 1128
++ct_dfs MACH_CT_DFS CT_DFS 1129
++pilz_pmi4 MACH_PILZ_PMI4 PILZ_PMI4 1130
++xceednp_ixp MACH_XCEEDNP_IXP XCEEDNP_IXP 1131
++smdk2442b MACH_SMDK2442B SMDK2442B 1132
++xnode MACH_XNODE XNODE 1133
++aidx270 MACH_AIDX270 AIDX270 1134
++rema MACH_REMA REMA 1135
++bps1000 MACH_BPS1000 BPS1000 1136
++hw90350 MACH_HW90350 HW90350 1137
+ omap_3430sdp MACH_OMAP_3430SDP OMAP_3430SDP 1138
++bluetouch MACH_BLUETOUCH BLUETOUCH 1139
+ vstms MACH_VSTMS VSTMS 1140
++xsbase270 MACH_XSBASE270 XSBASE270 1141
++at91sam9260ek_cn MACH_AT91SAM9260EK_CN AT91SAM9260EK_CN 1142
++adsturboxb MACH_ADSTURBOXB ADSTURBOXB 1143
++oti4110 MACH_OTI4110 OTI4110 1144
++hme_pxa MACH_HME_PXA HME_PXA 1145
++deisterdca MACH_DEISTERDCA DEISTERDCA 1146
++ces_ssem2 MACH_CES_SSEM2 CES_SSEM2 1147
++ces_mtr MACH_CES_MTR CES_MTR 1148
++tds_avng_sbc MACH_TDS_AVNG_SBC TDS_AVNG_SBC 1149
++everest MACH_EVEREST EVEREST 1150
++pnx4010 MACH_PNX4010 PNX4010 1151
++oxnas MACH_OXNAS OXNAS 1152
++fiori MACH_FIORI FIORI 1153
++ml1200 MACH_ML1200 ML1200 1154
++pecos MACH_PECOS PECOS 1155
++nb2xxx MACH_NB2XXX NB2XXX 1156
++hw6900 MACH_HW6900 HW6900 1157
++cdcs_quoll MACH_CDCS_QUOLL CDCS_QUOLL 1158
++quicksilver MACH_QUICKSILVER QUICKSILVER 1159
++uplat926 MACH_UPLAT926 UPLAT926 1160
++dep2410_dep2410 MACH_DEP2410_THOMAS DEP2410_THOMAS 1161
++dtk2410 MACH_DTK2410 DTK2410 1162
++chili MACH_CHILI CHILI 1163
++demeter MACH_DEMETER DEMETER 1164
++dionysus MACH_DIONYSUS DIONYSUS 1165
++as352x MACH_AS352X AS352X 1166
++service MACH_SERVICE SERVICE 1167
++cs_e9301 MACH_CS_E9301 CS_E9301 1168
+ micro9m MACH_MICRO9M MICRO9M 1169
++ia_mospck MACH_IA_MOSPCK IA_MOSPCK 1170
++ql201b MACH_QL201B QL201B 1171
++bbm MACH_BBM BBM 1174
++exxx MACH_EXXX EXXX 1175
++wma11b MACH_WMA11B WMA11B 1176
++pelco_atlas MACH_PELCO_ATLAS PELCO_ATLAS 1177
++g500 MACH_G500 G500 1178
+ bug MACH_BUG BUG 1179
++mx33ads MACH_MX33ADS MX33ADS 1180
++chub MACH_CHUB CHUB 1181
++neo1973_gta01 MACH_NEO1973_GTA01 NEO1973_GTA01 1182
++w90n740 MACH_W90N740 W90N740 1183
++medallion_sa2410 MACH_MEDALLION_SA2410 MEDALLION_SA2410 1184
++ia_cpu_9200_2 MACH_IA_CPU_9200_2 IA_CPU_9200_2 1185
++dimmrm9200 MACH_DIMMRM9200 DIMMRM9200 1186
++pm9261 MACH_PM9261 PM9261 1187
++ml7304 MACH_ML7304 ML7304 1189
++ucp250 MACH_UCP250 UCP250 1190
++intboard MACH_INTBOARD INTBOARD 1191
++gulfstream MACH_GULFSTREAM GULFSTREAM 1192
++labquest MACH_LABQUEST LABQUEST 1193
++vcmx313 MACH_VCMX313 VCMX313 1194
++urg200 MACH_URG200 URG200 1195
++cpux255lcdnet MACH_CPUX255LCDNET CPUX255LCDNET 1196
++netdcu9 MACH_NETDCU9 NETDCU9 1197
++netdcu10 MACH_NETDCU10 NETDCU10 1198
++dspg_dga MACH_DSPG_DGA DSPG_DGA 1199
++dspg_dvw MACH_DSPG_DVW DSPG_DVW 1200
++solos MACH_SOLOS SOLOS 1201
+ at91sam9263ek MACH_AT91SAM9263EK AT91SAM9263EK 1202
++osstbox MACH_OSSTBOX OSSTBOX 1203
++kbat9261 MACH_KBAT9261 KBAT9261 1204
++ct1100 MACH_CT1100 CT1100 1205
++akcppxa MACH_AKCPPXA AKCPPXA 1206
++ochaya1020 MACH_OCHAYA1020 OCHAYA1020 1207
++hitrack MACH_HITRACK HITRACK 1208
++syme1 MACH_SYME1 SYME1 1209
++syhl1 MACH_SYHL1 SYHL1 1210
++empca400 MACH_EMPCA400 EMPCA400 1211
+ em7210 MACH_EM7210 EM7210 1212
++htchermes MACH_HTCHERMES HTCHERMES 1213
++eti_c1 MACH_ETI_C1 ETI_C1 1214
++ac100 MACH_AC100 AC100 1216
++sneetch MACH_SNEETCH SNEETCH 1217
++studentmate MACH_STUDENTMATE STUDENTMATE 1218
++zir2410 MACH_ZIR2410 ZIR2410 1219
++zir2413 MACH_ZIR2413 ZIR2413 1220
++dlonip3 MACH_DLONIP3 DLONIP3 1221
++instream MACH_INSTREAM INSTREAM 1222
++ambarella MACH_AMBARELLA AMBARELLA 1223
++nevis MACH_NEVIS NEVIS 1224
++htc_trinity MACH_HTC_TRINITY HTC_TRINITY 1225
++ql202b MACH_QL202B QL202B 1226
+ vpac270 MACH_VPAC270 VPAC270 1227
++rd129 MACH_RD129 RD129 1228
++htcwizard MACH_HTCWIZARD HTCWIZARD 1229
+ treo680 MACH_TREO680 TREO680 1230
++tecon_tmezon MACH_TECON_TMEZON TECON_TMEZON 1231
+ zylonite MACH_ZYLONITE ZYLONITE 1233
++gene1270 MACH_GENE1270 GENE1270 1234
++zir2412 MACH_ZIR2412 ZIR2412 1235
+ mx31lite MACH_MX31LITE MX31LITE 1236
++t700wx MACH_T700WX T700WX 1237
++vf100 MACH_VF100 VF100 1238
++nsb2 MACH_NSB2 NSB2 1239
++nxhmi_bb MACH_NXHMI_BB NXHMI_BB 1240
++nxhmi_re MACH_NXHMI_RE NXHMI_RE 1241
++n4100pro MACH_N4100PRO N4100PRO 1242
++sam9260 MACH_SAM9260 SAM9260 1243
++omap_treo600 MACH_OMAP_TREO600 OMAP_TREO600 1244
++indy2410 MACH_INDY2410 INDY2410 1245
++nelt_a MACH_NELT_A NELT_A 1246
++n311 MACH_N311 N311 1248
++at91sam9260vgk MACH_AT91SAM9260VGK AT91SAM9260VGK 1249
++at91leppe MACH_AT91LEPPE AT91LEPPE 1250
++at91lepccn MACH_AT91LEPCCN AT91LEPCCN 1251
++apc7100 MACH_APC7100 APC7100 1252
++stargazer MACH_STARGAZER STARGAZER 1253
++sonata MACH_SONATA SONATA 1254
++schmoogie MACH_SCHMOOGIE SCHMOOGIE 1255
++aztool MACH_AZTOOL AZTOOL 1256
+ mioa701 MACH_MIOA701 MIOA701 1257
++sxni9260 MACH_SXNI9260 SXNI9260 1258
++mxc27520evb MACH_MXC27520EVB MXC27520EVB 1259
+ armadillo5x0 MACH_ARMADILLO5X0 ARMADILLO5X0 1260
++mb9260 MACH_MB9260 MB9260 1261
++mb9263 MACH_MB9263 MB9263 1262
++ipac9302 MACH_IPAC9302 IPAC9302 1263
+ cc9p9360js MACH_CC9P9360JS CC9P9360JS 1264
++gallium MACH_GALLIUM GALLIUM 1265
++msc2410 MACH_MSC2410 MSC2410 1266
++ghi270 MACH_GHI270 GHI270 1267
++davinci_leonardo MACH_DAVINCI_LEONARDO DAVINCI_LEONARDO 1268
++oiab MACH_OIAB OIAB 1269
+ smdk6400 MACH_SMDK6400 SMDK6400 1270
+ nokia_n800 MACH_NOKIA_N800 NOKIA_N800 1271
++greenphone MACH_GREENPHONE GREENPHONE 1272
++compex42x MACH_COMPEXWP18 COMPEXWP18 1273
++xmate MACH_XMATE XMATE 1274
++energizer MACH_ENERGIZER ENERGIZER 1275
++ime1 MACH_IME1 IME1 1276
++sweda_tms MACH_SWEDATMS SWEDATMS 1277
++ntnp435c MACH_NTNP435C NTNP435C 1278
++spectro2 MACH_SPECTRO2 SPECTRO2 1279
++h6039 MACH_H6039 H6039 1280
+ ep80219 MACH_EP80219 EP80219 1281
++samoa_ii MACH_SAMOA_II SAMOA_II 1282
++cwmxl MACH_CWMXL CWMXL 1283
++as9200 MACH_AS9200 AS9200 1284
++sfx1149 MACH_SFX1149 SFX1149 1285
++navi010 MACH_NAVI010 NAVI010 1286
++multmdp MACH_MULTMDP MULTMDP 1287
++scb9520 MACH_SCB9520 SCB9520 1288
++htcathena MACH_HTCATHENA HTCATHENA 1289
++xp179 MACH_XP179 XP179 1290
++h4300 MACH_H4300 H4300 1291
+ goramo_mlr MACH_GORAMO_MLR GORAMO_MLR 1292
++mxc30020evb MACH_MXC30020EVB MXC30020EVB 1293
++adsbitsyg5 MACH_ADSBITSYG5 ADSBITSYG5 1294
++adsportalplus MACH_ADSPORTALPLUS ADSPORTALPLUS 1295
++mmsp2plus MACH_MMSP2PLUS MMSP2PLUS 1296
+ em_x270 MACH_EM_X270 EM_X270 1297
++tpp302 MACH_TPP302 TPP302 1298
++tpp104 MACH_TPM104 TPM104 1299
++tpm102 MACH_TPM102 TPM102 1300
++tpm109 MACH_TPM109 TPM109 1301
++fbxo1 MACH_FBXO1 FBXO1 1302
++hxd8 MACH_HXD8 HXD8 1303
+ neo1973_gta02 MACH_NEO1973_GTA02 NEO1973_GTA02 1304
++emtest MACH_EMTEST EMTEST 1305
++ad6900 MACH_AD6900 AD6900 1306
++europa MACH_EUROPA EUROPA 1307
++metroconnect MACH_METROCONNECT METROCONNECT 1308
++ez_s2410 MACH_EZ_S2410 EZ_S2410 1309
++ez_s2440 MACH_EZ_S2440 EZ_S2440 1310
++ez_ep9312 MACH_EZ_EP9312 EZ_EP9312 1311
++ez_ep9315 MACH_EZ_EP9315 EZ_EP9315 1312
++ez_x7 MACH_EZ_X7 EZ_X7 1313
++godotdb MACH_GODOTDB GODOTDB 1314
++mistral MACH_MISTRAL MISTRAL 1315
++msm MACH_MSM MSM 1316
++ct5910 MACH_CT5910 CT5910 1317
++ct5912 MACH_CT5912 CT5912 1318
++argonst_mp MACH_HYNET_INE HYNET_INE 1319
++hynet_app MACH_HYNET_APP HYNET_APP 1320
++msm7200 MACH_MSM7200 MSM7200 1321
++msm7600 MACH_MSM7600 MSM7600 1322
++ceb255 MACH_CEB255 CEB255 1323
++ciel MACH_CIEL CIEL 1324
++slm5650 MACH_SLM5650 SLM5650 1325
+ at91sam9rlek MACH_AT91SAM9RLEK AT91SAM9RLEK 1326
++comtech_router MACH_COMTECH_ROUTER COMTECH_ROUTER 1327
++sbc2410x MACH_SBC2410X SBC2410X 1328
++at4x0bd MACH_AT4X0BD AT4X0BD 1329
++cbifr MACH_CBIFR CBIFR 1330
++arcom_quantum MACH_ARCOM_QUANTUM ARCOM_QUANTUM 1331
++matrix520 MACH_MATRIX520 MATRIX520 1332
++matrix510 MACH_MATRIX510 MATRIX510 1333
++matrix500 MACH_MATRIX500 MATRIX500 1334
++m501 MACH_M501 M501 1335
++aaeon1270 MACH_AAEON1270 AAEON1270 1336
++matrix500ev MACH_MATRIX500EV MATRIX500EV 1337
++pac500 MACH_PAC500 PAC500 1338
++pnx8181 MACH_PNX8181 PNX8181 1339
+ colibri320 MACH_COLIBRI320 COLIBRI320 1340
++aztoolbb MACH_AZTOOLBB AZTOOLBB 1341
++aztoolg2 MACH_AZTOOLG2 AZTOOLG2 1342
++dvlhost MACH_DVLHOST DVLHOST 1343
++zir9200 MACH_ZIR9200 ZIR9200 1344
++zir9260 MACH_ZIR9260 ZIR9260 1345
++cocopah MACH_COCOPAH COCOPAH 1346
++nds MACH_NDS NDS 1347
++rosencrantz MACH_ROSENCRANTZ ROSENCRANTZ 1348
++fttx_odsc MACH_FTTX_ODSC FTTX_ODSC 1349
++classe_r6904 MACH_CLASSE_R6904 CLASSE_R6904 1350
+ cam60 MACH_CAM60 CAM60 1351
++mxc30031ads MACH_MXC30031ADS MXC30031ADS 1352
++datacall MACH_DATACALL DATACALL 1353
+ at91eb01 MACH_AT91EB01 AT91EB01 1354
++rty MACH_RTY RTY 1355
++dwl2100 MACH_DWL2100 DWL2100 1356
++vinsi MACH_VINSI VINSI 1357
+ db88f5281 MACH_DB88F5281 DB88F5281 1358
+ csb726 MACH_CSB726 CSB726 1359
++tik27 MACH_TIK27 TIK27 1360
++mx_uc7420 MACH_MX_UC7420 MX_UC7420 1361
++rirm3 MACH_RIRM3 RIRM3 1362
++pelco_odyssey MACH_PELCO_ODYSSEY PELCO_ODYSSEY 1363
++adx_abox MACH_ADX_ABOX ADX_ABOX 1365
++adx_tpid MACH_ADX_TPID ADX_TPID 1366
++minicheck MACH_MINICHECK MINICHECK 1367
++idam MACH_IDAM IDAM 1368
++mario_mx MACH_MARIO_MX MARIO_MX 1369
++vi1888 MACH_VI1888 VI1888 1370
++zr4230 MACH_ZR4230 ZR4230 1371
++t1_ix_blue MACH_T1_IX_BLUE T1_IX_BLUE 1372
++syhq2 MACH_SYHQ2 SYHQ2 1373
++computime_r3 MACH_COMPUTIME_R3 COMPUTIME_R3 1374
++oratis MACH_ORATIS ORATIS 1375
++mikko MACH_MIKKO MIKKO 1376
++holon MACH_HOLON HOLON 1377
++olip8 MACH_OLIP8 OLIP8 1378
++ghi270hg MACH_GHI270HG GHI270HG 1379
+ davinci_dm6467_evm MACH_DAVINCI_DM6467_EVM DAVINCI_DM6467_EVM 1380
+ davinci_dm355_evm MACH_DAVINCI_DM355_EVM DAVINCI_DM355_EVM 1381
++blackriver MACH_BLACKRIVER BLACKRIVER 1383
++sandgate_wp MACH_SANDGATEWP SANDGATEWP 1384
++cdotbwsg MACH_CDOTBWSG CDOTBWSG 1385
++quark963 MACH_QUARK963 QUARK963 1386
++csb735 MACH_CSB735 CSB735 1387
+ littleton MACH_LITTLETON LITTLETON 1388
++mio_p550 MACH_MIO_P550 MIO_P550 1389
++motion2440 MACH_MOTION2440 MOTION2440 1390
++imm500 MACH_IMM500 IMM500 1391
++homematic MACH_HOMEMATIC HOMEMATIC 1392
++ermine MACH_ERMINE ERMINE 1393
++kb9202b MACH_KB9202B KB9202B 1394
++hs1xx MACH_HS1XX HS1XX 1395
++studentmate2440 MACH_STUDENTMATE2440 STUDENTMATE2440 1396
++arvoo_l1_z1 MACH_ARVOO_L1_Z1 ARVOO_L1_Z1 1397
++dep2410k MACH_DEP2410K DEP2410K 1398
++xxsvideo MACH_XXSVIDEO XXSVIDEO 1399
++im4004 MACH_IM4004 IM4004 1400
++ochaya1050 MACH_OCHAYA1050 OCHAYA1050 1401
++lep9261 MACH_LEP9261 LEP9261 1402
++svenmeb MACH_SVENMEB SVENMEB 1403
++fortunet2ne MACH_FORTUNET2NE FORTUNET2NE 1404
++nxhx MACH_NXHX NXHX 1406
+ realview_pb11mp MACH_REALVIEW_PB11MP REALVIEW_PB11MP 1407
++ids500 MACH_IDS500 IDS500 1408
++ors_n725 MACH_ORS_N725 ORS_N725 1409
++hsdarm MACH_HSDARM HSDARM 1410
++sha_pon003 MACH_SHA_PON003 SHA_PON003 1411
++sha_pon004 MACH_SHA_PON004 SHA_PON004 1412
++sha_pon007 MACH_SHA_PON007 SHA_PON007 1413
++sha_pon011 MACH_SHA_PON011 SHA_PON011 1414
++h6042 MACH_H6042 H6042 1415
++h6043 MACH_H6043 H6043 1416
++looxc550 MACH_LOOXC550 LOOXC550 1417
++cnty_titan MACH_CNTY_TITAN CNTY_TITAN 1418
++app3xx MACH_APP3XX APP3XX 1419
++sideoatsgrama MACH_SIDEOATSGRAMA SIDEOATSGRAMA 1420
++treo700p MACH_TREO700P TREO700P 1421
++treo700w MACH_TREO700W TREO700W 1422
++treo750 MACH_TREO750 TREO750 1423
++treo755p MACH_TREO755P TREO755P 1424
++ezreganut9200 MACH_EZREGANUT9200 EZREGANUT9200 1425
++sarge MACH_SARGE SARGE 1426
++a696 MACH_A696 A696 1427
++turtle1916 MACH_TURTLE TURTLE 1428
+ mx27_3ds MACH_MX27_3DS MX27_3DS 1430
++bishop MACH_BISHOP BISHOP 1431
++pxx MACH_PXX PXX 1432
++redwood MACH_REDWOOD REDWOOD 1433
++omap_2430dlp MACH_OMAP_2430DLP OMAP_2430DLP 1436
++omap_2430osk MACH_OMAP_2430OSK OMAP_2430OSK 1437
++sardine MACH_SARDINE SARDINE 1438
+ halibut MACH_HALIBUT HALIBUT 1439
+ trout MACH_TROUT TROUT 1440
++goldfish MACH_GOLDFISH GOLDFISH 1441
++gesbc2440 MACH_GESBC2440 GESBC2440 1442
++nomad MACH_NOMAD NOMAD 1443
++rosalind MACH_ROSALIND ROSALIND 1444
++cc9p9215 MACH_CC9P9215 CC9P9215 1445
++cc9p9210 MACH_CC9P9210 CC9P9210 1446
++cc9p9215js MACH_CC9P9215JS CC9P9215JS 1447
++cc9p9210js MACH_CC9P9210JS CC9P9210JS 1448
++nasffe MACH_NASFFE NASFFE 1449
++tn2x0bd MACH_TN2X0BD TN2X0BD 1450
++gwmpxa MACH_GWMPXA GWMPXA 1451
++exyplus MACH_EXYPLUS EXYPLUS 1452
++jadoo21 MACH_JADOO21 JADOO21 1453
++looxn560 MACH_LOOXN560 LOOXN560 1454
++bonsai MACH_BONSAI BONSAI 1455
++adsmilgato MACH_ADSMILGATO ADSMILGATO 1456
++gba MACH_GBA GBA 1457
++h6044 MACH_H6044 H6044 1458
++app MACH_APP APP 1459
+ tct_hammer MACH_TCT_HAMMER TCT_HAMMER 1460
+ herald MACH_HERALD HERALD 1461
++artemis MACH_ARTEMIS ARTEMIS 1462
++htctitan MACH_HTCTITAN HTCTITAN 1463
++qranium MACH_QRANIUM QRANIUM 1464
++adx_wsc2 MACH_ADX_WSC2 ADX_WSC2 1465
++adx_medcom MACH_ADX_MEDCOM ADX_MEDCOM 1466
++bboard MACH_BBOARD BBOARD 1467
++cambria MACH_CAMBRIA CAMBRIA 1468
++mt7xxx MACH_MT7XXX MT7XXX 1469
++matrix512 MACH_MATRIX512 MATRIX512 1470
++matrix522 MACH_MATRIX522 MATRIX522 1471
++ipac5010 MACH_IPAC5010 IPAC5010 1472
++sakura MACH_SAKURA SAKURA 1473
++grocx MACH_GROCX GROCX 1474
++pm9263 MACH_PM9263 PM9263 1475
+ sim_one MACH_SIM_ONE SIM_ONE 1476
++acq132 MACH_ACQ132 ACQ132 1477
++datr MACH_DATR DATR 1478
++actux1 MACH_ACTUX1 ACTUX1 1479
++actux2 MACH_ACTUX2 ACTUX2 1480
++actux3 MACH_ACTUX3 ACTUX3 1481
++flexit MACH_FLEXIT FLEXIT 1482
++bh2x0bd MACH_BH2X0BD BH2X0BD 1483
++atb2002 MACH_ATB2002 ATB2002 1484
++xenon MACH_XENON XENON 1485
++fm607 MACH_FM607 FM607 1486
++matrix514 MACH_MATRIX514 MATRIX514 1487
++matrix524 MACH_MATRIX524 MATRIX524 1488
++inpod MACH_INPOD INPOD 1489
+ jive MACH_JIVE JIVE 1490
++tll_mx21 MACH_TLL_MX21 TLL_MX21 1491
++sbc2800 MACH_SBC2800 SBC2800 1492
++cc7ucamry MACH_CC7UCAMRY CC7UCAMRY 1493
++ubisys_p9_sc15 MACH_UBISYS_P9_SC15 UBISYS_P9_SC15 1494
++ubisys_p9_ssc2d10 MACH_UBISYS_P9_SSC2D10 UBISYS_P9_SSC2D10 1495
++ubisys_p9_rcu3 MACH_UBISYS_P9_RCU3 UBISYS_P9_RCU3 1496
++aml_m8000 MACH_AML_M8000 AML_M8000 1497
++snapper_270 MACH_SNAPPER_270 SNAPPER_270 1498
++omap_bbx MACH_OMAP_BBX OMAP_BBX 1499
++ucn2410 MACH_UCN2410 UCN2410 1500
+ sam9_l9260 MACH_SAM9_L9260 SAM9_L9260 1501
++eti_c2 MACH_ETI_C2 ETI_C2 1502
++avalanche MACH_AVALANCHE AVALANCHE 1503
+ realview_pb1176 MACH_REALVIEW_PB1176 REALVIEW_PB1176 1504
++dp1500 MACH_DP1500 DP1500 1505
++apple_iphone MACH_APPLE_IPHONE APPLE_IPHONE 1506
+ yl9200 MACH_YL9200 YL9200 1507
+ rd88f5182 MACH_RD88F5182 RD88F5182 1508
+ kurobox_pro MACH_KUROBOX_PRO KUROBOX_PRO 1509
++se_poet MACH_SE_POET SE_POET 1510
+ mx31_3ds MACH_MX31_3DS MX31_3DS 1511
++r270 MACH_R270 R270 1512
++armour21 MACH_ARMOUR21 ARMOUR21 1513
++dt2 MACH_DT2 DT2 1514
++vt4 MACH_VT4 VT4 1515
++tyco320 MACH_TYCO320 TYCO320 1516
++adma MACH_ADMA ADMA 1517
++wp188 MACH_WP188 WP188 1518
++corsica MACH_CORSICA CORSICA 1519
++bigeye MACH_BIGEYE BIGEYE 1520
++tll5000 MACH_TLL5000 TLL5000 1522
++bebot MACH_BEBOT BEBOT 1523
+ qong MACH_QONG QONG 1524
++tcompact MACH_TCOMPACT TCOMPACT 1525
++puma5 MACH_PUMA5 PUMA5 1526
++elara MACH_ELARA ELARA 1527
++ellington MACH_ELLINGTON ELLINGTON 1528
++xda_atom MACH_XDA_ATOM XDA_ATOM 1529
++energizer2 MACH_ENERGIZER2 ENERGIZER2 1530
++odin MACH_ODIN ODIN 1531
++actux4 MACH_ACTUX4 ACTUX4 1532
++esl_omap MACH_ESL_OMAP ESL_OMAP 1533
+ omap2evm MACH_OMAP2EVM OMAP2EVM 1534
+ omap3evm MACH_OMAP3EVM OMAP3EVM 1535
++adx_pcu57 MACH_ADX_PCU57 ADX_PCU57 1536
++monaco MACH_MONACO MONACO 1537
++levante MACH_LEVANTE LEVANTE 1538
++tmxipx425 MACH_TMXIPX425 TMXIPX425 1539
++leep MACH_LEEP LEEP 1540
++raad MACH_RAAD RAAD 1541
+ dns323 MACH_DNS323 DNS323 1542
++ap1000 MACH_AP1000 AP1000 1543
++a9sam6432 MACH_A9SAM6432 A9SAM6432 1544
++shiny MACH_SHINY SHINY 1545
+ omap3_beagle MACH_OMAP3_BEAGLE OMAP3_BEAGLE 1546
++csr_bdb2 MACH_CSR_BDB2 CSR_BDB2 1547
+ nokia_n810 MACH_NOKIA_N810 NOKIA_N810 1548
++c270 MACH_C270 C270 1549
++sentry MACH_SENTRY SENTRY 1550
+ pcm038 MACH_PCM038 PCM038 1551
++anc300 MACH_ANC300 ANC300 1552
++htckaiser MACH_HTCKAISER HTCKAISER 1553
++sbat100 MACH_SBAT100 SBAT100 1554
++modunorm MACH_MODUNORM MODUNORM 1555
++pelos_twarm MACH_PELOS_TWARM PELOS_TWARM 1556
++flank MACH_FLANK FLANK 1557
++sirloin MACH_SIRLOIN SIRLOIN 1558
++brisket MACH_BRISKET BRISKET 1559
++chuck MACH_CHUCK CHUCK 1560
++otter MACH_OTTER OTTER 1561
++davinci_ldk MACH_DAVINCI_LDK DAVINCI_LDK 1562
++phreedom MACH_PHREEDOM PHREEDOM 1563
++sg310 MACH_SG310 SG310 1564
+ ts209 MACH_TS209 TS209 1565
+ at91cap9adk MACH_AT91CAP9ADK AT91CAP9ADK 1566
++tion9315 MACH_TION9315 TION9315 1567
++mast MACH_MAST MAST 1568
++pfw MACH_PFW PFW 1569
++yl_p2440 MACH_YL_P2440 YL_P2440 1570
++zsbc32 MACH_ZSBC32 ZSBC32 1571
++omap_pace2 MACH_OMAP_PACE2 OMAP_PACE2 1572
++imx_pace2 MACH_IMX_PACE2 IMX_PACE2 1573
+ mx31moboard MACH_MX31MOBOARD MX31MOBOARD 1574
++mx37_3ds MACH_MX37_3DS MX37_3DS 1575
++rcc MACH_RCC RCC 1576
++dmp MACH_ARM9 ARM9 1577
+ vision_ep9307 MACH_VISION_EP9307 VISION_EP9307 1578
++scly1000 MACH_SCLY1000 SCLY1000 1579
++fontel_ep MACH_FONTEL_EP FONTEL_EP 1580
++voiceblue3g MACH_VOICEBLUE3G VOICEBLUE3G 1581
++tt9200 MACH_TT9200 TT9200 1582
++digi2410 MACH_DIGI2410 DIGI2410 1583
+ terastation_pro2 MACH_TERASTATION_PRO2 TERASTATION_PRO2 1584
+ linkstation_pro MACH_LINKSTATION_PRO LINKSTATION_PRO 1585
++motorola_a780 MACH_MOTOROLA_A780 MOTOROLA_A780 1587
++motorola_e6 MACH_MOTOROLA_E6 MOTOROLA_E6 1588
++motorola_e2 MACH_MOTOROLA_E2 MOTOROLA_E2 1589
++motorola_e680 MACH_MOTOROLA_E680 MOTOROLA_E680 1590
++ur2410 MACH_UR2410 UR2410 1591
++tas9261 MACH_TAS9261 TAS9261 1592
++davinci_hermes_hd MACH_HERMES_HD HERMES_HD 1593
++davinci_perseo_hd MACH_PERSEO_HD PERSEO_HD 1594
++stargazer2 MACH_STARGAZER2 STARGAZER2 1595
+ e350 MACH_E350 E350 1596
++wpcm450 MACH_WPCM450 WPCM450 1597
++cartesio MACH_CARTESIO CARTESIO 1598
++toybox MACH_TOYBOX TOYBOX 1599
++tx27 MACH_TX27 TX27 1600
+ ts409 MACH_TS409 TS409 1601
++p300 MACH_P300 P300 1602
++xdacomet MACH_XDACOMET XDACOMET 1603
++dexflex2 MACH_DEXFLEX2 DEXFLEX2 1604
++ow MACH_OW OW 1605
++armebs3 MACH_ARMEBS3 ARMEBS3 1606
++u3 MACH_U3 U3 1607
++smdk2450 MACH_SMDK2450 SMDK2450 1608
+ rsi_ews MACH_RSI_EWS RSI_EWS 1609
++tnb MACH_TNB TNB 1610
++toepath MACH_TOEPATH TOEPATH 1611
++kb9263 MACH_KB9263 KB9263 1612
++mt7108 MACH_MT7108 MT7108 1613
++smtr2440 MACH_SMTR2440 SMTR2440 1614
++manao MACH_MANAO MANAO 1615
+ cm_x300 MACH_CM_X300 CM_X300 1616
++gulfstream_kp MACH_GULFSTREAM_KP GULFSTREAM_KP 1617
++lanreadyfn522 MACH_LANREADYFN522 LANREADYFN522 1618
++arma37 MACH_ARMA37 ARMA37 1619
++mendel MACH_MENDEL MENDEL 1620
++pelco_iliad MACH_PELCO_ILIAD PELCO_ILIAD 1621
++unit2p MACH_UNIT2P UNIT2P 1622
++inc20otter MACH_INC20OTTER INC20OTTER 1623
+ at91sam9g20ek MACH_AT91SAM9G20EK AT91SAM9G20EK 1624
++sc_ge2 MACH_STORCENTER STORCENTER 1625
+ smdk6410 MACH_SMDK6410 SMDK6410 1626
+ u300 MACH_U300 U300 1627
++u500 MACH_U500 U500 1628
++ds9260 MACH_DS9260 DS9260 1629
++riverrock MACH_RIVERROCK RIVERROCK 1630
++scibath MACH_SCIBATH SCIBATH 1631
++at91sam7se MACH_AT91SAM7SE512EK AT91SAM7SE512EK 1632
+ wrt350n_v2 MACH_WRT350N_V2 WRT350N_V2 1633
++multimedia MACH_MULTIMEDIA MULTIMEDIA 1634
++marvin MACH_MARVIN MARVIN 1635
++x500 MACH_X500 X500 1636
++awlug4lcu MACH_AWLUG4LCU AWLUG4LCU 1637
++palermoc MACH_PALERMOC PALERMOC 1638
+ omap_ldp MACH_OMAP_LDP OMAP_LDP 1639
++ip500 MACH_IP500 IP500 1640
++ase2 MACH_ASE2 ASE2 1642
++mx35evb MACH_MX35EVB MX35EVB 1643
++aml_m8050 MACH_AML_M8050 AML_M8050 1644
+ mx35_3ds MACH_MX35_3DS MX35_3DS 1645
++mars MACH_MARS MARS 1646
+ neuros_osd2 MACH_NEUROS_OSD2 NEUROS_OSD2 1647
++badger MACH_BADGER BADGER 1648
+ trizeps4wl MACH_TRIZEPS4WL TRIZEPS4WL 1649
++trizeps5 MACH_TRIZEPS5 TRIZEPS5 1650
++marlin MACH_MARLIN MARLIN 1651
+ ts78xx MACH_TS78XX TS78XX 1652
++hpipaq214 MACH_HPIPAQ214 HPIPAQ214 1653
++at572d940dcm MACH_AT572D940DCM AT572D940DCM 1654
++ne1board MACH_NE1BOARD NE1BOARD 1655
++zante MACH_ZANTE ZANTE 1656
+ sffsdr MACH_SFFSDR SFFSDR 1657
++tw2662 MACH_TW2662 TW2662 1658
++vf10xx MACH_VF10XX VF10XX 1659
++zoran43xx MACH_ZORAN43XX ZORAN43XX 1660
++sonix926 MACH_SONIX926 SONIX926 1661
++celestialsemi MACH_CELESTIALSEMI CELESTIALSEMI 1662
++cc9m2443js MACH_CC9M2443JS CC9M2443JS 1663
++tw5334 MACH_TW5334 TW5334 1664
++omap_htcartemis MACH_HTCARTEMIS HTCARTEMIS 1665
++nal_hlite MACH_NAL_HLITE NAL_HLITE 1666
++htcvogue MACH_HTCVOGUE HTCVOGUE 1667
++smartweb MACH_SMARTWEB SMARTWEB 1668
++mv86xx MACH_MV86XX MV86XX 1669
++mv87xx MACH_MV87XX MV87XX 1670
++songyoungho MACH_SONGYOUNGHO SONGYOUNGHO 1671
++younghotema MACH_YOUNGHOTEMA YOUNGHOTEMA 1672
+ pcm037 MACH_PCM037 PCM037 1673
++mmvp MACH_MMVP MMVP 1674
++mmap MACH_MMAP MMAP 1675
++ptid2410 MACH_PTID2410 PTID2410 1676
++james_926 MACH_JAMES_926 JAMES_926 1677
++fm6000 MACH_FM6000 FM6000 1678
+ db88f6281_bp MACH_DB88F6281_BP DB88F6281_BP 1680
+ rd88f6192_nas MACH_RD88F6192_NAS RD88F6192_NAS 1681
+ rd88f6281 MACH_RD88F6281 RD88F6281 1682
+ db78x00_bp MACH_DB78X00_BP DB78X00_BP 1683
+ smdk2416 MACH_SMDK2416 SMDK2416 1685
++oce_spider_si MACH_OCE_SPIDER_SI OCE_SPIDER_SI 1686
++oce_spider_sk MACH_OCE_SPIDER_SK OCE_SPIDER_SK 1687
++rovern6 MACH_ROVERN6 ROVERN6 1688
++pelco_evolution MACH_PELCO_EVOLUTION PELCO_EVOLUTION 1689
+ wbd111 MACH_WBD111 WBD111 1690
++elaracpe MACH_ELARACPE ELARACPE 1691
++mabv3 MACH_MABV3 MABV3 1692
+ mv2120 MACH_MV2120 MV2120 1693
++csb737 MACH_CSB737 CSB737 1695
+ mx51_3ds MACH_MX51_3DS MX51_3DS 1696
++g900 MACH_G900 G900 1697
++apf27 MACH_APF27 APF27 1698
++ggus2000 MACH_GGUS2000 GGUS2000 1699
++omap_2430_mimic MACH_OMAP_2430_MIMIC OMAP_2430_MIMIC 1700
+ imx27lite MACH_IMX27LITE IMX27LITE 1701
++almex MACH_ALMEX ALMEX 1702
++control MACH_CONTROL CONTROL 1703
++mba2410 MACH_MBA2410 MBA2410 1704
++volcano MACH_VOLCANO VOLCANO 1705
++zenith MACH_ZENITH ZENITH 1706
++muchip MACH_MUCHIP MUCHIP 1707
++magellan MACH_MAGELLAN MAGELLAN 1708
+ usb_a9260 MACH_USB_A9260 USB_A9260 1709
+ usb_a9263 MACH_USB_A9263 USB_A9263 1710
+ qil_a9260 MACH_QIL_A9260 QIL_A9260 1711
++cme9210 MACH_CME9210 CME9210 1712
++hczh4 MACH_HCZH4 HCZH4 1713
++spearbasic MACH_SPEARBASIC SPEARBASIC 1714
++dep2440 MACH_DEP2440 DEP2440 1715
++hdl_gxr MACH_HDL_GXR HDL_GXR 1716
++hdl_gt MACH_HDL_GT HDL_GT 1717
++hdl_4g MACH_HDL_4G HDL_4G 1718
++s3c6000 MACH_S3C6000 S3C6000 1719
++mmsp2_mdk MACH_MMSP2_MDK MMSP2_MDK 1720
++mpx220 MACH_MPX220 MPX220 1721
+ kzm_arm11_01 MACH_KZM_ARM11_01 KZM_ARM11_01 1722
++htc_polaris MACH_HTC_POLARIS HTC_POLARIS 1723
++htc_kaiser MACH_HTC_KAISER HTC_KAISER 1724
++lg_ks20 MACH_LG_KS20 LG_KS20 1725
++hhgps MACH_HHGPS HHGPS 1726
+ nokia_n810_wimax MACH_NOKIA_N810_WIMAX NOKIA_N810_WIMAX 1727
++insight MACH_INSIGHT INSIGHT 1728
+ sapphire MACH_SAPPHIRE SAPPHIRE 1729
++csb637xo MACH_CSB637XO CSB637XO 1730
++evisiong MACH_EVISIONG EVISIONG 1731
+ stmp37xx MACH_STMP37XX STMP37XX 1732
+ stmp378x MACH_STMP378X STMP378X 1733
++tnt MACH_TNT TNT 1734
++tbxt MACH_TBXT TBXT 1735
++playmate MACH_PLAYMATE PLAYMATE 1736
++pns10 MACH_PNS10 PNS10 1737
++eznavi MACH_EZNAVI EZNAVI 1738
++ps4000 MACH_PS4000 PS4000 1739
+ ezx_a780 MACH_EZX_A780 EZX_A780 1740
+ ezx_e680 MACH_EZX_E680 EZX_E680 1741
+ ezx_a1200 MACH_EZX_A1200 EZX_A1200 1742
+ ezx_e6 MACH_EZX_E6 EZX_E6 1743
+ ezx_e2 MACH_EZX_E2 EZX_E2 1744
+ ezx_a910 MACH_EZX_A910 EZX_A910 1745
++cwmx31 MACH_CWMX31 CWMX31 1746
++sl2312 MACH_SL2312 SL2312 1747
++blenny MACH_BLENNY BLENNY 1748
++ds107 MACH_DS107 DS107 1749
++dsx07 MACH_DSX07 DSX07 1750
++picocom1 MACH_PICOCOM1 PICOCOM1 1751
++lynx_wolverine MACH_LYNX_WOLVERINE LYNX_WOLVERINE 1752
++ubisys_p9_sc19 MACH_UBISYS_P9_SC19 UBISYS_P9_SC19 1753
++kratos_low MACH_KRATOS_LOW KRATOS_LOW 1754
++m700 MACH_M700 M700 1755
+ edmini_v2 MACH_EDMINI_V2 EDMINI_V2 1756
+ zipit2 MACH_ZIPIT2 ZIPIT2 1757
++hslfemtocell MACH_HSLFEMTOCELL HSLFEMTOCELL 1758
++daintree_at91 MACH_DAINTREE_AT91 DAINTREE_AT91 1759
++sg560usb MACH_SG560USB SG560USB 1760
+ omap3_pandora MACH_OMAP3_PANDORA OMAP3_PANDORA 1761
++usr8200 MACH_USR8200 USR8200 1762
++s1s65k MACH_S1S65K S1S65K 1763
++s2s65a MACH_S2S65A S2S65A 1764
++icore MACH_ICORE ICORE 1765
+ mss2 MACH_MSS2 MSS2 1766
++belmont MACH_BELMONT BELMONT 1767
++asusp525 MACH_ASUSP525 ASUSP525 1768
+ lb88rc8480 MACH_LB88RC8480 LB88RC8480 1769
++hipxa MACH_HIPXA HIPXA 1770
+ mx25_3ds MACH_MX25_3DS MX25_3DS 1771
++m800 MACH_M800 M800 1772
+ omap3530_lv_som MACH_OMAP3530_LV_SOM OMAP3530_LV_SOM 1773
++prima_evb MACH_PRIMA_EVB PRIMA_EVB 1774
++mx31bt1 MACH_MX31BT1 MX31BT1 1775
++atlas4_evb MACH_ATLAS4_EVB ATLAS4_EVB 1776
++mx31cicada MACH_MX31CICADA MX31CICADA 1777
++mi424wr MACH_MI424WR MI424WR 1778
++axs_ultrax MACH_AXS_ULTRAX AXS_ULTRAX 1779
++at572d940deb MACH_AT572D940DEB AT572D940DEB 1780
+ davinci_da830_evm MACH_DAVINCI_DA830_EVM DAVINCI_DA830_EVM 1781
++ep9302 MACH_EP9302 EP9302 1782
++at572d940hfek MACH_AT572D940HFEB AT572D940HFEB 1783
++cybook3 MACH_CYBOOK3 CYBOOK3 1784
++wdg002 MACH_WDG002 WDG002 1785
++sg560adsl MACH_SG560ADSL SG560ADSL 1786
++nextio_n2800_ica MACH_NEXTIO_N2800_ICA NEXTIO_N2800_ICA 1787
+ dove_db MACH_DOVE_DB DOVE_DB 1788
++vandihud MACH_VANDIHUD VANDIHUD 1790
++magx_e8 MACH_MAGX_E8 MAGX_E8 1791
++magx_z6 MACH_MAGX_Z6 MAGX_Z6 1792
++magx_v8 MACH_MAGX_V8 MAGX_V8 1793
++magx_u9 MACH_MAGX_U9 MAGX_U9 1794
++toughcf08 MACH_TOUGHCF08 TOUGHCF08 1795
++zw4400 MACH_ZW4400 ZW4400 1796
++marat91 MACH_MARAT91 MARAT91 1797
+ overo MACH_OVERO OVERO 1798
+ at2440evb MACH_AT2440EVB AT2440EVB 1799
+ neocore926 MACH_NEOCORE926 NEOCORE926 1800
+ wnr854t MACH_WNR854T WNR854T 1801
++imx27 MACH_IMX27 IMX27 1802
++moose_db MACH_MOOSE_DB MOOSE_DB 1803
++fab4 MACH_FAB4 FAB4 1804
++htcdiamond MACH_HTCDIAMOND HTCDIAMOND 1805
++fiona MACH_FIONA FIONA 1806
++mxc30030_x MACH_MXC30030_X MXC30030_X 1807
++bmp1000 MACH_BMP1000 BMP1000 1808
++logi9200 MACH_LOGI9200 LOGI9200 1809
++tqma31 MACH_TQMA31 TQMA31 1810
++ccw9p9215js MACH_CCW9P9215JS CCW9P9215JS 1811
+ rd88f5181l_ge MACH_RD88F5181L_GE RD88F5181L_GE 1812
++sifmain MACH_SIFMAIN SIFMAIN 1813
++sam9_l9261 MACH_SAM9_L9261 SAM9_L9261 1814
++cc9m2443 MACH_CC9M2443 CC9M2443 1815
++xaria300 MACH_XARIA300 XARIA300 1816
++it9200 MACH_IT9200 IT9200 1817
+ rd88f5181l_fxo MACH_RD88F5181L_FXO RD88F5181L_FXO 1818
++kriss_sensor MACH_KRISS_SENSOR KRISS_SENSOR 1819
++pilz_pmi5 MACH_PILZ_PMI5 PILZ_PMI5 1820
++jade MACH_JADE JADE 1821
++ks8695_softplc MACH_KS8695_SOFTPLC KS8695_SOFTPLC 1822
++gprisc3 MACH_GPRISC3 GPRISC3 1823
+ stamp9g20 MACH_STAMP9G20 STAMP9G20 1824
++smdk6430 MACH_SMDK6430 SMDK6430 1825
+ smdkc100 MACH_SMDKC100 SMDKC100 1826
+ tavorevb MACH_TAVOREVB TAVOREVB 1827
+ saar MACH_SAAR SAAR 1828
++deister_eyecam MACH_DEISTER_EYECAM DEISTER_EYECAM 1829
+ at91sam9m10g45ek MACH_AT91SAM9M10G45EK AT91SAM9M10G45EK 1830
++linkstation_produo MACH_LINKSTATION_PRODUO LINKSTATION_PRODUO 1831
++hit_b0 MACH_HIT_B0 HIT_B0 1832
++adx_rmu MACH_ADX_RMU ADX_RMU 1833
++xg_cpe_main MACH_XG_CPE_MAIN XG_CPE_MAIN 1834
++edb9407a MACH_EDB9407A EDB9407A 1835
++dtb9608 MACH_DTB9608 DTB9608 1836
++em104v1 MACH_EM104V1 EM104V1 1837
++demo MACH_DEMO DEMO 1838
++logi9260 MACH_LOGI9260 LOGI9260 1839
++mx31_exm32 MACH_MX31_EXM32 MX31_EXM32 1840
+ usb_a9g20 MACH_USB_A9G20 USB_A9G20 1841
++picproje2008 MACH_PICPROJE2008 PICPROJE2008 1842
++cs_e9315 MACH_CS_E9315 CS_E9315 1843
++qil_a9g20 MACH_QIL_A9G20 QIL_A9G20 1844
++sha_pon020 MACH_SHA_PON020 SHA_PON020 1845
++nad MACH_NAD NAD 1846
++sbc35_a9260 MACH_SBC35_A9260 SBC35_A9260 1847
++sbc35_a9g20 MACH_SBC35_A9G20 SBC35_A9G20 1848
++davinci_beginning MACH_DAVINCI_BEGINNING DAVINCI_BEGINNING 1849
++uwc MACH_UWC UWC 1850
+ mxlads MACH_MXLADS MXLADS 1851
++htcnike MACH_HTCNIKE HTCNIKE 1852
++deister_pxa270 MACH_DEISTER_PXA270 DEISTER_PXA270 1853
++cme9210js MACH_CME9210JS CME9210JS 1854
++cc9p9360 MACH_CC9P9360 CC9P9360 1855
++mocha MACH_MOCHA MOCHA 1856
++wapd170ag MACH_WAPD170AG WAPD170AG 1857
+ linkstation_mini MACH_LINKSTATION_MINI LINKSTATION_MINI 1858
+ afeb9260 MACH_AFEB9260 AFEB9260 1859
++w90x900 MACH_W90X900 W90X900 1860
++w90x700 MACH_W90X700 W90X700 1861
++kt300ip MACH_KT300IP KT300IP 1862
++kt300ip_g20 MACH_KT300IP_G20 KT300IP_G20 1863
++srcm MACH_SRCM SRCM 1864
++wlnx_9260 MACH_WLNX_9260 WLNX_9260 1865
++openmoko_gta03 MACH_OPENMOKO_GTA03 OPENMOKO_GTA03 1866
++osprey2 MACH_OSPREY2 OSPREY2 1867
++kbio9260 MACH_KBIO9260 KBIO9260 1868
++ginza MACH_GINZA GINZA 1869
++a636n MACH_A636N A636N 1870
+ imx27ipcam MACH_IMX27IPCAM IMX27IPCAM 1871
++nemoc MACH_NEMOC NEMOC 1872
++geneva MACH_GENEVA GENEVA 1873
++htcpharos MACH_HTCPHAROS HTCPHAROS 1874
++neonc MACH_NEONC NEONC 1875
++nas7100 MACH_NAS7100 NAS7100 1876
++teuphone MACH_TEUPHONE TEUPHONE 1877
++annax_eth2 MACH_ANNAX_ETH2 ANNAX_ETH2 1878
++csb733 MACH_CSB733 CSB733 1879
++bk3 MACH_BK3 BK3 1880
++omap_em32 MACH_OMAP_EM32 OMAP_EM32 1881
++et9261cp MACH_ET9261CP ET9261CP 1882
++jasperc MACH_JASPERC JASPERC 1883
++issi_arm9 MACH_ISSI_ARM9 ISSI_ARM9 1884
++ued MACH_UED UED 1885
++esiblade MACH_ESIBLADE ESIBLADE 1886
++eye02 MACH_EYE02 EYE02 1887
++imx27kbd MACH_IMX27KBD IMX27KBD 1888
++kixvp435 MACH_KIXVP435 KIXVP435 1890
++kixnp435 MACH_KIXNP435 KIXNP435 1891
++africa MACH_AFRICA AFRICA 1892
++nh233 MACH_NH233 NH233 1893
+ rd88f6183ap_ge MACH_RD88F6183AP_GE RD88F6183AP_GE 1894
++bcm4760 MACH_BCM4760 BCM4760 1895
++eddy_v2 MACH_EDDY_V2 EDDY_V2 1896
+ realview_pba8 MACH_REALVIEW_PBA8 REALVIEW_PBA8 1897
++hid_a7 MACH_HID_A7 HID_A7 1898
++hero MACH_HERO HERO 1899
++omap_poseidon MACH_OMAP_POSEIDON OMAP_POSEIDON 1900
+ realview_pbx MACH_REALVIEW_PBX REALVIEW_PBX 1901
+ micro9s MACH_MICRO9S MICRO9S 1902
++mako MACH_MAKO MAKO 1903
++xdaflame MACH_XDAFLAME XDAFLAME 1904
++phidget_sbc2 MACH_PHIDGET_SBC2 PHIDGET_SBC2 1905
++limestone MACH_LIMESTONE LIMESTONE 1906
++iprobe_c32 MACH_IPROBE_C32 IPROBE_C32 1907
+ rut100 MACH_RUT100 RUT100 1908
++asusp535 MACH_ASUSP535 ASUSP535 1909
++htcraphael MACH_HTCRAPHAEL HTCRAPHAEL 1910
++sygdg1 MACH_SYGDG1 SYGDG1 1911
++sygdg2 MACH_SYGDG2 SYGDG2 1912
++seoul MACH_SEOUL SEOUL 1913
++salerno MACH_SALERNO SALERNO 1914
++ucn_s3c64xx MACH_UCN_S3C64XX UCN_S3C64XX 1915
++msm7201a MACH_MSM7201A MSM7201A 1916
++lpr1 MACH_LPR1 LPR1 1917
++armadillo500fx MACH_ARMADILLO500FX ARMADILLO500FX 1918
+ g3evm MACH_G3EVM G3EVM 1919
++z3_dm355 MACH_Z3_DM355 Z3_DM355 1920
+ w90p910evb MACH_W90P910EVB W90P910EVB 1921
++w90p920evb MACH_W90P920EVB W90P920EVB 1922
+ w90p950evb MACH_W90P950EVB W90P950EVB 1923
+ w90n960evb MACH_W90N960EVB W90N960EVB 1924
++camhd MACH_CAMHD CAMHD 1925
++mvc100 MACH_MVC100 MVC100 1926
++electrum_200 MACH_ELECTRUM_200 ELECTRUM_200 1927
++htcjade MACH_HTCJADE HTCJADE 1928
++memphis MACH_MEMPHIS MEMPHIS 1929
++imx27sbc MACH_IMX27SBC IMX27SBC 1930
++lextar MACH_LEXTAR LEXTAR 1931
+ mv88f6281gtw_ge MACH_MV88F6281GTW_GE MV88F6281GTW_GE 1932
+ ncp MACH_NCP NCP 1933
++z32an_series MACH_Z32AN Z32AN 1934
++tmq_capd MACH_TMQ_CAPD TMQ_CAPD 1935
++omap3_wl MACH_OMAP3_WL OMAP3_WL 1936
++chumby MACH_CHUMBY CHUMBY 1937
++atsarm9 MACH_ATSARM9 ATSARM9 1938
+ davinci_dm365_evm MACH_DAVINCI_DM365_EVM DAVINCI_DM365_EVM 1939
++bahamas MACH_BAHAMAS BAHAMAS 1940
++das MACH_DAS DAS 1941
++minidas MACH_MINIDAS MINIDAS 1942
++vk1000 MACH_VK1000 VK1000 1943
+ centro MACH_CENTRO CENTRO 1944
++ctera_2bay MACH_CTERA_2BAY CTERA_2BAY 1945
++edgeconnect MACH_EDGECONNECT EDGECONNECT 1946
++nd27000 MACH_ND27000 ND27000 1947
++cobra MACH_GEMALTO_COBRA GEMALTO_COBRA 1948
++ingelabs_comet MACH_INGELABS_COMET INGELABS_COMET 1949
++pollux_wiz MACH_POLLUX_WIZ POLLUX_WIZ 1950
++blackstone MACH_BLACKSTONE BLACKSTONE 1951
++topaz MACH_TOPAZ TOPAZ 1952
++aixle MACH_AIXLE AIXLE 1953
++mw998 MACH_MW998 MW998 1954
+ nokia_rx51 MACH_NOKIA_RX51 NOKIA_RX51 1955
++vsc5605ev MACH_VSC5605EV VSC5605EV 1956
++nt98700dk MACH_NT98700DK NT98700DK 1957
++icontact MACH_ICONTACT ICONTACT 1958
++swarco_frcpu MACH_SWARCO_FRCPU SWARCO_FRCPU 1959
++swarco_scpu MACH_SWARCO_SCPU SWARCO_SCPU 1960
++bbox_p16 MACH_BBOX_P16 BBOX_P16 1961
++bstd MACH_BSTD BSTD 1962
++sbc2440ii MACH_SBC2440II SBC2440II 1963
++pcm034 MACH_PCM034 PCM034 1964
++neso MACH_NESO NESO 1965
++wlnx_9g20 MACH_WLNX_9G20 WLNX_9G20 1966
+ omap_zoom2 MACH_OMAP_ZOOM2 OMAP_ZOOM2 1967
++totemnova MACH_TOTEMNOVA TOTEMNOVA 1968
++c5000 MACH_C5000 C5000 1969
++unipo_at91sam9263 MACH_UNIPO_AT91SAM9263 UNIPO_AT91SAM9263 1970
++ethernut5 MACH_ETHERNUT5 ETHERNUT5 1971
++arm11 MACH_ARM11 ARM11 1972
+ cpuat9260 MACH_CPUAT9260 CPUAT9260 1973
++cpupxa255 MACH_CPUPXA255 CPUPXA255 1974
+ eukrea_cpuimx27 MACH_EUKREA_CPUIMX27 EUKREA_CPUIMX27 1975
++cheflux MACH_CHEFLUX CHEFLUX 1976
++eb_cpux9k2 MACH_EB_CPUX9K2 EB_CPUX9K2 1977
++opcotec MACH_OPCOTEC OPCOTEC 1978
++yt MACH_YT YT 1979
++motoq MACH_MOTOQ MOTOQ 1980
++bsb1 MACH_BSB1 BSB1 1981
+ acs5k MACH_ACS5K ACS5K 1982
++milan MACH_MILAN MILAN 1983
++quartzv2 MACH_QUARTZV2 QUARTZV2 1984
++rsvp MACH_RSVP RSVP 1985
++rmp200 MACH_RMP200 RMP200 1986
+ snapper_9260 MACH_SNAPPER_9260 SNAPPER_9260 1987
+ dsm320 MACH_DSM320 DSM320 1988
++adsgcm MACH_ADSGCM ADSGCM 1989
++ase2_400 MACH_ASE2_400 ASE2_400 1990
++pizza MACH_PIZZA PIZZA 1991
++spot_ngpl MACH_SPOT_NGPL SPOT_NGPL 1992
++armata MACH_ARMATA ARMATA 1993
+ exeda MACH_EXEDA EXEDA 1994
++mx31sf005 MACH_MX31SF005 MX31SF005 1995
++f5d8231_4_v2 MACH_F5D8231_4_V2 F5D8231_4_V2 1996
++q2440 MACH_Q2440 Q2440 1997
++qq2440 MACH_QQ2440 QQ2440 1998
+ mini2440 MACH_MINI2440 MINI2440 1999
+ colibri300 MACH_COLIBRI300 COLIBRI300 2000
++jades MACH_JADES JADES 2001
++spark MACH_SPARK SPARK 2002
++benzina MACH_BENZINA BENZINA 2003
++blaze MACH_BLAZE BLAZE 2004
+ linkstation_ls_hgl MACH_LINKSTATION_LS_HGL LINKSTATION_LS_HGL 2005
++htckovsky MACH_HTCKOVSKY HTCKOVSKY 2006
++sony_prs505 MACH_SONY_PRS505 SONY_PRS505 2007
++hanlin_v3 MACH_HANLIN_V3 HANLIN_V3 2008
++sapphira MACH_SAPPHIRA SAPPHIRA 2009
++dack_sda_01 MACH_DACK_SDA_01 DACK_SDA_01 2010
++armbox MACH_ARMBOX ARMBOX 2011
++harris_rvp MACH_HARRIS_RVP HARRIS_RVP 2012
++ribaldo MACH_RIBALDO RIBALDO 2013
++agora MACH_AGORA AGORA 2014
++omap3_mini MACH_OMAP3_MINI OMAP3_MINI 2015
++a9sam6432_b MACH_A9SAM6432_B A9SAM6432_B 2016
++usg2410 MACH_USG2410 USG2410 2017
++pc72052_i10_revb MACH_PC72052_I10_REVB PC72052_I10_REVB 2018
++mx35_exm32 MACH_MX35_EXM32 MX35_EXM32 2019
++topas910 MACH_TOPAS910 TOPAS910 2020
++hyena MACH_HYENA HYENA 2021
++pospax MACH_POSPAX POSPAX 2022
++hdl_gx MACH_HDL_GX HDL_GX 2023
++ctera_4bay MACH_CTERA_4BAY CTERA_4BAY 2024
++ctera_plug_c MACH_CTERA_PLUG_C CTERA_PLUG_C 2025
++crwea_plug_i MACH_CRWEA_PLUG_I CRWEA_PLUG_I 2026
++egauge2 MACH_EGAUGE2 EGAUGE2 2027
++didj MACH_DIDJ DIDJ 2028
++m_s3c2443 MACH_MEISTER MEISTER 2029
++htcblackstone MACH_HTCBLACKSTONE HTCBLACKSTONE 2030
+ cpuat9g20 MACH_CPUAT9G20 CPUAT9G20 2031
+ smdk6440 MACH_SMDK6440 SMDK6440 2032
++omap_35xx_mvp MACH_OMAP_35XX_MVP OMAP_35XX_MVP 2033
++ctera_plug_i MACH_CTERA_PLUG_I CTERA_PLUG_I 2034
++pvg610_100 MACH_PVG610 PVG610 2035
++hprw6815 MACH_HPRW6815 HPRW6815 2036
++omap3_oswald MACH_OMAP3_OSWALD OMAP3_OSWALD 2037
+ nas4220b MACH_NAS4220B NAS4220B 2038
++htcraphael_cdma MACH_HTCRAPHAEL_CDMA HTCRAPHAEL_CDMA 2039
++htcdiamond_cdma MACH_HTCDIAMOND_CDMA HTCDIAMOND_CDMA 2040
++scaler MACH_SCALER SCALER 2041
+ zylonite2 MACH_ZYLONITE2 ZYLONITE2 2042
+ aspenite MACH_ASPENITE ASPENITE 2043
++teton MACH_TETON TETON 2044
+ ttc_dkb MACH_TTC_DKB TTC_DKB 2045
++bishop2 MACH_BISHOP2 BISHOP2 2046
++ippv5 MACH_IPPV5 IPPV5 2047
++farm926 MACH_FARM926 FARM926 2048
++mmccpu MACH_MMCCPU MMCCPU 2049
++sgmsfl MACH_SGMSFL SGMSFL 2050
++tt8000 MACH_TT8000 TT8000 2051
++zrn4300lp MACH_ZRN4300LP ZRN4300LP 2052
++mptc MACH_MPTC MPTC 2053
++h6051 MACH_H6051 H6051 2054
++pvg610_101 MACH_PVG610_101 PVG610_101 2055
++stamp9261_pc_evb MACH_STAMP9261_PC_EVB STAMP9261_PC_EVB 2056
++pelco_odysseus MACH_PELCO_ODYSSEUS PELCO_ODYSSEUS 2057
++tny_a9260 MACH_TNY_A9260 TNY_A9260 2058
++tny_a9g20 MACH_TNY_A9G20 TNY_A9G20 2059
++aesop_mp2530f MACH_AESOP_MP2530F AESOP_MP2530F 2060
++dx900 MACH_DX900 DX900 2061
++cpodc2 MACH_CPODC2 CPODC2 2062
++tilt_8925 MACH_TILT_8925 TILT_8925 2063
++davinci_dm357_evm MACH_DAVINCI_DM357_EVM DAVINCI_DM357_EVM 2064
++swordfish MACH_SWORDFISH SWORDFISH 2065
++corvus MACH_CORVUS CORVUS 2066
++taurus MACH_TAURUS TAURUS 2067
++axm MACH_AXM AXM 2068
++axc MACH_AXC AXC 2069
++baby MACH_BABY BABY 2070
++mp200 MACH_MP200 MP200 2071
+ pcm043 MACH_PCM043 PCM043 2072
++hanlin_v3c MACH_HANLIN_V3C HANLIN_V3C 2073
++kbk9g20 MACH_KBK9G20 KBK9G20 2074
++adsturbog5 MACH_ADSTURBOG5 ADSTURBOG5 2075
++avenger_lite1 MACH_AVENGER_LITE1 AVENGER_LITE1 2076
++suc82x MACH_SUC SUC 2077
++at91sam7s256 MACH_AT91SAM7S256 AT91SAM7S256 2078
++mendoza MACH_MENDOZA MENDOZA 2079
++kira MACH_KIRA KIRA 2080
++mx1hbm MACH_MX1HBM MX1HBM 2081
++quatro43xx MACH_QUATRO43XX QUATRO43XX 2082
++quatro4230 MACH_QUATRO4230 QUATRO4230 2083
++nsb400 MACH_NSB400 NSB400 2084
++drp255 MACH_DRP255 DRP255 2085
++thoth MACH_THOTH THOTH 2086
++firestone MACH_FIRESTONE FIRESTONE 2087
++asusp750 MACH_ASUSP750 ASUSP750 2088
++ctera_dl MACH_CTERA_DL CTERA_DL 2089
++socr MACH_SOCR SOCR 2090
++htcoxygen MACH_HTCOXYGEN HTCOXYGEN 2091
++heroc MACH_HEROC HEROC 2092
++zeno6800 MACH_ZENO6800 ZENO6800 2093
++sc2mcs MACH_SC2MCS SC2MCS 2094
++gene100 MACH_GENE100 GENE100 2095
++as353x MACH_AS353X AS353X 2096
+ sheevaplug MACH_SHEEVAPLUG SHEEVAPLUG 2097
++at91sam9g20 MACH_AT91SAM9G20 AT91SAM9G20 2098
++mv88f6192gtw_fe MACH_MV88F6192GTW_FE MV88F6192GTW_FE 2099
++cc9200 MACH_CC9200 CC9200 2100
++sm9200 MACH_SM9200 SM9200 2101
++tp9200 MACH_TP9200 TP9200 2102
++snapperdv MACH_SNAPPERDV SNAPPERDV 2103
+ avengers_lite MACH_AVENGERS_LITE AVENGERS_LITE 2104
++avengers_lite1 MACH_AVENGERS_LITE1 AVENGERS_LITE1 2105
++omap3axon MACH_OMAP3AXON OMAP3AXON 2106
++ma8xx MACH_MA8XX MA8XX 2107
++mp201ek MACH_MP201EK MP201EK 2108
++davinci_tux MACH_DAVINCI_TUX DAVINCI_TUX 2109
++mpa1600 MACH_MPA1600 MPA1600 2110
++pelco_troy MACH_PELCO_TROY PELCO_TROY 2111
++nsb667 MACH_NSB667 NSB667 2112
++rovers5_4mpix MACH_ROVERS5_4MPIX ROVERS5_4MPIX 2113
++twocom MACH_TWOCOM TWOCOM 2114
++ubisys_p9_rcu3r2 MACH_UBISYS_P9_RCU3R2 UBISYS_P9_RCU3R2 2115
++hero_espresso MACH_HERO_ESPRESSO HERO_ESPRESSO 2116
++afeusb MACH_AFEUSB AFEUSB 2117
++t830 MACH_T830 T830 2118
++spd8020_cc MACH_SPD8020_CC SPD8020_CC 2119
++om_3d7k MACH_OM_3D7K OM_3D7K 2120
++picocom2 MACH_PICOCOM2 PICOCOM2 2121
++uwg4mx27 MACH_UWG4MX27 UWG4MX27 2122
++uwg4mx31 MACH_UWG4MX31 UWG4MX31 2123
++cherry MACH_CHERRY CHERRY 2124
+ mx51_babbage MACH_MX51_BABBAGE MX51_BABBAGE 2125
++s3c2440turkiye MACH_S3C2440TURKIYE S3C2440TURKIYE 2126
+ tx37 MACH_TX37 TX37 2127
++sbc2800_9g20 MACH_SBC2800_9G20 SBC2800_9G20 2128
++benzglb MACH_BENZGLB BENZGLB 2129
++benztd MACH_BENZTD BENZTD 2130
++cartesio_plus MACH_CARTESIO_PLUS CARTESIO_PLUS 2131
++solrad_g20 MACH_SOLRAD_G20 SOLRAD_G20 2132
++mx27wallace MACH_MX27WALLACE MX27WALLACE 2133
++fmzwebmodul MACH_FMZWEBMODUL FMZWEBMODUL 2134
+ rd78x00_masa MACH_RD78X00_MASA RD78X00_MASA 2135
++smallogger MACH_SMALLOGGER SMALLOGGER 2136
++ccw9p9215 MACH_CCW9P9215 CCW9P9215 2137
+ dm355_leopard MACH_DM355_LEOPARD DM355_LEOPARD 2138
+ ts219 MACH_TS219 TS219 2139
++tny_a9263 MACH_TNY_A9263 TNY_A9263 2140
++apollo MACH_APOLLO APOLLO 2141
++at91cap9stk MACH_AT91CAP9STK AT91CAP9STK 2142
++spc300 MACH_SPC300 SPC300 2143
++eko MACH_EKO EKO 2144
++ccw9m2443 MACH_CCW9M2443 CCW9M2443 2145
++ccw9m2443js MACH_CCW9M2443JS CCW9M2443JS 2146
++m2m_router_device MACH_M2M_ROUTER_DEVICE M2M_ROUTER_DEVICE 2147
++str9104nas MACH_STAR9104NAS STAR9104NAS 2148
+ pca100 MACH_PCA100 PCA100 2149
++z3_dm365_mod_01 MACH_Z3_DM365_MOD_01 Z3_DM365_MOD_01 2150
++hipox MACH_HIPOX HIPOX 2151
++omap3_piteds MACH_OMAP3_PITEDS OMAP3_PITEDS 2152
++bm150r MACH_BM150R BM150R 2153
++tbone MACH_TBONE TBONE 2154
++merlin MACH_MERLIN MERLIN 2155
++falcon MACH_FALCON FALCON 2156
+ davinci_da850_evm MACH_DAVINCI_DA850_EVM DAVINCI_DA850_EVM 2157
++s5p6440 MACH_S5P6440 S5P6440 2158
+ at91sam9g10ek MACH_AT91SAM9G10EK AT91SAM9G10EK 2159
+ omap_4430sdp MACH_OMAP_4430SDP OMAP_4430SDP 2160
++lpc313x MACH_LPC313X LPC313X 2161
+ magx_zn5 MACH_MAGX_ZN5 MAGX_ZN5 2162
++magx_em30 MACH_MAGX_EM30 MAGX_EM30 2163
++magx_ve66 MACH_MAGX_VE66 MAGX_VE66 2164
++meesc MACH_MEESC MEESC 2165
++otc570 MACH_OTC570 OTC570 2166
++bcu2412 MACH_BCU2412 BCU2412 2167
++beacon MACH_BEACON BEACON 2168
++actia_tgw MACH_ACTIA_TGW ACTIA_TGW 2169
++e4430 MACH_E4430 E4430 2170
++ql300 MACH_QL300 QL300 2171
+ btmavb101 MACH_BTMAVB101 BTMAVB101 2172
+ btmawb101 MACH_BTMAWB101 BTMAWB101 2173
++sq201 MACH_SQ201 SQ201 2174
++quatro45xx MACH_QUATRO45XX QUATRO45XX 2175
++openpad MACH_OPENPAD OPENPAD 2176
+ tx25 MACH_TX25 TX25 2177
+ omap3_torpedo MACH_OMAP3_TORPEDO OMAP3_TORPEDO 2178
++htcraphael_k MACH_HTCRAPHAEL_K HTCRAPHAEL_K 2179
++lal43 MACH_LAL43 LAL43 2181
++htcraphael_cdma500 MACH_HTCRAPHAEL_CDMA500 HTCRAPHAEL_CDMA500 2182
+ anw6410 MACH_ANW6410 ANW6410 2183
++htcprophet MACH_HTCPROPHET HTCPROPHET 2185
++cfa_10022 MACH_CFA_10022 CFA_10022 2186
+ imx27_visstrim_m10 MACH_IMX27_VISSTRIM_M10 IMX27_VISSTRIM_M10 2187
++px2imx27 MACH_PX2IMX27 PX2IMX27 2188
++stm3210e_eval MACH_STM3210E_EVAL STM3210E_EVAL 2189
++dvs10 MACH_DVS10 DVS10 2190
+ portuxg20 MACH_PORTUXG20 PORTUXG20 2191
++arm_spv MACH_ARM_SPV ARM_SPV 2192
+ smdkc110 MACH_SMDKC110 SMDKC110 2193
++cabespresso MACH_CABESPRESSO CABESPRESSO 2194
++hmc800 MACH_HMC800 HMC800 2195
++sholes MACH_SHOLES SHOLES 2196
++btmxc31 MACH_BTMXC31 BTMXC31 2197
++dt501 MACH_DT501 DT501 2198
++ktx MACH_KTX KTX 2199
+ omap3517evm MACH_OMAP3517EVM OMAP3517EVM 2200
+ netspace_v2 MACH_NETSPACE_V2 NETSPACE_V2 2201
+ netspace_max_v2 MACH_NETSPACE_MAX_V2 NETSPACE_MAX_V2 2202
+ d2net_v2 MACH_D2NET_V2 D2NET_V2 2203
+ net2big_v2 MACH_NET2BIG_V2 NET2BIG_V2 2204
++net4big_v2 MACH_NET4BIG_V2 NET4BIG_V2 2205
+ net5big_v2 MACH_NET5BIG_V2 NET5BIG_V2 2206
++endb2443 MACH_ENDB2443 ENDB2443 2207
+ inetspace_v2 MACH_INETSPACE_V2 INETSPACE_V2 2208
++tros MACH_TROS TROS 2209
++pelco_homer MACH_PELCO_HOMER PELCO_HOMER 2210
++ofsp8 MACH_OFSP8 OFSP8 2211
+ at91sam9g45ekes MACH_AT91SAM9G45EKES AT91SAM9G45EKES 2212
++guf_cupid MACH_GUF_CUPID GUF_CUPID 2213
++eab1r MACH_EAB1R EAB1R 2214
++desirec MACH_DESIREC DESIREC 2215
++cordoba MACH_CORDOBA CORDOBA 2216
++irvine MACH_IRVINE IRVINE 2217
++sff772 MACH_SFF772 SFF772 2218
++pelco_milano MACH_PELCO_MILANO PELCO_MILANO 2219
+ pc7302 MACH_PC7302 PC7302 2220
++bip6000 MACH_BIP6000 BIP6000 2221
++silvermoon MACH_SILVERMOON SILVERMOON 2222
++vc0830 MACH_VC0830 VC0830 2223
++dt430 MACH_DT430 DT430 2224
++ji42pf MACH_JI42PF JI42PF 2225
++gnet_ksm MACH_GNET_KSM GNET_KSM 2226
++gnet_sgm MACH_GNET_SGM GNET_SGM 2227
++gnet_sgr MACH_GNET_SGR GNET_SGR 2228
++omap3_icetekevm MACH_OMAP3_ICETEKEVM OMAP3_ICETEKEVM 2229
++pnp MACH_PNP PNP 2230
++ctera_2bay_k MACH_CTERA_2BAY_K CTERA_2BAY_K 2231
++ctera_2bay_u MACH_CTERA_2BAY_U CTERA_2BAY_U 2232
++sas_c MACH_SAS_C SAS_C 2233
++vma2315 MACH_VMA2315 VMA2315 2234
++vcs MACH_VCS VCS 2235
+ spear600 MACH_SPEAR600 SPEAR600 2236
+ spear300 MACH_SPEAR300 SPEAR300 2237
++spear1300 MACH_SPEAR1300 SPEAR1300 2238
+ lilly1131 MACH_LILLY1131 LILLY1131 2239
++arvoo_ax301 MACH_ARVOO_AX301 ARVOO_AX301 2240
++mapphone MACH_MAPPHONE MAPPHONE 2241
++legend MACH_LEGEND LEGEND 2242
++salsa MACH_SALSA SALSA 2243
++lounge MACH_LOUNGE LOUNGE 2244
++vision MACH_VISION VISION 2245
++vmb20 MACH_VMB20 VMB20 2246
++hy2410 MACH_HY2410 HY2410 2247
++hy9315 MACH_HY9315 HY9315 2248
++bullwinkle MACH_BULLWINKLE BULLWINKLE 2249
++arm_ultimator2 MACH_ARM_ULTIMATOR2 ARM_ULTIMATOR2 2250
++vs_v210 MACH_VS_V210 VS_V210 2252
++vs_v212 MACH_VS_V212 VS_V212 2253
+ hmt MACH_HMT HMT 2254
++km_kirkwood MACH_KM_KIRKWOOD KM_KIRKWOOD 2255
++vesper MACH_VESPER VESPER 2256
++str9 MACH_STR9 STR9 2257
++omap3_wl_ff MACH_OMAP3_WL_FF OMAP3_WL_FF 2258
++simcom MACH_SIMCOM SIMCOM 2259
++mcwebio MACH_MCWEBIO MCWEBIO 2260
++omap3_phrazer MACH_OMAP3_PHRAZER OMAP3_PHRAZER 2261
++darwin MACH_DARWIN DARWIN 2262
++oratiscomu MACH_ORATISCOMU ORATISCOMU 2263
++rtsbc20 MACH_RTSBC20 RTSBC20 2264
++sgh_i780 MACH_I780 I780 2265
++gemini324 MACH_GEMINI324 GEMINI324 2266
++oratislan MACH_ORATISLAN ORATISLAN 2267
++oratisalog MACH_ORATISALOG ORATISALOG 2268
++oratismadi MACH_ORATISMADI ORATISMADI 2269
++oratisot16 MACH_ORATISOT16 ORATISOT16 2270
++oratisdesk MACH_ORATISDESK ORATISDESK 2271
+ vexpress MACH_VEXPRESS VEXPRESS 2272
++sintexo MACH_SINTEXO SINTEXO 2273
++cm3389 MACH_CM3389 CM3389 2274
++omap3_cio MACH_OMAP3_CIO OMAP3_CIO 2275
++sgh_i900 MACH_SGH_I900 SGH_I900 2276
++bst100 MACH_BST100 BST100 2277
++passion MACH_PASSION PASSION 2278
++indesign_at91sam MACH_INDESIGN_AT91SAM INDESIGN_AT91SAM 2279
++c4_badger MACH_C4_BADGER C4_BADGER 2280
++c4_viper MACH_C4_VIPER C4_VIPER 2281
+ d2net MACH_D2NET D2NET 2282
+ bigdisk MACH_BIGDISK BIGDISK 2283
++notalvision MACH_NOTALVISION NOTALVISION 2284
++omap3_kboc MACH_OMAP3_KBOC OMAP3_KBOC 2285
++cyclone MACH_CYCLONE CYCLONE 2286
++ninja MACH_NINJA NINJA 2287
+ at91sam9g20ek_2mmc MACH_AT91SAM9G20EK_2MMC AT91SAM9G20EK_2MMC 2288
+ bcmring MACH_BCMRING BCMRING 2289
++resol_dl2 MACH_RESOL_DL2 RESOL_DL2 2290
++ifosw MACH_IFOSW IFOSW 2291
++htcrhodium MACH_HTCRHODIUM HTCRHODIUM 2292
++htctopaz MACH_HTCTOPAZ HTCTOPAZ 2293
++matrix504 MACH_MATRIX504 MATRIX504 2294
++mrfsa MACH_MRFSA MRFSA 2295
++sc_p270 MACH_SC_P270 SC_P270 2296
++atlas5_evb MACH_ATLAS5_EVB ATLAS5_EVB 2297
++pelco_lobox MACH_PELCO_LOBOX PELCO_LOBOX 2298
++dilax_pcu200 MACH_DILAX_PCU200 DILAX_PCU200 2299
++leonardo MACH_LEONARDO LEONARDO 2300
++zoran_approach7 MACH_ZORAN_APPROACH7 ZORAN_APPROACH7 2301
+ dp6xx MACH_DP6XX DP6XX 2302
++bcm2153_vesper MACH_BCM2153_VESPER BCM2153_VESPER 2303
+ mahimahi MACH_MAHIMAHI MAHIMAHI 2304
++clickc MACH_CLICKC CLICKC 2305
++zb_gateway MACH_ZB_GATEWAY ZB_GATEWAY 2306
++tazcard MACH_TAZCARD TAZCARD 2307
++tazdev MACH_TAZDEV TAZDEV 2308
++annax_cb_arm MACH_ANNAX_CB_ARM ANNAX_CB_ARM 2309
++annax_dm3 MACH_ANNAX_DM3 ANNAX_DM3 2310
++cerebric MACH_CEREBRIC CEREBRIC 2311
++orca MACH_ORCA ORCA 2312
++pc9260 MACH_PC9260 PC9260 2313
++ems285a MACH_EMS285A EMS285A 2314
++gec2410 MACH_GEC2410 GEC2410 2315
++gec2440 MACH_GEC2440 GEC2440 2316
++mw903 MACH_ARCH_MW903 ARCH_MW903 2317
++mw2440 MACH_MW2440 MW2440 2318
++ecac2378 MACH_ECAC2378 ECAC2378 2319
++tazkiosk MACH_TAZKIOSK TAZKIOSK 2320
++whiterabbit_mch MACH_WHITERABBIT_MCH WHITERABBIT_MCH 2321
++sbox9263 MACH_SBOX9263 SBOX9263 2322
+ smdk6442 MACH_SMDK6442 SMDK6442 2324
+ openrd_base MACH_OPENRD_BASE OPENRD_BASE 2325
++incredible MACH_INCREDIBLE INCREDIBLE 2326
++incrediblec MACH_INCREDIBLEC INCREDIBLEC 2327
++heroct MACH_HEROCT HEROCT 2328
++mmnet1000 MACH_MMNET1000 MMNET1000 2329
+ devkit8000 MACH_DEVKIT8000 DEVKIT8000 2330
++devkit9000 MACH_DEVKIT9000 DEVKIT9000 2331
++mx31txtr MACH_MX31TXTR MX31TXTR 2332
++u380 MACH_U380 U380 2333
++oamp3_hualu MACH_HUALU_BOARD HUALU_BOARD 2334
++npcmx50 MACH_NPCMX50 NPCMX50 2335
+ mx51_efikamx MACH_MX51_EFIKAMX MX51_EFIKAMX 2336
++mx51_lange52 MACH_MX51_LANGE52 MX51_LANGE52 2337
++riom MACH_RIOM RIOM 2338
++comcas MACH_COMCAS COMCAS 2339
++wsi_mx27 MACH_WSI_MX27 WSI_MX27 2340
+ cm_t35 MACH_CM_T35 CM_T35 2341
+ net2big MACH_NET2BIG NET2BIG 2342
++motorola_a1600 MACH_MOTOROLA_A1600 MOTOROLA_A1600 2343
+ igep0020 MACH_IGEP0020 IGEP0020 2344
++igep0010 MACH_IGEP0010 IGEP0010 2345
++mv6281gtwge2 MACH_MV6281GTWGE2 MV6281GTWGE2 2346
++scat100 MACH_SCAT100 SCAT100 2347
++sanmina MACH_SANMINA SANMINA 2348
++momento MACH_MOMENTO MOMENTO 2349
++nuc9xx MACH_NUC9XX NUC9XX 2350
++nuc910evb MACH_NUC910EVB NUC910EVB 2351
++nuc920evb MACH_NUC920EVB NUC920EVB 2352
++nuc950evb MACH_NUC950EVB NUC950EVB 2353
++nuc945evb MACH_NUC945EVB NUC945EVB 2354
++nuc960evb MACH_NUC960EVB NUC960EVB 2355
+ nuc932evb MACH_NUC932EVB NUC932EVB 2356
++nuc900 MACH_NUC900 NUC900 2357
++sd1soc MACH_SD1SOC SD1SOC 2358
++ln2440bc MACH_LN2440BC LN2440BC 2359
++rsbc MACH_RSBC RSBC 2360
+ openrd_client MACH_OPENRD_CLIENT OPENRD_CLIENT 2361
++hpipaq11x MACH_HPIPAQ11X HPIPAQ11X 2362
++wayland MACH_WAYLAND WAYLAND 2363
++acnbsx102 MACH_ACNBSX102 ACNBSX102 2364
++hwat91 MACH_HWAT91 HWAT91 2365
++at91sam9263cs MACH_AT91SAM9263CS AT91SAM9263CS 2366
++csb732 MACH_CSB732 CSB732 2367
+ u8500 MACH_U8500 U8500 2368
++huqiu MACH_HUQIU HUQIU 2369
+ mx51_efikasb MACH_MX51_EFIKASB MX51_EFIKASB 2370
++pmt1g MACH_PMT1G PMT1G 2371
++htcelf MACH_HTCELF HTCELF 2372
++armadillo420 MACH_ARMADILLO420 ARMADILLO420 2373
++armadillo440 MACH_ARMADILLO440 ARMADILLO440 2374
++u_chip_dual_arm MACH_U_CHIP_DUAL_ARM U_CHIP_DUAL_ARM 2375
++csr_bdb3 MACH_CSR_BDB3 CSR_BDB3 2376
++dolby_cat1018 MACH_DOLBY_CAT1018 DOLBY_CAT1018 2377
++hy9307 MACH_HY9307 HY9307 2378
++aspire_easystore MACH_A_ES A_ES 2379
++davinci_irif MACH_DAVINCI_IRIF DAVINCI_IRIF 2380
++agama9263 MACH_AGAMA9263 AGAMA9263 2381
+ marvell_jasper MACH_MARVELL_JASPER MARVELL_JASPER 2382
+ flint MACH_FLINT FLINT 2383
+ tavorevb3 MACH_TAVOREVB3 TAVOREVB3 2384
++sch_m490 MACH_SCH_M490 SCH_M490 2386
++rbl01 MACH_RBL01 RBL01 2387
++omnifi MACH_OMNIFI OMNIFI 2388
++otavalo MACH_OTAVALO OTAVALO 2389
++htc_excalibur_s620 MACH_HTC_EXCALIBUR_S620 HTC_EXCALIBUR_S620 2391
++htc_opal MACH_HTC_OPAL HTC_OPAL 2392
+ touchbook MACH_TOUCHBOOK TOUCHBOOK 2393
++latte MACH_LATTE LATTE 2394
++xa200 MACH_XA200 XA200 2395
++nimrod MACH_NIMROD NIMROD 2396
++cc9p9215_3g MACH_CC9P9215_3G CC9P9215_3G 2397
++cc9p9215_3gjs MACH_CC9P9215_3GJS CC9P9215_3GJS 2398
++tk71 MACH_TK71 TK71 2399
++comham3525 MACH_COMHAM3525 COMHAM3525 2400
++mx31erebus MACH_MX31EREBUS MX31EREBUS 2401
++mcardmx27 MACH_MCARDMX27 MCARDMX27 2402
++paradise MACH_PARADISE PARADISE 2403
++tide MACH_TIDE TIDE 2404
++wzl2440 MACH_WZL2440 WZL2440 2405
++sdrdemo MACH_SDRDEMO SDRDEMO 2406
++ethercan2 MACH_ETHERCAN2 ETHERCAN2 2407
++ecmimg20 MACH_ECMIMG20 ECMIMG20 2408
++omap_dragon MACH_OMAP_DRAGON OMAP_DRAGON 2409
++halo MACH_HALO HALO 2410
++huangshan MACH_HUANGSHAN HUANGSHAN 2411
++vl_ma2sc MACH_VL_MA2SC VL_MA2SC 2412
+ raumfeld_rc MACH_RAUMFELD_RC RAUMFELD_RC 2413
+ raumfeld_connector MACH_RAUMFELD_CONNECTOR RAUMFELD_CONNECTOR 2414
+ raumfeld_speaker MACH_RAUMFELD_SPEAKER RAUMFELD_SPEAKER 2415
++multibus_master MACH_MULTIBUS_MASTER MULTIBUS_MASTER 2416
++multibus_pbk MACH_MULTIBUS_PBK MULTIBUS_PBK 2417
+ tnetv107x MACH_TNETV107X TNETV107X 2418
++snake MACH_SNAKE SNAKE 2419
++cwmx27 MACH_CWMX27 CWMX27 2420
++sch_m480 MACH_SCH_M480 SCH_M480 2421
++platypus MACH_PLATYPUS PLATYPUS 2422
++pss2 MACH_PSS2 PSS2 2423
++davinci_apm150 MACH_DAVINCI_APM150 DAVINCI_APM150 2424
++str9100 MACH_STR9100 STR9100 2425
++net5big MACH_NET5BIG NET5BIG 2426
++seabed9263 MACH_SEABED9263 SEABED9263 2427
+ mx51_m2id MACH_MX51_M2ID MX51_M2ID 2428
++octvocplus_eb MACH_OCTVOCPLUS_EB OCTVOCPLUS_EB 2429
++klk_firefox MACH_KLK_FIREFOX KLK_FIREFOX 2430
++klk_wirma_module MACH_KLK_WIRMA_MODULE KLK_WIRMA_MODULE 2431
++klk_wirma_mmi MACH_KLK_WIRMA_MMI KLK_WIRMA_MMI 2432
++supersonic MACH_SUPERSONIC SUPERSONIC 2433
++liberty MACH_LIBERTY LIBERTY 2434
++mh355 MACH_MH355 MH355 2435
++pc7802 MACH_PC7802 PC7802 2436
++gnet_sgc MACH_GNET_SGC GNET_SGC 2437
++einstein15 MACH_EINSTEIN15 EINSTEIN15 2438
++cmpd MACH_CMPD CMPD 2439
++davinci_hase1 MACH_DAVINCI_HASE1 DAVINCI_HASE1 2440
++lgeincitephone MACH_LGEINCITEPHONE LGEINCITEPHONE 2441
++ea313x MACH_EA313X EA313X 2442
++fwbd_39064 MACH_FWBD_39064 FWBD_39064 2443
++fwbd_390128 MACH_FWBD_390128 FWBD_390128 2444
++pelco_moe MACH_PELCO_MOE PELCO_MOE 2445
++minimix27 MACH_MINIMIX27 MINIMIX27 2446
++omap3_thunder MACH_OMAP3_THUNDER OMAP3_THUNDER 2447
++passionc MACH_PASSIONC PASSIONC 2448
++mx27amata MACH_MX27AMATA MX27AMATA 2449
++bgat1 MACH_BGAT1 BGAT1 2450
++buzz MACH_BUZZ BUZZ 2451
++mb9g20 MACH_MB9G20 MB9G20 2452
++yushan MACH_YUSHAN YUSHAN 2453
++lizard MACH_LIZARD LIZARD 2454
++omap3polycom MACH_OMAP3POLYCOM OMAP3POLYCOM 2455
+ smdkv210 MACH_SMDKV210 SMDKV210 2456
++bravo MACH_BRAVO BRAVO 2457
++siogentoo1 MACH_SIOGENTOO1 SIOGENTOO1 2458
++siogentoo2 MACH_SIOGENTOO2 SIOGENTOO2 2459
++sm3k MACH_SM3K SM3K 2460
++acer_tempo_f900 MACH_ACER_TEMPO_F900 ACER_TEMPO_F900 2461
++glittertind MACH_GLITTERTIND GLITTERTIND 2463
+ omap_zoom3 MACH_OMAP_ZOOM3 OMAP_ZOOM3 2464
+ omap_3630sdp MACH_OMAP_3630SDP OMAP_3630SDP 2465
++cybook2440 MACH_CYBOOK2440 CYBOOK2440 2466
++torino_s MACH_TORINO_S TORINO_S 2467
++havana MACH_HAVANA HAVANA 2468
++beaumont_11 MACH_BEAUMONT_11 BEAUMONT_11 2469
++vanguard MACH_VANGUARD VANGUARD 2470
++s5pc110_draco MACH_S5PC110_DRACO S5PC110_DRACO 2471
++cartesio_two MACH_CARTESIO_TWO CARTESIO_TWO 2472
++aster MACH_ASTER ASTER 2473
++voguesv210 MACH_VOGUESV210 VOGUESV210 2474
++acm500x MACH_ACM500X ACM500X 2475
++km9260 MACH_KM9260 KM9260 2476
++nideflexg1 MACH_NIDEFLEXG1 NIDEFLEXG1 2477
++ctera_plug_io MACH_CTERA_PLUG_IO CTERA_PLUG_IO 2478
+ smartq7 MACH_SMARTQ7 SMARTQ7 2479
++at91sam9g10ek2 MACH_AT91SAM9G10EK2 AT91SAM9G10EK2 2480
++asusp527 MACH_ASUSP527 ASUSP527 2481
++at91sam9g20mpm2 MACH_AT91SAM9G20MPM2 AT91SAM9G20MPM2 2482
++topasa900 MACH_TOPASA900 TOPASA900 2483
++electrum_100 MACH_ELECTRUM_100 ELECTRUM_100 2484
++mx51grb MACH_MX51GRB MX51GRB 2485
++xea300 MACH_XEA300 XEA300 2486
++htcstartrek MACH_HTCSTARTREK HTCSTARTREK 2487
++lima MACH_LIMA LIMA 2488
++csb740 MACH_CSB740 CSB740 2489
++usb_s8815 MACH_USB_S8815 USB_S8815 2490
+ watson_efm_plugin MACH_WATSON_EFM_PLUGIN WATSON_EFM_PLUGIN 2491
++milkyway MACH_MILKYWAY MILKYWAY 2492
+ g4evm MACH_G4EVM G4EVM 2493
++picomod6 MACH_PICOMOD6 PICOMOD6 2494
+ omapl138_hawkboard MACH_OMAPL138_HAWKBOARD OMAPL138_HAWKBOARD 2495
++ip6000 MACH_IP6000 IP6000 2496
++ip6010 MACH_IP6010 IP6010 2497
++utm400 MACH_UTM400 UTM400 2498
++omap3_zybex MACH_OMAP3_ZYBEX OMAP3_ZYBEX 2499
++wireless_space MACH_WIRELESS_SPACE WIRELESS_SPACE 2500
++sx560 MACH_SX560 SX560 2501
+ ts41x MACH_TS41X TS41X 2502
++elphel10373 MACH_ELPHEL10373 ELPHEL10373 2503
++rhobot MACH_RHOBOT RHOBOT 2504
++mx51_refresh MACH_MX51_REFRESH MX51_REFRESH 2505
++ls9260 MACH_LS9260 LS9260 2506
++shank MACH_SHANK SHANK 2507
++qsd8x50_st1 MACH_QSD8X50_ST1 QSD8X50_ST1 2508
++at91sam9m10ekes MACH_AT91SAM9M10EKES AT91SAM9M10EKES 2509
++hiram MACH_HIRAM HIRAM 2510
+ phy3250 MACH_PHY3250 PHY3250 2511
++ea3250 MACH_EA3250 EA3250 2512
++fdi3250 MACH_FDI3250 FDI3250 2513
++at91sam9263nit MACH_AT91SAM9263NIT AT91SAM9263NIT 2515
++ccmx51 MACH_CCMX51 CCMX51 2516
++ccmx51js MACH_CCMX51JS CCMX51JS 2517
++ccwmx51 MACH_CCWMX51 CCWMX51 2518
++ccwmx51js MACH_CCWMX51JS CCWMX51JS 2519
+ mini6410 MACH_MINI6410 MINI6410 2520
++tiny6410 MACH_TINY6410 TINY6410 2521
++nano6410 MACH_NANO6410 NANO6410 2522
++at572d940hfnldb MACH_AT572D940HFNLDB AT572D940HFNLDB 2523
++htcleo MACH_HTCLEO HTCLEO 2524
++avp13 MACH_AVP13 AVP13 2525
++xxsvideod MACH_XXSVIDEOD XXSVIDEOD 2526
++vpnext MACH_VPNEXT VPNEXT 2527
++swarco_itc3 MACH_SWARCO_ITC3 SWARCO_ITC3 2528
+ tx51 MACH_TX51 TX51 2529
++dolby_cat1021 MACH_DOLBY_CAT1021 DOLBY_CAT1021 2530
+ mx28evk MACH_MX28EVK MX28EVK 2531
++phoenix260 MACH_PHOENIX260 PHOENIX260 2532
++uvaca_stork MACH_UVACA_STORK UVACA_STORK 2533
+ smartq5 MACH_SMARTQ5 SMARTQ5 2534
++all3078 MACH_ALL3078 ALL3078 2535
++ctera_2bay_ds MACH_CTERA_2BAY_DS CTERA_2BAY_DS 2536
++siogentoo3 MACH_SIOGENTOO3 SIOGENTOO3 2537
++epb5000 MACH_EPB5000 EPB5000 2538
++hy9263 MACH_HY9263 HY9263 2539
++acer_tempo_m900 MACH_ACER_TEMPO_M900 ACER_TEMPO_M900 2540
++acer_tempo_dx650 MACH_ACER_TEMPO_DX900 ACER_TEMPO_DX900 2541
++acer_tempo_x960 MACH_ACER_TEMPO_X960 ACER_TEMPO_X960 2542
++acer_eten_v900 MACH_ACER_ETEN_V900 ACER_ETEN_V900 2543
++acer_eten_x900 MACH_ACER_ETEN_X900 ACER_ETEN_X900 2544
++bonnell MACH_BONNELL BONNELL 2545
++oht_mx27 MACH_OHT_MX27 OHT_MX27 2546
++htcquartz MACH_HTCQUARTZ HTCQUARTZ 2547
+ davinci_dm6467tevm MACH_DAVINCI_DM6467TEVM DAVINCI_DM6467TEVM 2548
++c3ax03 MACH_C3AX03 C3AX03 2549
+ mxt_td60 MACH_MXT_TD60 MXT_TD60 2550
++esyx MACH_ESYX ESYX 2551
++dove_db2 MACH_DOVE_DB2 DOVE_DB2 2552
++bulldog MACH_BULLDOG BULLDOG 2553
++derell_me2000 MACH_DERELL_ME2000 DERELL_ME2000 2554
++bcmring_base MACH_BCMRING_BASE BCMRING_BASE 2555
++bcmring_evm MACH_BCMRING_EVM BCMRING_EVM 2556
++bcmring_evm_jazz MACH_BCMRING_EVM_JAZZ BCMRING_EVM_JAZZ 2557
++bcmring_sp MACH_BCMRING_SP BCMRING_SP 2558
++bcmring_sv MACH_BCMRING_SV BCMRING_SV 2559
++bcmring_sv_jazz MACH_BCMRING_SV_JAZZ BCMRING_SV_JAZZ 2560
++bcmring_tablet MACH_BCMRING_TABLET BCMRING_TABLET 2561
++bcmring_vp MACH_BCMRING_VP BCMRING_VP 2562
++bcmring_evm_seikor MACH_BCMRING_EVM_SEIKOR BCMRING_EVM_SEIKOR 2563
++bcmring_sp_wqvga MACH_BCMRING_SP_WQVGA BCMRING_SP_WQVGA 2564
++bcmring_custom MACH_BCMRING_CUSTOM BCMRING_CUSTOM 2565
++acer_s200 MACH_ACER_S200 ACER_S200 2566
++bt270 MACH_BT270 BT270 2567
++iseo MACH_ISEO ISEO 2568
++cezanne MACH_CEZANNE CEZANNE 2569
++lucca MACH_LUCCA LUCCA 2570
++supersmart MACH_SUPERSMART SUPERSMART 2571
++arm11_board MACH_CS_MISANO CS_MISANO 2572
++magnolia2 MACH_MAGNOLIA2 MAGNOLIA2 2573
++emxx MACH_EMXX EMXX 2574
++outlaw MACH_OUTLAW OUTLAW 2575
+ riot_bei2 MACH_RIOT_BEI2 RIOT_BEI2 2576
++riot_gx2 MACH_RIOT_VOX RIOT_VOX 2577
+ riot_x37 MACH_RIOT_X37 RIOT_X37 2578
++mega25mx MACH_MEGA25MX MEGA25MX 2579
++benzina2 MACH_BENZINA2 BENZINA2 2580
++ignite MACH_IGNITE IGNITE 2581
++foggia MACH_FOGGIA FOGGIA 2582
++arezzo MACH_AREZZO AREZZO 2583
++leica_skywalker MACH_LEICA_SKYWALKER LEICA_SKYWALKER 2584
++jacinto2_jamr MACH_JACINTO2_JAMR JACINTO2_JAMR 2585
++gts_nova MACH_GTS_NOVA GTS_NOVA 2586
++p3600 MACH_P3600 P3600 2587
++dlt2 MACH_DLT2 DLT2 2588
++df3120 MACH_DF3120 DF3120 2589
++ecucore_9g20 MACH_ECUCORE_9G20 ECUCORE_9G20 2590
++nautel_am35xx MACH_NAUTEL_LPC3240 NAUTEL_LPC3240 2591
++glacier MACH_GLACIER GLACIER 2592
++phrazer_bulldog MACH_PHRAZER_BULLDOG PHRAZER_BULLDOG 2593
++omap3_bulldog MACH_OMAP3_BULLDOG OMAP3_BULLDOG 2594
+ pca101 MACH_PCA101 PCA101 2595
++buzzc MACH_BUZZC BUZZC 2596
++sasie2 MACH_SASIE2 SASIE2 2597
++smartmeter_dl MACH_SMARTMETER_DL SMARTMETER_DL 2599
++wzl6410 MACH_WZL6410 WZL6410 2600
++wzl6410m MACH_WZL6410M WZL6410M 2601
++wzl6410f MACH_WZL6410F WZL6410F 2602
++wzl6410i MACH_WZL6410I WZL6410I 2603
++spacecom1 MACH_SPACECOM1 SPACECOM1 2604
++pingu920 MACH_PINGU920 PINGU920 2605
++bravoc MACH_BRAVOC BRAVOC 2606
++vdssw MACH_VDSSW VDSSW 2608
++romulus MACH_ROMULUS ROMULUS 2609
++omap_magic MACH_OMAP_MAGIC OMAP_MAGIC 2610
++eltd100 MACH_ELTD100 ELTD100 2611
+ capc7117 MACH_CAPC7117 CAPC7117 2612
++swan MACH_SWAN SWAN 2613
++veu MACH_VEU VEU 2614
++rm2 MACH_RM2 RM2 2615
++tt2100 MACH_TT2100 TT2100 2616
++venice MACH_VENICE VENICE 2617
++pc7323 MACH_PC7323 PC7323 2618
++masp MACH_MASP MASP 2619
++fujitsu_tvstbsoc0 MACH_FUJITSU_TVSTBSOC FUJITSU_TVSTBSOC 2620
++fujitsu_tvstbsoc1 MACH_FUJITSU_TVSTBSOC1 FUJITSU_TVSTBSOC1 2621
++lexikon MACH_LEXIKON LEXIKON 2622
++mini2440v2 MACH_MINI2440V2 MINI2440V2 2623
+ icontrol MACH_ICONTROL ICONTROL 2624
+ gplugd MACH_GPLUGD GPLUGD 2625
++qsd8x50a_st1_1 MACH_QSD8X50A_ST1_1 QSD8X50A_ST1_1 2626
+ qsd8x50a_st1_5 MACH_QSD8X50A_ST1_5 QSD8X50A_ST1_5 2627
++bee MACH_BEE BEE 2628
+ mx23evk MACH_MX23EVK MX23EVK 2629
+ ap4evb MACH_AP4EVB AP4EVB 2630
++stockholm MACH_STOCKHOLM STOCKHOLM 2631
++lpc_h3131 MACH_LPC_H3131 LPC_H3131 2632
++stingray MACH_STINGRAY STINGRAY 2633
++kraken MACH_KRAKEN KRAKEN 2634
++gw2388 MACH_GW2388 GW2388 2635
++jadecpu MACH_JADECPU JADECPU 2636
++carlisle MACH_CARLISLE CARLISLE 2637
++lux_sf9 MACH_LUX_SF9 LUX_SF9 2638
++nemid_tb MACH_NEMID_TB NEMID_TB 2639
++terrier MACH_TERRIER TERRIER 2640
++turbot MACH_TURBOT TURBOT 2641
++sanddab MACH_SANDDAB SANDDAB 2642
++mx35_cicada MACH_MX35_CICADA MX35_CICADA 2643
++ghi2703d MACH_GHI2703D GHI2703D 2644
++lux_sfx9 MACH_LUX_SFX9 LUX_SFX9 2645
++lux_sf9g MACH_LUX_SF9G LUX_SF9G 2646
++lux_edk9 MACH_LUX_EDK9 LUX_EDK9 2647
++hw90240 MACH_HW90240 HW90240 2648
++dm365_leopard MACH_DM365_LEOPARD DM365_LEOPARD 2649
+ mityomapl138 MACH_MITYOMAPL138 MITYOMAPL138 2650
++scat110 MACH_SCAT110 SCAT110 2651
++acer_a1 MACH_ACER_A1 ACER_A1 2652
++cmcontrol MACH_CMCONTROL CMCONTROL 2653
++pelco_lamar MACH_PELCO_LAMAR PELCO_LAMAR 2654
++rfp43 MACH_RFP43 RFP43 2655
++sk86r0301 MACH_SK86R0301 SK86R0301 2656
++ctpxa MACH_CTPXA CTPXA 2657
++epb_arm9_a MACH_EPB_ARM9_A EPB_ARM9_A 2658
+ guruplug MACH_GURUPLUG GURUPLUG 2659
+ spear310 MACH_SPEAR310 SPEAR310 2660
+ spear320 MACH_SPEAR320 SPEAR320 2661
++robotx MACH_ROBOTX ROBOTX 2662
++lsxhl MACH_LSXHL LSXHL 2663
++smartlite MACH_SMARTLITE SMARTLITE 2664
++cws2 MACH_CWS2 CWS2 2665
++m619 MACH_M619 M619 2666
++smartview MACH_SMARTVIEW SMARTVIEW 2667
++lsa_salsa MACH_LSA_SALSA LSA_SALSA 2668
++kizbox MACH_KIZBOX KIZBOX 2669
++htccharmer MACH_HTCCHARMER HTCCHARMER 2670
++guf_neso_lt MACH_GUF_NESO_LT GUF_NESO_LT 2671
++pm9g45 MACH_PM9G45 PM9G45 2672
++htcpanther MACH_HTCPANTHER HTCPANTHER 2673
++htcpanther_cdma MACH_HTCPANTHER_CDMA HTCPANTHER_CDMA 2674
++reb01 MACH_REB01 REB01 2675
+ aquila MACH_AQUILA AQUILA 2676
++spark_sls_hw2 MACH_SPARK_SLS_HW2 SPARK_SLS_HW2 2677
+ esata_sheevaplug MACH_ESATA_SHEEVAPLUG ESATA_SHEEVAPLUG 2678
+ msm7x30_surf MACH_MSM7X30_SURF MSM7X30_SURF 2679
++micro2440 MACH_MICRO2440 MICRO2440 2680
++am2440 MACH_AM2440 AM2440 2681
++tq2440 MACH_TQ2440 TQ2440 2682
+ ea2478devkit MACH_EA2478DEVKIT EA2478DEVKIT 2683
++ak880x MACH_AK880X AK880X 2684
++cobra3530 MACH_COBRA3530 COBRA3530 2685
++pmppb MACH_PMPPB PMPPB 2686
++u6715 MACH_U6715 U6715 2687
++axar1500_sender MACH_AXAR1500_SENDER AXAR1500_SENDER 2688
++g30_dvb MACH_G30_DVB G30_DVB 2689
++vc088x MACH_VC088X VC088X 2690
++mioa702 MACH_MIOA702 MIOA702 2691
++hpmin MACH_HPMIN HPMIN 2692
++ak880xak MACH_AK880XAK AK880XAK 2693
++arm926tomap850 MACH_ARM926TOMAP850 ARM926TOMAP850 2694
++lkevm MACH_LKEVM LKEVM 2695
++mw6410 MACH_MW6410 MW6410 2696
+ terastation_wxl MACH_TERASTATION_WXL TERASTATION_WXL 2697
++cpu8000e MACH_CPU8000E CPU8000E 2698
++tokyo MACH_TOKYO TOKYO 2700
++msm7201a_surf MACH_MSM7201A_SURF MSM7201A_SURF 2701
++msm7201a_ffa MACH_MSM7201A_FFA MSM7201A_FFA 2702
+ msm7x25_surf MACH_MSM7X25_SURF MSM7X25_SURF 2703
+ msm7x25_ffa MACH_MSM7X25_FFA MSM7X25_FFA 2704
+ msm7x27_surf MACH_MSM7X27_SURF MSM7X27_SURF 2705
+ msm7x27_ffa MACH_MSM7X27_FFA MSM7X27_FFA 2706
+ msm7x30_ffa MACH_MSM7X30_FFA MSM7X30_FFA 2707
+ qsd8x50_surf MACH_QSD8X50_SURF QSD8X50_SURF 2708
++qsd8x50_comet MACH_QSD8X50_COMET QSD8X50_COMET 2709
++qsd8x50_ffa MACH_QSD8X50_FFA QSD8X50_FFA 2710
++qsd8x50a_surf MACH_QSD8X50A_SURF QSD8X50A_SURF 2711
++qsd8x50a_ffa MACH_QSD8X50A_FFA QSD8X50A_FFA 2712
++adx_xgcp10 MACH_ADX_XGCP10 ADX_XGCP10 2713
++mcgwumts2a MACH_MCGWUMTS2A MCGWUMTS2A 2714
++mobikt MACH_MOBIKT MOBIKT 2715
+ mx53_evk MACH_MX53_EVK MX53_EVK 2716
+ igep0030 MACH_IGEP0030 IGEP0030 2717
++axell_h40_h50_ctrl MACH_AXELL_H40_H50_CTRL AXELL_H40_H50_CTRL 2718
++dtcommod MACH_DTCOMMOD DTCOMMOD 2719
++gould MACH_GOULD GOULD 2720
++siberia MACH_SIBERIA SIBERIA 2721
+ sbc3530 MACH_SBC3530 SBC3530 2722
++qarm MACH_QARM QARM 2723
++mips MACH_MIPS MIPS 2724
++mx27grb MACH_MX27GRB MX27GRB 2725
++sbc8100 MACH_SBC8100 SBC8100 2726
+ saarb MACH_SAARB SAARB 2727
++omap3mini MACH_OMAP3MINI OMAP3MINI 2728
++cnmbook7se MACH_CNMBOOK7SE CNMBOOK7SE 2729
++catan MACH_CATAN CATAN 2730
+ harmony MACH_HARMONY HARMONY 2731
++tonga MACH_TONGA TONGA 2732
++cybook_orizon MACH_CYBOOK_ORIZON CYBOOK_ORIZON 2733
++htcrhodiumcdma MACH_HTCRHODIUMCDMA HTCRHODIUMCDMA 2734
++epc_g45 MACH_EPC_G45 EPC_G45 2735
++epc_lpc3250 MACH_EPC_LPC3250 EPC_LPC3250 2736
++mxc91341evb MACH_MXC91341EVB MXC91341EVB 2737
++rtw1000 MACH_RTW1000 RTW1000 2738
++bobcat MACH_BOBCAT BOBCAT 2739
++trizeps6 MACH_TRIZEPS6 TRIZEPS6 2740
+ msm7x30_fluid MACH_MSM7X30_FLUID MSM7X30_FLUID 2741
++nedap9263 MACH_NEDAP9263 NEDAP9263 2742
++netgear_ms2110 MACH_NETGEAR_MS2110 NETGEAR_MS2110 2743
++bmx MACH_BMX BMX 2744
++netstream MACH_NETSTREAM NETSTREAM 2745
++vpnext_rcu MACH_VPNEXT_RCU VPNEXT_RCU 2746
++vpnext_mpu MACH_VPNEXT_MPU VPNEXT_MPU 2747
++bcmring_tablet_v1 MACH_BCMRING_TABLET_V1 BCMRING_TABLET_V1 2748
++sgarm10 MACH_SGARM10 SGARM10 2749
+ cm_t3517 MACH_CM_T3517 CM_T3517 2750
++dig297 MACH_OMAP3_CPS OMAP3_CPS 2751
++axar1500_receiver MACH_AXAR1500_RECEIVER AXAR1500_RECEIVER 2752
+ wbd222 MACH_WBD222 WBD222 2753
++mt65xx MACH_MT65XX MT65XX 2754
+ msm8x60_surf MACH_MSM8X60_SURF MSM8X60_SURF 2755
+ msm8x60_sim MACH_MSM8X60_SIM MSM8X60_SIM 2756
+ tcc8000_sdk MACH_TCC8000_SDK TCC8000_SDK 2758
+ nanos MACH_NANOS NANOS 2759
++stamp9g10 MACH_STAMP9G10 STAMP9G10 2760
+ stamp9g45 MACH_STAMP9G45 STAMP9G45 2761
++h6053 MACH_H6053 H6053 2762
++smint01 MACH_SMINT01 SMINT01 2763
++prtlvt2 MACH_PRTLVT2 PRTLVT2 2764
++ap420 MACH_AP420 AP420 2765
++davinci_dm365_fc MACH_DAVINCI_DM365_FC DAVINCI_DM365_FC 2767
++msm8x55_surf MACH_MSM8X55_SURF MSM8X55_SURF 2768
++msm8x55_ffa MACH_MSM8X55_FFA MSM8X55_FFA 2769
++esl_vamana MACH_ESL_VAMANA ESL_VAMANA 2770
++sbc35 MACH_SBC35 SBC35 2771
++mpx6446 MACH_MPX6446 MPX6446 2772
++oreo_controller MACH_OREO_CONTROLLER OREO_CONTROLLER 2773
++kopin_models MACH_KOPIN_MODELS KOPIN_MODELS 2774
++ttc_vision2 MACH_TTC_VISION2 TTC_VISION2 2775
+ cns3420vb MACH_CNS3420VB CNS3420VB 2776
++lpc_evo MACH_LPC2 LPC2 2777
++olympus MACH_OLYMPUS OLYMPUS 2778
++vortex MACH_VORTEX VORTEX 2779
++s5pc200 MACH_S5PC200 S5PC200 2780
++ecucore_9263 MACH_ECUCORE_9263 ECUCORE_9263 2781
++smdkc200 MACH_SMDKC200 SMDKC200 2782
++emsiso_sx27 MACH_EMSISO_SX27 EMSISO_SX27 2783
++apx_som9g45_ek MACH_APX_SOM9G45_EK APX_SOM9G45_EK 2784
++songshan MACH_SONGSHAN SONGSHAN 2785
++tianshan MACH_TIANSHAN TIANSHAN 2786
++vpx500 MACH_VPX500 VPX500 2787
++am3517sam MACH_AM3517SAM AM3517SAM 2788
++skat91_sim508 MACH_SKAT91_SIM508 SKAT91_SIM508 2789
++skat91_s3e MACH_SKAT91_S3E SKAT91_S3E 2790
+ omap4_panda MACH_OMAP4_PANDA OMAP4_PANDA 2791
++df7220 MACH_DF7220 DF7220 2792
++nemini MACH_NEMINI NEMINI 2793
++t8200 MACH_T8200 T8200 2794
++apf51 MACH_APF51 APF51 2795
++dr_rc_unit MACH_DR_RC_UNIT DR_RC_UNIT 2796
++bordeaux MACH_BORDEAUX BORDEAUX 2797
++catania_b MACH_CATANIA_B CATANIA_B 2798
++mx51_ocean MACH_MX51_OCEAN MX51_OCEAN 2799
+ ti8168evm MACH_TI8168EVM TI8168EVM 2800
++neocoreomap MACH_NEOCOREOMAP NEOCOREOMAP 2801
++withings_wbp MACH_WITHINGS_WBP WITHINGS_WBP 2802
++dbps MACH_DBPS DBPS 2803
++pcbfp0001 MACH_PCBFP0001 PCBFP0001 2805
++speedy MACH_SPEEDY SPEEDY 2806
++chrysaor MACH_CHRYSAOR CHRYSAOR 2807
++tango MACH_TANGO TANGO 2808
++synology_dsx11 MACH_SYNOLOGY_DSX11 SYNOLOGY_DSX11 2809
++hanlin_v3ext MACH_HANLIN_V3EXT HANLIN_V3EXT 2810
++hanlin_v5 MACH_HANLIN_V5 HANLIN_V5 2811
++hanlin_v3plus MACH_HANLIN_V3PLUS HANLIN_V3PLUS 2812
++iriver_story MACH_IRIVER_STORY IRIVER_STORY 2813
++irex_iliad MACH_IREX_ILIAD IREX_ILIAD 2814
++irex_dr1000 MACH_IREX_DR1000 IREX_DR1000 2815
+ teton_bga MACH_TETON_BGA TETON_BGA 2816
++snapper9g45 MACH_SNAPPER9G45 SNAPPER9G45 2817
++tam3517 MACH_TAM3517 TAM3517 2818
++pdc100 MACH_PDC100 PDC100 2819
+ eukrea_cpuimx25sd MACH_EUKREA_CPUIMX25SD EUKREA_CPUIMX25SD 2820
+ eukrea_cpuimx35sd MACH_EUKREA_CPUIMX35SD EUKREA_CPUIMX35SD 2821
+ eukrea_cpuimx51sd MACH_EUKREA_CPUIMX51SD EUKREA_CPUIMX51SD 2822
+ eukrea_cpuimx51 MACH_EUKREA_CPUIMX51 EUKREA_CPUIMX51 2823
++p565 MACH_P565 P565 2824
++acer_a4 MACH_ACER_A4 ACER_A4 2825
++davinci_dm368_bip MACH_DAVINCI_DM368_BIP DAVINCI_DM368_BIP 2826
++eshare MACH_ESHARE ESHARE 2827
++wlbargn MACH_WLBARGN WLBARGN 2829
++bm170 MACH_BM170 BM170 2830
++netspace_mini_v2 MACH_NETSPACE_MINI_V2 NETSPACE_MINI_V2 2831
++netspace_plug_v2 MACH_NETSPACE_PLUG_V2 NETSPACE_PLUG_V2 2832
++siemens_l1 MACH_SIEMENS_L1 SIEMENS_L1 2833
++elv_lcu1 MACH_ELV_LCU1 ELV_LCU1 2834
++mcu1 MACH_MCU1 MCU1 2835
++omap3_tao3530 MACH_OMAP3_TAO3530 OMAP3_TAO3530 2836
++omap3_pcutouch MACH_OMAP3_PCUTOUCH OMAP3_PCUTOUCH 2837
+ smdkc210 MACH_SMDKC210 SMDKC210 2838
+-pca102 MACH_PCA102 PCA102 2843
++omap3_braillo MACH_OMAP3_BRAILLO OMAP3_BRAILLO 2839
++spyplug MACH_SPYPLUG SPYPLUG 2840
++ginger MACH_GINGER GINGER 2841
++tny_t3530 MACH_TNY_T3530 TNY_T3530 2842
++pcaal1 MACH_PCAAL1 PCAAL1 2843
++spade MACH_SPADE SPADE 2844
++mxc25_topaz MACH_MXC25_TOPAZ MXC25_TOPAZ 2845
+ t5325 MACH_T5325 T5325 2846
++gw2361 MACH_GW2361 GW2361 2847
++elog MACH_ELOG ELOG 2848
+ income MACH_INCOME INCOME 2849
++bcm589x MACH_BCM589X BCM589X 2850
++etna MACH_ETNA ETNA 2851
++hawks MACH_HAWKS HAWKS 2852
++meson MACH_MESON MESON 2853
++xsbase255 MACH_XSBASE255 XSBASE255 2854
++pvm2030 MACH_PVM2030 PVM2030 2855
++mioa502 MACH_MIOA502 MIOA502 2856
+ vvbox_sdorig2 MACH_VVBOX_SDORIG2 VVBOX_SDORIG2 2857
+ vvbox_sdlite2 MACH_VVBOX_SDLITE2 VVBOX_SDLITE2 2858
+ vvbox_sdpro4 MACH_VVBOX_SDPRO4 VVBOX_SDPRO4 2859
++htc_spv_m700 MACH_HTC_SPV_M700 HTC_SPV_M700 2860
+ mx257sx MACH_MX257SX MX257SX 2861
+ goni MACH_GONI GONI 2862
++msm8x55_svlte_ffa MACH_MSM8X55_SVLTE_FFA MSM8X55_SVLTE_FFA 2863
++msm8x55_svlte_surf MACH_MSM8X55_SVLTE_SURF MSM8X55_SVLTE_SURF 2864
++quickstep MACH_QUICKSTEP QUICKSTEP 2865
++dmw96 MACH_DMW96 DMW96 2866
++hammerhead MACH_HAMMERHEAD HAMMERHEAD 2867
++trident MACH_TRIDENT TRIDENT 2868
++lightning MACH_LIGHTNING LIGHTNING 2869
++iconnect MACH_ICONNECT ICONNECT 2870
++autobot MACH_AUTOBOT AUTOBOT 2871
++coconut MACH_COCONUT COCONUT 2872
++durian MACH_DURIAN DURIAN 2873
++cayenne MACH_CAYENNE CAYENNE 2874
++fuji MACH_FUJI FUJI 2875
++synology_6282 MACH_SYNOLOGY_6282 SYNOLOGY_6282 2876
++em1sy MACH_EM1SY EM1SY 2877
++m502 MACH_M502 M502 2878
++matrix518 MACH_MATRIX518 MATRIX518 2879
++tiny_gurnard MACH_TINY_GURNARD TINY_GURNARD 2880
++spear1310 MACH_SPEAR1310 SPEAR1310 2881
+ bv07 MACH_BV07 BV07 2882
++mxt_td61 MACH_MXT_TD61 MXT_TD61 2883
+ openrd_ultimate MACH_OPENRD_ULTIMATE OPENRD_ULTIMATE 2884
+ devixp MACH_DEVIXP DEVIXP 2885
+ miccpt MACH_MICCPT MICCPT 2886
+ mic256 MACH_MIC256 MIC256 2887
++as1167 MACH_AS1167 AS1167 2888
++omap3_ibiza MACH_OMAP3_IBIZA OMAP3_IBIZA 2889
+ u5500 MACH_U5500 U5500 2890
++davinci_picto MACH_DAVINCI_PICTO DAVINCI_PICTO 2891
++mecha MACH_MECHA MECHA 2892
++bubba3 MACH_BUBBA3 BUBBA3 2893
++pupitre MACH_PUPITRE PUPITRE 2894
++tegra_vogue MACH_TEGRA_VOGUE TEGRA_VOGUE 2896
++tegra_e1165 MACH_TEGRA_E1165 TEGRA_E1165 2897
++simplenet MACH_SIMPLENET SIMPLENET 2898
++ec4350tbm MACH_EC4350TBM EC4350TBM 2899
++pec_tc MACH_PEC_TC PEC_TC 2900
++pec_hc2 MACH_PEC_HC2 PEC_HC2 2901
++esl_mobilis_a MACH_ESL_MOBILIS_A ESL_MOBILIS_A 2902
++esl_mobilis_b MACH_ESL_MOBILIS_B ESL_MOBILIS_B 2903
++esl_wave_a MACH_ESL_WAVE_A ESL_WAVE_A 2904
++esl_wave_b MACH_ESL_WAVE_B ESL_WAVE_B 2905
++unisense_mmm MACH_UNISENSE_MMM UNISENSE_MMM 2906
++blueshark MACH_BLUESHARK BLUESHARK 2907
++e10 MACH_E10 E10 2908
++app3k_robin MACH_APP3K_ROBIN APP3K_ROBIN 2909
++pov15hd MACH_POV15HD POV15HD 2910
++stella MACH_STELLA STELLA 2911
+ linkstation_lschl MACH_LINKSTATION_LSCHL LINKSTATION_LSCHL 2913
++netwalker MACH_NETWALKER NETWALKER 2914
++acsx106 MACH_ACSX106 ACSX106 2915
++atlas5_c1 MACH_ATLAS5_C1 ATLAS5_C1 2916
++nsb3ast MACH_NSB3AST NSB3AST 2917
++gnet_slc MACH_GNET_SLC GNET_SLC 2918
++af4000 MACH_AF4000 AF4000 2919
++ark9431 MACH_ARK9431 ARK9431 2920
++fs_s5pc100 MACH_FS_S5PC100 FS_S5PC100 2921
++omap3505nova8 MACH_OMAP3505NOVA8 OMAP3505NOVA8 2922
++omap3621_edp1 MACH_OMAP3621_EDP1 OMAP3621_EDP1 2923
++oratisaes MACH_ORATISAES ORATISAES 2924
+ smdkv310 MACH_SMDKV310 SMDKV310 2925
++siemens_l0 MACH_SIEMENS_L0 SIEMENS_L0 2926
++ventana MACH_VENTANA VENTANA 2927
+ wm8505_7in_netbook MACH_WM8505_7IN_NETBOOK WM8505_7IN_NETBOOK 2928
++ec4350sdb MACH_EC4350SDB EC4350SDB 2929
++mimas MACH_MIMAS MIMAS 2930
++titan MACH_TITAN TITAN 2931
+ craneboard MACH_CRANEBOARD CRANEBOARD 2932
++es2440 MACH_ES2440 ES2440 2933
++najay_a9263 MACH_NAJAY_A9263 NAJAY_A9263 2934
++htctornado MACH_HTCTORNADO HTCTORNADO 2935
++dimm_mx257 MACH_DIMM_MX257 DIMM_MX257 2936
++jigen301 MACH_JIGEN JIGEN 2937
+ smdk6450 MACH_SMDK6450 SMDK6450 2938
++meno_qng MACH_MENO_QNG MENO_QNG 2939
++ns2416 MACH_NS2416 NS2416 2940
++rpc353 MACH_RPC353 RPC353 2941
++tq6410 MACH_TQ6410 TQ6410 2942
++sky6410 MACH_SKY6410 SKY6410 2943
++dynasty MACH_DYNASTY DYNASTY 2944
++vivo MACH_VIVO VIVO 2945
++bury_bl7582 MACH_BURY_BL7582 BURY_BL7582 2946
++bury_bps5270 MACH_BURY_BPS5270 BURY_BPS5270 2947
++basi MACH_BASI BASI 2948
++tn200 MACH_TN200 TN200 2949
++c2mmi MACH_C2MMI C2MMI 2950
++meson_6236m MACH_MESON_6236M MESON_6236M 2951
++meson_8626m MACH_MESON_8626M MESON_8626M 2952
++tube MACH_TUBE TUBE 2953
++messina MACH_MESSINA MESSINA 2954
++mx50_arm2 MACH_MX50_ARM2 MX50_ARM2 2955
++cetus9263 MACH_CETUS9263 CETUS9263 2956
+ brownstone MACH_BROWNSTONE BROWNSTONE 2957
++vmx25 MACH_VMX25 VMX25 2958
++vmx51 MACH_VMX51 VMX51 2959
++abacus MACH_ABACUS ABACUS 2960
++cm4745 MACH_CM4745 CM4745 2961
++oratislink MACH_ORATISLINK ORATISLINK 2962
++davinci_dm365_dvr MACH_DAVINCI_DM365_DVR DAVINCI_DM365_DVR 2963
++netviz MACH_NETVIZ NETVIZ 2964
+ flexibity MACH_FLEXIBITY FLEXIBITY 2965
++wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966
++lpc24xx MACH_LPC24XX LPC24XX 2967
++spica MACH_SPICA SPICA 2968
++gpsdisplay MACH_GPSDISPLAY GPSDISPLAY 2969
++bipnet MACH_BIPNET BIPNET 2970
++overo_ctu_inertial MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL 2971
++davinci_dm355_mmm MACH_DAVINCI_DM355_MMM DAVINCI_DM355_MMM 2972
++pc9260_v2 MACH_PC9260_V2 PC9260_V2 2973
++ptx7545 MACH_PTX7545 PTX7545 2974
++tm_efdc MACH_TM_EFDC TM_EFDC 2975
++omap3_waldo1 MACH_OMAP3_WALDO1 OMAP3_WALDO1 2977
++flyer MACH_FLYER FLYER 2978
++tornado3240 MACH_TORNADO3240 TORNADO3240 2979
++soli_01 MACH_SOLI_01 SOLI_01 2980
++omapl138_europalc MACH_OMAPL138_EUROPALC OMAPL138_EUROPALC 2981
++helios_v1 MACH_HELIOS_V1 HELIOS_V1 2982
++netspace_lite_v2 MACH_NETSPACE_LITE_V2 NETSPACE_LITE_V2 2983
++ssc MACH_SSC SSC 2984
++premierwave_en MACH_PREMIERWAVE_EN PREMIERWAVE_EN 2985
++wasabi MACH_WASABI WASABI 2986
+ mx50_rdp MACH_MX50_RDP MX50_RDP 2988
+ universal_c210 MACH_UNIVERSAL_C210 UNIVERSAL_C210 2989
+ real6410 MACH_REAL6410 REAL6410 2990
++spx_sakura MACH_SPX_SAKURA SPX_SAKURA 2991
++ij3k_2440 MACH_IJ3K_2440 IJ3K_2440 2992
++omap3_bc10 MACH_OMAP3_BC10 OMAP3_BC10 2993
++thebe MACH_THEBE THEBE 2994
++rv082 MACH_RV082 RV082 2995
++armlguest MACH_ARMLGUEST ARMLGUEST 2996
++tjinc1000 MACH_TJINC1000 TJINC1000 2997
+ dockstar MACH_DOCKSTAR DOCKSTAR 2998
++ax8008 MACH_AX8008 AX8008 2999
++gnet_sgce MACH_GNET_SGCE GNET_SGCE 3000
++pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001
++ea20 MACH_EA20 EA20 3002
++awm2 MACH_AWM2 AWM2 3003
+ ti8148evm MACH_TI8148EVM TI8148EVM 3004
+ seaboard MACH_SEABOARD SEABOARD 3005
++linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006
++tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007
++rubys MACH_RUBYS RUBYS 3008
++aquarius MACH_AQUARIUS AQUARIUS 3009
+ mx53_ard MACH_MX53_ARD MX53_ARD 3010
+ mx53_smd MACH_MX53_SMD MX53_SMD 3011
++lswxl MACH_LSWXL LSWXL 3012
++dove_avng_v3 MACH_DOVE_AVNG_V3 DOVE_AVNG_V3 3013
++sdi_ess_9263 MACH_SDI_ESS_9263 SDI_ESS_9263 3014
++jocpu550 MACH_JOCPU550 JOCPU550 3015
+ msm8x60_rumi3 MACH_MSM8X60_RUMI3 MSM8X60_RUMI3 3016
+ msm8x60_ffa MACH_MSM8X60_FFA MSM8X60_FFA 3017
++yanomami MACH_YANOMAMI YANOMAMI 3018
++gta04 MACH_GTA04 GTA04 3019
+ cm_a510 MACH_CM_A510 CM_A510 3020
++omap3_rfs200 MACH_OMAP3_RFS200 OMAP3_RFS200 3021
++kx33xx MACH_KX33XX KX33XX 3022
++ptx7510 MACH_PTX7510 PTX7510 3023
++top9000 MACH_TOP9000 TOP9000 3024
++teenote MACH_TEENOTE TEENOTE 3025
++ts3 MACH_TS3 TS3 3026
++a0 MACH_A0 A0 3027
++fsm9xxx_surf MACH_FSM9XXX_SURF FSM9XXX_SURF 3028
++fsm9xxx_ffa MACH_FSM9XXX_FFA FSM9XXX_FFA 3029
++frrhwcdma60w MACH_FRRHWCDMA60W FRRHWCDMA60W 3030
++remus MACH_REMUS REMUS 3031
++at91cap7xdk MACH_AT91CAP7XDK AT91CAP7XDK 3032
++at91cap7stk MACH_AT91CAP7STK AT91CAP7STK 3033
++kt_sbc_sam9_1 MACH_KT_SBC_SAM9_1 KT_SBC_SAM9_1 3034
++armada_xp_db MACH_ARMADA_XP_DB ARMADA_XP_DB 3036
++spdm MACH_SPDM SPDM 3037
++gtib MACH_GTIB GTIB 3038
++dgm3240 MACH_DGM3240 DGM3240 3039
++htcmega MACH_HTCMEGA HTCMEGA 3041
++tricorder MACH_TRICORDER TRICORDER 3042
+ tx28 MACH_TX28 TX28 3043
++bstbrd MACH_BSTBRD BSTBRD 3044
++pwb3090 MACH_PWB3090 PWB3090 3045
++idea6410 MACH_IDEA6410 IDEA6410 3046
++qbc9263 MACH_QBC9263 QBC9263 3047
++borabora MACH_BORABORA BORABORA 3048
++valdez MACH_VALDEZ VALDEZ 3049
++ls9g20 MACH_LS9G20 LS9G20 3050
++mios_v1 MACH_MIOS_V1 MIOS_V1 3051
++s5pc110_crespo MACH_S5PC110_CRESPO S5PC110_CRESPO 3052
++controltek9g20 MACH_CONTROLTEK9G20 CONTROLTEK9G20 3053
++tin307 MACH_TIN307 TIN307 3054
++tin510 MACH_TIN510 TIN510 3055
++ep3505 MACH_EP3517 EP3517 3056
++bluecheese MACH_BLUECHEESE BLUECHEESE 3057
++tem3x30 MACH_TEM3X30 TEM3X30 3058
++harvest_desoto MACH_HARVEST_DESOTO HARVEST_DESOTO 3059
++msm8x60_qrdc MACH_MSM8X60_QRDC MSM8X60_QRDC 3060
++spear900 MACH_SPEAR900 SPEAR900 3061
+ pcontrol_g20 MACH_PCONTROL_G20 PCONTROL_G20 3062
++rdstor MACH_RDSTOR RDSTOR 3063
++usdloader MACH_USDLOADER USDLOADER 3064
++tsoploader MACH_TSOPLOADER TSOPLOADER 3065
++kronos MACH_KRONOS KRONOS 3066
++ffcore MACH_FFCORE FFCORE 3067
++mone MACH_MONE MONE 3068
++unit2s MACH_UNIT2S UNIT2S 3069
++acer_a5 MACH_ACER_A5 ACER_A5 3070
++etherpro_isp MACH_ETHERPRO_ISP ETHERPRO_ISP 3071
++stretchs7000 MACH_STRETCHS7000 STRETCHS7000 3072
++p87_smartsim MACH_P87_SMARTSIM P87_SMARTSIM 3073
++tulip MACH_TULIP TULIP 3074
++sunflower MACH_SUNFLOWER SUNFLOWER 3075
++rib MACH_RIB RIB 3076
++clod MACH_CLOD CLOD 3077
++rump MACH_RUMP RUMP 3078
++tenderloin MACH_TENDERLOIN TENDERLOIN 3079
++shortloin MACH_SHORTLOIN SHORTLOIN 3080
++antares MACH_ANTARES ANTARES 3082
++wb40n MACH_WB40N WB40N 3083
++herring MACH_HERRING HERRING 3084
++naxy400 MACH_NAXY400 NAXY400 3085
++naxy1200 MACH_NAXY1200 NAXY1200 3086
+ vpr200 MACH_VPR200 VPR200 3087
++bug20 MACH_BUG20 BUG20 3088
++goflexnet MACH_GOFLEXNET GOFLEXNET 3089
+ torbreck MACH_TORBRECK TORBRECK 3090
++saarb_mg1 MACH_SAARB_MG1 SAARB_MG1 3091
++callisto MACH_CALLISTO CALLISTO 3092
++multhsu MACH_MULTHSU MULTHSU 3093
++saluda MACH_SALUDA SALUDA 3094
++pemp_omap3_apollo MACH_PEMP_OMAP3_APOLLO PEMP_OMAP3_APOLLO 3095
++vc0718 MACH_VC0718 VC0718 3096
++mvblx MACH_MVBLX MVBLX 3097
++inhand_apeiron MACH_INHAND_APEIRON INHAND_APEIRON 3098
++inhand_fury MACH_INHAND_FURY INHAND_FURY 3099
++inhand_siren MACH_INHAND_SIREN INHAND_SIREN 3100
++hdnvp MACH_HDNVP HDNVP 3101
++softwinner MACH_SOFTWINNER SOFTWINNER 3102
+ prima2_evb MACH_PRIMA2_EVB PRIMA2_EVB 3103
++nas6210 MACH_NAS6210 NAS6210 3104
++unisdev MACH_UNISDEV UNISDEV 3105
++sbca11 MACH_SBCA11 SBCA11 3106
++saga MACH_SAGA SAGA 3107
++ns_k330 MACH_NS_K330 NS_K330 3108
++tanna MACH_TANNA TANNA 3109
++imate8502 MACH_IMATE8502 IMATE8502 3110
++aspen MACH_ASPEN ASPEN 3111
++daintree_cwac MACH_DAINTREE_CWAC DAINTREE_CWAC 3112
++zmx25 MACH_ZMX25 ZMX25 3113
++maple1 MACH_MAPLE1 MAPLE1 3114
++qsd8x72_surf MACH_QSD8X72_SURF QSD8X72_SURF 3115
++qsd8x72_ffa MACH_QSD8X72_FFA QSD8X72_FFA 3116
++abilene MACH_ABILENE ABILENE 3117
++eigen_ttr MACH_EIGEN_TTR EIGEN_TTR 3118
++iomega_ix2_200 MACH_IOMEGA_IX2_200 IOMEGA_IX2_200 3119
++coretec_vcx7400 MACH_CORETEC_VCX7400 CORETEC_VCX7400 3120
++santiago MACH_SANTIAGO SANTIAGO 3121
++mx257sol MACH_MX257SOL MX257SOL 3122
++strasbourg MACH_STRASBOURG STRASBOURG 3123
++msm8x60_fluid MACH_MSM8X60_FLUID MSM8X60_FLUID 3124
++smartqv5 MACH_SMARTQV5 SMARTQV5 3125
++smartqv3 MACH_SMARTQV3 SMARTQV3 3126
++smartqv7 MACH_SMARTQV7 SMARTQV7 3127
+ paz00 MACH_PAZ00 PAZ00 3128
+ acmenetusfoxg20 MACH_ACMENETUSFOXG20 ACMENETUSFOXG20 3129
++fwbd_0404 MACH_FWBD_0404 FWBD_0404 3131
++hdgu MACH_HDGU HDGU 3132
++pyramid MACH_PYRAMID PYRAMID 3133
++epiphan MACH_EPIPHAN EPIPHAN 3134
++omap_bender MACH_OMAP_BENDER OMAP_BENDER 3135
++gurnard MACH_GURNARD GURNARD 3136
++gtl_it5100 MACH_GTL_IT5100 GTL_IT5100 3137
++bcm2708 MACH_BCM2708 BCM2708 3138
++mx51_ggc MACH_MX51_GGC MX51_GGC 3139
++sharespace MACH_SHARESPACE SHARESPACE 3140
++haba_knx_explorer MACH_HABA_KNX_EXPLORER HABA_KNX_EXPLORER 3141
++simtec_kirkmod MACH_SIMTEC_KIRKMOD SIMTEC_KIRKMOD 3142
++crux MACH_CRUX CRUX 3143
++mx51_bravo MACH_MX51_BRAVO MX51_BRAVO 3144
++charon MACH_CHARON CHARON 3145
++picocom3 MACH_PICOCOM3 PICOCOM3 3146
++picocom4 MACH_PICOCOM4 PICOCOM4 3147
++serrano MACH_SERRANO SERRANO 3148
++doubleshot MACH_DOUBLESHOT DOUBLESHOT 3149
++evsy MACH_EVSY EVSY 3150
++huashan MACH_HUASHAN HUASHAN 3151
++lausanne MACH_LAUSANNE LAUSANNE 3152
++emerald MACH_EMERALD EMERALD 3153
++tqma35 MACH_TQMA35 TQMA35 3154
++marvel MACH_MARVEL MARVEL 3155
++manuae MACH_MANUAE MANUAE 3156
++chacha MACH_CHACHA CHACHA 3157
++lemon MACH_LEMON LEMON 3158
++csc MACH_CSC CSC 3159
++gira_knxip_router MACH_GIRA_KNXIP_ROUTER GIRA_KNXIP_ROUTER 3160
++t20 MACH_T20 T20 3161
++hdmini MACH_HDMINI HDMINI 3162
++sciphone_g2 MACH_SCIPHONE_G2 SCIPHONE_G2 3163
++express MACH_EXPRESS EXPRESS 3164
++express_kt MACH_EXPRESS_KT EXPRESS_KT 3165
++maximasp MACH_MAXIMASP MAXIMASP 3166
++nitrogen_imx51 MACH_NITROGEN_IMX51 NITROGEN_IMX51 3167
++nitrogen_imx53 MACH_NITROGEN_IMX53 NITROGEN_IMX53 3168
++sunfire MACH_SUNFIRE SUNFIRE 3169
++arowana MACH_AROWANA AROWANA 3170
++tegra_daytona MACH_TEGRA_DAYTONA TEGRA_DAYTONA 3171
++tegra_swordfish MACH_TEGRA_SWORDFISH TEGRA_SWORDFISH 3172
++edison MACH_EDISON EDISON 3173
++svp8500v1 MACH_SVP8500V1 SVP8500V1 3174
++svp8500v2 MACH_SVP8500V2 SVP8500V2 3175
++svp5500 MACH_SVP5500 SVP5500 3176
++b5500 MACH_B5500 B5500 3177
++s5500 MACH_S5500 S5500 3178
++icon MACH_ICON ICON 3179
++elephant MACH_ELEPHANT ELEPHANT 3180
++shooter MACH_SHOOTER SHOOTER 3182
++spade_lte MACH_SPADE_LTE SPADE_LTE 3183
++philhwani MACH_PHILHWANI PHILHWANI 3184
++gsncomm MACH_GSNCOMM GSNCOMM 3185
++strasbourg_a2 MACH_STRASBOURG_A2 STRASBOURG_A2 3186
++mmm MACH_MMM MMM 3187
++davinci_dm365_bv MACH_DAVINCI_DM365_BV DAVINCI_DM365_BV 3188
+ ag5evm MACH_AG5EVM AG5EVM 3189
++sc575plc MACH_SC575PLC SC575PLC 3190
++sc575hmi MACH_SC575IPC SC575IPC 3191
++omap3_tdm3730 MACH_OMAP3_TDM3730 OMAP3_TDM3730 3192
++top9000_eval MACH_TOP9000_EVAL TOP9000_EVAL 3194
++top9000_su MACH_TOP9000_SU TOP9000_SU 3195
++utm300 MACH_UTM300 UTM300 3196
+ tsunagi MACH_TSUNAGI TSUNAGI 3197
++ts75xx MACH_TS75XX TS75XX 3198
++ts47xx MACH_TS47XX TS47XX 3200
++da850_k5 MACH_DA850_K5 DA850_K5 3201
++ax502 MACH_AX502 AX502 3202
++igep0032 MACH_IGEP0032 IGEP0032 3203
++antero MACH_ANTERO ANTERO 3204
++synergy MACH_SYNERGY SYNERGY 3205
+ ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206
+ wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207
++punica MACH_PUNICA PUNICA 3208
+ trimslice MACH_TRIMSLICE TRIMSLICE 3209
++mx27_wmultra MACH_MX27_WMULTRA MX27_WMULTRA 3210
+ mackerel MACH_MACKEREL MACKEREL 3211
++fa9x27 MACH_FA9X27 FA9X27 3213
++ns2816tb MACH_NS2816TB NS2816TB 3214
++ns2816_ntpad MACH_NS2816_NTPAD NS2816_NTPAD 3215
++ns2816_ntnb MACH_NS2816_NTNB NS2816_NTNB 3216
+ kaen MACH_KAEN KAEN 3217
++nv1000 MACH_NV1000 NV1000 3218
++nuc950ts MACH_NUC950TS NUC950TS 3219
+ nokia_rm680 MACH_NOKIA_RM680 NOKIA_RM680 3220
++ast2200 MACH_AST2200 AST2200 3221
++lead MACH_LEAD LEAD 3222
++unino1 MACH_UNINO1 UNINO1 3223
++greeco MACH_GREECO GREECO 3224
++verdi MACH_VERDI VERDI 3225
+ dm6446_adbox MACH_DM6446_ADBOX DM6446_ADBOX 3226
+ quad_salsa MACH_QUAD_SALSA QUAD_SALSA 3227
+ abb_gma_1_1 MACH_ABB_GMA_1_1 ABB_GMA_1_1 3228
+@@ -603,7 +3251,9 @@ isc3 MACH_ISC3 ISC3 3291
+ rascal MACH_RASCAL RASCAL 3292
+ hrefv60 MACH_HREFV60 HREFV60 3293
+ tpt_2_0 MACH_TPT_2_0 TPT_2_0 3294
++pydtd MACH_PYRAMID_TD PYRAMID_TD 3295
+ splendor MACH_SPLENDOR SPLENDOR 3296
++guf_vincell MACH_GUF_PLANET GUF_PLANET 3297
+ msm8x60_qt MACH_MSM8X60_QT MSM8X60_QT 3298
+ htc_hd_mini MACH_HTC_HD_MINI HTC_HD_MINI 3299
+ athene MACH_ATHENE ATHENE 3300
+@@ -614,6 +3264,7 @@ rfl109145_ssrv MACH_RFL109145_SSRV RFL1
+ nmh MACH_NMH NMH 3305
+ wn802t MACH_WN802T WN802T 3306
+ dragonet MACH_DRAGONET DRAGONET 3307
++geneva_b4 MACH_GENEVA_B GENEVA_B 3308
+ at91sam9263desk16l MACH_AT91SAM9263DESK16L AT91SAM9263DESK16L 3309
+ bcmhana_sv MACH_BCMHANA_SV BCMHANA_SV 3310
+ bcmhana_tablet MACH_BCMHANA_TABLET BCMHANA_TABLET 3311
+@@ -621,11 +3272,13 @@ koi MACH_KOI KOI 3312
+ ts4800 MACH_TS4800 TS4800 3313
+ tqma9263 MACH_TQMA9263 TQMA9263 3314
+ holiday MACH_HOLIDAY HOLIDAY 3315
++dma_6410 MACH_DMA6410 DMA6410 3316
+ pcats_overlay MACH_PCATS_OVERLAY PCATS_OVERLAY 3317
+ hwgw6410 MACH_HWGW6410 HWGW6410 3318
+ shenzhou MACH_SHENZHOU SHENZHOU 3319
+ cwme9210 MACH_CWME9210 CWME9210 3320
+ cwme9210js MACH_CWME9210JS CWME9210JS 3321
++pgs_v1 MACH_PGS_SITARA PGS_SITARA 3322
+ colibri_tegra2 MACH_COLIBRI_TEGRA2 COLIBRI_TEGRA2 3323
+ w21 MACH_W21 W21 3324
+ polysat1 MACH_POLYSAT1 POLYSAT1 3325
+@@ -691,11 +3344,15 @@ viprinet MACH_VIPRINET VIPRINET 3385
+ bockw MACH_BOCKW BOCKW 3386
+ eva2000 MACH_EVA2000 EVA2000 3387
+ steelyard MACH_STEELYARD STEELYARD 3388
++ea2468devkit MACH_LPC2468OEM LPC2468OEM 3389
++sdh001 MACH_MACH_SDH001 MACH_SDH001 3390
++fe2478mblox MACH_LPC2478MICROBLOX LPC2478MICROBLOX 3391
+ nsslsboard MACH_NSSLSBOARD NSSLSBOARD 3392
+ geneva_b5 MACH_GENEVA_B5 GENEVA_B5 3393
+ spear1340 MACH_SPEAR1340 SPEAR1340 3394
+ rexmas MACH_REXMAS REXMAS 3395
+ msm8960_cdp MACH_MSM8960_CDP MSM8960_CDP 3396
++msm8960_mtp MACH_MSM8960_MDP MSM8960_MDP 3397
+ msm8960_fluid MACH_MSM8960_FLUID MSM8960_FLUID 3398
+ msm8960_apq MACH_MSM8960_APQ MSM8960_APQ 3399
+ helios_v2 MACH_HELIOS_V2 HELIOS_V2 3400
+@@ -727,6 +3384,7 @@ gt_i5700 MACH_GT_I5700 GT_I5700 3425
+ ctera_plug_c2 MACH_CTERA_PLUG_C2 CTERA_PLUG_C2 3426
+ marvelct MACH_MARVELCT MARVELCT 3427
+ ag11005 MACH_AG11005 AG11005 3428
++omap_tabletblaze MACH_OMAP_BLAZE OMAP_BLAZE 3429
+ vangogh MACH_VANGOGH VANGOGH 3430
+ matrix505 MACH_MATRIX505 MATRIX505 3431
+ oce_nigma MACH_OCE_NIGMA OCE_NIGMA 3432
+@@ -766,6 +3424,7 @@ h1600 MACH_H1600 H1600 3465
+ mini210 MACH_MINI210 MINI210 3466
+ mini8168 MACH_MINI8168 MINI8168 3467
+ pc7308 MACH_PC7308 PC7308 3468
++ge863_pro3_evk MACH_GE863 GE863 3469
+ kmm2m01 MACH_KMM2M01 KMM2M01 3470
+ mx51erebus MACH_MX51EREBUS MX51EREBUS 3471
+ wm8650refboard MACH_WM8650REFBOARD WM8650REFBOARD 3472
+@@ -802,6 +3461,7 @@ shooter_k MACH_SHOOTER_K SHOOTER_K 35
+ nspire MACH_NSPIRE NSPIRE 3503
+ mickxx MACH_MICKXX MICKXX 3504
+ lxmb MACH_LXMB LXMB 3505
++tmdxscbp6618x MACH_TMDXSCBP6616X TMDXSCBP6616X 3506
+ adam MACH_ADAM ADAM 3507
+ b1004 MACH_B1004 B1004 3508
+ oboea MACH_OBOEA OBOEA 3509
+@@ -879,6 +3539,7 @@ bct MACH_BCT BCT 3582
+ tuscan MACH_TUSCAN TUSCAN 3583
+ xbt_sam9g45 MACH_XBT_SAM9G45 XBT_SAM9G45 3584
+ enbw_cmc MACH_ENBW_CMC ENBW_CMC 3585
++msm8x60_dragon MACH_APQ8060_DRAGON APQ8060_DRAGON 3586
+ ch104mx257 MACH_CH104MX257 CH104MX257 3587
+ openpri MACH_OPENPRI OPENPRI 3588
+ am335xevm MACH_AM335XEVM AM335XEVM 3589
+@@ -900,6 +3561,7 @@ cinema MACH_CINEMA CINEMA 3604
+ cinema_tea MACH_CINEMA_TEA CINEMA_TEA 3605
+ cinema_coffee MACH_CINEMA_COFFEE CINEMA_COFFEE 3606
+ cinema_juice MACH_CINEMA_JUICE CINEMA_JUICE 3607
++linux_pad MACH_THEPAD THEPAD 3608
+ mx53_mirage2 MACH_MX53_MIRAGE2 MX53_MIRAGE2 3609
+ mx53_efikasb MACH_MX53_EFIKASB MX53_EFIKASB 3610
+ stm_b2000 MACH_STM_B2000 STM_B2000 3612
+@@ -965,6 +3627,7 @@ pia_am35x MACH_PIA_AM35X PIA_AM35X 36
+ cedar MACH_CEDAR CEDAR 3672
+ picasso_e MACH_PICASSO_E PICASSO_E 3673
+ samsung_e60 MACH_SAMSUNG_E60 SAMSUNG_E60 3674
++msm9615_cdp MACH_MDM9615 MDM9615 3675
+ sdvr_mini MACH_SDVR_MINI SDVR_MINI 3676
+ omap3_ij3k MACH_OMAP3_IJ3K OMAP3_IJ3K 3677
+ modasmc1 MACH_MODASMC1 MODASMC1 3678
+@@ -992,6 +3655,8 @@ fmc_uic MACH_FMC_UIC FMC_UIC 3699
+ fmc_dcm MACH_FMC_DCM FMC_DCM 3700
+ batwm MACH_BATWM BATWM 3701
+ atlas6cb MACH_ATLAS6CB ATLAS6CB 3702
++quattro_f MACH_QUATTROF QUATTROF 3703
++quattro_u MACH_QUATTROU QUATTROU 3704
+ blue MACH_BLUE BLUE 3705
+ colorado MACH_COLORADO COLORADO 3706
+ popc MACH_POPC POPC 3707
+@@ -999,15 +3664,19 @@ promwad_jade MACH_PROMWAD_JADE PROMWAD_
+ amp MACH_AMP AMP 3709
+ gnet_amp MACH_GNET_AMP GNET_AMP 3710
+ toques MACH_TOQUES TOQUES 3711
++apx4devkit MACH_APX4DEVKIT APX4DEVKIT 3712
+ dct_storm MACH_DCT_STORM DCT_STORM 3713
++dm8168z3 MACH_Z3 Z3 3714
+ owl MACH_OWL OWL 3715
+ cogent_csb1741 MACH_COGENT_CSB1741 COGENT_CSB1741 3716
++omap3_kiko MACH_OMAP3 OMAP3 3717
+ adillustra610 MACH_ADILLUSTRA610 ADILLUSTRA610 3718
+ ecafe_na04 MACH_ECAFE_NA04 ECAFE_NA04 3719
+ popct MACH_POPCT POPCT 3720
+ omap3_helena MACH_OMAP3_HELENA OMAP3_HELENA 3721
+ ach MACH_ACH ACH 3722
+ module_dtb MACH_MODULE_DTB MODULE_DTB 3723
++ratebox MACH_RACKBOX RACKBOX 3724
+ oslo_elisabeth MACH_OSLO_ELISABETH OSLO_ELISABETH 3725
+ tt01 MACH_TT01 TT01 3726
+ msm8930_cdp MACH_MSM8930_CDP MSM8930_CDP 3727
+@@ -1038,6 +3707,7 @@ ptip_murnau MACH_PTIP_MURNAU PTIP_MURNA
+ ptip_classic MACH_PTIP_CLASSIC PTIP_CLASSIC 3753
+ mx53grb MACH_MX53GRB MX53GRB 3754
+ gagarin MACH_GAGARIN GAGARIN 3755
++msm7627a_qrd1 MACH_MSM7X27A_QRD1 MSM7X27A_QRD1 3756
+ nas2big MACH_NAS2BIG NAS2BIG 3757
+ superfemto MACH_SUPERFEMTO SUPERFEMTO 3758
+ teufel MACH_TEUFEL TEUFEL 3759
+@@ -1087,6 +3757,7 @@ ubisys_g1 MACH_UBISYS_G1 UBISYS_G1 38
+ mx53_pf1 MACH_MX53_PF1 MX53_PF1 3803
+ asanti MACH_ASANTI ASANTI 3804
+ volta MACH_VOLTA VOLTA 3805
++potenza MACH_S5P6450 S5P6450 3806
+ knight MACH_KNIGHT KNIGHT 3807
+ beaglebone MACH_BEAGLEBONE BEAGLEBONE 3808
+ becker MACH_BECKER BECKER 3809
+@@ -1148,6 +3819,7 @@ primou MACH_PRIMOU PRIMOU 3864
+ primoc MACH_PRIMOC PRIMOC 3865
+ primoct MACH_PRIMOCT PRIMOCT 3866
+ a9500 MACH_A9500 A9500 3867
++pue_td MACH_PULSE_TD PULSE_TD 3868
+ pluto MACH_PLUTO PLUTO 3869
+ acfx100 MACH_ACFX100 ACFX100 3870
+ msm8625_rumi3 MACH_MSM8625_RUMI3 MSM8625_RUMI3 3871
+@@ -1161,6 +3833,8 @@ valente_wx MACH_VALENTE_WX VALENTE_WX
+ huangshans MACH_HUANGSHANS HUANGSHANS 3879
+ bosphorus1 MACH_BOSPHORUS1 BOSPHORUS1 3880
+ prima MACH_PRIMA PRIMA 3881
++meson3_skt MACH_M3_SKT M3_SKT 3882
++meson3_ref MACH_M3_REF M3_REF 3883
+ evita_ulk MACH_EVITA_ULK EVITA_ULK 3884
+ merisc600 MACH_MERISC600 MERISC600 3885
+ dolak MACH_DOLAK DOLAK 3886
+@@ -1169,3 +3843,271 @@ elite_ulk MACH_ELITE_ULK ELITE_ULK 38
+ pov2 MACH_POV2 POV2 3889
+ ipod_touch_2g MACH_IPOD_TOUCH_2G IPOD_TOUCH_2G 3890
+ da850_pqab MACH_DA850_PQAB DA850_PQAB 3891
++fermi MACH_FERMI FERMI 3892
++ccardwmx28 MACH_CCARDWMX28 CCARDWMX28 3893
++ccardmx28 MACH_CCARDMX28 CCARDMX28 3894
++fs20_fcm2050 MACH_FS20_FCM2050 FS20_FCM2050 3895
++kinetis MACH_KINETIS KINETIS 3896
++kai MACH_KAI KAI 3897
++bcthb2 MACH_BCTHB2 BCTHB2 3898
++inels3_cu MACH_INELS3_CU INELS3_CU 3899
++da850_juniper MACH_JUNIPER JUNIPER 3900
++da850_apollo MACH_DA850_APOLLO DA850_APOLLO 3901
++tracnas MACH_TRACNAS TRACNAS 3902
++mityarm335x MACH_MITYARM335X MITYARM335X 3903
++xcgz7x MACH_XCGZ7X XCGZ7X 3904
++cubox MACH_CUBOX CUBOX 3905
++terminator MACH_TERMINATOR TERMINATOR 3906
++eye03 MACH_EYE03 EYE03 3907
++kota3 MACH_KOTA3 KOTA3 3908
++mx53_nitrogen_k MACH_MX5 MX5 3909
++pscpe MACH_PSCPE PSCPE 3910
++akt1100 MACH_AKT1100 AKT1100 3911
++pcaaxl2 MACH_PCAAXL2 PCAAXL2 3912
++primodd_ct MACH_PRIMODD_CT PRIMODD_CT 3913
++nsbc MACH_NSBC NSBC 3914
++meson2_skt MACH_MESON2_SKT MESON2_SKT 3915
++meson2_ref MACH_MESON2_REF MESON2_REF 3916
++ccardwmx28js MACH_CCARDWMX28JS CCARDWMX28JS 3917
++ccardmx28js MACH_CCARDMX28JS CCARDMX28JS 3918
++indico MACH_INDICO INDICO 3919
++msm8960dt MACH_MSM8960DT MSM8960DT 3920
++primods MACH_PRIMODS PRIMODS 3921
++beluga_m1388 MACH_BELUGA_M1388 BELUGA_M1388 3922
++primotd MACH_PRIMOTD PRIMOTD 3923
++varan_master MACH_VARAN_MASTER VARAN_MASTER 3924
++primodd MACH_PRIMODD PRIMODD 3925
++jetduo MACH_JETDUO JETDUO 3926
++mx53_umobo MACH_MX53_UMOBO MX53_UMOBO 3927
++trats MACH_TRATS TRATS 3928
++starcraft MACH_STARCRAFT STARCRAFT 3929
++qseven_tegra2 MACH_QSEVEN_TEGRA2 QSEVEN_TEGRA2 3930
++lichee_sun4i_devbd MACH_LICHEE_SUN4I_DEVBD LICHEE_SUN4I_DEVBD 3931
++movenow MACH_MOVENOW MOVENOW 3932
++golf_u MACH_GOLF_U GOLF_U 3933
++msm7627a_evb MACH_MSM7627A_EVB MSM7627A_EVB 3934
++rambo MACH_RAMBO RAMBO 3935
++golfu MACH_GOLFU GOLFU 3936
++mango310 MACH_MANGO310 MANGO310 3937
++dns343 MACH_DNS343 DNS343 3938
++var_som_om44 MACH_VAR_SOM_OM44 VAR_SOM_OM44 3939
++naon MACH_NAON NAON 3940
++vp4000 MACH_VP4000 VP4000 3941
++impcard MACH_IMPCARD IMPCARD 3942
++smoovcam MACH_SMOOVCAM SMOOVCAM 3943
++cobham3725 MACH_COBHAM3725 COBHAM3725 3944
++cobham3730 MACH_COBHAM3730 COBHAM3730 3945
++cobham3703 MACH_COBHAM3703 COBHAM3703 3946
++quetzal MACH_QUETZAL QUETZAL 3947
++apq8064_cdp MACH_APQ8064_CDP APQ8064_CDP 3948
++apq8064_mtp MACH_APQ8064_MTP APQ8064_MTP 3949
++apq8064_fluid MACH_APQ8064_FLUID APQ8064_FLUID 3950
++apq8064_liquid MACH_APQ8064_LIQUID APQ8064_LIQUID 3951
++mango210 MACH_MANGO210 MANGO210 3952
++mango100 MACH_MANGO100 MANGO100 3953
++mango24 MACH_MANGO24 MANGO24 3954
++mango64 MACH_MANGO64 MANGO64 3955
++nsa320 MACH_NSA320 NSA320 3956
++elv_ccu2 MACH_ELV_CCU2 ELV_CCU2 3957
++triton_x00 MACH_TRITON_X00 TRITON_X00 3958
++triton_1500_2000 MACH_TRITON_1500_2000 TRITON_1500_2000 3959
++pogoplugv4 MACH_POGOPLUGV4 POGOPLUGV4 3960
++venus_cl MACH_VENUS_CL VENUS_CL 3961
++vulcano_g20 MACH_VULCANO_G20 VULCANO_G20 3962
++sgs_i9100 MACH_SGS_I9100 SGS_I9100 3963
++stsv2 MACH_STSV2 STSV2 3964
++csb1724 MACH_CSB1724 CSB1724 3965
++omapl138_lcdk MACH_OMAPL138_LCDK OMAPL138_LCDK 3966
++jel_dd MACH_JEWEL_DD JEWEL_DD 3967
++pvd_mx25 MACH_PVD_MX25 PVD_MX25 3968
++meson6_skt MACH_MESON6_SKT MESON6_SKT 3969
++meson6_ref MACH_MESON6_REF MESON6_REF 3970
++pxm MACH_PXM PXM 3971
++stuttgart MACH_S3 S3 3972
++pogoplugv3 MACH_POGOPLUGV3 POGOPLUGV3 3973
++mlp89626 MACH_MLP89626 MLP89626 3974
++iomegahmndce MACH_IOMEGAHMNDCE IOMEGAHMNDCE 3975
++pogoplugv3pci MACH_POGOPLUGV3PCI POGOPLUGV3PCI 3976
++bntv250 MACH_BNTV250 BNTV250 3977
++mx53_qseven MACH_MX53_QSEVEN MX53_QSEVEN 3978
++gtl_it1100 MACH_GTL_IT1100 GTL_IT1100 3979
++mx6q_sabresd MACH_MX6Q_SABRESD MX6Q_SABRESD 3980
++mt4 MACH_MT4 MT4 3981
++jumbo_d MACH_JUMBO_D JUMBO_D 3982
++jumbo_i MACH_JUMBO_I JUMBO_I 3983
++fs20_dmp MACH_FS20_DMP FS20_DMP 3984
++dns320 MACH_DNS320 DNS320 3985
++mx28bacos MACH_MX28BACOS MX28BACOS 3986
++tl80 MACH_TL80 TL80 3987
++polatis_nic_1001 MACH_POLATIS_NIC_1001 POLATIS_NIC_1001 3988
++tely MACH_TELY TELY 3989
++u8520 MACH_U8520 U8520 3990
++manta MACH_MANTA MANTA 3991
++spear1340_lcad MACH_SPEAR_EM_S900 SPEAR_EM_S900 3992
++mpq8064_cdp MACH_MPQ8064_CDP MPQ8064_CDP 3993
++mpq8064_hrd MACH_MPQ8064_STB MPQ8064_STB 3994
++mpq8064_dtv MACH_MPQ8064_DTV MPQ8064_DTV 3995
++dm368som MACH_DM368SOM DM368SOM 3996
++gprisb2 MACH_GPRISB2 GPRISB2 3997
++chammid MACH_CHAMMID CHAMMID 3998
++seoul2 MACH_SEOUL2 SEOUL2 3999
++omap4_nooktablet MACH_OMAP4_NOOKTABLET OMAP4_NOOKTABLET 4000
++aalto MACH_AALTO AALTO 4001
++metro MACH_METRO METRO 4002
++cydm3730 MACH_CYDM3730 CYDM3730 4003
++tqma53 MACH_TQMA53 TQMA53 4004
++msm7627a_qrd3 MACH_MSM7627A_QRD3 MSM7627A_QRD3 4005
++mx28_canby MACH_MX28_CANBY MX28_CANBY 4006
++tiger MACH_TIGER TIGER 4007
++pcats_9307_type_a MACH_PCATS_9307_TYPE_A PCATS_9307_TYPE_A 4008
++pcats_9307_type_o MACH_PCATS_9307_TYPE_O PCATS_9307_TYPE_O 4009
++pcats_9307_type_r MACH_PCATS_9307_TYPE_R PCATS_9307_TYPE_R 4010
++streamplug MACH_STREAMPLUG STREAMPLUG 4011
++icechicken_dev MACH_ICECHICKEN_DEV ICECHICKEN_DEV 4012
++hedgehog MACH_HEDGEHOG HEDGEHOG 4013
++yusend_obc MACH_YUSEND_OBC YUSEND_OBC 4014
++imxninja MACH_IMXNINJA IMXNINJA 4015
++omap4_jarod MACH_OMAP4_JAROD OMAP4_JAROD 4016
++eco5_pk MACH_ECO5_PK ECO5_PK 4017
++qj2440 MACH_QJ2440 QJ2440 4018
++mx6q_mercury MACH_MX6Q_MERCURY MX6Q_MERCURY 4019
++cm6810 MACH_CM6810 CM6810 4020
++omap4_torpedo MACH_OMAP4_TORPEDO OMAP4_TORPEDO 4021
++nsa310 MACH_NSA310 NSA310 4022
++tmx536 MACH_TMX536 TMX536 4023
++ktt20 MACH_KTT20 KTT20 4024
++dragonix MACH_DRAGONIX DRAGONIX 4025
++lungching MACH_LUNGCHING LUNGCHING 4026
++bulogics MACH_BULOGICS BULOGICS 4027
++mx535_sx MACH_MX535_SX MX535_SX 4028
++ngui3250 MACH_NGUI3250 NGUI3250 4029
++salutec_dac MACH_SALUTEC_DAC SALUTEC_DAC 4030
++loco MACH_LOCO LOCO 4031
++ctera_plug_usi MACH_CTERA_PLUG_USI CTERA_PLUG_USI 4032
++scepter MACH_SCEPTER SCEPTER 4033
++sga MACH_SGA SGA 4034
++p_81_j5 MACH_P_81_J5 P_81_J5 4035
++p_81_o4 MACH_P_81_O4 P_81_O4 4036
++msm8625_surf MACH_MSM8625_SURF MSM8625_SURF 4037
++carallon_shark MACH_CARALLON_SHARK CARALLON_SHARK 4038
++lsgc_icam MACH_LSGCICAM LSGCICAM 4039
++ordog MACH_ORDOG ORDOG 4040
++puente_io MACH_PUENTE_IO PUENTE_IO 4041
++msm8625_evb MACH_MSM8625_EVB MSM8625_EVB 4042
++ev_am1707 MACH_EV_AM1707 EV_AM1707 4043
++ev_am1707e2 MACH_EV_AM1707E2 EV_AM1707E2 4044
++ev_am3517e2 MACH_EV_AM3517E2 EV_AM3517E2 4045
++calabria MACH_CALABRIA CALABRIA 4046
++ev_imx287 MACH_EV_IMX287 EV_IMX287 4047
++erau MACH_ERAU ERAU 4048
++sichuan MACH_SICHUAN SICHUAN 4049
++sopdm MACH_WIRMA3 WIRMA3 4050
++davinci_da850 MACH_DAVINCI_DA850 DAVINCI_DA850 4051
++omap138_trunarc MACH_OMAP138_TRUNARC OMAP138_TRUNARC 4052
++bcm4761 MACH_BCM4761 BCM4761 4053
++picasso_e2 MACH_PICASSO_E2 PICASSO_E2 4054
++picasso_mf MACH_PICASSO_MF PICASSO_MF 4055
++miro MACH_MIRO MIRO 4056
++at91sam9g20ewon3 MACH_AT91SAM9G20EWON3 AT91SAM9G20EWON3 4057
++yoyo MACH_YOYO YOYO 4058
++windjkl MACH_WINDJKL WINDJKL 4059
++monarudo MACH_MONARUDO MONARUDO 4060
++batan MACH_BATAN BATAN 4061
++tadao MACH_TADAO TADAO 4062
++baso MACH_BASO BASO 4063
++mahon MACH_MAHON MAHON 4064
++villec2 MACH_VILLEC2 VILLEC2 4065
++asi1230 MACH_ASI1230 ASI1230 4066
++alaska MACH_ALASKA ALASKA 4067
++swarco_shdsl2 MACH_SWARCO_SHDSL2 SWARCO_SHDSL2 4068
++oxrtu MACH_OXRTU OXRTU 4069
++omap5_panda MACH_OMAP5_PANDA OMAP5_PANDA 4070
++imx286 MACH_MX28XDI MX28XDI 4071
++c8000 MACH_C8000 C8000 4072
++bje_display3_5 MACH_BJE_DISPLAY3_5 BJE_DISPLAY3_5 4073
++picomod7 MACH_PICOMOD7 PICOMOD7 4074
++picocom5 MACH_PICOCOM5 PICOCOM5 4075
++qblissa8 MACH_QBLISSA8 QBLISSA8 4076
++armstonea8 MACH_ARMSTONEA8 ARMSTONEA8 4077
++netdcu14 MACH_NETDCU14 NETDCU14 4078
++at91sam9x5_epiphan MACH_AT91SAM9X5_EPIPHAN AT91SAM9X5_EPIPHAN 4079
++p2u MACH_P2U P2U 4080
++doris MACH_DORIS DORIS 4081
++j49 MACH_J49 J49 4082
++vdss2e MACH_VDSS2E VDSS2E 4083
++vc300 MACH_VC300 VC300 4084
++ns115_pad_test MACH_NS115_PAD_TEST NS115_PAD_TEST 4085
++ns115_pad_ref MACH_NS115_PAD_REF NS115_PAD_REF 4086
++ns115_phone_test MACH_NS115_PHONE_TEST NS115_PHONE_TEST 4087
++ns115_phone_ref MACH_NS115_PHONE_REF NS115_PHONE_REF 4088
++golfc MACH_GOLFC GOLFC 4089
++xerox_olympus MACH_XEROX_OLYMPUS XEROX_OLYMPUS 4090
++mx6sl_arm2 MACH_MX6SL_ARM2 MX6SL_ARM2 4091
++csb1701_csb1726 MACH_CSB1701_CSB1726 CSB1701_CSB1726 4092
++at91sam9xeek MACH_AT91SAM9XEEK AT91SAM9XEEK 4093
++ebv210 MACH_EBV210 EBV210 4094
++msm7627a_qrd7 MACH_MSM7627A_QRD7 MSM7627A_QRD7 4095
++svthin MACH_SVTHIN SVTHIN 4096
++duovero MACH_DUOVERO DUOVERO 4097
++chupacabra MACH_CHUPACABRA CHUPACABRA 4098
++scorpion MACH_SCORPION SCORPION 4099
++davinci_he_hmi10 MACH_DAVINCI_HE_HMI10 DAVINCI_HE_HMI10 4100
++topkick MACH_TOPKICK TOPKICK 4101
++m3_auguestrush MACH_M3_AUGUESTRUSH M3_AUGUESTRUSH 4102
++ipc335x MACH_IPC335X IPC335X 4103
++sun4i MACH_SUN4I SUN4I 4104
++imx233_olinuxino MACH_IMX233_OLINUXINO IMX233_OLINUXINO 4105
++k2_wl MACH_K2_WL K2_WL 4106
++k2_ul MACH_K2_UL K2_UL 4107
++k2_cl MACH_K2_CL K2_CL 4108
++minbari_w MACH_MINBARI_W MINBARI_W 4109
++minbari_m MACH_MINBARI_M MINBARI_M 4110
++k035 MACH_K035 K035 4111
++ariel MACH_ARIEL ARIEL 4112
++arielsaarc MACH_ARIELSAARC ARIELSAARC 4113
++arieldkb MACH_ARIELDKB ARIELDKB 4114
++armadillo810 MACH_ARMADILLO810 ARMADILLO810 4115
++tam335x MACH_TAM335X TAM335X 4116
++grouper MACH_GROUPER GROUPER 4117
++mpcsa21_9g20 MACH_MPCSA21_9G20 MPCSA21_9G20 4118
++m6u_cpu MACH_M6U_CPU M6U_CPU 4119
++davinci_dp10 MACH_DAVINCI_DP10 DAVINCI_DP10 4120
++ginkgo MACH_GINKGO GINKGO 4121
++cgt_qmx6 MACH_CGT_QMX6 CGT_QMX6 4122
++profpga MACH_PROFPGA PROFPGA 4123
++acfx100oc MACH_ACFX100OC ACFX100OC 4124
++acfx100nb MACH_ACFX100NB ACFX100NB 4125
++capricorn MACH_CAPRICORN CAPRICORN 4126
++pisces MACH_PISCES PISCES 4127
++aries MACH_ARIES ARIES 4128
++cancer MACH_CANCER CANCER 4129
++leo MACH_LEO LEO 4130
++virgo MACH_VIRGO VIRGO 4131
++sagittarius MACH_SAGITTARIUS SAGITTARIUS 4132
++devil MACH_DEVIL DEVIL 4133
++ballantines MACH_BALLANTINES BALLANTINES 4134
++omap3_procerusvpu MACH_OMAP3_PROCERUSVPU OMAP3_PROCERUSVPU 4135
++my27 MACH_MY27 MY27 4136
++sun6i MACH_SUN6I SUN6I 4137
++sun5i MACH_SUN5I SUN5I 4138
++mx512_mx MACH_MX512_MX MX512_MX 4139
++kzm9g MACH_KZM9G KZM9G 4140
++vdstbn MACH_VDSTBN VDSTBN 4141
++cfa10036 MACH_CFA10036 CFA10036 4142
++cfa10049 MACH_CFA10049 CFA10049 4143
++pcm051 MACH_PCM051 PCM051 4144
++vybrid_vf7xx MACH_VYBRID_VF7XX VYBRID_VF7XX 4145
++vybrid_vf6xx MACH_VYBRID_VF6XX VYBRID_VF6XX 4146
++vybrid_vf5xx MACH_VYBRID_VF5XX VYBRID_VF5XX 4147
++vybrid_vf4xx MACH_VYBRID_VF4XX VYBRID_VF4XX 4148
++aria_g25 MACH_ARIA_G25 ARIA_G25 4149
++bcm21553 MACH_BCM21553 BCM21553 4150
++smdk5410 MACH_SMDK5410 SMDK5410 4151
++lpc18xx MACH_LPC18XX LPC18XX 4152
++oratisparty MACH_ORATISPARTY ORATISPARTY 4153
++qseven MACH_QSEVEN QSEVEN 4154
++gmv_generic MACH_GMV_GENERIC GMV_GENERIC 4155
++th_link_eth MACH_TH_LINK_ETH TH_LINK_ETH 4156
++tn_muninn MACH_TN_MUNINN TN_MUNINN 4157
++rampage MACH_RAMPAGE RAMPAGE 4158
++visstrim_mv10 MACH_VISSTRIM_MV10 VISSTRIM_MV10 4159
diff --git a/target/linux/generic/patches-3.3/992-mpcore_wdt_fix_watchdog_counter_loading.patch b/target/linux/generic/patches-3.3/992-mpcore_wdt_fix_watchdog_counter_loading.patch
new file mode 100644
index 000000000..fb16e2aa6
--- /dev/null
+++ b/target/linux/generic/patches-3.3/992-mpcore_wdt_fix_watchdog_counter_loading.patch
@@ -0,0 +1,64 @@
+Although the commit "98af057092f8f0dabe63c5df08adc2bbfbddb1d2
+ ARM: 6126/1: ARM mpcore_wdt: fix build failure and other fixes"
+resolved long-standing mpcore_wdt driver build problems, it
+introduced an error in the relationship between the MPcore watchdog
+timer clock rate and mpcore_margin, "MPcore timer margin in seconds",
+such that watchdog timeouts are now arbitrary rather than the number
+of seconds specified by mpcore_margin.
+
+This change restores mpcore_wdt_keepalive() to its equivalent
+implementation prior to commit 98af057 such that watchdog timeouts now
+occur as specified by mpcore_margin.
+
+The variable 'mpcore_timer_rate', which caused that build failure, was
+replaced by 'twd_timer_rate'. An exported function is added to obtain the
+'twd_timer_rate' value in the mpcore_wdt driver.
+
+MPCORE_WATCHDOG, needed to build 'mpcore_wdt', already depends on
+HAVE_ARM_TWD, needed to build 'smp_twd', so from the point of view of
+the 'mpcore_wdt' driver the exported function will always exist.
+
+Signed-off-by: Valentine Barshak <vbarshak@mvista.com>
+Signed-off-by: Vitaly Kuzmichev <vkuzmichev@mvista.com>
+---
+
+ arch/arm/include/asm/smp_twd.h | 1 +
+ arch/arm/kernel/smp_twd.c | 7 +++++++
+ drivers/watchdog/mpcore_wdt.c | 4 +---
+ 3 files changed, 9 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/include/asm/smp_twd.h
++++ b/arch/arm/include/asm/smp_twd.h
+@@ -24,5 +24,6 @@ extern void __iomem *twd_base;
+
+ void twd_timer_setup(struct clock_event_device *);
+ void twd_timer_stop(struct clock_event_device *);
++unsigned long twd_timer_get_rate(void);
+
+ #endif
+--- a/arch/arm/kernel/smp_twd.c
++++ b/arch/arm/kernel/smp_twd.c
+@@ -268,3 +268,10 @@ void __cpuinit twd_timer_setup(struct cl
+ 0xf, 0xffffffff);
+ enable_percpu_irq(clk->irq, 0);
+ }
++
++/* Needed by mpcore_wdt */
++unsigned long twd_timer_get_rate(void)
++{
++ return twd_timer_rate;
++}
++EXPORT_SYMBOL_GPL(twd_timer_get_rate);
+--- a/drivers/watchdog/mpcore_wdt.c
++++ b/drivers/watchdog/mpcore_wdt.c
+@@ -99,9 +99,7 @@ static void mpcore_wdt_keepalive(struct
+
+ spin_lock(&wdt_lock);
+ /* Assume prescale is set to 256 */
+- count = __raw_readl(wdt->base + TWD_WDOG_COUNTER);
+- count = (0xFFFFFFFFU - count) * (HZ / 5);
+- count = (count / 256) * mpcore_margin;
++ count = (twd_timer_get_rate() / 256) * mpcore_margin;
+
+ /* Reload the counter */
+ writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD);
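A quick sanity check of the restored arithmetic (a minimal userspace sketch, not part of the patch above; the 250 MHz TWD clock and the 60 second margin are assumed example values, whereas the driver obtains the real rate from twd_timer_get_rate() at runtime):

#include <stdio.h>

int main(void)
{
	/* Example figures only: the actual TWD clock rate is board specific */
	unsigned long twd_timer_rate = 250000000UL; /* assumed periph clock, Hz */
	unsigned int mpcore_margin = 60;            /* watchdog margin, seconds */

	/* With the fixed prescaler of 256, one watchdog tick lasts 256/rate
	 * seconds, so this reload value lets the counter run for roughly
	 * mpcore_margin seconds before the watchdog fires. */
	unsigned long count = (twd_timer_rate / 256) * mpcore_margin;

	printf("TWD_WDOG_LOAD reload value: %lu\n", count);
	return 0;
}

At the assumed 250 MHz this yields 58593720 ticks, i.e. close to 60 s, instead of the arbitrary value produced by the pre-patch formula that scaled the current counter reading by HZ.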
diff --git a/target/linux/generic/patches-3.3/993-mpcore_wdt_fix_wdioc_setoptions_handling.patch b/target/linux/generic/patches-3.3/993-mpcore_wdt_fix_wdioc_setoptions_handling.patch
new file mode 100644
index 000000000..fa261ce64
--- /dev/null
+++ b/target/linux/generic/patches-3.3/993-mpcore_wdt_fix_wdioc_setoptions_handling.patch
@@ -0,0 +1,29 @@
+According to the include/linux/watchdog.h WDIOC_SETOPTIONS is
+classified as 'read from device' ioctl call:
+ #define WDIOC_SETOPTIONS _IOR(WATCHDOG_IOCTL_BASE, 4, int)
+
+However, the driver 'mpcore_wdt' performs 'copy_from_user' only if
+_IOC_WRITE is set, so the local variable 'uarg', which is used in
+WDIOC_SETOPTIONS handling, remains uninitialized.
+
+The proper way to fix this is to bind WDIOC_SETOPTIONS to _IOW,
+but this will break compatibility.
+So an additional condition for performing 'copy_from_user' is added.
+
+Signed-off-by: Vitaly Kuzmichev <vkuzmichev@mvista.com>
+---
+ drivers/watchdog/mpcore_wdt.c | 3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+--- a/drivers/watchdog/mpcore_wdt.c
++++ b/drivers/watchdog/mpcore_wdt.c
+@@ -233,7 +233,8 @@ static long mpcore_wdt_ioctl(struct file
+ if (_IOC_DIR(cmd) && _IOC_SIZE(cmd) > sizeof(uarg))
+ return -ENOTTY;
+
+- if (_IOC_DIR(cmd) & _IOC_WRITE) {
++ if ((_IOC_DIR(cmd) & _IOC_WRITE)
++ || cmd == WDIOC_SETOPTIONS) {
+ ret = copy_from_user(&uarg, (void __user *)arg, _IOC_SIZE(cmd));
+ if (ret)
+ return -EFAULT;
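The direction check that the patch extends can be reproduced from userspace (a hedged sketch, assuming the build host's <linux/watchdog.h> carries the same _IOR definition of WDIOC_SETOPTIONS; it only prints the direction bits encoded in that command):

#include <stdio.h>
#include <linux/watchdog.h>	/* WDIOC_SETOPTIONS plus the _IOC_* helpers */

int main(void)
{
	unsigned int dir = _IOC_DIR(WDIOC_SETOPTIONS);

	/* read=1 write=0 is expected: the command is encoded with _IOR, so a
	 * test for _IOC_WRITE alone never triggers copy_from_user() for it. */
	printf("WDIOC_SETOPTIONS direction bits: read=%u write=%u\n",
	       !!(dir & _IOC_READ), !!(dir & _IOC_WRITE));
	return 0;
}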
diff --git a/target/linux/generic/patches-3.3/994-mpcore_wdt_fix_timer_mode_setup.patch b/target/linux/generic/patches-3.3/994-mpcore_wdt_fix_timer_mode_setup.patch
new file mode 100644
index 000000000..009092376
--- /dev/null
+++ b/target/linux/generic/patches-3.3/994-mpcore_wdt_fix_timer_mode_setup.patch
@@ -0,0 +1,57 @@
+Allow the watchdog to set its interrupt as pending when it is configured
+for timer mode (in other words, allow it to emit an interrupt).
+Also add macros for all Watchdog Control Register flags.
+
+Signed-off-by: Vitaly Kuzmichev <vkuzmichev@mvista.com>
+---
+ arch/arm/include/asm/smp_twd.h | 6 ++++++
+ drivers/watchdog/mpcore_wdt.c | 15 +++++++++++----
+ 2 files changed, 17 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/include/asm/smp_twd.h
++++ b/arch/arm/include/asm/smp_twd.h
+@@ -18,6 +18,12 @@
+ #define TWD_TIMER_CONTROL_PERIODIC (1 << 1)
+ #define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2)
+
++#define TWD_WDOG_CONTROL_ENABLE (1 << 0)
++#define TWD_WDOG_CONTROL_PERIODIC (1 << 1)
++#define TWD_WDOG_CONTROL_IT_ENABLE (1 << 2)
++#define TWD_WDOG_CONTROL_TIMER_MODE (0 << 3)
++#define TWD_WDOG_CONTROL_WATCHDOG_MODE (1 << 3)
++
+ struct clock_event_device;
+
+ extern void __iomem *twd_base;
+--- a/drivers/watchdog/mpcore_wdt.c
++++ b/drivers/watchdog/mpcore_wdt.c
+@@ -118,18 +118,25 @@ static void mpcore_wdt_stop(struct mpcor
+
+ static void mpcore_wdt_start(struct mpcore_wdt *wdt)
+ {
++ u32 mode;
++
+ dev_printk(KERN_INFO, wdt->dev, "enabling watchdog.\n");
+
+ /* This loads the count register but does NOT start the count yet */
+ mpcore_wdt_keepalive(wdt);
+
++ /* Setup watchdog - prescale=256, enable=1 */
++ mode = (255 << 8) | TWD_WDOG_CONTROL_ENABLE;
++
+ if (mpcore_noboot) {
+- /* Enable watchdog - prescale=256, watchdog mode=0, enable=1 */
+- writel(0x0000FF01, wdt->base + TWD_WDOG_CONTROL);
++ /* timer mode, send interrupt */
++ mode |= TWD_WDOG_CONTROL_TIMER_MODE |
++ TWD_WDOG_CONTROL_IT_ENABLE;
+ } else {
+- /* Enable watchdog - prescale=256, watchdog mode=1, enable=1 */
+- writel(0x0000FF09, wdt->base + TWD_WDOG_CONTROL);
++ /* watchdog mode */
++ mode |= TWD_WDOG_CONTROL_WATCHDOG_MODE;
+ }
++ writel(mode, wdt->base + TWD_WDOG_CONTROL);
+ }
+
+ static int mpcore_wdt_set_heartbeat(int t)